Skip to content

Commit 5602d3b

Browse files
committed
atomics: Remove cmpset_64 on IA32
The recent changes to remove non-inline atomics have caused a cascade of issues with cmpset_64 on IA32. cmpxchg8 requires the use of a bunch of registers (2 for every operand, 3 operands), and one of them is ebx, which is used by the compiler to do shared library things. Some compilers don't deal well with ebx being clobbered (I'm looking at you, gcc 4.1). Rather than continue trying to fight, remove cmpset_64 from the supported atomic operations on IA32. Other 32 bit platforms (MIPS32, SPARC32, ARM, etc.) already don't support a 64 bit compare-and-swap, so while this might slightly reduce performance, it will at least be correct. Signed-off-by: Brian Barrett <bbarrett@amazon.com>
1 parent afe7f69 commit 5602d3b

File tree

1 file changed

+0
-46
lines changed

1 file changed

+0
-46
lines changed

opal/include/opal/sys/ia32/atomic.h

Lines changed: 0 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -46,11 +46,6 @@
4646
#define OPAL_HAVE_ATOMIC_ADD_32 1
4747
#define OPAL_HAVE_ATOMIC_SUB_32 1
4848

49-
#define OPAL_HAVE_ATOMIC_CMPSET_64 1
50-
51-
#undef OPAL_HAVE_INLINE_ATOMIC_CMPSET_64
52-
#define OPAL_HAVE_INLINE_ATOMIC_CMPSET_64 0
53-
5449
/**********************************************************************
5550
*
5651
* Memory Barriers
@@ -111,47 +106,6 @@ static inline int opal_atomic_cmpset_32(volatile int32_t *addr,
111106

112107
#if OPAL_GCC_INLINE_ASSEMBLY
113108

114-
#ifndef ll_low /* GLIBC provides these somewhere, so protect */
115-
#define ll_low(x) *(((unsigned int*)&(x))+0)
116-
#define ll_high(x) *(((unsigned int*)&(x))+1)
117-
#endif
118-
119-
/* On Linux the EBX register is used by the shared libraries
120-
* to keep the global offset. In same time this register is
121-
* required by the cmpxchg8b instruction (as an input parameter).
122-
* This conflict force us to save the EBX before the cmpxchg8b
123-
* and to restore it afterward.
124-
*/
125-
static inline int opal_atomic_cmpset_64(volatile int64_t *addr,
126-
int64_t oldval,
127-
int64_t newval)
128-
{
129-
/*
130-
* Compare EDX:EAX with m64. If equal, set ZF and load ECX:EBX into
131-
* m64. Else, clear ZF and load m64 into EDX:EAX.
132-
*/
133-
unsigned char ret;
134-
135-
__asm__ __volatile__(
136-
"push %%ebx \n\t"
137-
"movl %4, %%ebx \n\t"
138-
SMPLOCK "cmpxchg8b (%1) \n\t"
139-
"sete %0 \n\t"
140-
"pop %%ebx \n\t"
141-
: "=qm"(ret)
142-
: "D"(addr), "a"(ll_low(oldval)), "d"(ll_high(oldval)),
143-
"r"(ll_low(newval)), "c"(ll_high(newval))
144-
: "cc", "memory", "ebx");
145-
return (int) ret;
146-
}
147-
148-
#endif /* OPAL_GCC_INLINE_ASSEMBLY */
149-
150-
#define opal_atomic_cmpset_acq_64 opal_atomic_cmpset_64
151-
#define opal_atomic_cmpset_rel_64 opal_atomic_cmpset_64
152-
153-
#if OPAL_GCC_INLINE_ASSEMBLY
154-
155109
#define OPAL_HAVE_ATOMIC_SWAP_32 1
156110

157111
static inline int32_t opal_atomic_swap_32( volatile int32_t *addr,

0 commit comments

Comments (0)