mirror of git://sourceware.org/git/glibc.git
powerpc: Fix missing barriers in atomic_exchange_and_add_{acq,rel}
On powerpc, atomic_exchange_and_add is implemented without any barriers. This patch adds the missing instructions and memory barriers for acquire and release semantics.
This commit is contained in:
parent
cdcb42d7f7
commit
704f794714
16
ChangeLog
16
ChangeLog
|
@ -1,3 +1,19 @@
|
|||
2014-11-26 Adhemerval Zanella <azanella@linux.ibm.com>
|
||||
|
||||
* csu/tst-atomic.c (do_test): Add atomic_exchange_and_add_{acq,rel}
|
||||
tests.
|
||||
* sysdeps/powerpc/bits/atomic.h
|
||||
(__arch_atomic_exchange_and_add_32_acq): Add definition.
|
||||
(__arch_atomic_exchange_and_add_32_rel): Likewise.
|
||||
(atomic_exchange_and_add_acq): Likewise.
|
||||
(atomic_exchange_and_add_rel): Likewise.
|
||||
* sysdeps/powerpc/powerpc32/bits/atomic.h
|
||||
(__arch_atomic_exchange_and_add_64_acq): Add definition.
|
||||
(__arch_atomic_exchange_and_add_64_rel): Likewise.
|
||||
* sysdeps/powerpc/powerpc64/bits/atomic.h
|
||||
(__arch_atomic_exchange_and_add_64_acq): Add definition.
|
||||
(__arch_atomic_exchange_and_add_64_rel): Likewise.
|
||||
|
||||
2014-11-26 Torvald Riegel <triegel@redhat.com>
|
||||
|
||||
* nptl/tpp.c (__init_sched_fifo_prio, __pthread_tpp_change_priority):
|
||||
|
|
|
@ -113,6 +113,22 @@ do_test (void)
|
|||
ret = 1;
|
||||
}
|
||||
|
||||
mem = 2;
|
||||
if (atomic_exchange_and_add_acq (&mem, 11) != 2
|
||||
|| mem != 13)
|
||||
{
|
||||
puts ("atomic_exchange_and_add test failed");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
mem = 2;
|
||||
if (atomic_exchange_and_add_rel (&mem, 11) != 2
|
||||
|| mem != 13)
|
||||
{
|
||||
puts ("atomic_exchange_and_add test failed");
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
mem = -21;
|
||||
atomic_add (&mem, 22);
|
||||
if (mem != 1)
|
||||
|
|
|
@ -152,6 +152,34 @@ typedef uintmax_t uatomic_max_t;
|
|||
__val; \
|
||||
})
|
||||
|
||||
/* Atomically add VALUE to the 32-bit word *MEM and return the value *MEM
   held before the addition, with acquire semantics: __ARCH_ACQ_INSTR after
   the successful store-conditional keeps later memory accesses from being
   reordered before this operation.  The lwarx/stwcx. pair forms the usual
   load-reserve/store-conditional retry loop; bne- loops back whenever the
   reservation was lost.  MUTEX_HINT_ACQ and __ARCH_ACQ_INSTR are defined
   elsewhere in this header -- presumably the lock-acquisition hint and the
   acquire barrier instruction for this port (TODO: confirm there).  */
#define __arch_atomic_exchange_and_add_32_acq(mem, value) \
  ({ \
    __typeof (*mem) __val, __tmp; \
    __asm __volatile ("1: lwarx %0,0,%3" MUTEX_HINT_ACQ "\n" \
                      " add %1,%0,%4\n" \
                      " stwcx. %1,0,%3\n" \
                      " bne- 1b\n" \
                      __ARCH_ACQ_INSTR \
                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                      : "b" (mem), "r" (value), "m" (*mem) \
                      : "cr0", "memory"); \
    __val; \
  })
|
||||
|
||||
/* Atomically add VALUE to the 32-bit word *MEM and return the value *MEM
   held before the addition, with release semantics: __ARCH_REL_INSTR ahead
   of the load-reserve keeps earlier memory accesses from being reordered
   after this operation (no trailing barrier is needed for release).
   MUTEX_HINT_REL and __ARCH_REL_INSTR are defined elsewhere in this
   header.  */
#define __arch_atomic_exchange_and_add_32_rel(mem, value) \
  ({ \
    __typeof (*mem) __val, __tmp; \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
                      "1: lwarx %0,0,%3" MUTEX_HINT_REL "\n" \
                      " add %1,%0,%4\n" \
                      " stwcx. %1,0,%3\n" \
                      " bne- 1b" \
                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                      : "b" (mem), "r" (value), "m" (*mem) \
                      : "cr0", "memory"); \
    __val; \
  })
|
||||
|
||||
#define __arch_atomic_increment_val_32(mem) \
|
||||
({ \
|
||||
__typeof (*(mem)) __val; \
|
||||
|
@ -252,6 +280,28 @@ typedef uintmax_t uatomic_max_t;
|
|||
abort (); \
|
||||
__result; \
|
||||
})
|
||||
/* Generic entry point: atomically add VALUE to *MEM with acquire
   semantics and return the old value of *MEM.  Dispatches on the operand
   size to the 32- or 64-bit implementation; any other size aborts at run
   time.  (On 32-bit powerpc the 64-bit variant is itself an abort stub,
   defined in the powerpc32 header.)  */
#define atomic_exchange_and_add_acq(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_atomic_exchange_and_add_32_acq (mem, value); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_atomic_exchange_and_add_64_acq (mem, value); \
    else \
      abort (); \
    __result; \
  })
|
||||
/* Generic entry point: atomically add VALUE to *MEM with release
   semantics and return the old value of *MEM.  Dispatches on the operand
   size to the 32- or 64-bit implementation; any other size aborts at run
   time.  */
#define atomic_exchange_and_add_rel(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_atomic_exchange_and_add_32_rel (mem, value); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_atomic_exchange_and_add_64_rel (mem, value); \
    else \
      abort (); \
    __result; \
  })
|
||||
|
||||
#define atomic_increment_val(mem) \
|
||||
({ \
|
||||
|
|
|
@ -98,6 +98,12 @@
|
|||
/* 64-bit atomic exchange-and-add stub for the 32-bit port: aborts at run
   time if ever reached.  The size dispatch in the generic macros only
   selects this when sizeof (*mem) == 8, which this port does not
   support.  */
#define __arch_atomic_exchange_and_add_64(mem, value) \
  ({ abort (); (*mem) = (value); })
|
||||
|
||||
/* 64-bit acquire variant stub for the 32-bit port: aborts at run time if
   ever reached (no 64-bit atomics here); the assignment after abort ()
   only exists to give the statement expression the right type.  */
#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
  ({ abort (); (*mem) = (value); })
|
||||
|
||||
/* 64-bit release variant stub for the 32-bit port: aborts at run time if
   ever reached; the assignment after abort () only exists to give the
   statement expression the right type.  */
#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
  ({ abort (); (*mem) = (value); })
|
||||
|
||||
/* 64-bit atomic increment stub for the 32-bit port: aborts at run time if
   ever reached; the increment after abort () only exists to give the
   statement expression the right type.  */
#define __arch_atomic_increment_val_64(mem) \
  ({ abort (); (*mem)++; })
|
||||
|
||||
|
|
|
@ -186,6 +186,34 @@
|
|||
__val; \
|
||||
})
|
||||
|
||||
/* Atomically add VALUE to the 64-bit word *MEM and return the value *MEM
   held before the addition, with acquire semantics: __ARCH_ACQ_INSTR
   after the successful store-conditional keeps later memory accesses from
   being reordered before this operation.  Identical to the 32-bit acquire
   variant except for the doubleword ldarx/stdcx. instructions.  */
#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
  ({ \
    __typeof (*mem) __val, __tmp; \
    __asm __volatile ("1: ldarx %0,0,%3" MUTEX_HINT_ACQ "\n" \
                      " add %1,%0,%4\n" \
                      " stdcx. %1,0,%3\n" \
                      " bne- 1b\n" \
                      __ARCH_ACQ_INSTR \
                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                      : "b" (mem), "r" (value), "m" (*mem) \
                      : "cr0", "memory"); \
    __val; \
  })
|
||||
|
||||
/* Atomically add VALUE to the 64-bit word *MEM and return the value *MEM
   held before the addition, with release semantics: __ARCH_REL_INSTR
   ahead of the load-reserve keeps earlier memory accesses from being
   reordered after this operation.  Identical to the 32-bit release
   variant except for the doubleword ldarx/stdcx. instructions.  */
#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
  ({ \
    __typeof (*mem) __val, __tmp; \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
                      "1: ldarx %0,0,%3" MUTEX_HINT_REL "\n" \
                      " add %1,%0,%4\n" \
                      " stdcx. %1,0,%3\n" \
                      " bne- 1b" \
                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
                      : "b" (mem), "r" (value), "m" (*mem) \
                      : "cr0", "memory"); \
    __val; \
  })
|
||||
|
||||
#define __arch_atomic_increment_val_64(mem) \
|
||||
({ \
|
||||
__typeof (*(mem)) __val; \
|
||||
|
|
Loading…
Reference in New Issue