mirror of git://sourceware.org/git/glibc.git
* sysdeps/powerpc/bits/atomic.h
[!MUTEX_HINT_ACQ]: Define MUTEX_HINT_ACQ.
[!MUTEX_HINT_REL]: Define MUTEX_HINT_REL.
(__arch_compare_and_exchange_val_32_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_compare_and_exchange_val_32_rel): Add MUTEX_HINT_REL to lwarx.
(__arch_atomic_exchange_32_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_atomic_exchange_32_rel): Add MUTEX_HINT_REL to lwarx.
* sysdeps/powerpc/powerpc32/bits/atomic.h [_ARCH_PWR6 || _ARCH_PWR6X]:
Define MUTEX_HINT_ACQ as ",1" and MUTEX_HINT_REL as ",0".
(__arch_compare_and_exchange_bool_32_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_compare_and_exchange_bool_32_rel): Add MUTEX_HINT_REL to lwarx.
* sysdeps/powerpc/powerpc64/bits/atomic.h [_ARCH_PWR6 || _ARCH_PWR6X]:
Define MUTEX_HINT_ACQ as ",1" and MUTEX_HINT_REL as ",0".
(__arch_compare_and_exchange_bool_32_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_compare_and_exchange_bool_32_rel): Add MUTEX_HINT_REL to lwarx.
(__arch_compare_and_exchange_bool_64_acq): Add MUTEX_HINT_ACQ to ldarx.
(__arch_compare_and_exchange_bool_64_rel): Add MUTEX_HINT_REL to ldarx.
(__arch_compare_and_exchange_val_64_acq): Add MUTEX_HINT_ACQ to ldarx.
(__arch_compare_and_exchange_val_64_rel): Add MUTEX_HINT_REL to ldarx.
(__arch_atomic_exchange_64_acq): Add MUTEX_HINT_ACQ to ldarx.
(__arch_atomic_exchange_64_rel): Add MUTEX_HINT_REL to ldarx.

2007-03-20  Jakub Jelinek  <jakub@redhat.com>
commit fa6e3bc38a
parent c7693af7ef

 ChangeLog | 26
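In effect, the patch splices a POWER6 "mutex hint" operand onto each load-and-reserve instruction through C string-literal concatenation: on POWER6 the asm template becomes "lwarx %0,0,%2,1", while on older processors the hint macros expand to nothing and the instruction is unchanged. A minimal sketch of the pattern, assuming GCC inline asm and the macro names this patch introduces; the try_lock_hinted helper is hypothetical, not part of the commit:

#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
# define MUTEX_HINT_ACQ ",1"	/* more stores to the lock line expected */
# define MUTEX_HINT_REL ",0"	/* no further stores expected */
#else
# define MUTEX_HINT_ACQ		/* pre-POWER6: no hint operand exists */
# define MUTEX_HINT_REL
#endif

/* Hypothetical helper: take a lock word with the acquire hint.
   Returns the old value; 0 means the lock was acquired.  */
static inline int
try_lock_hinted (volatile int *lock)
{
  int old;
  __asm __volatile ("1:	lwarx	%0,0,%2" MUTEX_HINT_ACQ "\n"
		    "	cmpwi	0,%0,0\n"	/* already held?  */
		    "	bne	2f\n"
		    "	stwcx.	%3,0,%2\n"	/* try to store 1 */
		    "	bne-	1b\n"		/* reservation lost */
		    "2:	isync"			/* acquire barrier */
		    : "=&r" (old), "=m" (*lock)
		    : "b" (lock), "r" (1), "m" (*lock)
		    : "cr0", "memory");
  return old;
}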
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,28 @@
-2007-03-20  Jakub Jelinek  <jakub@redhat.com>
+2007-03-19  Steven Munroe  <sjmunroe@us.ibm.com>
+
+	* sysdeps/powerpc/bits/atomic.h
+	[!MUTEX_HINT_ACQ]: Define MUTEX_HINT_ACQ.
+	[!MUTEX_HINT_REL]: Define MUTEX_HINT_REL.
+	(__arch_compare_and_exchange_val_32_acq): Add MUTEX_HINT_ACQ to lwarx.
+	(__arch_compare_and_exchange_val_32_rel): Add MUTEX_HINT_REL to lwarx.
+	(__arch_atomic_exchange_32_acq): Add MUTEX_HINT_ACQ to lwarx.
+	(__arch_atomic_exchange_32_rel): Add MUTEX_HINT_REL to lwarx.
+	* sysdeps/powerpc/powerpc32/bits/atomic.h [_ARCH_PWR6 || _ARCH_PWR6X]:
+	Define MUTEX_HINT_ACQ as ",1" and MUTEX_HINT_REL as ",0".
+	(__arch_compare_and_exchange_bool_32_acq): Add MUTEX_HINT_ACQ to lwarx.
+	(__arch_compare_and_exchange_bool_32_rel): Add MUTEX_HINT_REL to lwarx.
+	* sysdeps/powerpc/powerpc64/bits/atomic.h [_ARCH_PWR6 || _ARCH_PWR6X]:
+	Define MUTEX_HINT_ACQ as ",1" and MUTEX_HINT_REL as ",0".
+	(__arch_compare_and_exchange_bool_32_acq): Add MUTEX_HINT_ACQ to lwarx.
+	(__arch_compare_and_exchange_bool_32_rel): Add MUTEX_HINT_REL to lwarx.
+	(__arch_compare_and_exchange_bool_64_acq): Add MUTEX_HINT_ACQ to ldarx.
+	(__arch_compare_and_exchange_bool_64_rel): Add MUTEX_HINT_REL to ldarx.
+	(__arch_compare_and_exchange_val_64_acq): Add MUTEX_HINT_ACQ to ldarx.
+	(__arch_compare_and_exchange_val_64_rel): Add MUTEX_HINT_REL to ldarx.
+	(__arch_atomic_exchange_64_acq): Add MUTEX_HINT_ACQ to ldarx.
+	(__arch_atomic_exchange_64_rel): Add MUTEX_HINT_REL to ldarx.
+
+2007-03-20  Jakub Jelinek  <jakub@redhat.com>
 
 	* sysdeps/unix/sysv/linux/powerpc/libc-start.c
 	(__cache_line_size): Define the variable here.  Add
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -25,7 +25,6 @@
 #include <bits/pthreadtypes.h>
 #include <atomic.h>
 
 #ifndef __NR_futex
 # define __NR_futex		221
 #endif
@@ -133,7 +132,7 @@
 /* Set *futex to ID if it is 0, atomically.  Returns the old value */
 #define __lll_robust_trylock(futex, id) \
   ({ int __val; \
-     __asm __volatile ("1:	lwarx	%0,0,%2\n" \
+     __asm __volatile ("1:	lwarx	%0,0,%2" MUTEX_HINT_ACQ "\n" \
		       "	cmpwi	0,%0,0\n" \
		       "	bne	2f\n" \
		       "	stwcx.	%3,0,%2\n" \
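The acquire hint matters most here: a robust trylock stores the caller's thread ID into the futex word, so an acquiring thread writes to the lock line right after the stwcx. succeeds. A hedged sketch of how such a macro is called, using the internal __lll_robust_trylock from this header; the wrapper is illustrative only:

#include <errno.h>

/* Illustrative only: the futex word is 0 when free and holds the
   owner's TID when taken, which is what lets the kernel hand the
   mutex to a waiter if the owner dies.  */
static int
robust_trylock_sketch (int *futexp, int my_tid)
{
  int old = __lll_robust_trylock (futexp, my_tid);
  return old == 0 ? 0 : EBUSY;	/* nonzero old value: already owned */
}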
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -70,6 +70,13 @@ typedef uintmax_t uatomic_max_t;
 # endif
 #endif
 
+#ifndef MUTEX_HINT_ACQ
+# define MUTEX_HINT_ACQ
+#endif
+#ifndef MUTEX_HINT_REL
+# define MUTEX_HINT_REL
+#endif
+
 #define atomic_full_barrier()	__asm ("sync" ::: "memory")
 #define atomic_write_barrier()	__asm ("eieio" ::: "memory")
 
@@ -78,7 +85,7 @@ typedef uintmax_t uatomic_max_t;
     __typeof (*(mem)) __tmp; \
     __typeof (mem)  __memp = (mem); \
     __asm __volatile ( \
-	    "1:	lwarx	%0,0,%1\n" \
+	    "1:	lwarx	%0,0,%1" MUTEX_HINT_ACQ "\n" \
	    "	cmpw	%0,%2\n" \
	    "	bne	2f\n" \
	    "	stwcx.	%3,0,%1\n" \
@@ -95,7 +102,7 @@ typedef uintmax_t uatomic_max_t;
     __typeof (*(mem)) __tmp; \
     __typeof (mem)  __memp = (mem); \
     __asm __volatile (__ARCH_REL_INSTR "\n" \
-	    "1:	lwarx	%0,0,%1\n" \
+	    "1:	lwarx	%0,0,%1" MUTEX_HINT_REL "\n" \
	    "	cmpw	%0,%2\n" \
	    "	bne	2f\n" \
	    "	stwcx.	%3,0,%1\n" \
@@ -111,7 +118,7 @@ typedef uintmax_t uatomic_max_t;
   ({ \
     __typeof (*mem) __val; \
     __asm __volatile ( \
-	    "1:	lwarx	%0,0,%2\n" \
+	    "1:	lwarx	%0,0,%2" MUTEX_HINT_ACQ "\n" \
	    "	stwcx.	%3,0,%2\n" \
	    "	bne-	1b\n" \
	    "  " __ARCH_ACQ_INSTR \
@@ -125,7 +132,7 @@ typedef uintmax_t uatomic_max_t;
   ({ \
     __typeof (*mem) __val; \
     __asm __volatile (__ARCH_REL_INSTR "\n" \
-	    "1:	lwarx	%0,0,%2\n" \
+	    "1:	lwarx	%0,0,%2" MUTEX_HINT_REL "\n" \
	    "	stwcx.	%3,0,%2\n" \
	    "	bne-	1b" \
	    : "=&r" (__val), "=m" (*mem) \
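With the fallback (empty) definitions above in the common header, one source tree assembles on every PowerPC; only POWER6 builds emit the extra hint operand. A sketch of how the hinted val_32 pair would be exercised through a lock word, using the macro names from this file; the two helpers are hypothetical:

/* Hypothetical lock-word helpers built on the patched macros.  The
   macros take (mem, newval, oldval) and return the value observed.  */
static int
lock_word_try (unsigned int *mem)
{
  /* 0 -> 1 with acquire semantics: hint ",1", updates will follow.  */
  return __arch_compare_and_exchange_val_32_acq (mem, 1, 0) == 0;
}

static void
lock_word_release (unsigned int *mem)
{
  /* 1 -> 0 with release semantics: hint ",0", we are done with it.  */
  __arch_compare_and_exchange_val_32_rel (mem, 0, 1);
}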
--- a/sysdeps/powerpc/powerpc32/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc32/bits/atomic.h
@@ -1,5 +1,5 @@
 /* Atomic operations.  PowerPC32 version.
-   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -18,17 +18,33 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
+   This is a hint to the hardware telling it whether or not to expect
+   additional updates adjacent to the lock word.  If we are acquiring
+   a Mutex, the hint should be true.  Otherwise we are releasing a
+   Mutex or doing a simple atomic operation.  In that case we don't
+   expect additional updates adjacent to the lock word after the
+   Store Conditional, and the hint should be false.  */
+
+#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
+# define MUTEX_HINT_ACQ	",1"
+# define MUTEX_HINT_REL	",0"
+#else
+# define MUTEX_HINT_ACQ
+# define MUTEX_HINT_REL
+#endif
+
 /*
  * The 32-bit exchange_bool is different on powerpc64 because the subf
  * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
  * (a load word and zero (high 32) form).  So powerpc64 has a slightly
  * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
  */
-# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
 ({ \
   unsigned int __tmp; \
   __asm __volatile ( \
-    "1:	lwarx	%0,0,%1\n" \
+    "1:	lwarx	%0,0,%1" MUTEX_HINT_ACQ "\n" \
     "	subf.	%0,%2,%0\n" \
     "	bne	2f\n" \
     "	stwcx.	%3,0,%1\n" \
@@ -40,11 +56,11 @@
   __tmp != 0; \
 })
 
-# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
 ({ \
   unsigned int __tmp; \
   __asm __volatile (__ARCH_REL_INSTR "\n" \
-    "1:	lwarx	%0,0,%1\n" \
+    "1:	lwarx	%0,0,%1" MUTEX_HINT_REL "\n" \
     "	subf.	%0,%2,%0\n" \
     "	bne	2f\n" \
     "	stwcx.	%3,0,%1\n" \
@@ -59,34 +75,34 @@
 /* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
    load and reserve (ldarx) and store conditional (stdcx.) instructions.
    So for powerpc32 we stub out the 64-bit forms.  */
-# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
   (abort (), 0)
 
-# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
   (abort (), (__typeof (*mem)) 0)
 
-# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
   (abort (), 0)
 
-# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
+#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
   (abort (), (__typeof (*mem)) 0)
 
-# define __arch_atomic_exchange_64_acq(mem, value) \
+#define __arch_atomic_exchange_64_acq(mem, value) \
     ({ abort (); (*mem) = (value); })
 
-# define __arch_atomic_exchange_64_rel(mem, value) \
+#define __arch_atomic_exchange_64_rel(mem, value) \
    ({ abort (); (*mem) = (value); })
 
-# define __arch_atomic_exchange_and_add_64(mem, value) \
+#define __arch_atomic_exchange_and_add_64(mem, value) \
    ({ abort (); (*mem) = (value); })
 
-# define __arch_atomic_increment_val_64(mem) \
+#define __arch_atomic_increment_val_64(mem) \
    ({ abort (); (*mem)++; })
 
-# define __arch_atomic_decrement_val_64(mem) \
+#define __arch_atomic_decrement_val_64(mem) \
   ({ abort (); (*mem)--; })
 
-# define __arch_atomic_decrement_if_positive_64(mem) \
+#define __arch_atomic_decrement_if_positive_64(mem) \
  ({ abort (); (*mem)--; })
 
 #ifdef _ARCH_PWR4
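The bool_32 variants above test equality by subtraction rather than a compare instruction: subf. computes the loaded value minus oldval and sets cr0, so a nonzero result both takes the failure branch and becomes the macro's "CAS failed" return value. The same logic in plain C, as a sketch (the helper name is illustrative):

/* C analogue of the subf.-based test in the bool_32 macros.  */
static int
cas_failed_sketch (unsigned int loaded, unsigned int oldval)
{
  unsigned int tmp = loaded - oldval;	/* subf. %0,%2,%0 */
  return tmp != 0;			/* nonzero: mismatch, CAS fails */
}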
--- a/sysdeps/powerpc/powerpc64/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc64/bits/atomic.h
@@ -1,5 +1,5 @@
 /* Atomic operations.  PowerPC64 version.
-   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -18,17 +18,33 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
+   This is a hint to the hardware telling it whether or not to expect
+   additional updates adjacent to the lock word.  If we are acquiring
+   a Mutex, the hint should be true.  Otherwise we are releasing a
+   Mutex or doing a simple atomic operation.  In that case we don't
+   expect additional updates adjacent to the lock word after the
+   Store Conditional, and the hint should be false.  */
+
+#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
+# define MUTEX_HINT_ACQ	",1"
+# define MUTEX_HINT_REL	",0"
+#else
+# define MUTEX_HINT_ACQ
+# define MUTEX_HINT_REL
+#endif
+
 /* The 32-bit exchange_bool is different on powerpc64 because the subf
    does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
    (a load word and zero (high 32) form) load.
    In powerpc64 register values are 64-bit by default, including oldval.
    The value in oldval has unknown sign extension, while lwarx loads the
    32-bit value as unsigned.  So we explicitly clear the high 32 bits
    in oldval.  */
-# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
 ({ \
   unsigned int __tmp, __tmp2; \
   __asm __volatile ("	clrldi	%1,%1,32\n" \
-		   "1:	lwarx	%0,0,%2\n" \
+		   "1:	lwarx	%0,0,%2" MUTEX_HINT_ACQ "\n" \
		   "	subf.	%0,%1,%0\n" \
		   "	bne	2f\n" \
		   "	stwcx.	%4,0,%2\n" \
@@ -40,12 +56,12 @@
   __tmp != 0; \
 })
 
-# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
 ({ \
   unsigned int __tmp, __tmp2; \
   __asm __volatile (__ARCH_REL_INSTR "\n" \
		   "	clrldi	%1,%1,32\n" \
-		   "1:	lwarx	%0,0,%2\n" \
+		   "1:	lwarx	%0,0,%2" MUTEX_HINT_REL "\n" \
		   "	subf.	%0,%1,%0\n" \
		   "	bne	2f\n" \
		   "	stwcx.	%4,0,%2\n" \
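The clrldi above guards against exactly the sign-extension mismatch the comment describes: a negative 32-bit oldval arrives sign-extended in a 64-bit register, while lwarx zero-extends what it loads, so a 64-bit subf would see a spurious difference. A small C demonstration of the hazard, as a sketch (names are illustrative):

#include <stdint.h>

/* Returns nonzero exactly when the upper 32 bits would differ, i.e.
   for negative oldval (the case clrldi %1,%1,32 neutralizes).  */
static int
high_bits_mismatch (int32_t oldval)
{
  uint64_t sign_extended = (uint64_t) (int64_t) oldval;	 /* register */
  uint64_t zero_extended = (uint64_t) (uint32_t) oldval; /* lwarx    */
  return sign_extended != zero_extended;
}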
@@ -62,11 +78,11 @@
  * and Store doubleword conditional indexed (stdcx) instructions.  So here
  * we define the 64-bit forms.
  */
-# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
 ({ \
   unsigned long	__tmp; \
   __asm __volatile ( \
-    "1:	ldarx	%0,0,%1\n" \
+    "1:	ldarx	%0,0,%1" MUTEX_HINT_ACQ "\n" \
     "	subf.	%0,%2,%0\n" \
     "	bne	2f\n" \
     "	stdcx.	%3,0,%1\n" \
@@ -78,11 +94,11 @@
   __tmp != 0; \
 })
 
-# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
 ({ \
   unsigned long	__tmp; \
   __asm __volatile (__ARCH_REL_INSTR "\n" \
-    "1:	ldarx	%0,0,%1\n" \
+    "1:	ldarx	%0,0,%1" MUTEX_HINT_REL "\n" \
     "	subf.	%0,%2,%0\n" \
     "	bne	2f\n" \
     "	stdcx.	%3,0,%1\n" \
@@ -99,7 +115,7 @@
     __typeof (*(mem)) __tmp; \
     __typeof (mem)  __memp = (mem); \
     __asm __volatile ( \
-	    "1:	ldarx	%0,0,%1\n" \
+	    "1:	ldarx	%0,0,%1" MUTEX_HINT_ACQ "\n" \
	    "	cmpd	%0,%2\n" \
	    "	bne	2f\n" \
	    "	stdcx.	%3,0,%1\n" \
@@ -116,7 +132,7 @@
     __typeof (*(mem)) __tmp; \
     __typeof (mem)  __memp = (mem); \
     __asm __volatile (__ARCH_REL_INSTR "\n" \
-	    "1:	ldarx	%0,0,%1\n" \
+	    "1:	ldarx	%0,0,%1" MUTEX_HINT_REL "\n" \
	    "	cmpd	%0,%2\n" \
	    "	bne	2f\n" \
	    "	stdcx.	%3,0,%1\n" \
@@ -128,11 +144,11 @@
   __tmp; \
 })
 
-# define __arch_atomic_exchange_64_acq(mem, value) \
+#define __arch_atomic_exchange_64_acq(mem, value) \
     ({ \
       __typeof (*mem) __val; \
       __asm __volatile (__ARCH_REL_INSTR "\n" \
-	"1:	ldarx	%0,0,%2\n" \
+	"1:	ldarx	%0,0,%2" MUTEX_HINT_ACQ "\n" \
	"	stdcx.	%3,0,%2\n" \
	"	bne-	1b\n" \
	"  " __ARCH_ACQ_INSTR \
@@ -142,11 +158,11 @@
       __val; \
     })
 
-# define __arch_atomic_exchange_64_rel(mem, value) \
+#define __arch_atomic_exchange_64_rel(mem, value) \
     ({ \
       __typeof (*mem) __val; \
       __asm __volatile (__ARCH_REL_INSTR "\n" \
-	"1:	ldarx	%0,0,%2\n" \
+	"1:	ldarx	%0,0,%2" MUTEX_HINT_REL "\n" \
	"	stdcx.	%3,0,%2\n" \
	"	bne-	1b" \
	: "=&r" (__val), "=m" (*mem) \
@@ -155,7 +171,7 @@
       __val; \
     })
 
-# define __arch_atomic_exchange_and_add_64(mem, value) \
+#define __arch_atomic_exchange_and_add_64(mem, value) \
     ({ \
       __typeof (*mem) __val, __tmp; \
       __asm __volatile ("1:	ldarx	%0,0,%3\n" \
@@ -168,7 +184,7 @@
       __val; \
     })
 
-# define __arch_atomic_increment_val_64(mem) \
+#define __arch_atomic_increment_val_64(mem) \
     ({ \
       __typeof (*(mem)) __val; \
       __asm __volatile ("1:	ldarx	%0,0,%2\n" \
@@ -181,7 +197,7 @@
       __val; \
     })
 
-# define __arch_atomic_decrement_val_64(mem) \
+#define __arch_atomic_decrement_val_64(mem) \
     ({ \
       __typeof (*(mem)) __val; \
       __asm __volatile ("1:	ldarx	%0,0,%2\n" \
@@ -194,7 +210,7 @@
       __val; \
     })
 
-# define __arch_atomic_decrement_if_positive_64(mem) \
+#define __arch_atomic_decrement_if_positive_64(mem) \
  ({ int __val, __tmp; \
    __asm __volatile ("1:	ldarx	%0,0,%3\n" \
		      "	cmpdi	0,%0,0\n" \
@@ -212,13 +228,13 @@
 /*
  * All powerpc64 processors support the new "light weight" sync (lwsync).
  */
-# define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
+#define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
 /*
  * "light weight" sync can also be used for the release barrier.
  */
-# ifndef UP
+#ifndef UP
 # define __ARCH_REL_INSTR	"lwsync"
-# endif
+#endif
 
 /*
  * Include the rest of the atomic ops macros which are common to both
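Since every powerpc64 processor has lwsync, the file uses it both for the read barrier and, on SMP builds, for the release barrier that __ARCH_REL_INSTR emits ahead of the ldarx/stdcx. sequences above. A sketch of the resulting release-store ordering, assuming GCC inline asm; the helper is hypothetical:

/* Hypothetical release store: order all prior accesses before the
   store that publishes the value (what __ARCH_REL_INSTR achieves for
   the _rel macros above).  */
static void
release_store_sketch (volatile unsigned int *mem, unsigned int val)
{
  __asm __volatile ("lwsync" ::: "memory");	/* release barrier */
  *mem = val;					/* publishing store */
}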