2004-12-15  Steven Munroe  <sjmunroe@us.ibm.com>

	* sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S: Make no_vmx
	symbol local.
	* sysdeps/powerpc/powerpc32/fpu/setjmp-common.S: Make no_vmx
	symbol local.
	* sysdeps/powerpc/powerpc64/__longjmp-common.S: Make no_vmx
	symbol local.
	* sysdeps/powerpc/powerpc64/setjmp-common.S: Make no_vmx and
	aligned_save_vmx symbols local.
Committed by Roland McGrath on 2004-12-15 20:36:01 +00:00
commit 372723065a (parent ca5d7882cf)
4 changed files with 16 additions and 16 deletions
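The rename works because of glibc's L() assembler helper, which turns a plain label into an assembler-local one. As a minimal sketch of how such a macro is conventionally defined (the exact guard and header vary across glibc versions and targets; this simplification is an assumption, not a quote of the 2004 sysdep.h):

  /* Hypothetical simplification of a glibc-style L() helper.  On ELF
     targets, labels that begin with ".L" are local to the assembler:
     they are never emitted into the object file's symbol table.  */
  #ifdef __ELF__
  # define L(name) .L##name
  #else
  # define L(name) name
  #endif

With the labels wrapped this way, no_vmx and aligned_save_vmx no longer land in the object's symbol table, so nm stops listing them and a backtrace through these functions resolves to __longjmp or __sigsetjmp rather than to the nearest stray label.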

--- a/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S

@@ -50,7 +50,7 @@ ENTRY (BP_SYM (__longjmp))
 	lwz	r5,_dl_hwcap@l(r5)
 # endif
 	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
-	beq	no_vmx
+	beq	L(no_vmx)
 	la	r5,((JB_VRS)*4)(3)
 	andi.	r6,r5,0xf
 	lwz	r0,((JB_VRSAVE)*4)(3)
@@ -78,7 +78,7 @@ ENTRY (BP_SYM (__longjmp))
 	load_misaligned_vmx_lo_loaded(v30,v31,v0,r6,r5)
 	lvx	v1,0,r5
 	vperm	v31,v31,v1,v0
-	b	no_vmx
+	b	L(no_vmx)
 aligned_restore_vmx:
 	addi	r6,r5,16
 	lvx	v20,0,r5
@@ -103,7 +103,7 @@ aligned_restore_vmx:
 	addi	r6,r6,32
 	lvx	v30,0,r5
 	lvx	v31,0,r6
-no_vmx:
+L(no_vmx):
 #endif
 	lwz	r1,(JB_GPR1*4)(r3)
 	lwz	r0,(JB_LR*4)(r3)
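All four files gate the VMX register save/restore on the AltiVec bit of _dl_hwcap, which the assembly tests with andis. against the high halfword of the hwcap word. For illustration only, a C sketch of the same runtime test as application code could perform it with today's getauxval interface (an API that postdates this 2004 commit):

  #include <stdio.h>
  #include <sys/auxv.h>

  /* On powerpc this constant comes from the kernel's hwcap
     definitions; the literal matches PPC_FEATURE_HAS_ALTIVEC
     (0x10000000) and is repeated here only to keep the sketch
     self-contained.  */
  #ifndef PPC_FEATURE_HAS_ALTIVEC
  # define PPC_FEATURE_HAS_ALTIVEC 0x10000000
  #endif

  int
  main (void)
  {
    unsigned long hwcap = getauxval (AT_HWCAP);

    /* Equivalent of the assembly's
       andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16); beq L(no_vmx).  */
    if (hwcap & PPC_FEATURE_HAS_ALTIVEC)
      puts ("AltiVec present: jmp_buf carries the VMX registers");
    else
      puts ("No AltiVec: VMX save/restore is skipped");
    return 0;
  }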

--- a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S

@@ -92,13 +92,13 @@ ENTRY (BP_SYM (__sigsetjmp))
 	lwz	r5,_dl_hwcap@l(r5)
 #endif
 	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
-	beq	no_vmx
+	beq	L(no_vmx)
 	la	r5,((JB_VRS)*4)(3)
 	andi.	r6,r5,0xf
 	mfspr	r0,VRSAVE
 	stw	r0,((JB_VRSAVE)*4)(3)
 	addi	r6,r5,16
-	beq+	aligned_save_vmx
+	beq+	L(aligned_save_vmx)
 	lvsr	v0,0,r5
 	vspltisb v1,-1	/* set v1 to all 1's */
 	vspltisb v2,0	/* set v2 to all 0's */
@@ -137,9 +137,9 @@ ENTRY (BP_SYM (__sigsetjmp))
 	stvx	v5,0,r6
 	vsel	v4,v31,v4,v3
 	stvx	v4,0,r5
-	b	no_vmx
+	b	L(no_vmx)

-aligned_save_vmx:
+L(aligned_save_vmx):
 	stvx	20,0,r5
 	addi	r5,r5,32
 	stvx	21,0,r6
@@ -162,7 +162,7 @@ aligned_save_vmx:
 	addi	r6,r6,32
 	stvx	30,0,r5
 	stvx	31,0,r6
-no_vmx:
+L(no_vmx):
 #endif
 	b	JUMPTARGET (BP_SYM (__sigjmp_save))
 END (BP_SYM (__sigsetjmp))

--- a/sysdeps/powerpc/powerpc64/__longjmp-common.S
+++ b/sysdeps/powerpc/powerpc64/__longjmp-common.S

@@ -53,7 +53,7 @@ ENTRY (BP_SYM (__longjmp))
 	ld	r5,0(r5) /* Load extern _dl_hwcap.  */
 # endif
 	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
-	beq	no_vmx
+	beq	L(no_vmx)
 	la	r5,((JB_VRS)*8)(3)
 	andi.	r6,r5,0xf
 	lwz	r0,((JB_VRSAVE)*8)(3)
@@ -81,7 +81,7 @@ ENTRY (BP_SYM (__longjmp))
 	load_misaligned_vmx_lo_loaded(v30,v31,v0,r6,r5)
 	lvx	v1,0,r5
 	vperm	v31,v31,v1,v0
-	b	no_vmx
+	b	L(no_vmx)
 aligned_restore_vmx:
 	addi	r6,r5,16
 	lvx	v20,0,r5
@@ -106,7 +106,7 @@ aligned_restore_vmx:
 	addi	r6,r6,32
 	lvx	v30,0,r5
 	lvx	v31,0,r6
-no_vmx:
+L(no_vmx):
 #endif
 	ld	r1,(JB_GPR1*8)(r3)
 	ld	r2,(JB_GPR2*8)(r3)

--- a/sysdeps/powerpc/powerpc64/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc64/setjmp-common.S

@@ -102,13 +102,13 @@ JUMPTARGET(GLUE(__sigsetjmp,_ent)):
 	ld	r5,0(r5) /* Load extern _dl_hwcap.  */
 # endif
 	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
-	beq	no_vmx
+	beq	L(no_vmx)
 	la	r5,((JB_VRS)*8)(3)
 	andi.	r6,r5,0xf
 	mfspr	r0,VRSAVE
 	stw	r0,((JB_VRSAVE)*8)(3)
 	addi	r6,r5,16
-	beq+	aligned_save_vmx
+	beq+	L(aligned_save_vmx)
 	lvsr	v0,0,r5
 	vspltisb v1,-1	/* set v1 to all 1's */
 	vspltisb v2,0	/* set v2 to all 0's */
@@ -150,9 +150,9 @@ JUMPTARGET(GLUE(__sigsetjmp,_ent)):
 	stvx	v5,0,r6
 	vsel	v4,v31,v4,v3
 	stvx	v4,0,r5
-	b	no_vmx
+	b	L(no_vmx)

-aligned_save_vmx:
+L(aligned_save_vmx):
 	stvx	20,0,r5
 	addi	r5,r5,32
 	stvx	21,0,r6
@@ -175,7 +175,7 @@ aligned_save_vmx:
 	addi	r6,r6,32
 	stvx	30,0,r5
 	stvx	31,0,r6
-no_vmx:
+L(no_vmx):
 #endif
 	b	JUMPTARGET (BP_SYM (__sigjmp_save))
 END (BP_SYM (__sigsetjmp))