AArch64: Cleanup SVE config and defines

Now that we finally require modern GCC and binutils, it's time for a cleanup.
Remove the HAVE_AARCH64_SVE_ASM define and its conditional compilation.  Remove
the configure checks for SVE, ACLE and variant-PCS support.

Reviewed-by: Yury Khrustalev <yury.khrustalev@arm.com>
Wilco Dijkstra 2025-05-14 11:38:19 +00:00
parent 2c421fc430
commit b990b0aee2
12 changed files with 5 additions and 219 deletions
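For context, a minimal standalone sketch of the pattern being simplified.  It is not part of this commit, and pick_memcpy, memcpy_sve_stub and memcpy_generic_stub are illustrative names only: before, SVE implementations were both guarded at build time with #if HAVE_AARCH64_SVE_ASM and selected at run time; after the cleanup only the runtime check remains.

/* Minimal sketch, not part of this commit.  The SVE branch used to be
   additionally wrapped in "#if HAVE_AARCH64_SVE_ASM ... #endif"; with a
   modern assembler assumed, only the runtime 'sve' check is needed.  */
#include <stddef.h>
#include <string.h>

typedef void *(*memcpy_fn) (void *, const void *, size_t);

/* Stand-ins for the real SVE and generic implementations.  */
static void *
memcpy_sve_stub (void *d, const void *s, size_t n)
{
  return memcpy (d, s, n);
}

static void *
memcpy_generic_stub (void *d, const void *s, size_t n)
{
  return memcpy (d, s, n);
}

static memcpy_fn
pick_memcpy (int sve)
{
  if (sve)
    return memcpy_sve_stub;
  return memcpy_generic_stub;
}

int
main (void)
{
  char dst[4];
  pick_memcpy (0) (dst, "ok", 3);
  return dst[0] != 'o';
}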


@@ -113,11 +113,6 @@
/* AArch64 big endian ABI */
#undef HAVE_AARCH64_BE
/* Assembler support ARMv8.2-A SVE.
This macro becomes obsolete when glibc increased the minimum
required version of GNU 'binutils' to 2.28 or later. */
#define HAVE_AARCH64_SVE_ASM 0
/* C-SKY ABI version. */
#undef CSKYABI


@@ -43,13 +43,11 @@ gen-as-const-headers += \
tests-internal += tst-ifunc-arg-1 tst-ifunc-arg-2
ifeq (yes,$(aarch64-variant-pcs))
tests += tst-vpcs
modules-names += tst-vpcs-mod
LDFLAGS-tst-vpcs-mod.so = -Wl,-z,lazy
$(objpfx)tst-vpcs: $(objpfx)tst-vpcs-mod.so
endif
endif
ifeq ($(subdir),csu)
gen-as-const-headers += \


@@ -185,131 +185,11 @@ else
default-abi = lp64"
fi
# Check if binutils supports variant PCS symbols.
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for variant PCS support" >&5
printf %s "checking for variant PCS support... " >&6; }
if test ${libc_cv_aarch64_variant_pcs+y}
then :
printf %s "(cached) " >&6
else case e in #(
e) cat > conftest.S <<EOF
.global foo
.type foo, %function
.variant_pcs foo
foo:
ret
.global bar
.type bar, %function
bar:
b foo
EOF
libc_cv_aarch64_variant_pcs=no
if { ac_try='${CC-cc} $CFLAGS $CPPFLAGS $LDFLAGS -nostdlib -nostartfiles $no_ssp -shared -fPIC -o conftest.so conftest.S'
{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
(eval $ac_try) 2>&5
ac_status=$?
printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; } \
&& { ac_try='$READELF -dW conftest.so | grep -q AARCH64_VARIANT_PCS'
{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
(eval $ac_try) 2>&5
ac_status=$?
printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; }
then
libc_cv_aarch64_variant_pcs=yes
fi
rm -rf conftest.* ;;
esac
fi
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $libc_cv_aarch64_variant_pcs" >&5
printf "%s\n" "$libc_cv_aarch64_variant_pcs" >&6; }
config_vars="$config_vars
aarch64-variant-pcs = $libc_cv_aarch64_variant_pcs"
# Check if asm support armv8.2-a+sve
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for SVE support in assembler" >&5
printf %s "checking for SVE support in assembler... " >&6; }
if test ${libc_cv_aarch64_sve_asm+y}
then :
printf %s "(cached) " >&6
else case e in #(
e) cat > conftest.s <<\EOF
.arch armv8.2-a+sve
ptrue p0.b
EOF
if { ac_try='${CC-cc} -c conftest.s 1>&5'
{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
(eval $ac_try) 2>&5
ac_status=$?
printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; }; then
libc_cv_aarch64_sve_asm=yes
else
libc_cv_aarch64_sve_asm=no
fi
rm -f conftest* ;;
esac
fi
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $libc_cv_aarch64_sve_asm" >&5
printf "%s\n" "$libc_cv_aarch64_sve_asm" >&6; }
if test $libc_cv_aarch64_sve_asm = yes; then
printf "%s\n" "#define HAVE_AARCH64_SVE_ASM 1" >>confdefs.h
fi
if test x"$build_mathvec" = xnotset; then
build_mathvec=yes
fi
# Check if compiler supports SVE ACLE.
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for availability of SVE ACLE" >&5
printf %s "checking for availability of SVE ACLE... " >&6; }
if test ${libc_cv_aarch64_sve_acle+y}
then :
printf %s "(cached) " >&6
else case e in #(
e) cat > conftest.c <<EOF
#include <arm_sve.h>
EOF
if { ac_try='${CC-cc} $CFLAGS $CPPFLAGS -fsyntax-only -ffreestanding conftest.c'
{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
(eval $ac_try) 2>&5
ac_status=$?
printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; }; then
libc_cv_aarch64_sve_acle=yes
else
libc_cv_aarch64_sve_acle=no
fi
rm conftest.c ;;
esac
fi
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $libc_cv_aarch64_sve_acle" >&5
printf "%s\n" "$libc_cv_aarch64_sve_acle" >&6; }
# Check if compiler is sufficient to build mathvec
if test $build_mathvec = yes; then
fail=no
if test $libc_cv_aarch64_variant_pcs = no; then
fail=yes
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: mathvec is enabled but linker does not support variant PCS." >&5
printf "%s\n" "$as_me: WARNING: mathvec is enabled but linker does not support variant PCS." >&2;}
fi
if test $libc_cv_aarch64_sve_asm = no; then
fail=yes
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: mathvec is enabled but assembler does not support SVE." >&5
printf "%s\n" "$as_me: WARNING: mathvec is enabled but assembler does not support SVE." >&2;}
fi
if test $libc_cv_aarch64_sve_acle = no; then
fail=yes
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: mathvec is enabled but compiler does not have SVE ACLE." >&5
printf "%s\n" "$as_me: WARNING: mathvec is enabled but compiler does not have SVE ACLE." >&2;}
fi
if test $fail = yes; then
as_fn_error $? "use a compatible toolchain or configure with --disable-mathvec (this results in incomplete ABI)." "$LINENO" 5
fi
else
if test $build_mathvec = no; then
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: mathvec is disabled, this results in incomplete ABI." >&5
printf "%s\n" "$as_me: WARNING: mathvec is disabled, this results in incomplete ABI." >&2;}
fi


@@ -24,78 +24,10 @@ else
LIBC_CONFIG_VAR([default-abi], [lp64])
fi
# Check if binutils supports variant PCS symbols.
AC_CACHE_CHECK([for variant PCS support], [libc_cv_aarch64_variant_pcs], [dnl
cat > conftest.S <<EOF
.global foo
.type foo, %function
.variant_pcs foo
foo:
ret
.global bar
.type bar, %function
bar:
b foo
EOF
libc_cv_aarch64_variant_pcs=no
if AC_TRY_COMMAND([${CC-cc} $CFLAGS $CPPFLAGS $LDFLAGS -nostdlib -nostartfiles $no_ssp -shared -fPIC -o conftest.so conftest.S]) \
&& AC_TRY_COMMAND([$READELF -dW conftest.so | grep -q AARCH64_VARIANT_PCS])
then
libc_cv_aarch64_variant_pcs=yes
fi
rm -rf conftest.*])
LIBC_CONFIG_VAR([aarch64-variant-pcs], [$libc_cv_aarch64_variant_pcs])
# Check if asm support armv8.2-a+sve
AC_CACHE_CHECK([for SVE support in assembler], [libc_cv_aarch64_sve_asm], [dnl
cat > conftest.s <<\EOF
.arch armv8.2-a+sve
ptrue p0.b
EOF
if AC_TRY_COMMAND(${CC-cc} -c conftest.s 1>&AS_MESSAGE_LOG_FD); then
libc_cv_aarch64_sve_asm=yes
else
libc_cv_aarch64_sve_asm=no
fi
rm -f conftest*])
if test $libc_cv_aarch64_sve_asm = yes; then
AC_DEFINE(HAVE_AARCH64_SVE_ASM)
fi
if test x"$build_mathvec" = xnotset; then
build_mathvec=yes
fi
# Check if compiler supports SVE ACLE.
AC_CACHE_CHECK(for availability of SVE ACLE, libc_cv_aarch64_sve_acle, [dnl
cat > conftest.c <<EOF
#include <arm_sve.h>
EOF
if AC_TRY_COMMAND([${CC-cc} $CFLAGS $CPPFLAGS -fsyntax-only -ffreestanding conftest.c]); then
libc_cv_aarch64_sve_acle=yes
else
libc_cv_aarch64_sve_acle=no
fi
rm conftest.c])
# Check if compiler is sufficient to build mathvec
if test $build_mathvec = yes; then
fail=no
if test $libc_cv_aarch64_variant_pcs = no; then
fail=yes
AC_MSG_WARN([mathvec is enabled but linker does not support variant PCS.])
fi
if test $libc_cv_aarch64_sve_asm = no; then
fail=yes
AC_MSG_WARN([mathvec is enabled but assembler does not support SVE.])
fi
if test $libc_cv_aarch64_sve_acle = no; then
fail=yes
AC_MSG_WARN([mathvec is enabled but compiler does not have SVE ACLE.])
fi
if test $fail = yes; then
AC_MSG_ERROR([use a compatible toolchain or configure with --disable-mathvec (this results in incomplete ABI).])
fi
else
if test $build_mathvec = no; then
AC_MSG_WARN([mathvec is disabled, this results in incomplete ABI.])
fi


@@ -36,18 +36,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/aarch64/multiarch/memcpy.c, memmove.c and memset.c. */
IFUNC_IMPL (i, name, memcpy,
IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_oryon1)
#if HAVE_AARCH64_SVE_ASM
IFUNC_IMPL_ADD (array, i, memcpy, sve, __memcpy_a64fx)
IFUNC_IMPL_ADD (array, i, memcpy, sve, __memcpy_sve)
#endif
IFUNC_IMPL_ADD (array, i, memcpy, mops, __memcpy_mops)
IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_generic))
IFUNC_IMPL (i, name, memmove,
IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_oryon1)
#if HAVE_AARCH64_SVE_ASM
IFUNC_IMPL_ADD (array, i, memmove, sve, __memmove_a64fx)
IFUNC_IMPL_ADD (array, i, memmove, sve, __memmove_sve)
#endif
IFUNC_IMPL_ADD (array, i, memmove, mops, __memmove_mops)
IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_generic))
IFUNC_IMPL (i, name, memset,
@@ -55,10 +51,8 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
IFUNC_IMPL_ADD (array, i, memset, (zva_size == 64), __memset_oryon1)
IFUNC_IMPL_ADD (array, i, memset, 1, __memset_emag)
IFUNC_IMPL_ADD (array, i, memset, 1, __memset_kunpeng)
#if HAVE_AARCH64_SVE_ASM
IFUNC_IMPL_ADD (array, i, memset, sve && zva_size == 256, __memset_a64fx)
IFUNC_IMPL_ADD (array, i, memset, sve && zva_size == 64, __memset_sve_zva64)
#endif
IFUNC_IMPL_ADD (array, i, memset, mops, __memset_mops)
IFUNC_IMPL_ADD (array, i, memset, 1, __memset_generic))
IFUNC_IMPL (i, name, memchr,


@@ -43,7 +43,7 @@ select_memcpy_ifunc (void)
if (mops)
return __memcpy_mops;
if (sve && HAVE_AARCH64_SVE_ASM)
if (sve)
{
if (IS_A64FX (midr))
return __memcpy_a64fx;

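The 'sve' condition in the selector above is a runtime CPU-feature check, independent of the build-time assembler probe this commit deletes.  As a rough standalone illustration (this is not glibc's internal mechanism, which reads its cached cpu-features data), SVE availability can be queried at run time from the hwcap auxiliary vector on Linux/AArch64:

/* Rough illustration only: query SVE support at run time via hwcaps.
   The HWCAP_SVE fallback value below is the Linux AArch64 bit (22).  */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SVE
# define HWCAP_SVE (1UL << 22)
#endif

int
main (void)
{
  int sve = (getauxval (AT_HWCAP) & HWCAP_SVE) != 0;
  printf ("SVE available at run time: %s\n", sve ? "yes" : "no");
  return 0;
}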

@@ -35,8 +35,6 @@
#define vlen x7
#define vlen8 x8
#if HAVE_AARCH64_SVE_ASM
.arch armv8.2-a+sve
.macro ld1b_unroll8
@@ -290,4 +288,3 @@ L(full_overlap):
b L(last_bytes)
END (__memmove_a64fx)
#endif /* HAVE_AARCH64_SVE_ASM */


@@ -56,8 +56,6 @@
The loop tail is handled by always copying 64 bytes from the end.
*/
#if HAVE_AARCH64_SVE_ASM
.arch armv8.2-a+sve
ENTRY (__memcpy_sve)
@@ -199,4 +197,3 @@ L(return):
ret
END (__memmove_sve)
#endif


@@ -41,7 +41,7 @@ select_memmove_ifunc (void)
if (mops)
return __memmove_mops;
if (sve && HAVE_AARCH64_SVE_ASM)
if (sve)
{
if (IS_A64FX (midr))
return __memmove_a64fx;


@@ -46,7 +46,7 @@ select_memset_ifunc (void)
if (mops)
return __memset_mops;
if (sve && HAVE_AARCH64_SVE_ASM)
if (sve)
{
if (IS_A64FX (midr) && zva_size == 256)
return __memset_a64fx;


@@ -31,8 +31,6 @@
#define PF_DIST_L1 (CACHE_LINE_SIZE * 16) // Prefetch distance L1
#define vector_length x9
#if HAVE_AARCH64_SVE_ASM
.arch armv8.2-a+sve
#define dstin x0
@@ -166,5 +164,3 @@ L(L2):
b L(last)
END (__memset_a64fx)
#endif /* HAVE_AARCH64_SVE_ASM */


@@ -25,8 +25,6 @@
* ZVA size is 64.
*/
#if HAVE_AARCH64_SVE_ASM
.arch armv8.2-a+sve
#define dstin x0
@@ -120,4 +118,3 @@ L(no_zva_loop):
ret
END (__memset_sve_zva64)
#endif