AArch64: Add SVE memset

Add SVE memset based on the generic memset, with a predicated store for sizes < 16.
Unaligned memsets of 128-1024 bytes are improved by ~20% on average by using
aligned stores for the last 64 bytes.  Performance of the random memset
benchmark improves by ~2% on Neoverse V1.

Reviewed-by: Yury Khrustalev <yury.khrustalev@arm.com>
(cherry picked from commit 163b1bbb76)
Author: Wilco Dijkstra
Date: 2024-12-24 18:01:59 +00:00
commit 7038970f1f
parent 27fa0268ea
4 changed files with 129 additions and 0 deletions
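The key idea for small sizes is an SVE predicated store: build a predicate covering
exactly 'count' lanes and issue a single store, so sizes 0..15 need no branch on the
exact length.  As an illustration only (not part of the commit), here is a minimal C
sketch of that idea using ACLE SVE intrinsics from <arm_sve.h>; the helper name
set_below_16 is hypothetical:

/* Illustration only: hypothetical C equivalent of the sizes-below-16 path.  */
#include <arm_sve.h>
#include <stddef.h>
#include <stdint.h>

static void
set_below_16 (uint8_t *dst, int value, size_t count)
{
  svuint8_t v = svdup_n_u8 ((uint8_t) value);  /* Replicate the byte value.  */
  svbool_t pg = svwhilelt_b8_u64 (0, count);   /* First 'count' lanes active.  */
  svst1_u8 (pg, dst, v);                       /* Write exactly 'count' bytes.  */
}

In the new assembly file this corresponds to the whilelo/st1b pair at L(set_16).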


@@ -16,6 +16,7 @@ sysdep_routines += \
memset_kunpeng \
memset_mops \
memset_oryon1 \
memset_sve_zva64 \
memset_zva64 \
strlen_asimd \
strlen_generic \


@@ -61,6 +61,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
IFUNC_IMPL_ADD (array, i, memset, 1, __memset_kunpeng)
#if HAVE_AARCH64_SVE_ASM
IFUNC_IMPL_ADD (array, i, memset, sve && !bti && zva_size == 256, __memset_a64fx)
IFUNC_IMPL_ADD (array, i, memset, sve && zva_size == 64, __memset_sve_zva64)
#endif
IFUNC_IMPL_ADD (array, i, memset, mops, __memset_mops)
IFUNC_IMPL_ADD (array, i, memset, 1, __memset_generic))


@@ -36,6 +36,7 @@ extern __typeof (__redirect_memset) __memset_a64fx attribute_hidden;
extern __typeof (__redirect_memset) __memset_generic attribute_hidden;
extern __typeof (__redirect_memset) __memset_mops attribute_hidden;
extern __typeof (__redirect_memset) __memset_oryon1 attribute_hidden;
extern __typeof (__redirect_memset) __memset_sve_zva64 attribute_hidden;
static inline __typeof (__redirect_memset) *
select_memset_ifunc (void)
@@ -49,6 +50,9 @@ select_memset_ifunc (void)
{
if (IS_A64FX (midr) && zva_size == 256)
return __memset_a64fx;
if (zva_size == 64)
return __memset_sve_zva64;
}
if (IS_ORYON1 (midr) && zva_size == 64)


@@ -0,0 +1,123 @@
/* Optimized memset for SVE.
   Copyright (C) 2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>

/* Assumptions:
 *
 * ARMv8-a, AArch64, Advanced SIMD, SVE, unaligned accesses.
 * ZVA size is 64.
 */
#if HAVE_AARCH64_SVE_ASM
.arch armv8.2-a+sve
#define dstin x0
#define val x1
#define valw w1
#define count x2
#define dst x3
#define dstend x4
#define zva_val x5
#define vlen x5
#define off x3
#define dstend2 x5
ENTRY (__memset_sve_zva64)
dup v0.16B, valw
cmp count, 16
b.lo L(set_16)
add dstend, dstin, count
cmp count, 64
b.hs L(set_128)
/* Set 16..63 bytes. */
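/* off is 16 when count >= 32 (bit 5 of count), otherwise 0, so the four
   stores below cover the first and last 16 or 32 bytes without branching
   on the exact size.  */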
mov off, 16
and off, off, count, lsr 1
sub dstend2, dstend, off
str q0, [dstin]
str q0, [dstin, off]
str q0, [dstend2, -16]
str q0, [dstend, -16]
ret
.p2align 4
L(set_16):
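/* Set 0..15 bytes: a predicate with the low 'count' lanes active lets a
   single predicated store write exactly count bytes (z0 aliases v0, and
   only lanes below 16 can be active here).  */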
whilelo p0.b, xzr, count
st1b z0.b, p0, [dstin]
ret
.p2align 4
L(set_128):
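/* At least 64 bytes.  Align dst down to 16 bytes for the longer paths.  */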
bic dst, dstin, 15
cmp count, 128
b.hi L(set_long)
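/* 64..128 bytes: write the first and last 64 bytes with overlapping
   store pairs.  */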
stp q0, q0, [dstin]
stp q0, q0, [dstin, 32]
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
.p2align 4
L(set_long):
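/* More than 128 bytes.  DC ZVA, which clears a whole 64-byte block (the
   assumed ZVA size), is only usable when the value is zero and the size
   is at least 256 bytes.  */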
cmp count, 256
b.lo L(no_zva)
tst valw, 255
b.ne L(no_zva)
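/* Zeroing path: store the unaligned head, align dst to 64 bytes, and
   store the tail up front so the loop only needs to clear whole 64-byte
   blocks.  */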
str q0, [dstin]
str q0, [dst, 16]
bic dst, dstin, 31
stp q0, q0, [dst, 32]
bic dst, dstin, 63
sub count, dstend, dst /* Count is now 64 too large. */
sub count, count, 128 /* Adjust count and bias for loop. */
sub x8, dstend, 1 /* Write last bytes before ZVA loop. */
bic x8, x8, 15
stp q0, q0, [x8, -48]
str q0, [x8, -16]
str q0, [dstend, -16]
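/* Clear 64 bytes per iteration; the head and tail were written above.  */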
.p2align 4
L(zva64_loop):
add dst, dst, 64
dc zva, dst
subs count, count, 64
b.hi L(zva64_loop)
ret
L(no_zva):
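/* Sizes above 128 that cannot use DC ZVA (non-zero value or fewer than
   256 bytes): unrolled loop storing 64 bytes per iteration, then the
   last 64 bytes stored from dstend.  */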
str q0, [dstin]
sub count, dstend, dst /* Count is 16 too large. */
sub count, count, 64 + 16 /* Adjust count and bias for loop. */
L(no_zva_loop):
stp q0, q0, [dst, 16]
stp q0, q0, [dst, 48]
add dst, dst, 64
subs count, count, 64
b.hi L(no_zva_loop)
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
END (__memset_sve_zva64)
#endif