mirror of git://sourceware.org/git/glibc.git
x86-64: Add memset family functions with 256-bit EVEX
Update ifunc-memset.h/ifunc-wmemset.h to select the function optimized
with 256-bit EVEX instructions using YMM16-YMM31 registers to avoid RTM
abort with usable AVX512VL and AVX512BW since VZEROUPPER isn't needed at
function exit.
(cherry picked from commit 1b968b6b9b)

parent 5141ddbe3a
commit c7289e2bfd
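To make the selection order concrete before the diff, here is a minimal standalone sketch. It is not glibc's code: GCC's __builtin_cpu_supports stands in for glibc's internal CPU_FEATURES_ARCH_P macros, and the ERMS and Prefer_No_VZEROUPPER refinements visible in the ifunc-memset.h hunk below are omitted for brevity.

/* Minimal sketch (not glibc code) of the memset selection order this
   commit introduces: prefer the EVEX variant when both AVX512VL and
   AVX512BW are usable, otherwise fall back to AVX2, then SSE2.  */
#include <stdio.h>

static const char *
pick_memset (void)
{
  if (__builtin_cpu_supports ("avx2"))
    {
      /* The EVEX variant only touches ymm16-ymm31, so it needs no
	 VZEROUPPER at exit, which avoids the RTM transaction aborts
	 that VZEROUPPER can trigger.  */
      if (__builtin_cpu_supports ("avx512vl")
	  && __builtin_cpu_supports ("avx512bw"))
	return "__memset_evex_unaligned_erms";
      return "__memset_avx2_unaligned_erms";
    }
  return "__memset_sse2_unaligned_erms";
}

int
main (void)
{
  printf ("selected: %s\n", pick_memset ());
  return 0;
}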
sysdeps/x86_64/multiarch/Makefile

@@ -44,6 +44,7 @@ sysdep_routines += strncat-c stpncpy-c strncpy-c \
 		   memchr-evex \
 		   memmove-evex-unaligned-erms \
 		   memrchr-evex \
+		   memset-evex-unaligned-erms \
 		   rawmemchr-evex \
 		   stpcpy-evex \
 		   stpncpy-evex \
sysdeps/x86_64/multiarch/ifunc-impl-list.c

@@ -160,6 +160,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
 			      HAS_ARCH_FEATURE (AVX2_Usable),
 			      __memset_chk_avx2_unaligned_erms)
+	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+			      (HAS_ARCH_FEATURE (AVX512VL_Usable)
+			       && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+			      __memset_chk_evex_unaligned)
+	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+			      (HAS_ARCH_FEATURE (AVX512VL_Usable)
+			       && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+			      __memset_chk_evex_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memset_chk_avx512_unaligned_erms)

@@ -185,6 +193,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, memset,
 			      HAS_ARCH_FEATURE (AVX2_Usable),
 			      __memset_avx2_unaligned_erms)
+	      IFUNC_IMPL_ADD (array, i, memset,
+			      (HAS_ARCH_FEATURE (AVX512VL_Usable)
+			       && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+			      __memset_evex_unaligned)
+	      IFUNC_IMPL_ADD (array, i, memset,
+			      (HAS_ARCH_FEATURE (AVX512VL_Usable)
+			       && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+			      __memset_evex_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, memset,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memset_avx512_unaligned_erms)

@@ -543,6 +559,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, wmemset,
 			      HAS_ARCH_FEATURE (AVX2_Usable),
 			      __wmemset_avx2_unaligned)
+	      IFUNC_IMPL_ADD (array, i, wmemset,
+			      HAS_ARCH_FEATURE (AVX512VL_Usable),
+			      __wmemset_evex_unaligned)
 	      IFUNC_IMPL_ADD (array, i, wmemset,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __wmemset_avx512_unaligned))

@@ -711,6 +730,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
 			      HAS_ARCH_FEATURE (AVX2_Usable),
 			      __wmemset_chk_avx2_unaligned)
+	      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
+			      HAS_ARCH_FEATURE (AVX512VL_Usable),
+			      __wmemset_chk_evex_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __wmemset_chk_avx512_unaligned))
sysdeps/x86_64/multiarch/ifunc-memset.h

@@ -27,6 +27,10 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms)
   attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
+  attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
+  attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
   attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)

@@ -56,10 +60,22 @@ IFUNC_SELECTOR (void)
 
   if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
     {
-      if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
-	return OPTIMIZE (avx2_unaligned_erms);
-      else
-	return OPTIMIZE (avx2_unaligned);
+      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512VL_Usable)
+	  && CPU_FEATURES_ARCH_P (cpu_features, AVX512BW_Usable))
+	{
+	  if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+	    return OPTIMIZE (evex_unaligned_erms);
+
+	  return OPTIMIZE (evex_unaligned);
+	}
+
+      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+	{
+	  if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+	    return OPTIMIZE (avx2_unaligned_erms);
+
+	  return OPTIMIZE (avx2_unaligned);
+	}
     }
 
   if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
sysdeps/x86_64/multiarch/ifunc-wmemset.h

@@ -20,6 +20,7 @@
 
 extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
 
 static inline void *

@@ -27,14 +28,18 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+  if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     {
       if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
-	  && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
+	  && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512)
+	  && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
 	return OPTIMIZE (avx512_unaligned);
-      else
+
+      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512VL_Usable))
+	return OPTIMIZE (evex_unaligned);
+
+      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
 	return OPTIMIZE (avx2_unaligned);
     }
 
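Note that the wmemset hunks gate the EVEX variant on AVX512VL alone: wmemset broadcasts a 32-bit element with vpbroadcastd, which the 256-bit EVEX encoding provides under AVX512VL, whereas memset's byte broadcast (vpbroadcastb) additionally requires AVX512BW. A corresponding sketch, again hypothetical and using __builtin_cpu_supports rather than glibc's internals:

/* Hypothetical sketch of the wmemset selection: only AVX512VL is
   required, since the 32-bit vpbroadcastd needs no AVX512BW.  */
#include <stdio.h>

static const char *
pick_wmemset (void)
{
  if (__builtin_cpu_supports ("avx2"))
    {
      if (__builtin_cpu_supports ("avx512vl"))
	return "__wmemset_evex_unaligned";
      return "__wmemset_avx2_unaligned";
    }
  return "__wmemset_sse2_unaligned";
}

int
main (void)
{
  printf ("selected: %s\n", pick_wmemset ());
  return 0;
}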
sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S (new file)

@@ -0,0 +1,24 @@
+#if IS_IN (libc)
+# define VEC_SIZE	32
+# define XMM0		xmm16
+# define YMM0		ymm16
+# define VEC0		ymm16
+# define VEC(i)		VEC##i
+# define VMOVU		vmovdqu64
+# define VMOVA		vmovdqa64
+# define VZEROUPPER
+
+# define MEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \
+  movq r, %rax; \
+  vpbroadcastb d, %VEC0
+
+# define WMEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \
+  movq r, %rax; \
+  vpbroadcastd d, %VEC0
+
+# define SECTION(p)		p##.evex
+# define MEMSET_SYMBOL(p,s)	p##_evex_##s
+# define WMEMSET_SYMBOL(p,s)	p##_evex_##s
+
+# include "memset-vec-unaligned-erms.S"
+#endif
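The new file is only a parameter block: it picks the vector width, maps the template's register and move macros onto EVEX forms (ymm16, vmovdqu64), defines VZEROUPPER away, and sets the symbol-naming macros before pulling in the shared template. As a rough illustration of the token-pasting scheme, here is a hypothetical C analogue (illustration only, not glibc code):

/* Hypothetical C analogue of the .S template specialization above:
   the variant file defines naming macros, then the shared code pastes
   them into concrete symbol names.  */
#include <stdio.h>

#define MEMSET_SYMBOL(p, s) p##_evex_##s	/* same pasting as the .S file */

static void
MEMSET_SYMBOL (memset, unaligned_erms) (void)	/* => memset_evex_unaligned_erms */
{
  puts ("memset_evex_unaligned_erms called");
}

int
main (void)
{
  memset_evex_unaligned_erms ();
  return 0;
}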
sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S

@@ -34,6 +34,14 @@
 # define WMEMSET_CHK_SYMBOL(p,s)	WMEMSET_SYMBOL(p, s)
 #endif
 
+#ifndef XMM0
+# define XMM0				xmm0
+#endif
+
+#ifndef YMM0
+# define YMM0				ymm0
+#endif
+
 #ifndef VZEROUPPER
 # if VEC_SIZE > 16
 #  define VZEROUPPER			vzeroupper

@@ -77,7 +85,7 @@
 ENTRY (__bzero)
 	mov	%RDI_LP, %RAX_LP /* Set return value.  */
 	mov	%RSI_LP, %RDX_LP /* Set n.  */
-	pxor	%xmm0, %xmm0
+	pxor	%XMM0, %XMM0
 	jmp	L(entry_from_bzero)
 END (__bzero)
 weak_alias (__bzero, bzero)

@@ -233,7 +241,7 @@ L(less_vec):
 	cmpb	$16, %dl
 	jae	L(between_16_31)
 # endif
-	MOVQ	%xmm0, %rcx
+	MOVQ	%XMM0, %rcx
 	cmpb	$8, %dl
 	jae	L(between_8_15)
 	cmpb	$4, %dl

@@ -248,16 +256,16 @@ L(less_vec):
 # if VEC_SIZE > 32
 	/* From 32 to 63.  No branch when size == 32.  */
 L(between_32_63):
-	vmovdqu	%ymm0, -32(%rdi,%rdx)
-	vmovdqu	%ymm0, (%rdi)
+	VMOVU	%YMM0, -32(%rdi,%rdx)
+	VMOVU	%YMM0, (%rdi)
 	VZEROUPPER
 	ret
 # endif
 # if VEC_SIZE > 16
 	/* From 16 to 31.  No branch when size == 16.  */
 L(between_16_31):
-	vmovdqu	%xmm0, -16(%rdi,%rdx)
-	vmovdqu	%xmm0, (%rdi)
+	VMOVU	%XMM0, -16(%rdi,%rdx)
+	VMOVU	%XMM0, (%rdi)
 	VZEROUPPER
 	ret
 # endif
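A quick harness of my own (not part of the commit) that exercises the short-size branches touched above, between_16_31 and between_32_63, for every length in that range:

/* Exercise the 16-31 and 32-63 byte memset paths and verify that each
   call fills exactly n bytes and nothing more.  */
#include <assert.h>
#include <string.h>

int
main (void)
{
  char buf[64];
  for (size_t n = 16; n <= 63; n++)
    {
      memset (buf, 0, sizeof buf);
      memset (buf, 0xab, n);
      for (size_t i = 0; i < n; i++)
	assert ((unsigned char) buf[i] == 0xab);	/* filled region */
      for (size_t i = n; i < sizeof buf; i++)
	assert (buf[i] == 0);				/* untouched tail */
    }
  return 0;
}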