aarch64: thunderx2 memcpy implementation cleanup and streamlining

Here is the updated patch improving the long unaligned
code path (the one using the "ext" instruction).

1. The always-taken conditional branch at the beginning is
removed (see the control-flow sketch after this list).

2. The epilogue code is placed after the end of the loop to
reduce the number of branches.

3. The redundant "mov" instructions inside the loop are
gone due to the changed order of the registers in the "ext"
instructions inside the loop; the prologue gains one additional
"ext" instruction to compensate (see the register-rotation
sketch after this list).

4. The count update was hoisted out of the chunks into the
common prologue, as it is the same update in each of them.

5. The invariant code of the loop epilogue was hoisted out
into a shared block, L(ext_tail).

6. As the current size of the ext chunk is exactly 16
instructions, a "nop" was added at the beginning
of the code sequence so that the loop entry of every
chunk is aligned (the arithmetic is sketched after this list).
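
To make points 1 and 2 concrete, here is a schematic view (not
the literal code) of how the control flow of one ext chunk
changes. Before, each chunk began with a count check whose
branch was always taken on entry, and the tail code sat between
the chunk entry and the loop:

	subs	count, count, 32	// count is always >= 32 on entry,
	b.ge	2f			// so this branch is always taken
1:	<tail stores>
	b	L(copy_long_check32)
2:	<loop body>
	subs	count, count, 64
	b.ge	2b			// loop back-edge ...
	b	1b			// ... plus an extra branch to the tail

After, the count check is done once in the common prologue, each
chunk falls straight into its loop, and the tail is reached by
falling out of it:

1:	<loop body>
	subs	count, count, 64
	b.ge	1b			// the only branch in the loop
2:	<last ext>
	b	L(ext_tail)		// shared epilogue, see below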
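
Point 3 is effectively software register rotation. Before, the
loop loaded into fixed register names and then had to copy the
leftover vectors into the names the next iteration expected:

	ldp	D_q, J_q, [src], #32	// fixed destination registers ...
	...
	mov	C_v.16b, G_v.16b	// ... so the state has to be
	mov	E_v.16b, J_v.16b	// shuffled back for the next pass

After, the two loads of an iteration target alternating register
pairs (C_q/D_q and F_q/G_q) and the "ext" operands are rotated to
match:

	ldp	C_q, D_q, [src], #32;\
	...
	ext	A_v.16b, G_v.16b, C_v.16b, 16-shft;\
	ext	B_v.16b, C_v.16b, D_v.16b, 16-shft;\

so every iteration finds its inputs exactly where the previous
one left them and both "mov"s disappear. The cost is the one
extra "ext" in the chunk prologue that pre-computes H_v for the
first iteration.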
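
The alignment arithmetic behind point 6: a chunk is 16 A64
instructions, i.e. 64 bytes, so all chunks start at the same
offset modulo 16. The loop entry "1:" sits behind the three
prologue "ext" instructions, i.e. 12 bytes into a chunk. The
".p2align 4" plus the single 4-byte "nop" therefore put every
loop entry on a 16-byte boundary (shown here for the shft = 1
expansion):

	.p2align 4 ;\			// 16-byte boundary ...
	nop				// ... plus 4 bytes
L(ext_size_1):;\
	ext	A_v.16b, ...		// chunk starts at 4 (mod 16)
	ext	B_v.16b, ...		// 8
	ext	H_v.16b, ...		// 12
1:;\					// 16 = 0 (mod 16): loop entry aligned

Since 64 is a multiple of 16, the same holds for each of the 15
chunks.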

	* sysdeps/aarch64/multiarch/memcpy_thunderx2.S: Cleanup branching
	and remove redundant code.
commit 94e358f6d4
parent f82ed45d7f
Author: Anton Youdkevitch <anton.youdkevitch@bell-sw.com>
Date: 2019-04-05 13:59:54 -07:00
Committed by: Steve Ellcey
2 changed files with 27 additions and 21 deletions

diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog

@@ -1,3 +1,8 @@
+2019-04-05  Anton Youdkevitch  <anton.youdkevitch@bell-sw.com>
+
+	* sysdeps/aarch64/multiarch/memcpy_thunderx2.S: Cleanup branching
+	and remove redundant code.
+
 2019-04-04  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
 
 	* sysdeps/powerpc/Makefile [$(subdir) == wcsmbs] (CFLAGS-wcsrchr.c):

diff --git a/sysdeps/aarch64/multiarch/memcpy_thunderx2.S b/sysdeps/aarch64/multiarch/memcpy_thunderx2.S
--- a/sysdeps/aarch64/multiarch/memcpy_thunderx2.S
+++ b/sysdeps/aarch64/multiarch/memcpy_thunderx2.S

@@ -382,7 +382,8 @@ L(bytes_0_to_3):
 	strb	A_lw, [dstin]
 	strb	B_lw, [dstin, tmp1]
 	strb	A_hw, [dstend, -1]
-L(end):	ret
+L(end):
+	ret
 
 	.p2align 4
@@ -544,43 +545,35 @@ L(dst_unaligned):
 	str	C_q, [dst], #16
 	ldp	F_q, G_q, [src], #32
 	bic	dst, dst, 15
+	subs	count, count, 32
 	adrp	tmp2, L(ext_table)
 	add	tmp2, tmp2, :lo12:L(ext_table)
 	add	tmp2, tmp2, tmp1, LSL #2
 	ldr	tmp3w, [tmp2]
 	add	tmp2, tmp2, tmp3w, SXTW
 	br	tmp2
 
-#define EXT_CHUNK(shft) \
 .p2align 4 ;\
+	nop
+#define EXT_CHUNK(shft) \
 L(ext_size_ ## shft):;\
 	ext	A_v.16b, C_v.16b, D_v.16b, 16-shft;\
 	ext	B_v.16b, D_v.16b, E_v.16b, 16-shft;\
-	subs	count, count, 32;\
-	b.ge	2f;\
+	ext	H_v.16b, E_v.16b, F_v.16b, 16-shft;\
 1:;\
-	stp	A_q, B_q, [dst], #32;\
-	ext	H_v.16b, E_v.16b, F_v.16b, 16-shft;\
-	ext	I_v.16b, F_v.16b, G_v.16b, 16-shft;\
-	stp	H_q, I_q, [dst], #16;\
-	add	dst, dst, tmp1;\
-	str	G_q, [dst], #16;\
-	b	L(copy_long_check32);\
-2:;\
 	stp	A_q, B_q, [dst], #32;\
 	prfm	pldl1strm, [src, MEMCPY_PREFETCH_LDR];\
-	ldp	D_q, J_q, [src], #32;\
-	ext	H_v.16b, E_v.16b, F_v.16b, 16-shft;\
+	ldp	C_q, D_q, [src], #32;\
 	ext	I_v.16b, F_v.16b, G_v.16b, 16-shft;\
-	mov	C_v.16b, G_v.16b;\
 	stp	H_q, I_q, [dst], #32;\
+	ext	A_v.16b, G_v.16b, C_v.16b, 16-shft;\
+	ext	B_v.16b, C_v.16b, D_v.16b, 16-shft;\
 	ldp	F_q, G_q, [src], #32;\
-	ext	A_v.16b, C_v.16b, D_v.16b, 16-shft;\
-	ext	B_v.16b, D_v.16b, J_v.16b, 16-shft;\
-	mov	E_v.16b, J_v.16b;\
+	ext	H_v.16b, D_v.16b, F_v.16b, 16-shft;\
 	subs	count, count, 64;\
-	b.ge	2b;\
-	b	1b;\
+	b.ge	1b;\
+2:;\
+	ext	I_v.16b, F_v.16b, G_v.16b, 16-shft;\
+	b	L(ext_tail);
 
 EXT_CHUNK(1)
 EXT_CHUNK(2)
@@ -598,6 +591,14 @@ EXT_CHUNK(13)
 EXT_CHUNK(14)
 EXT_CHUNK(15)
 
+L(ext_tail):
+	stp	A_q, B_q, [dst], #32
+	stp	H_q, I_q, [dst], #16
+	add	dst, dst, tmp1
+	str	G_q, [dst], #16
+	b	L(copy_long_check32)
+
 END (MEMCPY)
 	.section .rodata
 	.p2align 4