aarch64: MTE compatible strcpy

Add support for MTE to strcpy. Regression tested with xcheck and benchmarked
with glibc's benchtests on the Cortex-A53, Cortex-A72, and Neoverse N1.

The existing implementation assumes that any access to the pages in which the
string resides is safe. This assumption is not true when MTE is enabled. This
patch updates the algorithm to ensure that accesses remain within the bounds
of an MTE tag (16-byte chunks) and improves overall performance.

Co-authored-by: Wilco Dijkstra <wilco.dijkstra@arm.com>
This commit is contained in:
Alex Butler 2020-06-09 15:57:03 +00:00 committed by Szabolcs Nagy
parent 8ec13b4639
commit bb2c12aecb
1 changed file with 121 additions and 262 deletions

View File

@@ -26,297 +26,156 @@
/* Assumptions: /* Assumptions:
* *
* ARMv8-a, AArch64, unaligned accesses, min page size 4k. * ARMv8-a, AArch64, Advanced SIMD.
* MTE compatible.
*/ */
/* Arguments and results. */ /* Arguments and results. */
#define dstin x0 #define dstin x0
#define srcin x1 #define srcin x1
#define result x0
/* Locals and temporaries. */
#define src x2 #define src x2
#define dst x3 #define dst x3
#define data1 x4 #define len x4
#define data1w w4 #define synd x4
#define data2 x5 #define tmp x5
#define data2w w5 #define wtmp w5
#define has_nul1 x6 #define shift x5
#define has_nul2 x7 #define data1 x6
#define tmp1 x8 #define dataw1 w6
#define tmp2 x9 #define data2 x7
#define tmp3 x10 #define dataw2 w7
#define tmp4 x11
#define zeroones x12
#define data1a x13
#define data2a x14
#define pos x15
#define len x16
#define to_align x17
/* NEON register */ #define dataq q0
#define dataq q2 #define vdata v0
#define datav v2 #define vhas_nul v1
#define datab2 b3 #define vrepmask v2
#define datav2 v3 #define vend v3
#define dend d3
#define dataq2 q1
#ifdef BUILD_STPCPY #ifdef BUILD_STPCPY
#define STRCPY __stpcpy # define STRCPY __stpcpy
# define IFSTPCPY(X,...) X,__VA_ARGS__
#else #else
#define STRCPY strcpy # define STRCPY strcpy
# define IFSTPCPY(X,...)
#endif #endif
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80 /* Core algorithm:
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
#define REP8_01 0x0101010101010101 For each 16-byte chunk we calculate a 64-bit syndrome value with four bits
#define REP8_7f 0x7f7f7f7f7f7f7f7f per byte. For even bytes, bits 0-3 are set if the relevant byte matched the
#define REP8_80 0x8080808080808080 requested character or the byte is NUL. Bits 4-7 must be zero. Bits 4-7 are
set likewise for odd bytes so that adjacent bytes can be merged. Since the
bits in the syndrome reflect the order in which things occur in the original
string, counting trailing zeros identifies exactly which byte matched. */
/* AArch64 systems have a minimum page size of 4k. We can do a quick ENTRY (STRCPY)
page size check for crossing this boundary on entry and if we
do not, then we can short-circuit much of the entry code. We
expect early page-crossing strings to be rare (probability of
16/MIN_PAGE_SIZE ~= 0.4%), so the branch should be quite
predictable, even with random strings.
We don't bother checking for larger page sizes, the cost of setting
up the correct page size is just not worth the extra gain from
a small reduction in the cases taking the slow path. Note that
we only care about whether the first fetch, which may be
misaligned, crosses a page boundary - after that we move to aligned
fetches for the remainder of the string. */
#ifdef STRCPY_TEST_PAGE_CROSS
/* Make everything that isn't Qword aligned look like a page cross. */
#define MIN_PAGE_P2 4
#else
#define MIN_PAGE_P2 12
#endif
#define MIN_PAGE_SIZE (1 << MIN_PAGE_P2)
ENTRY_ALIGN (STRCPY, 6)
DELOUSE (0) DELOUSE (0)
DELOUSE (1) DELOUSE (1)
/* For moderately short strings, the fastest way to do the copy is to bic src, srcin, 15
calculate the length of the string in the same way as strlen, then mov wtmp, 0xf00f
essentially do a memcpy of the result. This avoids the need for ld1 {vdata.16b}, [src]
multiple byte copies and further means that by the time we dup vrepmask.8h, wtmp
reach the bulk copy loop we know we can always use DWord cmeq vhas_nul.16b, vdata.16b, 0
accesses. We expect strcpy to rarely be called repeatedly lsl shift, srcin, 2
with the same source string, so branch prediction is likely to and vhas_nul.16b, vhas_nul.16b, vrepmask.16b
always be difficult - we mitigate against this by preferring addp vend.16b, vhas_nul.16b, vhas_nul.16b
conditional select operations over branches whenever this is fmov synd, dend
feasible. */ lsr synd, synd, shift
and tmp2, srcin, #(MIN_PAGE_SIZE - 1) cbnz synd, L(tail)
mov zeroones, #REP8_01
and to_align, srcin, #15
cmp tmp2, #(MIN_PAGE_SIZE - 16)
neg tmp1, to_align
/* The first fetch will straddle a (possible) page boundary iff
srcin + 15 causes bit[MIN_PAGE_P2] to change value. A 16-byte
aligned string will never fail the page align check, so will
always take the fast path. */
b.gt L(page_cross)
L(page_cross_ok): ldr dataq, [src, 16]!
ldp data1, data2, [srcin] cmeq vhas_nul.16b, vdata.16b, 0
#ifdef __AARCH64EB__ and vhas_nul.16b, vhas_nul.16b, vrepmask.16b
/* Because we expect the end to be found within 16 characters addp vend.16b, vhas_nul.16b, vhas_nul.16b
(profiling shows this is the most common case), it's worth fmov synd, dend
swapping the bytes now to save having to recalculate the cbz synd, L(start_loop)
termination syndrome later. We preserve data1 and data2
so that we can re-use the values later on. */
rev tmp2, data1
sub tmp1, tmp2, zeroones
orr tmp2, tmp2, #REP8_7f
bics has_nul1, tmp1, tmp2
b.ne L(fp_le8)
rev tmp4, data2
sub tmp3, tmp4, zeroones
orr tmp4, tmp4, #REP8_7f
#else
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
bics has_nul1, tmp1, tmp2
b.ne L(fp_le8)
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
#endif
bics has_nul2, tmp3, tmp4
b.eq L(bulk_entry)
/* The string is short (<=16 bytes). We don't know exactly how #ifndef __AARCH64EB__
short though, yet. Work out the exact length so that we can rbit synd, synd
quickly select the optimal copy strategy. */
L(fp_gt8):
rev has_nul2, has_nul2
clz pos, has_nul2
mov tmp2, #56
add dst, dstin, pos, lsr #3 /* Bits to bytes. */
sub pos, tmp2, pos
#ifdef __AARCH64EB__
lsr data2, data2, pos
#else
lsl data2, data2, pos
#endif #endif
str data2, [dst, #1] sub tmp, src, srcin
clz len, synd
add len, tmp, len, lsr 2
tbz len, 4, L(less16)
sub tmp, len, 15
ldr dataq, [srcin]
ldr dataq2, [srcin, tmp]
str dataq, [dstin]
str dataq2, [dstin, tmp]
IFSTPCPY (add result, dstin, len)
ret
.p2align 4,,8
L(tail):
rbit synd, synd
clz len, synd
lsr len, len, 2
.p2align 4
L(less16):
tbz len, 3, L(less8)
sub tmp, len, 7
ldr data1, [srcin]
ldr data2, [srcin, tmp]
str data1, [dstin] str data1, [dstin]
#ifdef BUILD_STPCPY str data2, [dstin, tmp]
add dstin, dst, #8 IFSTPCPY (add result, dstin, len)
#endif
ret ret
L(fp_le8): .p2align 4
rev has_nul1, has_nul1 L(less8):
clz pos, has_nul1 subs tmp, len, 3
add dst, dstin, pos, lsr #3 /* Bits to bytes. */ b.lo L(less4)
subs tmp2, pos, #24 /* Pos in bits. */ ldr dataw1, [srcin]
b.lt L(fp_lt4) ldr dataw2, [srcin, tmp]
#ifdef __AARCH64EB__ str dataw1, [dstin]
mov tmp2, #56 str dataw2, [dstin, tmp]
sub pos, tmp2, pos IFSTPCPY (add result, dstin, len)
lsr data2, data1, pos
lsr data1, data1, #32
#else
lsr data2, data1, tmp2
#endif
/* 4->7 bytes to copy. */
str data2w, [dst, #-3]
str data1w, [dstin]
#ifdef BUILD_STPCPY
mov dstin, dst
#endif
ret
L(fp_lt4):
cbz pos, L(fp_lt2)
/* 2->3 bytes to copy. */
#ifdef __AARCH64EB__
lsr data1, data1, #48
#endif
strh data1w, [dstin]
/* Fall-through, one byte (max) to go. */
L(fp_lt2):
/* Null-terminated string. Last character must be zero! */
strb wzr, [dst]
#ifdef BUILD_STPCPY
mov dstin, dst
#endif
ret ret
/* Aligning here ensures that the entry code and main loop all lies L(less4):
within one 64-byte cache line. */ cbz len, L(zerobyte)
L(bulk_entry): ldrh dataw1, [srcin]
sub to_align, to_align, #16 strh dataw1, [dstin]
stp data1, data2, [dstin] L(zerobyte):
sub src, srcin, to_align strb wzr, [dstin, len]
sub dst, dstin, to_align IFSTPCPY (add result, dstin, len)
b L(entry_no_page_cross)
/* The inner loop deals with two Dwords at a time. This has a
slightly higher start-up cost, but we should win quite quickly,
especially on cores with a high number of issue slots per
cycle, as we get much better parallelism out of the operations. */
L(main_loop):
str dataq, [dst], #16
L(entry_no_page_cross):
ldr dataq, [src], #16
uminv datab2, datav.16b
mov tmp3, datav2.d[0]
cbnz tmp3, L(main_loop)
/* Since we know we are copying at least 16 bytes, the fastest way
to deal with the tail is to determine the location of the
trailing NUL, then (re)copy the 16 bytes leading up to that. */
#ifdef __AARCH64EB__
rev64 datav.16b, datav.16b
#endif
/* calculate the loc value */
cmeq datav.16b, datav.16b, #0
#ifdef __AARCH64EB__
mov data1, datav.d[1]
mov data2, datav.d[0]
#else
mov data1, datav.d[0]
mov data2, datav.d[1]
#endif
cmp data1, 0
csel data1, data1, data2, ne
mov pos, 8
rev data1, data1
clz tmp1, data1
csel pos, xzr, pos, ne
add pos, pos, tmp1, lsr 3
add src, src, pos
add dst, dst, pos
ldr dataq,[src, #-31]
str dataq,[dst, #-15]
#ifdef BUILD_STPCPY
mov dstin, dst
#endif
ret ret
L(page_cross): .p2align 4
bic src, srcin, #15 L(start_loop):
/* Start by loading two words at [srcin & ~15], then forcing the sub len, src, srcin
bytes that precede srcin to 0xff. This means they never look ldr dataq2, [srcin]
like termination bytes. */ add dst, dstin, len
ldp data1, data2, [src] str dataq2, [dstin]
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
tst to_align, #7 .p2align 5
csetm tmp2, ne L(loop):
#ifdef __AARCH64EB__ str dataq, [dst], 16
lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */ ldr dataq, [src, 16]!
#else cmeq vhas_nul.16b, vdata.16b, 0
lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */ umaxp vend.16b, vhas_nul.16b, vhas_nul.16b
fmov synd, dend
cbz synd, L(loop)
and vhas_nul.16b, vhas_nul.16b, vrepmask.16b
addp vend.16b, vhas_nul.16b, vhas_nul.16b /* 128->64 */
fmov synd, dend
#ifndef __AARCH64EB__
rbit synd, synd
#endif #endif
orr data1, data1, tmp2 clz len, synd
orr data2a, data2, tmp2 lsr len, len, 2
cmp to_align, #8 sub tmp, len, 15
csinv data1, data1, xzr, lt ldr dataq, [src, tmp]
csel data2, data2, data2a, lt str dataq, [dst, tmp]
sub tmp1, data1, zeroones IFSTPCPY (add result, dst, len)
orr tmp2, data1, #REP8_7f ret
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
bic has_nul1, tmp1, tmp2
bics has_nul2, tmp3, tmp4
ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
b.eq L(page_cross_ok)
/* We now need to make data1 and data2 look like they've been
loaded directly from srcin. Do a rotate on the 128-bit value. */
lsl tmp1, to_align, #3 /* Bytes->bits. */
neg tmp2, to_align, lsl #3
#ifdef __AARCH64EB__
lsl data1a, data1, tmp1
lsr tmp4, data2, tmp2
lsl data2, data2, tmp1
orr tmp4, tmp4, data1a
cmp to_align, #8
csel data1, tmp4, data2, lt
rev tmp2, data1
rev tmp4, data2
sub tmp1, tmp2, zeroones
orr tmp2, tmp2, #REP8_7f
sub tmp3, tmp4, zeroones
orr tmp4, tmp4, #REP8_7f
#else
lsr data1a, data1, tmp1
lsl tmp4, data2, tmp2
lsr data2, data2, tmp1
orr tmp4, tmp4, data1a
cmp to_align, #8
csel data1, tmp4, data2, lt
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
#endif
bic has_nul1, tmp1, tmp2
cbnz has_nul1, L(fp_le8)
bic has_nul2, tmp3, tmp4
b L(fp_gt8)
END (STRCPY) END (STRCPY)
#ifdef BUILD_STPCPY #ifdef BUILD_STPCPY