aarch64: MTE compatible strlen

Introduce an Arm MTE compatible strlen implementation.

The existing implementation assumes that any access to the pages in
which the string resides is safe.  This assumption does not hold when
MTE is enabled.  This patch updates the algorithm so that every access
stays within a 16-byte granule, the granularity at which MTE tags
memory, and it improves overall performance on modern cores.  On cores
with a less efficient Advanced SIMD implementation, such as Cortex-A53,
it can be slower.

Benchmarked on Cortex-A72, Cortex-A53, Neoverse N1.

Co-authored-by: Wilco Dijkstra <wilco.dijkstra@arm.com>
Andrea Corallo 2020-06-05 17:22:26 +02:00, committed by Szabolcs Nagy
parent 49beaaec1b
commit a365ac45b7
1 changed file with 50 additions and 177 deletions
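
The granule rule described in the commit message can be modelled in a few
lines of C.  The sketch below is illustrative only, not code from this
commit: mte_safe_strlen_model is a hypothetical name, and it captures just
the access pattern (align the base down, then scan one 16-byte granule at
a time), not the SIMD syndrome logic.  It assumes MTE's 16-byte tag
granularity.

#include <stddef.h>
#include <stdint.h>

static size_t
mte_safe_strlen_model (const char *s)
{
  /* Align down to a 16-byte granule boundary: one MTE tag covers 16
     bytes, so a 16-byte access from here touches exactly one granule,
     and that granule contains at least one byte of the string.  */
  const unsigned char *p
    = (const unsigned char *) ((uintptr_t) s & ~(uintptr_t) 15);
  size_t skip = (size_t) ((const unsigned char *) s - p);

  for (;;)
    {
      /* Scan one granule, ignoring bytes before the start of the string
	 (the assembly does this with lsr synd, synd, shift).  */
      for (size_t i = skip; i < 16; i++)
	if (p[i] == 0)
	  return (size_t) (&p[i] - (const unsigned char *) s);
      p += 16;	/* Only reached if this granule held no NUL.  */
      skip = 0;
    }
}

Every load starts at a 16-byte-aligned address and never advances past a
granule until that granule has been shown to be NUL-free, so no access
touches memory whose tag the string does not own.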

@@ -20,205 +20,78 @@
 /* Assumptions:
  *
- * ARMv8-a, AArch64, unaligned accesses, min page size 4k.
+ * ARMv8-a, AArch64, Advanced SIMD.
+ * MTE compatible.
  */

 #ifndef STRLEN
 # define STRLEN __strlen
 #endif

-/* To test the page crossing code path more thoroughly, compile with
-   -DTEST_PAGE_CROSS - this will force all calls through the slower
-   entry path.  This option is not intended for production use.  */
-
-/* Arguments and results.  */
 #define srcin		x0
-#define len		x0
+#define result		x0

-/* Locals and temporaries.  */
 #define src		x1
-#define data1		x2
-#define data2		x3
-#define has_nul1	x4
-#define has_nul2	x5
-#define tmp1		x4
-#define tmp2		x5
-#define tmp3		x6
-#define tmp4		x7
-#define zeroones	x8
+#define synd		x2
+#define tmp		x3
+#define wtmp		w3
+#define shift		x4

-	/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
-	   (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
-	   can be done in parallel across the entire word.  A faster check
-	   (X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
-	   false hits for characters 129..255.  */
+#define data		q0
+#define vdata		v0
+#define vhas_nul	v1
+#define vrepmask	v2
+#define vend		v3
+#define dend		d3

-#define REP8_01 0x0101010101010101
-#define REP8_7f 0x7f7f7f7f7f7f7f7f
-#define REP8_80 0x8080808080808080
+/* Core algorithm:

-#ifdef TEST_PAGE_CROSS
-# define MIN_PAGE_SIZE 16
-#else
-# define MIN_PAGE_SIZE 4096
-#endif
+   For each 16-byte chunk we calculate a 64-bit syndrome value with four bits
+   per byte.  For even bytes, bits 0-3 are set if the relevant byte matched the
+   requested character or the byte is NUL.  Bits 4-7 must be zero.  Bits 4-7 are
+   set likewise for odd bytes so that adjacent bytes can be merged.  Since the
+   bits in the syndrome reflect the order in which things occur in the original
+   string, counting trailing zeros identifies exactly which byte matched.  */

-	/* Since strings are short on average, we check the first 16 bytes
-	   of the string for a NUL character.  In order to do an unaligned ldp
-	   safely we have to do a page cross check first.  If there is a NUL
-	   byte we calculate the length from the 2 8-byte words using
-	   conditional select to reduce branch mispredictions (it is unlikely
-	   strlen will be repeatedly called on strings with the same length).
-
-	   If the string is longer than 16 bytes, we align src so don't need
-	   further page cross checks, and process 32 bytes per iteration
-	   using the fast NUL check.  If we encounter non-ASCII characters,
-	   fallback to a second loop using the full NUL check.
-
-	   If the page cross check fails, we read 16 bytes from an aligned
-	   address, remove any characters before the string, and continue
-	   in the main loop using aligned loads.  Since strings crossing a
-	   page in the first 16 bytes are rare (probability of
-	   16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.
-
-	   AArch64 systems have a minimum page size of 4k.  We don't bother
-	   checking for larger page sizes - the cost of setting up the correct
-	   page size is just not worth the extra gain from a small reduction in
-	   the cases taking the slow path.  Note that we only care about
-	   whether the first fetch, which may be misaligned, crosses a page
-	   boundary.  */
-
-ENTRY_ALIGN (STRLEN, 6)
+ENTRY (STRLEN)
 	DELOUSE (0)
 	DELOUSE (1)
-	and	tmp1, srcin, MIN_PAGE_SIZE - 1
-	mov	zeroones, REP8_01
-	cmp	tmp1, MIN_PAGE_SIZE - 16
-	b.gt	L(page_cross)
-	ldp	data1, data2, [srcin]
-#ifdef __AARCH64EB__
-	/* For big-endian, carry propagation (if the final byte in the
-	   string is 0x01) means we cannot use has_nul1/2 directly.
-	   Since we expect strings to be small and early-exit,
-	   byte-swap the data now so has_null1/2 will be correct.  */
-	rev	data1, data1
-	rev	data2, data2
-#endif
-	sub	tmp1, data1, zeroones
-	orr	tmp2, data1, REP8_7f
-	sub	tmp3, data2, zeroones
-	orr	tmp4, data2, REP8_7f
-	bics	has_nul1, tmp1, tmp2
-	bic	has_nul2, tmp3, tmp4
-	ccmp	has_nul2, 0, 0, eq
-	beq	L(main_loop_entry)
-
-	/* Enter with C = has_nul1 == 0.  */
-	csel	has_nul1, has_nul1, has_nul2, cc
-	mov	len, 8
-	rev	has_nul1, has_nul1
-	clz	tmp1, has_nul1
-	csel	len, xzr, len, cc
-	add	len, len, tmp1, lsr 3
+	bic	src, srcin, 15
+	mov	wtmp, 0xf00f
+	ld1	{vdata.16b}, [src]
+	dup	vrepmask.8h, wtmp
+	cmeq	vhas_nul.16b, vdata.16b, 0
+	lsl	shift, srcin, 2
+	and	vhas_nul.16b, vhas_nul.16b, vrepmask.16b
+	addp	vend.16b, vhas_nul.16b, vhas_nul.16b	/* 128->64 */
+	fmov	synd, dend
+	lsr	synd, synd, shift
+	cbz	synd, L(loop)
+
+	rbit	synd, synd
+	clz	result, synd
+	lsr	result, result, 2
 	ret

-	/* The inner loop processes 32 bytes per iteration and uses the fast
-	   NUL check.  If we encounter non-ASCII characters, use a second
-	   loop with the accurate NUL check.  */
-	.p2align 4
-L(main_loop_entry):
-	bic	src, srcin, 15
-	sub	src, src, 16
-L(main_loop):
-	ldp	data1, data2, [src, 32]!
-L(page_cross_entry):
-	sub	tmp1, data1, zeroones
-	sub	tmp3, data2, zeroones
-	orr	tmp2, tmp1, tmp3
-	tst	tmp2, zeroones, lsl 7
-	bne	1f
-	ldp	data1, data2, [src, 16]
-	sub	tmp1, data1, zeroones
-	sub	tmp3, data2, zeroones
-	orr	tmp2, tmp1, tmp3
-	tst	tmp2, zeroones, lsl 7
-	beq	L(main_loop)
-	add	src, src, 16
-1:
-	/* The fast check failed, so do the slower, accurate NUL check.  */
-	orr	tmp2, data1, REP8_7f
-	orr	tmp4, data2, REP8_7f
-	bics	has_nul1, tmp1, tmp2
-	bic	has_nul2, tmp3, tmp4
-	ccmp	has_nul2, 0, 0, eq
-	beq	L(nonascii_loop)
+	.p2align 5
+L(loop):
+	ldr	data, [src, 16]!
+	cmeq	vhas_nul.16b, vdata.16b, 0
+	umaxp	vend.16b, vhas_nul.16b, vhas_nul.16b
+	fmov	synd, dend
+	cbz	synd, L(loop)

-	/* Enter with C = has_nul1 == 0.  */
-L(tail):
-#ifdef __AARCH64EB__
-	/* For big-endian, carry propagation (if the final byte in the
-	   string is 0x01) means we cannot use has_nul1/2 directly.  The
-	   easiest way to get the correct byte is to byte-swap the data
-	   and calculate the syndrome a second time.  */
-	csel	data1, data1, data2, cc
-	rev	data1, data1
-	sub	tmp1, data1, zeroones
-	orr	tmp2, data1, REP8_7f
-	bic	has_nul1, tmp1, tmp2
-#else
-	csel	has_nul1, has_nul1, has_nul2, cc
+	and	vhas_nul.16b, vhas_nul.16b, vrepmask.16b
+	addp	vend.16b, vhas_nul.16b, vhas_nul.16b	/* 128->64 */
+	sub	result, src, srcin
+	fmov	synd, dend
+#ifndef __AARCH64EB__
+	rbit	synd, synd
 #endif
-	sub	len, src, srcin
-	rev	has_nul1, has_nul1
-	add	tmp2, len, 8
-	clz	tmp1, has_nul1
-	csel	len, len, tmp2, cc
-	add	len, len, tmp1, lsr 3
+	clz	tmp, synd
+	add	result, result, tmp, lsr 2
 	ret

-L(nonascii_loop):
-	ldp	data1, data2, [src, 16]!
-	sub	tmp1, data1, zeroones
-	orr	tmp2, data1, REP8_7f
-	sub	tmp3, data2, zeroones
-	orr	tmp4, data2, REP8_7f
-	bics	has_nul1, tmp1, tmp2
-	bic	has_nul2, tmp3, tmp4
-	ccmp	has_nul2, 0, 0, eq
-	bne	L(tail)
-	ldp	data1, data2, [src, 16]!
-	sub	tmp1, data1, zeroones
-	orr	tmp2, data1, REP8_7f
-	sub	tmp3, data2, zeroones
-	orr	tmp4, data2, REP8_7f
-	bics	has_nul1, tmp1, tmp2
-	bic	has_nul2, tmp3, tmp4
-	ccmp	has_nul2, 0, 0, eq
-	beq	L(nonascii_loop)
-	b	L(tail)
-
-	/* Load 16 bytes from [srcin & ~15] and force the bytes that precede
-	   srcin to 0x7f, so we ignore any NUL bytes before the string.
-	   Then continue in the aligned loop.  */
-L(page_cross):
-	bic	src, srcin, 15
-	ldp	data1, data2, [src]
-	lsl	tmp1, srcin, 3
-	mov	tmp4, -1
-#ifdef __AARCH64EB__
-	/* Big-endian.  Early bytes are at MSB.  */
-	lsr	tmp1, tmp4, tmp1	/* Shift (tmp1 & 63).  */
-#else
-	/* Little-endian.  Early bytes are at LSB.  */
-	lsl	tmp1, tmp4, tmp1	/* Shift (tmp1 & 63).  */
-#endif
-	orr	tmp1, tmp1, REP8_80
-	orn	data1, data1, tmp1
-	orn	tmp2, data2, tmp1
-	tst	srcin, 8
-	csel	data1, data1, tmp4, eq
-	csel	data2, data2, tmp2, eq
-	b	L(page_cross_entry)
-
 END (STRLEN)
 weak_alias (STRLEN, strlen)
 libc_hidden_builtin_def (strlen)
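
As a cross-check of the "Core algorithm" comment in the new code, the
syndrome construction (the 0xf00f vrepmask, the addp 128->64 narrowing,
and the final rbit/clz) can be emulated in C.  The following is a hedged
sketch for the little-endian case; make_syndrome and first_nul are
illustrative names, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Emulate the per-chunk syndrome of the new strlen (little-endian).
   Each input byte contributes 4 syndrome bits, in string order.  */
static uint64_t
make_syndrome (const unsigned char chunk[16])
{
  uint64_t synd = 0;
  for (int i = 0; i < 16; i++)
    {
      /* cmeq vhas_nul.16b, vdata.16b, 0: 0xff where the byte is NUL.  */
      unsigned eq = chunk[i] == 0 ? 0xff : 0x00;
      /* and with vrepmask (0xf00f per halfword): even bytes keep bits
	 0-3, odd bytes keep bits 4-7, so a byte pair merges disjointly.  */
      unsigned mask = (i & 1) ? 0xf0u : 0x0fu;
      /* addp 128->64: the pair (2i, 2i+1) lands in output byte i, i.e.
	 input byte i owns syndrome bits 4*i .. 4*i+3.  */
      synd |= (uint64_t) (eq & mask) << (4 * (i & ~1));
    }
  return synd;
}

/* rbit+clz in the assembly is a trailing-zero count; dividing by 4
   (lsr result, result, 2) recovers the byte index.  */
static int
first_nul (uint64_t synd)
{
  return synd ? __builtin_ctzll (synd) / 4 : 16;
}

int
main (void)
{
  unsigned char chunk[16] = "abcdef";	/* NUL at index 6.  */
  printf ("%d\n", first_nul (make_syndrome (chunk)));	/* Prints 6.  */
  return 0;
}

The entry path relies on the same layout: lsl shift, srcin, 2 scales the
source misalignment by 4 (shift amounts are taken modulo 64), so
lsr synd, synd, shift discards exactly the syndrome bits of the bytes
preceding the string.  Inside the loop, the cheaper umaxp suffices as a
NUL-present test; the precise and/addp syndrome is rebuilt only once,
after the loop exits.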