From 8eb5ad2ebc94cc5bedbac57c226c02ec254479c7 Mon Sep 17 00:00:00 2001 From: Pierre Blanchard Date: Mon, 9 Dec 2024 15:54:34 +0000 Subject: [PATCH] AArch64: Improve codegen in AdvSIMD logs Remove spurious ADRP and a few MOVs. Reduce memory access by using more indexed MLAs in polynomial. Align notation so that algorithms are easier to compare. Speedup on Neoverse V1 for log10 (8%), log (8.5%), and log2 (10%). Update error threshold in AdvSIMD log (now matches SVE log). --- sysdeps/aarch64/fpu/log10_advsimd.c | 79 +++++++++++++----------- sysdeps/aarch64/fpu/log2_advsimd.c | 73 +++++++++++++--------- sysdeps/aarch64/fpu/log_advsimd.c | 94 ++++++++++++++++------------- 3 files changed, 140 insertions(+), 106 deletions(-) diff --git a/sysdeps/aarch64/fpu/log10_advsimd.c b/sysdeps/aarch64/fpu/log10_advsimd.c index c065aaebae..f69ed21c39 100644 --- a/sysdeps/aarch64/fpu/log10_advsimd.c +++ b/sysdeps/aarch64/fpu/log10_advsimd.c @@ -18,36 +18,36 @@ . */ #include "v_math.h" -#include "poly_advsimd_f64.h" - -#define N (1 << V_LOG10_TABLE_BITS) static const struct data { - uint64x2_t min_norm; + uint64x2_t off, sign_exp_mask, offset_lower_bound; uint32x4_t special_bound; - float64x2_t poly[5]; - float64x2_t invln10, log10_2, ln2; - uint64x2_t sign_exp_mask; + double invln10, log10_2; + double c1, c3; + float64x2_t c0, c2, c4; } data = { /* Computed from log coefficients divided by log(10) then rounded to double precision. */ - .poly = { V2 (-0x1.bcb7b1526e506p-3), V2 (0x1.287a7636be1d1p-3), - V2 (-0x1.bcb7b158af938p-4), V2 (0x1.63c78734e6d07p-4), - V2 (-0x1.287461742fee4p-4) }, - .ln2 = V2 (0x1.62e42fefa39efp-1), - .invln10 = V2 (0x1.bcb7b1526e50ep-2), - .log10_2 = V2 (0x1.34413509f79ffp-2), - .min_norm = V2 (0x0010000000000000), /* asuint64(0x1p-1022). */ - .special_bound = V4 (0x7fe00000), /* asuint64(inf) - min_norm. */ + .c0 = V2 (-0x1.bcb7b1526e506p-3), + .c1 = 0x1.287a7636be1d1p-3, + .c2 = V2 (-0x1.bcb7b158af938p-4), + .c3 = 0x1.63c78734e6d07p-4, + .c4 = V2 (-0x1.287461742fee4p-4), + .invln10 = 0x1.bcb7b1526e50ep-2, + .log10_2 = 0x1.34413509f79ffp-2, + .off = V2 (0x3fe6900900000000), .sign_exp_mask = V2 (0xfff0000000000000), + /* Lower bound is 0x0010000000000000. For + optimised register use subnormals are detected after offset has been + subtracted, so lower bound - offset (which wraps around). */ + .offset_lower_bound = V2 (0x0010000000000000 - 0x3fe6900900000000), + .special_bound = V4 (0x7fe00000), /* asuint64(inf) - 0x0010000000000000. 
*/ }; -#define Off v_u64 (0x3fe6900900000000) +#define N (1 << V_LOG10_TABLE_BITS) #define IndexMask (N - 1) -#define T(s, i) __v_log10_data.s[i] - struct entry { float64x2_t invc; @@ -70,10 +70,11 @@ lookup (uint64x2_t i) } static float64x2_t VPCS_ATTR NOINLINE -special_case (float64x2_t x, float64x2_t y, float64x2_t hi, float64x2_t r2, - uint32x2_t special) +special_case (float64x2_t hi, uint64x2_t u_off, float64x2_t y, float64x2_t r2, + uint32x2_t special, const struct data *d) { - return v_call_f64 (log10, x, vfmaq_f64 (hi, r2, y), vmovl_u32 (special)); + float64x2_t x = vreinterpretq_f64_u64 (vaddq_u64 (u_off, d->off)); + return v_call_f64 (log10, x, vfmaq_f64 (hi, y, r2), vmovl_u32 (special)); } /* Fast implementation of double-precision vector log10 @@ -85,19 +86,24 @@ special_case (float64x2_t x, float64x2_t y, float64x2_t hi, float64x2_t r2, float64x2_t VPCS_ATTR V_NAME_D1 (log10) (float64x2_t x) { const struct data *d = ptr_barrier (&data); - uint64x2_t ix = vreinterpretq_u64_f64 (x); - uint32x2_t special = vcge_u32 (vsubhn_u64 (ix, d->min_norm), - vget_low_u32 (d->special_bound)); + + /* To avoid having to mov x out of the way, keep u after offset has been + applied, and recover x by adding the offset back in the special-case + handler. */ + uint64x2_t u = vreinterpretq_u64_f64 (x); + uint64x2_t u_off = vsubq_u64 (u, d->off); /* x = 2^k z; where z is in range [OFF,2*OFF) and exact. The range is split into N subintervals. The ith subinterval contains z and c is near its center. */ - uint64x2_t tmp = vsubq_u64 (ix, Off); - int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52); - uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, d->sign_exp_mask)); + int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (u_off), 52); + uint64x2_t iz = vsubq_u64 (u, vandq_u64 (u_off, d->sign_exp_mask)); float64x2_t z = vreinterpretq_f64_u64 (iz); - struct entry e = lookup (tmp); + struct entry e = lookup (u_off); + + uint32x2_t special = vcge_u32 (vsubhn_u64 (u_off, d->offset_lower_bound), + vget_low_u32 (d->special_bound)); /* log10(x) = log1p(z/c-1)/log(10) + log10(c) + k*log10(2). */ float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc); @@ -105,17 +111,22 @@ float64x2_t VPCS_ATTR V_NAME_D1 (log10) (float64x2_t x) /* hi = r / log(10) + log10(c) + k*log10(2). Constants in v_log10_data.c are computed (in extended precision) as - e.log10c := e.logc * ivln10. */ - float64x2_t w = vfmaq_f64 (e.log10c, r, d->invln10); + e.log10c := e.logc * invln10. */ + float64x2_t cte = vld1q_f64 (&d->invln10); + float64x2_t hi = vfmaq_laneq_f64 (e.log10c, r, cte, 0); /* y = log10(1+r) + n * log10(2). */ - float64x2_t hi = vfmaq_f64 (w, kd, d->log10_2); + hi = vfmaq_laneq_f64 (hi, kd, cte, 1); /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi. */ float64x2_t r2 = vmulq_f64 (r, r); - float64x2_t y = v_pw_horner_4_f64 (r, r2, d->poly); + float64x2_t odd_coeffs = vld1q_f64 (&d->c1); + float64x2_t y = vfmaq_laneq_f64 (d->c2, r, odd_coeffs, 1); + float64x2_t p = vfmaq_laneq_f64 (d->c0, r, odd_coeffs, 0); + y = vfmaq_f64 (y, d->c4, r2); + y = vfmaq_f64 (p, y, r2); if (__glibc_unlikely (v_any_u32h (special))) - return special_case (x, y, hi, r2, special); - return vfmaq_f64 (hi, r2, y); + return special_case (hi, u_off, y, r2, special, d); + return vfmaq_f64 (hi, y, r2); } diff --git a/sysdeps/aarch64/fpu/log2_advsimd.c b/sysdeps/aarch64/fpu/log2_advsimd.c index 4057c552d8..1eea1f86eb 100644 --- a/sysdeps/aarch64/fpu/log2_advsimd.c +++ b/sysdeps/aarch64/fpu/log2_advsimd.c @@ -18,31 +18,33 @@ . 
*/ #include "v_math.h" -#include "poly_advsimd_f64.h" - -#define N (1 << V_LOG2_TABLE_BITS) static const struct data { - uint64x2_t min_norm; + uint64x2_t off, sign_exp_mask, offset_lower_bound; uint32x4_t special_bound; - float64x2_t poly[5]; - float64x2_t invln2; - uint64x2_t sign_exp_mask; + float64x2_t c0, c2; + double c1, c3, invln2, c4; } data = { /* Each coefficient was generated to approximate log(r) for |r| < 0x1.fp-9 and N = 128, then scaled by log2(e) in extended precision and rounded back to double precision. */ - .poly = { V2 (-0x1.71547652b83p-1), V2 (0x1.ec709dc340953p-2), - V2 (-0x1.71547651c8f35p-2), V2 (0x1.2777ebe12dda5p-2), - V2 (-0x1.ec738d616fe26p-3) }, - .invln2 = V2 (0x1.71547652b82fep0), - .min_norm = V2 (0x0010000000000000), /* asuint64(0x1p-1022). */ - .special_bound = V4 (0x7fe00000), /* asuint64(inf) - min_norm. */ + .c0 = V2 (-0x1.71547652b8300p-1), + .c1 = 0x1.ec709dc340953p-2, + .c2 = V2 (-0x1.71547651c8f35p-2), + .c3 = 0x1.2777ebe12dda5p-2, + .c4 = -0x1.ec738d616fe26p-3, + .invln2 = 0x1.71547652b82fep0, + .off = V2 (0x3fe6900900000000), .sign_exp_mask = V2 (0xfff0000000000000), + /* Lower bound is 0x0010000000000000. For + optimised register use subnormals are detected after offset has been + subtracted, so lower bound - offset (which wraps around). */ + .offset_lower_bound = V2 (0x0010000000000000 - 0x3fe6900900000000), + .special_bound = V4 (0x7fe00000), /* asuint64(inf) - asuint64(0x1p-1022). */ }; -#define Off v_u64 (0x3fe6900900000000) +#define N (1 << V_LOG2_TABLE_BITS) #define IndexMask (N - 1) struct entry @@ -67,10 +69,11 @@ lookup (uint64x2_t i) } static float64x2_t VPCS_ATTR NOINLINE -special_case (float64x2_t x, float64x2_t y, float64x2_t w, float64x2_t r2, - uint32x2_t special) +special_case (float64x2_t hi, uint64x2_t u_off, float64x2_t y, float64x2_t r2, + uint32x2_t special, const struct data *d) { - return v_call_f64 (log2, x, vfmaq_f64 (w, r2, y), vmovl_u32 (special)); + float64x2_t x = vreinterpretq_f64_u64 (vaddq_u64 (u_off, d->off)); + return v_call_f64 (log2, x, vfmaq_f64 (hi, y, r2), vmovl_u32 (special)); } /* Double-precision vector log2 routine. Implements the same algorithm as @@ -81,31 +84,41 @@ special_case (float64x2_t x, float64x2_t y, float64x2_t w, float64x2_t r2, float64x2_t VPCS_ATTR V_NAME_D1 (log2) (float64x2_t x) { const struct data *d = ptr_barrier (&data); - uint64x2_t ix = vreinterpretq_u64_f64 (x); - uint32x2_t special = vcge_u32 (vsubhn_u64 (ix, d->min_norm), - vget_low_u32 (d->special_bound)); + + /* To avoid having to mov x out of the way, keep u after offset has been + applied, and recover x by adding the offset back in the special-case + handler. */ + uint64x2_t u = vreinterpretq_u64_f64 (x); + uint64x2_t u_off = vsubq_u64 (u, d->off); /* x = 2^k z; where z is in range [Off,2*Off) and exact. The range is split into N subintervals. The ith subinterval contains z and c is near its center. */ - uint64x2_t tmp = vsubq_u64 (ix, Off); - int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52); - uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, d->sign_exp_mask)); + int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (u_off), 52); + uint64x2_t iz = vsubq_u64 (u, vandq_u64 (u_off, d->sign_exp_mask)); float64x2_t z = vreinterpretq_f64_u64 (iz); - struct entry e = lookup (tmp); + struct entry e = lookup (u_off); + + uint32x2_t special = vcge_u32 (vsubhn_u64 (u_off, d->offset_lower_bound), + vget_low_u32 (d->special_bound)); /* log2(x) = log1p(z/c-1)/log(2) + log2(c) + k. 
*/
   float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
   float64x2_t kd = vcvtq_f64_s64 (k);
-  float64x2_t w = vfmaq_f64 (e.log2c, r, d->invln2);
+
+  float64x2_t invln2_and_c4 = vld1q_f64 (&d->invln2);
+  float64x2_t hi
+      = vfmaq_laneq_f64 (vaddq_f64 (e.log2c, kd), r, invln2_and_c4, 0);
   float64x2_t r2 = vmulq_f64 (r, r);
-  float64x2_t y = v_pw_horner_4_f64 (r, r2, d->poly);
-  w = vaddq_f64 (kd, w);
+  float64x2_t odd_coeffs = vld1q_f64 (&d->c1);
+  float64x2_t y = vfmaq_laneq_f64 (d->c2, r, odd_coeffs, 1);
+  float64x2_t p = vfmaq_laneq_f64 (d->c0, r, odd_coeffs, 0);
+  y = vfmaq_laneq_f64 (y, r2, invln2_and_c4, 1);
+  y = vfmaq_f64 (p, r2, y);
   if (__glibc_unlikely (v_any_u32h (special)))
-    return special_case (x, y, w, r2, special);
-  return vfmaq_f64 (w, r2, y);
+    return special_case (hi, u_off, y, r2, special, d);
+  return vfmaq_f64 (hi, y, r2);
 }
diff --git a/sysdeps/aarch64/fpu/log_advsimd.c b/sysdeps/aarch64/fpu/log_advsimd.c
index 015a6da7d7..b1a27fbc29 100644
--- a/sysdeps/aarch64/fpu/log_advsimd.c
+++ b/sysdeps/aarch64/fpu/log_advsimd.c
@@ -21,27 +21,29 @@
 static const struct data
 {
-  uint64x2_t min_norm;
+  uint64x2_t off, sign_exp_mask, offset_lower_bound;
   uint32x4_t special_bound;
-  float64x2_t poly[5];
-  float64x2_t ln2;
-  uint64x2_t sign_exp_mask;
+  float64x2_t c0, c2;
+  double c1, c3, ln2, c4;
 } data = {
-  /* Worst-case error: 1.17 + 0.5 ulp.
-     Rel error: 0x1.6272e588p-56 in [ -0x1.fc1p-9 0x1.009p-8 ].  */
-  .poly = { V2 (-0x1.ffffffffffff7p-2), V2 (0x1.55555555170d4p-2),
-	    V2 (-0x1.0000000399c27p-2), V2 (0x1.999b2e90e94cap-3),
-	    V2 (-0x1.554e550bd501ep-3) },
-  .ln2 = V2 (0x1.62e42fefa39efp-1),
-  .min_norm = V2 (0x0010000000000000),
-  .special_bound = V4 (0x7fe00000), /* asuint64(inf) - min_norm.  */
-  .sign_exp_mask = V2 (0xfff0000000000000)
+  /* Rel error: 0x1.6272e588p-56 in [ -0x1.fc1p-9 0x1.009p-8 ].  */
+  .c0 = V2 (-0x1.ffffffffffff7p-2),
+  .c1 = 0x1.55555555170d4p-2,
+  .c2 = V2 (-0x1.0000000399c27p-2),
+  .c3 = 0x1.999b2e90e94cap-3,
+  .c4 = -0x1.554e550bd501ep-3,
+  .ln2 = 0x1.62e42fefa39efp-1,
+  .sign_exp_mask = V2 (0xfff0000000000000),
+  .off = V2 (0x3fe6900900000000),
+  /* Lower bound is 0x0010000000000000. For
+     optimised register use subnormals are detected after offset has been
+     subtracted, so lower bound - offset (which wraps around).  */
+  .offset_lower_bound = V2 (0x0010000000000000 - 0x3fe6900900000000),
+  .special_bound = V4 (0x7fe00000), /* asuint64(inf) - asuint64(0x1p-1022).  */
 };
-#define A(i) d->poly[i]
 #define N (1 << V_LOG_TABLE_BITS)
 #define IndexMask (N - 1)
-#define Off v_u64 (0x3fe6900900000000)
 struct entry
 {
@@ -64,48 +66,56 @@ lookup (uint64x2_t i)
 }
 static float64x2_t VPCS_ATTR NOINLINE
-special_case (float64x2_t x, float64x2_t y, float64x2_t hi, float64x2_t r2,
-	      uint32x2_t cmp)
+special_case (float64x2_t hi, uint64x2_t u_off, float64x2_t y, float64x2_t r2,
+	      uint32x2_t special, const struct data *d)
 {
-  return v_call_f64 (log, x, vfmaq_f64 (hi, y, r2), vmovl_u32 (cmp));
+  float64x2_t x = vreinterpretq_f64_u64 (vaddq_u64 (u_off, d->off));
+  return v_call_f64 (log, x, vfmaq_f64 (hi, y, r2), vmovl_u32 (special));
 }
+/* Double-precision vector log routine.
+   The maximum observed error is 2.17 ULP:
+   _ZGVnN2v_log(0x1.a6129884398a3p+0) got 0x1.ffffff1cca043p-2
+				      want 0x1.ffffff1cca045p-2. 
*/ float64x2_t VPCS_ATTR V_NAME_D1 (log) (float64x2_t x) { const struct data *d = ptr_barrier (&data); - float64x2_t z, r, r2, p, y, kd, hi; - uint64x2_t ix, iz, tmp; - uint32x2_t cmp; - int64x2_t k; - struct entry e; - ix = vreinterpretq_u64_f64 (x); - cmp = vcge_u32 (vsubhn_u64 (ix, d->min_norm), - vget_low_u32 (d->special_bound)); + /* To avoid having to mov x out of the way, keep u after offset has been + applied, and recover x by adding the offset back in the special-case + handler. */ + uint64x2_t u = vreinterpretq_u64_f64 (x); + uint64x2_t u_off = vsubq_u64 (u, d->off); /* x = 2^k z; where z is in range [Off,2*Off) and exact. The range is split into N subintervals. The ith subinterval contains z and c is near its center. */ - tmp = vsubq_u64 (ix, Off); - k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52); /* arithmetic shift. */ - iz = vsubq_u64 (ix, vandq_u64 (tmp, d->sign_exp_mask)); - z = vreinterpretq_f64_u64 (iz); - e = lookup (tmp); + int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (u_off), 52); + uint64x2_t iz = vsubq_u64 (u, vandq_u64 (u_off, d->sign_exp_mask)); + float64x2_t z = vreinterpretq_f64_u64 (iz); + + struct entry e = lookup (u_off); + + uint32x2_t special = vcge_u32 (vsubhn_u64 (u_off, d->offset_lower_bound), + vget_low_u32 (d->special_bound)); /* log(x) = log1p(z/c-1) + log(c) + k*Ln2. */ - r = vfmaq_f64 (v_f64 (-1.0), z, e.invc); - kd = vcvtq_f64_s64 (k); + float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc); + float64x2_t kd = vcvtq_f64_s64 (k); /* hi = r + log(c) + k*Ln2. */ - hi = vfmaq_f64 (vaddq_f64 (e.logc, r), kd, d->ln2); - /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi. */ - r2 = vmulq_f64 (r, r); - y = vfmaq_f64 (A (2), A (3), r); - p = vfmaq_f64 (A (0), A (1), r); - y = vfmaq_f64 (y, A (4), r2); - y = vfmaq_f64 (p, y, r2); + float64x2_t ln2_and_c4 = vld1q_f64 (&d->ln2); + float64x2_t hi = vfmaq_laneq_f64 (vaddq_f64 (e.logc, r), kd, ln2_and_c4, 0); - if (__glibc_unlikely (v_any_u32h (cmp))) - return special_case (x, y, hi, r2, cmp); + /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi. */ + float64x2_t odd_coeffs = vld1q_f64 (&d->c1); + float64x2_t r2 = vmulq_f64 (r, r); + float64x2_t y = vfmaq_laneq_f64 (d->c2, r, odd_coeffs, 1); + float64x2_t p = vfmaq_laneq_f64 (d->c0, r, odd_coeffs, 0); + y = vfmaq_laneq_f64 (y, r2, ln2_and_c4, 1); + y = vfmaq_f64 (p, r2, y); + + if (__glibc_unlikely (v_any_u32h (special))) + return special_case (hi, u_off, y, r2, special, d); return vfmaq_f64 (hi, y, r2); }
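
Note (illustrative, not part of the patch): the register-use trick common to all three files above is to keep pairs of scalar constants adjacent in the data struct (c1/c3, and ln2 or invln2 packed next to c4), load each pair with a single vld1q_f64, and consume the lanes directly with vfmaq_laneq_f64, so no per-coefficient broadcast MOVs are needed. The following standalone sketch shows only that coefficient-packing pattern under the same pairwise-Horner shape; the struct layout, names (poly, eval_poly) and coefficient values here are placeholders of mine, not glibc's constants or API.

/* Minimal sketch of the packed-coefficient, lane-indexed FMA scheme.
   Build on AArch64 with e.g. gcc -O2 sketch.c.  */
#include <arm_neon.h>
#include <stdio.h>

static const struct
{
  float64x2_t c0, c2; /* Even coefficients kept as full vectors.  */
  double c1, c3;      /* Odd coefficients, adjacent so one vld1q_f64 loads both.  */
} poly = {
  .c0 = { -0.5, -0.5 },
  .c2 = { -0.25, -0.25 },
  .c1 = 0x1.5555555555555p-2, /* Placeholder values, not the glibc constants.  */
  .c3 = 0x1.999999999999ap-3,
};

/* y = c0 + c1*r + c2*r^2 + c3*r^3, evaluated pairwise as
   (c0 + c1*r) + r2*(c2 + c3*r), reading c1 and c3 by lane from one register.  */
static float64x2_t
eval_poly (float64x2_t r)
{
  float64x2_t odd_coeffs = vld1q_f64 (&poly.c1); /* lanes: {c1, c3}.  */
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t hi = vfmaq_laneq_f64 (poly.c2, r, odd_coeffs, 1); /* c2 + c3*r.  */
  float64x2_t lo = vfmaq_laneq_f64 (poly.c0, r, odd_coeffs, 0); /* c0 + c1*r.  */
  return vfmaq_f64 (lo, r2, hi);
}

int
main (void)
{
  float64x2_t r = { 0x1p-9, -0x1p-9 };
  float64x2_t y = eval_poly (r);
  printf ("%a %a\n", vgetq_lane_f64 (y, 0), vgetq_lane_f64 (y, 1));
  return 0;
}

With this layout the four scalar constants cost one load and no duplicating MOVs, which is the effect the commit message describes as "more indexed MLAs in polynomial".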