aarch64: Optimise AdvSIMD log1p

Optimise AdvSIMD log1p by vectorising the special case.
The special cases are for when the input is:
- less than or equal to -1,
- +/- INFINITY, or
- +/- NaN.
This commit is contained in:
James Chesterman 2025-11-19 21:40:40 +00:00 committed by Adhemerval Zanella
parent 57215df30e
commit 015a13e780
1 changed files with 24 additions and 16 deletions

View File

@@ -23,34 +23,42 @@
 const static struct data
 {
   struct v_log1p_data d;
-  uint64x2_t inf, minus_one;
-} data = { .d = V_LOG1P_CONSTANTS_TABLE,
-	   .inf = V2 (0x7ff0000000000000),
-	   .minus_one = V2 (0xbff0000000000000) };
-
-#define BottomMask v_u64 (0xffffffff)
+  float64x2_t nan, pinf, minf;
+} data = {
+  .d = V_LOG1P_CONSTANTS_TABLE,
+  .nan = V2 (NAN),
+  .pinf = V2 (INFINITY),
+  .minf = V2 (-INFINITY),
+};
 
-static float64x2_t NOINLINE VPCS_ATTR
+static inline float64x2_t
 special_case (float64x2_t x, uint64x2_t cmp, const struct data *d)
 {
-  /* Side-step special lanes so fenv exceptions are not triggered
-     inadvertently.  */
-  return v_call_f64 (log1p, x, log1p_inline (x, &d->d), cmp);
+  float64x2_t y = log1p_inline (x, ptr_barrier (&d->d));
+  y = vbslq_f64 (cmp, d->nan, y);
+  uint64x2_t ret_pinf = vceqq_f64 (x, d->pinf);
+  uint64x2_t ret_minf = vceqq_f64 (x, v_f64 (-1.0));
+  y = vbslq_f64 (ret_pinf, d->pinf, y);
+  return vbslq_f64 (ret_minf, d->minf, y);
 }
 
 /* Vector log1p approximation using polynomial on reduced interval. Routine is
    a modification of the algorithm used in scalar log1p, with no shortcut for
-   k=0 and no narrowing for f and k. Maximum observed error is 2.45 ULP:
+   k=0 and no narrowing for f and k.
+   Maximum observed error is 1.95 + 0.5 ULP
    _ZGVnN2v_log1p(0x1.658f7035c4014p+11) got 0x1.fd61d0727429dp+2
                                         want 0x1.fd61d0727429fp+2 .  */
-VPCS_ATTR float64x2_t V_NAME_D1 (log1p) (float64x2_t x)
+float64x2_t VPCS_ATTR NOINLINE V_NAME_D1 (log1p) (float64x2_t x)
 {
   const struct data *d = ptr_barrier (&data);
-  uint64x2_t ix = vreinterpretq_u64_f64 (x);
-  uint64x2_t ia = vreinterpretq_u64_f64 (vabsq_f64 (x));
 
-  uint64x2_t special_cases
-      = vorrq_u64 (vcgeq_u64 (ia, d->inf), vcgeq_u64 (ix, d->minus_one));
+  /* Use signed integers here to ensure that negative numbers between 0 and -1
+     don't make this expression true.  */
+  uint64x2_t is_infnan
+      = vcgeq_s64 (vreinterpretq_s64_f64 (x), vreinterpretq_s64_f64 (d->pinf));
+  /* The OR-NOT is needed to catch -NaN.  */
+  uint64x2_t special_cases = vornq_u64 (is_infnan, vcgtq_f64 (x, v_f64 (-1)));
 
   if (__glibc_unlikely (v_any_u64 (special_cases)))
     return special_case (x, special_cases, d);