author     Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-17 16:10:51 +0300
committer  Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-17 16:10:51 +0300
commit     1663be053d50c06bb0f971c87d41a7b83f96fe15 (patch)
tree       4bfbbfac7a83c1e52b2a7ab23dd9677f5cab4267 /sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S
parent     9c02f663f6b387b3905b629ffe584c9abf2030dc (diff)
Vector expf for x86_64 and tests.
Here is an implementation of vectorized expf containing SSE, AVX, AVX2 and AVX512
versions according to the Vector ABI
<https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.

	* sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New symbols added.
	* sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration and asm
	redirections for expf.
	* sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
	* sysdeps/x86_64/fpu/Versions: New versions added.
	* sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
	* sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines): Added
	build of SSE, AVX2 and AVX512 IFUNC versions.
	* sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_expf8_core.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_s_expf8_core_avx2.S: New file.
	* sysdeps/x86_64/fpu/svml_s_expf16_core.S: New file.
	* sysdeps/x86_64/fpu/svml_s_expf4_core.S: New file.
	* sysdeps/x86_64/fpu/svml_s_expf8_core.S: New file.
	* sysdeps/x86_64/fpu/svml_s_expf8_core_avx.S: New file.
	* sysdeps/x86_64/fpu/svml_s_expf_data.S: New file.
	* sysdeps/x86_64/fpu/svml_s_expf_data.h: New file.
	* sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c: Vector expf tests.
	* sysdeps/x86_64/fpu/test-float-vlen16.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen4.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen8-avx2.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c: Likewise.
	* sysdeps/x86_64/fpu/test-float-vlen8.c: Likewise.
	* NEWS: Mention addition of x86_64 vector expf.
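[Editorial illustration, not part of the commit: with libmvec installed, a compiler
that implements the x86_64 Vector ABI can compile an expf loop into calls to the SIMD
variants declared via bits/math-vector.h, such as the _ZGVbN4v_expf entry point added
by this file. The file name demo.c and the exact flags below are assumptions; details
vary by compiler and glibc version, but something like
"gcc -O2 -ffast-math -fopenmp-simd demo.c -lmvec -lm" is the usual shape.]

#include <math.h>
#include <stdio.h>

#define N 16

int main (void)
{
  float in[N], out[N];

  for (int i = 0; i < N; i++)
    in[i] = i * 0.25f;

  /* With -ffast-math and -fopenmp-simd, bits/math-vector.h marks expf as a
     SIMD-enabled function, so this loop may be vectorized into calls to
     _ZGVbN4v_expf / _ZGVdN8v_expf / ... instead of scalar expf.  */
#pragma omp simd
  for (int i = 0; i < N; i++)
    out[i] = expf (in[i]);

  for (int i = 0; i < N; i++)
    printf ("expf(%g) = %g\n", (double) in[i], (double) out[i]);

  return 0;
}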
Diffstat (limited to 'sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S')
-rw-r--r--   sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S   212
1 file changed, 212 insertions, 0 deletions
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S
new file mode 100644
index 0000000000..fcc1859c3a
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expf4_core_sse4.S
@@ -0,0 +1,212 @@
+/* Function expf vectorized with SSE4.
+ Copyright (C) 2014-2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include "svml_s_expf_data.h"
+
+ .text
+ENTRY (_ZGVbN4v_expf_sse4)
+/*
+ ALGORITHM DESCRIPTION:
+
+ Argument representation:
+ M = rint(X*2^k/ln2) = 2^k*N+j
+ X = M*ln2/2^k + r = N*ln2 + ln2*(j/2^k) + r
+ then -ln2/2^(k+1) < r < ln2/2^(k+1)
+ Alternatively:
+ M = trunc(X*2^k/ln2)
+ then 0 < r < ln2/2^k
+
+ Result calculation:
+ exp(X) = exp(N*ln2 + ln2*(j/2^k) + r)
+ = 2^N * 2^(j/2^k) * exp(r)
+ 2^N is calculated by bit manipulation
+ 2^(j/2^k) is computed from table lookup
+ exp(r) is approximated by polynomial
+
+ The table lookup is skipped if k = 0.
+ For low accuracy approximation, exp(r) ~ 1 or 1+r. */
+
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $320, %rsp
+ movaps %xmm0, %xmm5
+ movq __svml_sexp_data@GOTPCREL(%rip), %rax
+ movups __sInvLn2(%rax), %xmm0
+
+/* m = x*2^k/ln2 + shifter */
+ mulps %xmm5, %xmm0
+ movups __sShifter(%rax), %xmm6
+ movups __sLn2hi(%rax), %xmm4
+ addps %xmm6, %xmm0
+
+/* n = m - shifter = rint(x*2^k/ln2) */
+ movaps %xmm0, %xmm2
+
+/* remove sign of x by "and" operation */
+ movdqu __iAbsMask(%rax), %xmm7
+ subps %xmm6, %xmm2
+
+/* r = x-n*ln2_hi/2^k */
+ mulps %xmm2, %xmm4
+ pand %xmm5, %xmm7
+
+/* compare against threshold */
+ pcmpgtd __iDomainRange(%rax), %xmm7
+ movups __sLn2lo(%rax), %xmm1
+
+/* set mask for overflow/underflow */
+ movmskps %xmm7, %ecx
+ movaps %xmm5, %xmm7
+ movups __sPC5(%rax), %xmm3
+ subps %xmm4, %xmm7
+
+/* r = r-n*ln2_lo/2^k = x - n*ln2/2^k */
+ mulps %xmm1, %xmm2
+
+/* compute 2^N with "shift" */
+ movdqu __iBias(%rax), %xmm6
+ subps %xmm2, %xmm7
+
+/* c5*r+c4 */
+ mulps %xmm7, %xmm3
+ paddd %xmm6, %xmm0
+ pslld $23, %xmm0
+ addps __sPC4(%rax), %xmm3
+
+/* (c5*r+c4)*r+c3 */
+ mulps %xmm7, %xmm3
+ addps __sPC3(%rax), %xmm3
+
+/* ((c5*r+c4)*r+c3)*r+c2 */
+ mulps %xmm7, %xmm3
+ addps __sPC2(%rax), %xmm3
+
+/* (((c5*r+c4)*r+c3)*r+c2)*r+c1 */
+ mulps %xmm7, %xmm3
+ addps __sPC1(%rax), %xmm3
+
+/* exp(r) = ((((c5*r+c4)*r+c3)*r+c2)*r+c1)*r+c0 */
+ mulps %xmm3, %xmm7
+ addps __sPC0(%rax), %xmm7
+
+/* 2^N*exp(r) */
+ mulps %xmm7, %xmm0
+ testl %ecx, %ecx
+ jne .LBL_1_3
+
+.LBL_1_2:
+ cfi_remember_state
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+
+.LBL_1_3:
+ cfi_restore_state
+ movups %xmm5, 192(%rsp)
+ movups %xmm0, 256(%rsp)
+ je .LBL_1_2
+
+ xorb %dl, %dl
+ xorl %eax, %eax
+ movups %xmm8, 112(%rsp)
+ movups %xmm9, 96(%rsp)
+ movups %xmm10, 80(%rsp)
+ movups %xmm11, 64(%rsp)
+ movups %xmm12, 48(%rsp)
+ movups %xmm13, 32(%rsp)
+ movups %xmm14, 16(%rsp)
+ movups %xmm15, (%rsp)
+ movq %rsi, 136(%rsp)
+ movq %rdi, 128(%rsp)
+ movq %r12, 168(%rsp)
+ cfi_offset_rel_rsp (12, 168)
+ movb %dl, %r12b
+ movq %r13, 160(%rsp)
+ cfi_offset_rel_rsp (13, 160)
+ movl %ecx, %r13d
+ movq %r14, 152(%rsp)
+ cfi_offset_rel_rsp (14, 152)
+ movl %eax, %r14d
+ movq %r15, 144(%rsp)
+ cfi_offset_rel_rsp (15, 144)
+ cfi_remember_state
+
+.LBL_1_6:
+ btl %r14d, %r13d
+ jc .LBL_1_12
+
+.LBL_1_7:
+ lea 1(%r14), %esi
+ btl %esi, %r13d
+ jc .LBL_1_10
+
+.LBL_1_8:
+ incb %r12b
+ addl $2, %r14d
+ cmpb $16, %r12b
+ jb .LBL_1_6
+
+ movups 112(%rsp), %xmm8
+ movups 96(%rsp), %xmm9
+ movups 80(%rsp), %xmm10
+ movups 64(%rsp), %xmm11
+ movups 48(%rsp), %xmm12
+ movups 32(%rsp), %xmm13
+ movups 16(%rsp), %xmm14
+ movups (%rsp), %xmm15
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 168(%rsp), %r12
+ cfi_restore (%r12)
+ movq 160(%rsp), %r13
+ cfi_restore (%r13)
+ movq 152(%rsp), %r14
+ cfi_restore (%r14)
+ movq 144(%rsp), %r15
+ cfi_restore (%r15)
+ movups 256(%rsp), %xmm0
+ jmp .LBL_1_2
+
+.LBL_1_10:
+ cfi_restore_state
+ movzbl %r12b, %r15d
+ movss 196(%rsp,%r15,8), %xmm0
+
+ call expf@PLT
+
+ movss %xmm0, 260(%rsp,%r15,8)
+ jmp .LBL_1_8
+
+.LBL_1_12:
+ movzbl %r12b, %r15d
+ movss 192(%rsp,%r15,8), %xmm0
+
+ call expf@PLT
+
+ movss %xmm0, 256(%rsp,%r15,8)
+ jmp .LBL_1_7
+
+END (_ZGVbN4v_expf_sse4)
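
[Editorial sketch, not part of the commit: the ALGORITHM DESCRIPTION comment above,
restated as scalar C for readers who do not want to follow the SSE4 assembly. It
follows the k = 0 / no-table variant, omits the overflow/underflow fallback to scalar
expf that the assembly performs, and uses placeholder Taylor coefficients rather than
the tuned __sPC0..__sPC5 constants from __svml_sexp_data; the name expf_sketch is
invented for illustration. Build e.g. with "gcc -O2 sketch.c -lm".]

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float expf_sketch (float x)
{
  const float inv_ln2 = 1.44269504f;         /* 1/ln(2) */
  const float ln2_hi  = 0.693145751953125f;  /* high part of ln(2) */
  const float ln2_lo  = 1.42860677e-6f;      /* ln(2) - ln2_hi */

  /* n = rint(x/ln2); the assembly obtains rint via the shifter add/sub trick.  */
  float n = rintf (x * inv_ln2);

  /* r = x - n*ln2, with ln2 split into hi/lo parts to limit rounding error.  */
  float r = (x - n * ln2_hi) - n * ln2_lo;

  /* Horner evaluation of a degree-5 polynomial approximating exp(r);
     placeholder Taylor coefficients, not the tuned table values.  */
  float p = 1.0f / 120.0f;
  p = p * r + 1.0f / 24.0f;
  p = p * r + 1.0f / 6.0f;
  p = p * r + 0.5f;
  p = p * r + 1.0f;
  p = p * r + 1.0f;

  /* 2^n by bit manipulation: place n + bias in the float exponent field.
     No range check here; the assembly instead branches to scalar expf
     for lanes flagged by the __iDomainRange comparison.  */
  uint32_t two_n_bits = (uint32_t) ((int32_t) n + 127) << 23;
  float two_n;
  memcpy (&two_n, &two_n_bits, sizeof two_n);

  return two_n * p;
}

int main (void)
{
  for (float x = -2.0f; x <= 2.0f; x += 0.5f)
    printf ("x=%5.2f  sketch=%.6f  expf=%.6f\n",
            (double) x, (double) expf_sketch (x), (double) expf (x));
  return 0;
}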