Diffstat (limited to 'sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S')
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S  283
1 file changed, 267 insertions(+), 16 deletions(-)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
index 758aeeaeed..5fa4bc412a 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
@@ -1,5 +1,5 @@
/* Function sincosf vectorized with AVX-512. KNL and SKX versions.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -41,7 +41,7 @@
b) Calculate 2 polynomials for sin and cos:
RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4))));
- c) Swap RS & RC if if first bit of obtained value after
+ c) Swap RS & RC if first bit of obtained value after
Right Shifting is set to 1. Using And, Andnot & Or operations.
3) Destination sign setting
a) Set shifted destination sign using XOR operation:
@@ -49,9 +49,9 @@
R2 = XOR( RC, SC ). */
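
[Editor's note, not part of the patch: the comment above describes the core computation in vector-register terms. A rough scalar C model of steps 2b)-3a) follows; the coefficient values and the naive argument reduction are placeholders for illustration only, and the vector code performs the swap with And/Andnot/Or masks rather than a branch.]

   #include <math.h>
   #include <stdint.h>

   /* Placeholder coefficients (Taylor-style); the real minimax
      constants live in the libmvec trig data tables.  */
   static const float A0 = 1.0f, A1 = -1.6666667e-1f,
                      A2 = 8.3333333e-3f, A3 = -1.9841270e-4f;
   static const float B0 = 1.0f, B1 = -5.0e-1f, B2 = 4.1666667e-2f,
                      B3 = -1.3888889e-3f, B4 = 2.4801587e-5f;

   /* Scalar model of one lane: reduce, evaluate both polynomials,
      swap sin/cos when the low bit of the octant index N is set,
      then restore the signs (steps 2b, 2c and 3a above).  */
   static void
   sincosf_lane_model (float x, float *sinp, float *cosp)
   {
     /* Naive reduction by pi/2 for illustration; the vector code uses
        a more careful reduction for large |x|.  */
     float n = nearbyintf (x * 0.63661977f);   /* 2/pi */
     float X = x - n * 1.57079633f;            /* pi/2 */
     uint32_t N = (uint32_t) (int32_t) n;

     /* 2b) Two polynomials in X^2.  */
     float x2 = X * X;
     float RS = X * (A0 + x2 * (A1 + x2 * (A2 + x2 * A3)));
     float RC = B0 + x2 * (B1 + x2 * (B2 + x2 * (B3 + x2 * B4)));

     /* 2c) Swap RS and RC when N is odd.  */
     if (N & 1)
       {
         float t = RS;
         RS = RC;
         RC = t;
       }

     /* 3a) Sign correction: sin changes sign for N mod 4 in {2,3},
        cos for N mod 4 in {1,2} (XOR of the sign bit in the
        assembly).  */
     *sinp = (N & 2) ? -RS : RS;
     *cosp = ((N + 1) & 2) ? -RC : RC;
   }

In the AVX-512 code the branch on N & 1 is replaced by mask operations so that all 16 lanes proceed without divergence.
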
.text
-ENTRY (_ZGVeN16vvv_sincosf_knl)
-#ifndef HAVE_AVX512_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
+ENTRY (_ZGVeN16vl4l4_sincosf_knl)
+#ifndef HAVE_AVX512DQ_ASM_SUPPORT
+WRAPPER_IMPL_AVX512_fFF _ZGVdN8vl4l4_sincosf
#else
pushq %rbp
cfi_adjust_cfa_offset (8)
@@ -243,12 +243,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
movzbl %r12b, %r15d
vmovss 1156(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1220(%rsp,%r15,8)
vmovss 1156(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1284(%rsp,%r15,8)
jmp .LBL_1_8
@@ -257,20 +257,21 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
movzbl %r12b, %r15d
vmovss 1152(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1216(%rsp,%r15,8)
vmovss 1152(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1280(%rsp,%r15,8)
jmp .LBL_1_7
#endif
-END (_ZGVeN16vvv_sincosf_knl)
+END (_ZGVeN16vl4l4_sincosf_knl)
+libmvec_hidden_def(_ZGVeN16vl4l4_sincosf_knl)
-ENTRY (_ZGVeN16vvv_sincosf_skx)
-#ifndef HAVE_AVX512_ASM_SUPPORT
+ENTRY (_ZGVeN16vl4l4_sincosf_skx)
+#ifndef HAVE_AVX512DQ_ASM_SUPPORT
WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
#else
pushq %rbp
@@ -470,12 +471,12 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
vzeroupper
vmovss 1156(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1220(%rsp,%r15,8)
vmovss 1156(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1284(%rsp,%r15,8)
jmp .LBL_2_8
@@ -486,16 +487,266 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
vzeroupper
vmovss 1152(%rsp,%r15,8), %xmm0
- call sinf@PLT
+ call JUMPTARGET(sinf)
vmovss %xmm0, 1216(%rsp,%r15,8)
vmovss 1152(%rsp,%r15,8), %xmm0
- call cosf@PLT
+ call JUMPTARGET(cosf)
vmovss %xmm0, 1280(%rsp,%r15,8)
jmp .LBL_2_7
#endif
+END (_ZGVeN16vl4l4_sincosf_skx)
+libmvec_hidden_def(_ZGVeN16vl4l4_sincosf_skx)
+
+/* Wrapper between vvv and vl4l4 vector variants. */
+.macro WRAPPER_AVX512_vvv_vl4l4 callee
+#ifndef __ILP32__
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $384, %rsp
+ vmovups %zmm1, 128(%rsp)
+ lea (%rsp), %rdi
+ vmovups %zmm2, 192(%rdi)
+ vmovups %zmm3, 256(%rdi)
+ vmovups %zmm4, 320(%rdi)
+ lea 64(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ movq 128(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 144(%rsp), %r8
+ movq 152(%rsp), %r10
+ movl (%rsp), %eax
+ movl 4(%rsp), %ecx
+ movl 8(%rsp), %edi
+ movl 12(%rsp), %r9d
+ movl %eax, (%rdx)
+ movl %ecx, (%rsi)
+ movq 160(%rsp), %rax
+ movq 168(%rsp), %rcx
+ movl %edi, (%r8)
+ movl %r9d, (%r10)
+ movq 176(%rsp), %rdi
+ movq 184(%rsp), %r9
+ movl 16(%rsp), %r11d
+ movl 20(%rsp), %edx
+ movl 24(%rsp), %esi
+ movl 28(%rsp), %r8d
+ movl %r11d, (%rax)
+ movl %edx, (%rcx)
+ movq 192(%rsp), %r11
+ movq 200(%rsp), %rdx
+ movl %esi, (%rdi)
+ movl %r8d, (%r9)
+ movq 208(%rsp), %rsi
+ movq 216(%rsp), %r8
+ movl 32(%rsp), %r10d
+ movl 36(%rsp), %eax
+ movl 40(%rsp), %ecx
+ movl 44(%rsp), %edi
+ movl %r10d, (%r11)
+ movl %eax, (%rdx)
+ movq 224(%rsp), %r10
+ movq 232(%rsp), %rax
+ movl %ecx, (%rsi)
+ movl %edi, (%r8)
+ movq 240(%rsp), %rcx
+ movq 248(%rsp), %rdi
+ movl 48(%rsp), %r9d
+ movl 52(%rsp), %r11d
+ movl 56(%rsp), %edx
+ movl 60(%rsp), %esi
+ movl %r9d, (%r10)
+ movl %r11d, (%rax)
+ movq 256(%rsp), %r9
+ movq 264(%rsp), %r11
+ movl %edx, (%rcx)
+ movl %esi, (%rdi)
+ movq 272(%rsp), %rdx
+ movq 280(%rsp), %rsi
+ movl 64(%rsp), %r8d
+ movl 68(%rsp), %r10d
+ movl 72(%rsp), %eax
+ movl 76(%rsp), %ecx
+ movl %r8d, (%r9)
+ movl %r10d, (%r11)
+ movq 288(%rsp), %r8
+ movq 296(%rsp), %r10
+ movl %eax, (%rdx)
+ movl %ecx, (%rsi)
+ movq 304(%rsp), %rax
+ movq 312(%rsp), %rcx
+ movl 80(%rsp), %edi
+ movl 84(%rsp), %r9d
+ movl 88(%rsp), %r11d
+ movl 92(%rsp), %edx
+ movl %edi, (%r8)
+ movl %r9d, (%r10)
+ movq 320(%rsp), %rdi
+ movq 328(%rsp), %r9
+ movl %r11d, (%rax)
+ movl %edx, (%rcx)
+ movq 336(%rsp), %r11
+ movq 344(%rsp), %rdx
+ movl 96(%rsp), %esi
+ movl 100(%rsp), %r8d
+ movl 104(%rsp), %r10d
+ movl 108(%rsp), %eax
+ movl %esi, (%rdi)
+ movl %r8d, (%r9)
+ movq 352(%rsp), %rsi
+ movq 360(%rsp), %r8
+ movl %r10d, (%r11)
+ movl %eax, (%rdx)
+ movq 368(%rsp), %r10
+ movq 376(%rsp), %rax
+ movl 112(%rsp), %ecx
+ movl 116(%rsp), %edi
+ movl 120(%rsp), %r9d
+ movl 124(%rsp), %r11d
+ movl %ecx, (%rsi)
+ movl %edi, (%r8)
+ movl %r9d, (%r10)
+ movl %r11d, (%rax)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+#else
+ leal 8(%rsp), %r10d
+ .cfi_def_cfa 10, 0
+ andl $-64, %esp
+ pushq -8(%r10d)
+ pushq %rbp
+ .cfi_escape 0x10,0x6,0x2,0x76,0
+ movl %esp, %ebp
+ pushq %r10
+ .cfi_escape 0xf,0x3,0x76,0x78,0x6
+ leal -112(%rbp), %esi
+ leal -176(%rbp), %edi
+ subl $296, %esp
+ vmovdqa64 %zmm1, -240(%ebp)
+ vmovdqa64 %zmm2, -304(%ebp)
+ call HIDDEN_JUMPTARGET(\callee)
+ movl -240(%ebp), %eax
+ vmovss -176(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -236(%ebp), %eax
+ vmovss -172(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -232(%ebp), %eax
+ vmovss -168(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -228(%ebp), %eax
+ vmovss -164(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -224(%ebp), %eax
+ vmovss -160(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -220(%ebp), %eax
+ vmovss -156(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -216(%ebp), %eax
+ vmovss -152(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -212(%ebp), %eax
+ vmovss -148(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -208(%ebp), %eax
+ vmovss -144(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -204(%ebp), %eax
+ vmovss -140(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -200(%ebp), %eax
+ vmovss -136(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -196(%ebp), %eax
+ vmovss -132(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -192(%ebp), %eax
+ vmovss -128(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -188(%ebp), %eax
+ vmovss -124(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -184(%ebp), %eax
+ vmovss -120(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -180(%ebp), %eax
+ vmovss -116(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -304(%ebp), %eax
+ vmovss -112(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -300(%ebp), %eax
+ vmovss -108(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -296(%ebp), %eax
+ vmovss -104(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -292(%ebp), %eax
+ vmovss -100(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -288(%ebp), %eax
+ vmovss -96(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -284(%ebp), %eax
+ vmovss -92(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -280(%ebp), %eax
+ vmovss -88(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -276(%ebp), %eax
+ vmovss -84(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -272(%ebp), %eax
+ vmovss -80(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -268(%ebp), %eax
+ vmovss -76(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -264(%ebp), %eax
+ vmovss -72(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -260(%ebp), %eax
+ vmovss -68(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -256(%ebp), %eax
+ vmovss -64(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -252(%ebp), %eax
+ vmovss -60(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -248(%ebp), %eax
+ vmovss -56(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ movl -244(%ebp), %eax
+ vmovss -52(%ebp), %xmm0
+ vmovss %xmm0, (%eax)
+ addl $296, %esp
+ popq %r10
+ .cfi_def_cfa 10, 0
+ popq %rbp
+ leal -8(%r10), %esp
+ .cfi_def_cfa 7, 8
+ ret
+#endif
+.endm
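+
+/* Editor's note, not part of the patch: the vvv variant receives one
+   destination pointer per lane (16 sine pointers and 16 cosine
+   pointers packed into %zmm1-%zmm4), while the new vl4l4 variant
+   takes two ordinary array pointers in %rdi and %rsi.  The macro
+   above therefore spills the pointer vectors to the stack, calls the
+   vl4l4 kernel on two scratch buffers, and scatters each lane's
+   result through its own pointer.  A hedged C model of that control
+   flow (the kernel prototype is assumed, not part of the patch):
+
+     #include <stddef.h>
+
+     extern void sincosf16_vl4l4 (const float x[16], float sins[16],
+                                  float coss[16]);
+
+     static void
+     sincosf16_vvv_model (const float x[16], float *sinptr[16],
+                          float *cosptr[16])
+     {
+       float sins[16], coss[16];
+
+       sincosf16_vl4l4 (x, sins, coss);
+
+       for (size_t i = 0; i < 16; i++)
+         {
+           *sinptr[i] = sins[i];
+           *cosptr[i] = coss[i];
+         }
+     }
+
+   The long run of movq/movl pairs in the non-ILP32 branch is this
+   scatter unrolled: each movq loads one saved 64-bit pointer from the
+   stack and the matching movl stores one 32-bit result through it;
+   the __ILP32__ branch does the same with 32-bit pointers and vmovss
+   stores.  */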
+
+ENTRY (_ZGVeN16vvv_sincosf_knl)
+WRAPPER_AVX512_vvv_vl4l4 _ZGVeN16vl4l4_sincosf_knl
+END (_ZGVeN16vvv_sincosf_knl)
+
+ENTRY (_ZGVeN16vvv_sincosf_skx)
+WRAPPER_AVX512_vvv_vl4l4 _ZGVeN16vl4l4_sincosf_skx
END (_ZGVeN16vvv_sincosf_skx)
.section .rodata, "a"