Diffstat (limited to 'sysdeps/x86_64')
-rw-r--r--  sysdeps/x86_64/cacheinfo.c               |  42
-rw-r--r--  sysdeps/x86_64/dl-trampoline.S           | 244
-rw-r--r--  sysdeps/x86_64/dl-trampoline.h           | 269
-rw-r--r--  sysdeps/x86_64/multiarch/Makefile        |   2
-rw-r--r--  sysdeps/x86_64/multiarch/rawmemchr.S     |   1
-rw-r--r--  sysdeps/x86_64/multiarch/strcmp-ssse3.S  |   3
-rw-r--r--  sysdeps/x86_64/multiarch/strcmp.S        |  12
-rw-r--r--  sysdeps/x86_64/multiarch/strcspn-c.c     |   6
-rw-r--r--  sysdeps/x86_64/multiarch/strlen.S        |   1
-rw-r--r--  sysdeps/x86_64/multiarch/strncmp-ssse3.S |   4
-rw-r--r--  sysdeps/x86_64/multiarch/strspn-c.c      |   4
-rw-r--r--  sysdeps/x86_64/strcmp.S                  | 211
12 files changed, 507 insertions(+), 292 deletions(-)
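The cacheinfo.c hunk below changes how init_cacheinfo counts the threads sharing the last-level cache: on CPUs that support CPUID leaf 11, the field read from leaf 4 is a maximum number of addressable IDs rather than a thread count, so it is trimmed with a mask built from its highest set bit. A minimal standalone C sketch of that computation follows; the function name and sample values are made up for illustration, and __builtin_clz is the GCC builtin standing in for the patch's inline "bsr":

/* Standalone sketch (not glibc code) of the count-mask trimming in
   the cacheinfo.c hunk below.  THREADS mirrors the EAX[25:14] field
   the hunk reads from CPUID leaf 4; SHIPPED mirrors the EBX[7:0]
   field from leaf 11.  */
#include <stdio.h>

static unsigned int
threads_sharing_cache (unsigned int threads, unsigned int shipped)
{
  /* Highest set bit of THREADS (THREADS must be nonzero; the patch
     guards this with "if (threads && max_cpuid >= 11)").  */
  int highest = 31 - __builtin_clz (threads);
  /* Mask covering every bit up to and including that position,
     mirroring "count_mask = ~(-1 << (count_mask + 1))".  */
  unsigned int count_mask = ~(-1 << (highest + 1));
  /* Apply the mask, then add the 1 back ("threads += 1").  */
  return ((shipped - 1) & count_mask) + 1;
}

int
main (void)
{
  /* E.g. field value 3 (4 addressable IDs), 2 logical processors
     shipped per core: the real sharing count is 2, not 4.  */
  printf ("%u\n", threads_sharing_cache (3, 2));
  return 0;
}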
diff --git a/sysdeps/x86_64/cacheinfo.c b/sysdeps/x86_64/cacheinfo.c
index f252fc2c6c..5b66c62eb3 100644
--- a/sysdeps/x86_64/cacheinfo.c
+++ b/sysdeps/x86_64/cacheinfo.c
@@ -516,13 +516,15 @@ init_cacheinfo (void)
       shared = handle_intel (_SC_LEVEL2_CACHE_SIZE, max_cpuid);
     }
 
+  unsigned int ebx_1;
+
 #ifdef USE_MULTIARCH
   eax = __cpu_features.cpuid[COMMON_CPUID_INDEX_1].eax;
-  ebx = __cpu_features.cpuid[COMMON_CPUID_INDEX_1].ebx;
+  ebx_1 = __cpu_features.cpuid[COMMON_CPUID_INDEX_1].ebx;
   ecx = __cpu_features.cpuid[COMMON_CPUID_INDEX_1].ecx;
   edx = __cpu_features.cpuid[COMMON_CPUID_INDEX_1].edx;
 #else
-  __cpuid (1, eax, ebx, ecx, edx);
+  __cpuid (1, eax, ebx_1, ecx, edx);
 #endif
 
 #ifndef DISABLE_PREFERRED_MEMORY_INSTRUCTION
@@ -554,14 +556,46 @@ init_cacheinfo (void)
 	    }
 	  while (((eax >> 5) & 0x7) != level);
 
-	  threads = ((eax >> 14) & 0x3ff) + 1;
+	  threads = (eax >> 14) & 0x3ff;
+
+	  /* If max_cpuid >= 11, THREADS is the maximum number of
+	     addressable IDs for logical processors sharing the
+	     cache, instead of the maximum number of threads
+	     sharing the cache.  */
+	  if (threads && max_cpuid >= 11)
+	    {
+	      /* Find the number of logical processors shipped in
+		 one core and apply count mask.  */
+	      i = 0;
+	      while (1)
+		{
+		  __cpuid_count (11, i++, eax, ebx, ecx, edx);
+
+		  int shipped = ebx & 0xff;
+		  int type = ecx & 0xff00;
+		  if (shipped == 0 || type == 0)
+		    break;
+		  else if (type == 0x200)
+		    {
+		      int count_mask;
+
+		      /* Compute count mask.  */
+		      asm ("bsr %1, %0"
+			   : "=r" (count_mask) : "g" (threads));
+		      count_mask = ~(-1 << (count_mask + 1));
+		      threads = (shipped - 1) & count_mask;
+		      break;
+		    }
+		}
+	    }
+	  threads += 1;
 	}
       else
 	{
 intel_bug_no_cache_info:
 	  /* Assume that all logical threads share the highest cache level.  */
-	  threads = (ebx >> 16) & 0xff;
+	  threads = (ebx_1 >> 16) & 0xff;
 	}
 
       /* Cap usage of highest cache level to the number of supported
diff --git a/sysdeps/x86_64/dl-trampoline.S b/sysdeps/x86_64/dl-trampoline.S
index 20da6956f1..f9c60ad5cf 100644
--- a/sysdeps/x86_64/dl-trampoline.S
+++ b/sysdeps/x86_64/dl-trampoline.S
@@ -146,247 +146,17 @@ L(have_avx):
 2:	movl	%eax, L(have_avx)(%rip)
 	cmpl	$0, %eax
 
-1:	js	L(no_avx1)
+1:	js	L(no_avx)
 
-	/* This is to support AVX audit modules.  */
-	vmovdqu %ymm0, (LR_VECTOR_OFFSET)(%rsp)
-	vmovdqu %ymm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
-	vmovdqu %ymm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
-	vmovdqu %ymm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
-	vmovdqu %ymm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
-	vmovdqu %ymm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
-	vmovdqu %ymm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
-	vmovdqu %ymm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
+# define RESTORE_AVX
+# include "dl-trampoline.h"
 
-	/* Save xmm0-xmm7 registers to detect if any of them are
-	   changed by audit module.  */
-	vmovdqa %xmm0, (LR_SIZE)(%rsp)
-	vmovdqa %xmm1, (LR_SIZE + XMM_SIZE)(%rsp)
-	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
-	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
-	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
-	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
-	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
-	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
-
-L(no_avx1):
-# endif
-
-	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
-	movq 48(%rbx), %rdx	# Load return address if needed.
-	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
-	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
-	leaq 16(%rbx), %r8
-	call _dl_profile_fixup	# Call resolver.
-
-	movq %rax, %r11		# Save return value.
-
-	movq 8(%rbx), %rax	# Get back register content.
-	movq LR_RDX_OFFSET(%rsp), %rdx
-	movq LR_R8_OFFSET(%rsp), %r8
-	movq LR_R9_OFFSET(%rsp), %r9
-
-	movaps (LR_XMM_OFFSET)(%rsp), %xmm0
-	movaps (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
-	movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
-	movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
-	movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
-	movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
-	movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
-	movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
-
-# ifdef HAVE_AVX_SUPPORT
-	cmpl	$0, L(have_avx)(%rip)
-	js	L(no_avx2)
-
-	/* Check if any xmm0-xmm7 registers are changed by audit
-	   module.  */
-	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET)(%rsp), %ymm0
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7
-
-L(no_avx2):
-1:
-# endif
-	movq 16(%rbx), %r10	# Anything in framesize?
-	testq %r10, %r10
-	jns 3f
-
-	/* There's nothing in the frame size, so there
-	   will be no call to the _dl_call_pltexit.  */
-
-	/* Get back registers content.  */
-	movq LR_RCX_OFFSET(%rsp), %rcx
-	movq LR_RSI_OFFSET(%rsp), %rsi
-	movq LR_RDI_OFFSET(%rsp), %rdi
-
-	movq %rbx, %rsp
-	movq (%rsp), %rbx
-	cfi_restore(rbx)
-	cfi_def_cfa_register(%rsp)
-
-	addq $48, %rsp		# Adjust the stack to the return value
-				# (eats the reloc index and link_map)
-	cfi_adjust_cfa_offset(-48)
-	jmp *%r11		# Jump to function address.
-
-3:
-	cfi_adjust_cfa_offset(48)
-	cfi_rel_offset(%rbx, 0)
-	cfi_def_cfa_register(%rbx)
-
-	/* At this point we need to prepare new stack for the function
-	   which has to be called.  We copy the original stack to a
-	   temporary buffer of the size specified by the 'framesize'
-	   returned from _dl_profile_fixup */
-
-	leaq LR_RSP_OFFSET(%rbx), %rsi	# stack
-	addq $8, %r10
-	andq $0xfffffffffffffff0, %r10
-	movq %r10, %rcx
-	subq %r10, %rsp
-	movq %rsp, %rdi
-	shrq $3, %rcx
-	rep
-	movsq
-
-	movq 24(%rdi), %rcx	# Get back register content.
-	movq 32(%rdi), %rsi
-	movq 40(%rdi), %rdi
-
-	call *%r11
-
-	mov 24(%rbx), %rsp	# Drop the copied stack content
-
-	/* Now we have to prepare the La_x86_64_retval structure for the
-	   _dl_call_pltexit.  The La_x86_64_regs is being pointed by rsp now,
-	   so we just need to allocate the sizeof(La_x86_64_retval) space on
-	   the stack, since the alignment has already been taken care of. */
-# ifdef HAVE_AVX_SUPPORT
-	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
-	   registers to detect if xmm0/xmm1 registers are changed
-	   by audit module.  */
-	subq $(LRV_SIZE + XMM_SIZE*2), %rsp
-# else
-	subq $LRV_SIZE, %rsp	# sizeof(La_x86_64_retval)
-# endif
-	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.
-
-	/* Fill in the La_x86_64_retval structure.  */
-	movq %rax, LRV_RAX_OFFSET(%rcx)
-	movq %rdx, LRV_RDX_OFFSET(%rcx)
-
-	movaps %xmm0, LRV_XMM0_OFFSET(%rcx)
-	movaps %xmm1, LRV_XMM1_OFFSET(%rcx)
-
-# ifdef HAVE_AVX_SUPPORT
-	cmpl	$0, L(have_avx)(%rip)
-	js	L(no_avx3)
-
-	/* This is to support AVX audit modules.  */
-	vmovdqu %ymm0, LRV_VECTOR0_OFFSET(%rcx)
-	vmovdqu %ymm1, LRV_VECTOR1_OFFSET(%rcx)
-
-	/* Save xmm0/xmm1 registers to detect if they are changed
-	   by audit module.  */
-	vmovdqa %xmm0, (LRV_SIZE)(%rcx)
-	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE)(%rcx)
-
-L(no_avx3):
-# endif
-
-	fstpt LRV_ST0_OFFSET(%rcx)
-	fstpt LRV_ST1_OFFSET(%rcx)
-
-	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
-	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
-	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
-	call _dl_call_pltexit
-
-	/* Restore return registers.  */
-	movq LRV_RAX_OFFSET(%rsp), %rax
-	movq LRV_RDX_OFFSET(%rsp), %rdx
-
-	movaps LRV_XMM0_OFFSET(%rsp), %xmm0
-	movaps LRV_XMM1_OFFSET(%rsp), %xmm1
-
-# ifdef HAVE_AVX_SUPPORT
-	cmpl	$0, L(have_avx)(%rip)
-	js	L(no_avx4)
-
-	/* Check if xmm0/xmm1 registers are changed by audit module.  */
-	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
-	vpmovmskb %xmm2, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0
-
-1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
-	vpmovmskb %xmm2, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1
-
-L(no_avx4):
-1:
+	.align 16
+L(no_avx):
 # endif
 
-	fldt LRV_ST1_OFFSET(%rsp)
-	fldt LRV_ST0_OFFSET(%rsp)
-
-	movq %rbx, %rsp
-	movq (%rsp), %rbx
-	cfi_restore(rbx)
-	cfi_def_cfa_register(%rsp)
-
-	addq $48, %rsp		# Adjust the stack to the return value
-				# (eats the reloc index and link_map)
-	cfi_adjust_cfa_offset(-48)
-	retq
+# undef RESTORE_AVX
+# include "dl-trampoline.h"
 
 	cfi_endproc
 	.size _dl_runtime_profile, .-_dl_runtime_profile
diff --git a/sysdeps/x86_64/dl-trampoline.h b/sysdeps/x86_64/dl-trampoline.h
new file mode 100644
index 0000000000..5d49ed4408
--- /dev/null
+++ b/sysdeps/x86_64/dl-trampoline.h
@@ -0,0 +1,269 @@
+/* Partial PLT profile trampoline to save and restore x86-64 vector
+   registers.
+   Copyright (C) 2009 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifdef RESTORE_AVX
+	/* This is to support AVX audit modules.  */
+	vmovdqu %ymm0, (LR_VECTOR_OFFSET)(%rsp)
+	vmovdqu %ymm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
+	vmovdqu %ymm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
+	vmovdqu %ymm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
+	vmovdqu %ymm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
+	vmovdqu %ymm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
+	vmovdqu %ymm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
+	vmovdqu %ymm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
+
+	/* Save xmm0-xmm7 registers to detect if any of them are
+	   changed by audit module.  */
+	vmovdqa %xmm0, (LR_SIZE)(%rsp)
+	vmovdqa %xmm1, (LR_SIZE + XMM_SIZE)(%rsp)
+	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
+	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
+	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
+	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
+	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
+	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
+#endif
+
+	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
+	movq 48(%rbx), %rdx	# Load return address if needed.
+	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
+	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
+	leaq 16(%rbx), %r8
+	call _dl_profile_fixup	# Call resolver.
+
+	movq %rax, %r11		# Save return value.
+
+	movq 8(%rbx), %rax	# Get back register content.
+	movq LR_RDX_OFFSET(%rsp), %rdx
+	movq LR_R8_OFFSET(%rsp), %r8
+	movq LR_R9_OFFSET(%rsp), %r9
+
+	movaps (LR_XMM_OFFSET)(%rsp), %xmm0
+	movaps (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
+	movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
+	movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
+	movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
+	movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
+	movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
+	movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
+
+#ifdef RESTORE_AVX
+	/* Check if any xmm0-xmm7 registers are changed by audit
+	   module.  */
+	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
+	vpmovmskb %xmm8, %esi
+	cmpl $0xffff, %esi
+	je 2f
+	vmovdqa	%xmm0, (LR_VECTOR_OFFSET)(%rsp)
+	jmp 1f
+2:	vmovdqu	(LR_VECTOR_OFFSET)(%rsp), %ymm0
+	vmovdqa	%xmm0, (LR_XMM_OFFSET)(%rsp)
+
+1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
+	vpmovmskb %xmm8, %esi
+	cmpl $0xffff, %esi
+	je 2f
+	vmovdqa	%xmm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
+	jmp 1f
+2:	vmovdqu	(LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1
+	vmovdqa	%xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)
+
+1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
+	vpmovmskb %xmm8, %esi
+	cmpl $0xffff, %esi
+	je 2f
+	vmovdqa	%xmm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
+	jmp 1f
+2:	vmovdqu	(LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2
+	vmovdqa	%xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
+
+1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
+	vpmovmskb %xmm8, %esi
+	cmpl $0xffff, %esi
+	je 2f
+	vmovdqa	%xmm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
+	jmp 1f
+2:	vmovdqu	(LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3
+	vmovdqa	%xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
+
+1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
+	vpmovmskb %xmm8, %esi
+	cmpl $0xffff, %esi
+	je 2f
+	vmovdqa	%xmm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
+	jmp 1f
+2:	vmovdqu	(LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4
+	vmovdqa	%xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
+
+1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
+	vpmovmskb %xmm8, %esi
+	cmpl $0xffff, %esi
+	je 2f
+	vmovdqa	%xmm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
+	jmp 1f
+2:	vmovdqu	(LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5
+	vmovdqa	%xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
+
+1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
+	vpmovmskb %xmm8, %esi
+	cmpl $0xffff, %esi
+	je 2f
+	vmovdqa	%xmm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
+	jmp 1f
+2:	vmovdqu	(LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6
+	vmovdqa	%xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
+
+1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
+	vpmovmskb %xmm8, %esi
+	cmpl $0xffff, %esi
+	je 2f
+	vmovdqa	%xmm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
+	jmp 1f
+2:	vmovdqu	(LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7
+	vmovdqa	%xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)
+
+1:
+#endif
+	movq 16(%rbx), %r10	# Anything in framesize?
+	testq %r10, %r10
+	jns 3f
+
+	/* There's nothing in the frame size, so there
+	   will be no call to the _dl_call_pltexit.  */
+
+	/* Get back registers content.  */
+	movq LR_RCX_OFFSET(%rsp), %rcx
+	movq LR_RSI_OFFSET(%rsp), %rsi
+	movq LR_RDI_OFFSET(%rsp), %rdi
+
+	movq %rbx, %rsp
+	movq (%rsp), %rbx
+	cfi_restore(rbx)
+	cfi_def_cfa_register(%rsp)
+
+	addq $48, %rsp		# Adjust the stack to the return value
+				# (eats the reloc index and link_map)
+	cfi_adjust_cfa_offset(-48)
+	jmp *%r11		# Jump to function address.
+
+3:
+	cfi_adjust_cfa_offset(48)
+	cfi_rel_offset(%rbx, 0)
+	cfi_def_cfa_register(%rbx)
+
+	/* At this point we need to prepare new stack for the function
+	   which has to be called.  We copy the original stack to a
+	   temporary buffer of the size specified by the 'framesize'
+	   returned from _dl_profile_fixup */
+
+	leaq LR_RSP_OFFSET(%rbx), %rsi	# stack
+	addq $8, %r10
+	andq $0xfffffffffffffff0, %r10
+	movq %r10, %rcx
+	subq %r10, %rsp
+	movq %rsp, %rdi
+	shrq $3, %rcx
+	rep
+	movsq
+
+	movq 24(%rdi), %rcx	# Get back register content.
+	movq 32(%rdi), %rsi
+	movq 40(%rdi), %rdi
+
+	call *%r11
+
+	mov 24(%rbx), %rsp	# Drop the copied stack content
+
+	/* Now we have to prepare the La_x86_64_retval structure for the
+	   _dl_call_pltexit.  The La_x86_64_regs is being pointed by rsp now,
+	   so we just need to allocate the sizeof(La_x86_64_retval) space on
+	   the stack, since the alignment has already been taken care of. */
+# ifdef RESTORE_AVX
+	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
+	   registers to detect if xmm0/xmm1 registers are changed
+	   by audit module.  */
+	subq $(LRV_SIZE + XMM_SIZE*2), %rsp
+# else
+	subq $LRV_SIZE, %rsp	# sizeof(La_x86_64_retval)
+# endif
+	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.
+
+	/* Fill in the La_x86_64_retval structure.  */
+	movq %rax, LRV_RAX_OFFSET(%rcx)
+	movq %rdx, LRV_RDX_OFFSET(%rcx)
+
+	movaps %xmm0, LRV_XMM0_OFFSET(%rcx)
+	movaps %xmm1, LRV_XMM1_OFFSET(%rcx)
+
+# ifdef RESTORE_AVX
+	/* This is to support AVX audit modules.  */
+	vmovdqu %ymm0, LRV_VECTOR0_OFFSET(%rcx)
+	vmovdqu %ymm1, LRV_VECTOR1_OFFSET(%rcx)
+
+	/* Save xmm0/xmm1 registers to detect if they are changed
+	   by audit module.  */
+	vmovdqa %xmm0, (LRV_SIZE)(%rcx)
+	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE)(%rcx)
+# endif
+
+	fstpt LRV_ST0_OFFSET(%rcx)
+	fstpt LRV_ST1_OFFSET(%rcx)
+
+	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
+	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
+	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
+	call _dl_call_pltexit
+
+	/* Restore return registers.  */
+	movq LRV_RAX_OFFSET(%rsp), %rax
+	movq LRV_RDX_OFFSET(%rsp), %rdx
+
+	movaps LRV_XMM0_OFFSET(%rsp), %xmm0
+	movaps LRV_XMM1_OFFSET(%rsp), %xmm1
+
+# ifdef RESTORE_AVX
+	/* Check if xmm0/xmm1 registers are changed by audit module.  */
+	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
+	vpmovmskb %xmm2, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0
+
+1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
+	vpmovmskb %xmm2, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1
+
+1:
+# endif
+
+	fldt LRV_ST1_OFFSET(%rsp)
+	fldt LRV_ST0_OFFSET(%rsp)
+
+	movq %rbx, %rsp
+	movq (%rsp), %rbx
+	cfi_restore(rbx)
+	cfi_def_cfa_register(%rsp)
+
+	addq $48, %rsp		# Adjust the stack to the return value
+				# (eats the reloc index and link_map)
+	cfi_adjust_cfa_offset(-48)
+	retq
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index b066402204..0ded3b3261 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -4,7 +4,7 @@ gen-as-const-headers += ifunc-defines.sym
 endif
 
 ifeq ($(subdir),string)
-sysdep_routines += stpncpy-c strncpy-c
+sysdep_routines += stpncpy-c strncpy-c strcmp-ssse3 strncmp-ssse3
 ifeq (yes,$(config-cflags-sse4))
 sysdep_routines += strcspn-c strpbrk-c strspn-c strstr-c strcasestr-c
 CFLAGS-strcspn-c.c += -msse4
diff --git a/sysdeps/x86_64/multiarch/rawmemchr.S b/sysdeps/x86_64/multiarch/rawmemchr.S
index d4f265f430..08fd8769fc 100644
--- a/sysdeps/x86_64/multiarch/rawmemchr.S
+++ b/sysdeps/x86_64/multiarch/rawmemchr.S
@@ -38,6 +38,7 @@ END(rawmemchr)
 
 strong_alias (rawmemchr, __rawmemchr)
 
+	.section .text.sse4.2,"ax",@progbits
 	.align 16
 	.type __rawmemchr_sse42, @function
 __rawmemchr_sse42:
diff --git a/sysdeps/x86_64/multiarch/strcmp-ssse3.S b/sysdeps/x86_64/multiarch/strcmp-ssse3.S
new file mode 100644
index 0000000000..98cecb8942
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strcmp-ssse3.S
@@ -0,0 +1,3 @@
+#define USE_SSSE3 1
+#define STRCMP __strcmp_ssse3
+#include "../strcmp.S"
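The new dl-trampoline.h above guards every YMM restore with the vpcmpeqq/vpmovmskb sequence: the saved copy of each XMM register is compared byte-for-byte with its current value, and the full YMM register is restored only when all 16 bytes still match (mask 0xffff), i.e. when the audit module left the low half untouched. A standalone intrinsics sketch of that test follows; the function name and test values are illustrative, not part of the patch, and it needs -msse4.1 for _mm_cmpeq_epi64 (the non-VEX form of vpcmpeqq):

/* Standalone sketch (not glibc code) of the register-change test in
   dl-trampoline.h.  */
#include <emmintrin.h>		/* SSE2: _mm_movemask_epi8, _mm_set1_epi32 */
#include <smmintrin.h>		/* SSE4.1: _mm_cmpeq_epi64 */
#include <stdio.h>

static int
xmm_unchanged (__m128i saved, __m128i current)
{
  /* 64-bit lane-wise compare; equal lanes become all-ones
     (the vpcmpeqq step).  */
  __m128i eq = _mm_cmpeq_epi64 (saved, current);
  /* Collect one bit per byte; 0xffff means all 16 bytes matched
     (the vpmovmskb + cmpl $0xffff step).  */
  return _mm_movemask_epi8 (eq) == 0xffff;
}

int
main (void)
{
  __m128i a = _mm_set1_epi32 (42);
  __m128i b = _mm_set1_epi32 (42);
  __m128i c = _mm_set1_epi32 (7);
  printf ("%d %d\n", xmm_unchanged (a, b), xmm_unchanged (a, c));
  return 0;
}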
diff --git a/sysdeps/x86_64/multiarch/strcmp.S b/sysdeps/x86_64/multiarch/strcmp.S
index 1a315737af..05adf1e2e6 100644
--- a/sysdeps/x86_64/multiarch/strcmp.S
+++ b/sysdeps/x86_64/multiarch/strcmp.S
@@ -34,6 +34,7 @@
 	mov	%r9, %r11
 
 #define STRCMP_SSE42	__strncmp_sse42
+#define STRCMP_SSSE3	__strncmp_ssse3
 #define STRCMP_SSE2	__strncmp_sse2
 #define __GI_STRCMP	__GI_strncmp
 #else
@@ -41,6 +42,7 @@
 #ifndef STRCMP
 #define STRCMP		strcmp
 #define STRCMP_SSE42	__strcmp_sse42
+#define STRCMP_SSSE3	__strcmp_ssse3
 #define STRCMP_SSE2	__strcmp_sse2
 #define __GI_STRCMP	__GI_strcmp
 #endif
@@ -60,10 +62,14 @@ ENTRY(STRCMP)
 	cmpl	$0, __cpu_features+KIND_OFFSET(%rip)
 	jne	1f
 	call	__init_cpu_features
-1:	leaq	STRCMP_SSE2(%rip), %rax
-	testl	$(1<<20), __cpu_features+CPUID_OFFSET+COMMON_CPUID_INDEX_1*CPUID_SIZE+CPUID_ECX_OFFSET(%rip)
-	jz	2f
+1:	leaq	STRCMP_SSE42(%rip), %rax
+	testl	$(1<<20), __cpu_features+CPUID_OFFSET+COMMON_CPUID_INDEX_1*CPUID_SIZE+CPUID_ECX_OFFSET(%rip)
+	jnz	2f
+	leaq	STRCMP_SSSE3(%rip), %rax
+	testl	$(1<<9), __cpu_features+CPUID_OFFSET+COMMON_CPUID_INDEX_1*CPUID_SIZE+CPUID_ECX_OFFSET(%rip)
+	jnz	2f
+	leaq	STRCMP_SSE2(%rip), %rax
 2:	ret
 END(STRCMP)
diff --git a/sysdeps/x86_64/multiarch/strcspn-c.c b/sysdeps/x86_64/multiarch/strcspn-c.c
index 4512267d3f..daeebe1bf5 100644
--- a/sysdeps/x86_64/multiarch/strcspn-c.c
+++ b/sysdeps/x86_64/multiarch/strcspn-c.c
@@ -86,11 +86,13 @@ STRCSPN_SSE42 (const char *s, const char *a)
   const char *aligned;
   __m128i mask;
+  /* Fake initialization.  gcc otherwise will warn.  */
+  asm ("" : "=xm" (mask));
   int offset = (int) ((size_t) a & 15);
 
   if (offset != 0)
     {
       /* Load masks.  */
-      aligned = (const char *) ((size_t) a & 0xfffffffffffffff0L);
+      aligned = (const char *) ((size_t) a & -16L);
       __m128i mask0 = _mm_load_si128 ((__m128i *) aligned);
 
       switch (offset)
@@ -229,7 +231,7 @@ STRCSPN_SSE42 (const char *s, const char *a)
   if (offset != 0)
     {
       /* Check partial string.  */
-      aligned = (const char *) ((size_t) s & 0xfffffffffffffff0L);
+      aligned = (const char *) ((size_t) s & -16L);
       __m128i value = _mm_load_si128 ((__m128i *) aligned);
 
       switch (offset)
diff --git a/sysdeps/x86_64/multiarch/strlen.S b/sysdeps/x86_64/multiarch/strlen.S
index 82b03ccc28..4342c6cdab 100644
--- a/sysdeps/x86_64/multiarch/strlen.S
+++ b/sysdeps/x86_64/multiarch/strlen.S
@@ -40,6 +40,7 @@ ENTRY(strlen)
 END(strlen)
 
+	.section .text.sse4.2,"ax",@progbits
 	.align 16
 	.type __strlen_sse42, @function
 __strlen_sse42:
diff --git a/sysdeps/x86_64/multiarch/strncmp-ssse3.S b/sysdeps/x86_64/multiarch/strncmp-ssse3.S
new file mode 100644
index 0000000000..a320a3e949
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strncmp-ssse3.S
@@ -0,0 +1,4 @@
+#define USE_SSSE3 1
+#define STRCMP __strncmp_ssse3
+#define USE_AS_STRNCMP
+#include "../strcmp.S"
diff --git a/sysdeps/x86_64/multiarch/strspn-c.c b/sysdeps/x86_64/multiarch/strspn-c.c
index 5b99f0d383..be9e8ac0a8 100644
--- a/sysdeps/x86_64/multiarch/strspn-c.c
+++ b/sysdeps/x86_64/multiarch/strspn-c.c
@@ -68,7 +68,7 @@ __strspn_sse42 (const char *s, const char *a)
   if (offset != 0)
     {
       /* Load masks.  */
-      aligned = (const char *) ((size_t) a & 0xfffffffffffffff0L);
+      aligned = (const char *) ((size_t) a & -16L);
       __m128i mask0 = _mm_load_si128 ((__m128i *) aligned);
 
       switch (offset)
@@ -207,7 +207,7 @@ __strspn_sse42 (const char *s, const char *a)
   if (offset != 0)
     {
       /* Check partial string.  */
-      aligned = (const char *) ((size_t) s & 0xfffffffffffffff0L);
+      aligned = (const char *) ((size_t) s & -16L);
       __m128i value = _mm_load_si128 ((__m128i *) aligned);
 
       switch (offset)
diff --git a/sysdeps/x86_64/strcmp.S b/sysdeps/x86_64/strcmp.S
index 340a64ba35..650ec173b6 100644
--- a/sysdeps/x86_64/strcmp.S
+++ b/sysdeps/x86_64/strcmp.S
@@ -51,7 +51,12 @@
 # endif
 #endif
 
+#ifndef USE_SSSE3
 	.text
+#else
+	.section .text.ssse3,"ax",@progbits
+#endif
+
 ENTRY (BP_SYM (STRCMP))
 #ifdef NOT_IN_libc
 	/* Simple version since we can't use SSE registers in ld.so.  */
@@ -244,9 +249,13 @@ LABEL(gobble_ashr_1):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4		/* store for next cycle */
 
+#ifndef USE_SSSE3
 	psrldq	$1, %xmm3
 	pslldq	$15, %xmm2
 	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$1, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -269,9 +278,13 @@ LABEL(gobble_ashr_1):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4		/* store for next cycle */
 
+#ifndef USE_SSSE3
 	psrldq	$1, %xmm3
-	pslldq	$15, %xmm2
+	pslldq	$15, %xmm2
 	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$1, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -363,9 +376,13 @@ LABEL(gobble_ashr_2):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$2, %xmm3
 	pslldq	$14, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$2, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -389,9 +406,13 @@ LABEL(gobble_ashr_2):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$2, %xmm3
-	pslldq	$14, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$14, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$2, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -477,9 +498,13 @@ LABEL(gobble_ashr_3):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$3, %xmm3
 	pslldq	$13, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$3, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -503,9 +528,13 @@ LABEL(gobble_ashr_3):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$3, %xmm3
-	pslldq	$13, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$13, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$3, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -591,9 +620,13 @@ LABEL(gobble_ashr_4):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$4, %xmm3
 	pslldq	$12, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$4, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -617,9 +650,13 @@ LABEL(gobble_ashr_4):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$4, %xmm3
-	pslldq	$12, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$12, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$4, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -705,9 +742,13 @@ LABEL(gobble_ashr_5):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$5, %xmm3
 	pslldq	$11, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$5, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -731,9 +772,13 @@ LABEL(gobble_ashr_5):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$5, %xmm3
-	pslldq	$11, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$11, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$5, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -819,9 +864,13 @@ LABEL(gobble_ashr_6):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$6, %xmm3
 	pslldq	$10, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$6, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -845,9 +894,13 @@ LABEL(gobble_ashr_6):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$6, %xmm3
-	pslldq	$10, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$10, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$6, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -933,9 +986,13 @@ LABEL(gobble_ashr_7):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$7, %xmm3
 	pslldq	$9, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$7, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -959,9 +1016,13 @@ LABEL(gobble_ashr_7):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$7, %xmm3
-	pslldq	$9, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$9, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$7, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1047,9 +1108,13 @@ LABEL(gobble_ashr_8):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$8, %xmm3
 	pslldq	$8, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$8, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1073,9 +1138,13 @@ LABEL(gobble_ashr_8):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$8, %xmm3
-	pslldq	$8, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$8, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$8, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1161,9 +1230,13 @@ LABEL(gobble_ashr_9):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$9, %xmm3
 	pslldq	$7, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$9, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1187,9 +1260,13 @@ LABEL(gobble_ashr_9):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$9, %xmm3
-	pslldq	$7, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$7, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$9, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1275,9 +1352,13 @@ LABEL(gobble_ashr_10):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$10, %xmm3
 	pslldq	$6, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$10, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1301,9 +1382,13 @@ LABEL(gobble_ashr_10):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$10, %xmm3
-	pslldq	$6, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$6, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$10, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1389,9 +1474,13 @@ LABEL(gobble_ashr_11):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$11, %xmm3
 	pslldq	$5, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$11, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1415,9 +1504,13 @@ LABEL(gobble_ashr_11):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$11, %xmm3
-	pslldq	$5, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$5, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$11, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1503,9 +1596,13 @@ LABEL(gobble_ashr_12):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$12, %xmm3
 	pslldq	$4, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$12, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1529,9 +1626,13 @@ LABEL(gobble_ashr_12):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$12, %xmm3
-	pslldq	$4, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$4, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$12, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1617,9 +1718,13 @@ LABEL(gobble_ashr_13):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$13, %xmm3
 	pslldq	$3, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$13, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1643,9 +1748,13 @@ LABEL(gobble_ashr_13):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$13, %xmm3
-	pslldq	$3, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$13, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1731,9 +1840,13 @@ LABEL(gobble_ashr_14):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$14, %xmm3
 	pslldq	$2, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$14, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1757,9 +1870,13 @@ LABEL(gobble_ashr_14):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$14, %xmm3
-	pslldq	$2, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$2, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$14, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1847,9 +1964,13 @@ LABEL(gobble_ashr_15):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$15, %xmm3
 	pslldq	$1, %xmm2
-	por	%xmm3, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$15, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
@@ -1873,9 +1994,13 @@ LABEL(gobble_ashr_15):
 	movdqa	(%rdi, %rcx), %xmm2
 	movdqa	%xmm2, %xmm4
 
+#ifndef USE_SSSE3
 	psrldq	$15, %xmm3
-	pslldq	$1, %xmm2
-	por	%xmm3, %xmm2
+	pslldq	$1, %xmm2
+	por	%xmm3, %xmm2		/* merge into one 16byte value */
+#else
+	palignr	$15, %xmm3, %xmm2	/* merge into one 16byte value */
+#endif
 
 	pcmpeqb	%xmm1, %xmm0
 	pcmpeqb	%xmm2, %xmm1
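The strcmp.S hunks above all make the same substitution: when SSSE3 is available, the three-instruction SSE2 window merge (psrldq $n / pslldq $(16-n) / por) collapses into a single palignr $n. A standalone intrinsics sketch demonstrating the equivalence for a one-byte shift follows; the names and values are illustrative, and it needs -mssse3 for _mm_alignr_epi8:

/* Standalone sketch (not glibc code) showing that palignr computes
   the same 16-byte window across two adjacent vectors as the SSE2
   shift/shift/or sequence.  */
#include <tmmintrin.h>		/* SSSE3: _mm_alignr_epi8 */
#include <string.h>
#include <stdio.h>

int
main (void)
{
  __m128i lo = _mm_setr_epi8 (0, 1, 2, 3, 4, 5, 6, 7,
			      8, 9, 10, 11, 12, 13, 14, 15);
  __m128i hi = _mm_setr_epi8 (16, 17, 18, 19, 20, 21, 22, 23,
			      24, 25, 26, 27, 28, 29, 30, 31);

  /* SSE2 path: (lo >> 1 byte) | (hi << 15 bytes), i.e. the
     psrldq $1 / pslldq $15 / por branch.  */
  __m128i sse2 = _mm_or_si128 (_mm_srli_si128 (lo, 1),
			       _mm_slli_si128 (hi, 15));

  /* SSSE3 path: one palignr, i.e. the #else branch; HI is the
     destination operand (%xmm2), LO the source (%xmm3).  */
  __m128i ssse3 = _mm_alignr_epi8 (hi, lo, 1);

  printf ("%s\n", memcmp (&sse2, &ssse3, 16) == 0 ? "equal" : "differ");
  return 0;
}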