author    Andreas Schwab <schwab@redhat.com>  2009-07-27 10:51:39 +0200
committer Andreas Schwab <schwab@redhat.com>  2009-07-27 10:51:39 +0200
commit    e1a51361e3ac137c56adc6083d095c68e9471795 (patch)
tree      bfec41199e84a74265d8e77b0e8444b06bc9ff00 /sysdeps/x86_64
parent    9285e82ab3a0d1bdaa63fc740165d6a300ad0cc5 (diff)
parent    16d2ea4c821502948d193a152c8b151f5497a0d3 (diff)
Merge commit 'origin/master' into fedora/master
Diffstat (limited to 'sysdeps/x86_64')
-rw-r--r--  sysdeps/x86_64/Makefile                    |    4
-rw-r--r--  sysdeps/x86_64/cacheinfo.c                 |   53
-rw-r--r--  sysdeps/x86_64/multiarch/Makefile          |    2
-rw-r--r--  sysdeps/x86_64/multiarch/init-arch.c       |    8
-rw-r--r--  sysdeps/x86_64/multiarch/rtld-rawmemchr.c  |    1
-rw-r--r--  sysdeps/x86_64/multiarch/rtld-strlen.S     |    1
-rw-r--r--  sysdeps/x86_64/multiarch/strcmp.S          |  369
-rw-r--r--  sysdeps/x86_64/multiarch/strcpy.S          |   13
-rw-r--r--  sysdeps/x86_64/multiarch/strncmp-c.c       |    8
-rw-r--r--  sysdeps/x86_64/rtld-memchr.c               |    1
-rw-r--r--  sysdeps/x86_64/rtld-memcmp.c               |    1
-rw-r--r--  sysdeps/x86_64/rtld-rawmemchr.c            |    1
-rw-r--r--  sysdeps/x86_64/rtld-strchr.S               |  291
-rw-r--r--  sysdeps/x86_64/rtld-strcmp.S               |   28
-rw-r--r--  sysdeps/x86_64/rtld-strlen.S               |  139
-rw-r--r--  sysdeps/x86_64/strcmp.S                    | 1948
-rw-r--r--  sysdeps/x86_64/strncmp.S                   |    3
-rwxr-xr-x  sysdeps/x86_64/tst-xmmymm.sh               |   17
18 files changed, 2587 insertions(+), 301 deletions(-)
diff --git a/sysdeps/x86_64/Makefile b/sysdeps/x86_64/Makefile
index 78fdb04fcb..57cd88432a 100644
--- a/sysdeps/x86_64/Makefile
+++ b/sysdeps/x86_64/Makefile
@@ -19,6 +19,10 @@ ifeq ($(subdir),elf)
sysdep-dl-routines += tlsdesc dl-tlsdesc
sysdep_routines += tlsdesc dl-tlsdesc
sysdep-rtld-routines += tlsdesc dl-tlsdesc
+
+tests: $(objpfx)tst-xmmymm.out
+$(objpfx)tst-xmmymm.out: ../sysdeps/x86_64/tst-xmmymm.sh $(objpfx)ld.so
+ $(SHELL) -e $< $(objpfx) > $@
endif
ifeq ($(subdir),csu)
diff --git a/sysdeps/x86_64/cacheinfo.c b/sysdeps/x86_64/cacheinfo.c
index 362687c181..75b81958dd 100644
--- a/sysdeps/x86_64/cacheinfo.c
+++ b/sysdeps/x86_64/cacheinfo.c
@@ -25,6 +25,17 @@
#ifdef USE_MULTIARCH
# include "multiarch/init-arch.h"
+
+# define is_intel __cpu_features.kind == arch_kind_intel
+# define is_amd __cpu_features.kind == arch_kind_amd
+# define max_cpuid __cpu_features.max_cpuid
+#else
+ /* This spells out "GenuineIntel". */
+# define is_intel \
+ ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69
+ /* This spells out "AuthenticAMD". */
+# define is_amd \
+ ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65
#endif
static const struct intel_02_cache_info
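As context for the is_intel/is_amd macros above: CPUID leaf 0 returns the 12-character vendor string split across EBX, EDX and ECX, in that order, so "GenuineIntel" becomes the three little-endian words the macro tests. A minimal C sketch of the same check (the cpuid wrapper here is illustrative, not a glibc interface):

#include <stdint.h>

/* Illustrative CPUID wrapper; the glibc code uses inline asm directly.  */
static void
cpuid (uint32_t leaf, uint32_t *eax, uint32_t *ebx, uint32_t *ecx,
       uint32_t *edx)
{
  __asm__ volatile ("cpuid"
                    : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                    : "0" (leaf));
}

static int
is_intel_cpu (void)
{
  uint32_t eax, ebx, ecx, edx;
  cpuid (0, &eax, &ebx, &ecx, &edx);
  /* "GenuineIntel" = EBX:"Genu", EDX:"ineI", ECX:"ntel", little endian.  */
  return ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e;
}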
@@ -100,6 +111,9 @@ static const struct intel_02_cache_info
{ 0xe3, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 2097152 },
{ 0xe3, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 4194304 },
{ 0xe4, 16, 64, M(_SC_LEVEL3_CACHE_SIZE), 8388608 },
+ { 0xea, 24, 64, M(_SC_LEVEL3_CACHE_SIZE), 12582912 },
+ { 0xeb, 24, 64, M(_SC_LEVEL3_CACHE_SIZE), 18874368 },
+ { 0xec, 24, 64, M(_SC_LEVEL3_CACHE_SIZE), 25165824 },
};
#define nintel_02_known (sizeof (intel_02_known) / sizeof (intel_02_known [0]))
@@ -152,6 +166,12 @@ intel_check_word (int name, unsigned int value, bool *has_level_2,
/* Intel reused this value. For family 15, model 6 it
specifies the 3rd level cache. Otherwise the 2nd
level cache. */
+ unsigned int family;
+ unsigned int model;
+#ifdef USE_MULTIARCH
+ family = __cpu_features.family;
+ model = __cpu_features.model;
+#else
unsigned int eax;
unsigned int ebx;
unsigned int ecx;
@@ -160,9 +180,10 @@ intel_check_word (int name, unsigned int value, bool *has_level_2,
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "0" (1));
- unsigned int family = ((eax >> 20) & 0xff) + ((eax >> 8) & 0xf);
- unsigned int model = ((((eax >>16) & 0xf) << 4)
- + ((eax >> 4) & 0xf));
+ family = ((eax >> 20) & 0xff) + ((eax >> 8) & 0xf);
+ model = (((eax >>16) & 0xf) << 4) + ((eax >> 4) & 0xf);
+#endif
+
if (family == 15 && model == 6)
{
/* The level 3 cache is encoded for this model like
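The decode that feeds the family == 15 && model == 6 test above follows the CPUID leaf-1 signature layout: base family in EAX bits 8-11, extended family in bits 20-27, base model in bits 4-7, extended model in bits 16-19. A standalone sketch of the same computation:

#include <stdint.h>

/* Decode display family/model from CPUID leaf 1 EAX, mirroring the
   hunk above.  Adding the extended fields unconditionally is safe
   because they read as zero on CPUs that do not use them.  */
static void
decode_signature (uint32_t eax, unsigned int *family, unsigned int *model)
{
  *family = ((eax >> 20) & 0xff) + ((eax >> 8) & 0xf);
  *model = (((eax >> 16) & 0xf) << 4) + ((eax >> 4) & 0xf);
}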
@@ -394,21 +415,24 @@ long int
attribute_hidden
__cache_sysconf (int name)
{
+#ifdef USE_MULTIARCH
+ if (__cpu_features.kind == arch_kind_unknown)
+ __init_cpu_features ();
+#else
/* Find out what brand of processor. */
- unsigned int eax;
+ unsigned int max_cpuid;
unsigned int ebx;
unsigned int ecx;
unsigned int edx;
asm volatile ("cpuid"
- : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "=a" (max_cpuid), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "0" (0));
+#endif
- /* This spells out "GenuineIntel". */
- if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
- return handle_intel (name, eax);
+ if (is_intel)
+ return handle_intel (name, max_cpuid);
- /* This spells out "AuthenticAMD". */
- if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
+ if (is_amd)
return handle_amd (name);
// XXX Fill in more vendors.
@@ -457,20 +481,11 @@ init_cacheinfo (void)
#ifdef USE_MULTIARCH
if (__cpu_features.kind == arch_kind_unknown)
__init_cpu_features ();
-# define is_intel __cpu_features.kind == arch_kind_intel
-# define is_amd __cpu_features.kind == arch_kind_amd
-# define max_cpuid __cpu_features.max_cpuid
#else
int max_cpuid;
asm volatile ("cpuid"
: "=a" (max_cpuid), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "0" (0));
- /* This spells out "GenuineIntel". */
-# define is_intel \
- ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69
- /* This spells out "AuthenticAMD". */
-# define is_amd \
- ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65
#endif
if (is_intel)
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 5ce14aad8d..b066402204 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -4,7 +4,7 @@ gen-as-const-headers += ifunc-defines.sym
endif
ifeq ($(subdir),string)
-sysdep_routines += stpncpy-c strncpy-c strncmp-c
+sysdep_routines += stpncpy-c strncpy-c
ifeq (yes,$(config-cflags-sse4))
sysdep_routines += strcspn-c strpbrk-c strspn-c strstr-c strcasestr-c
CFLAGS-strcspn-c.c += -msse4
diff --git a/sysdeps/x86_64/multiarch/init-arch.c b/sysdeps/x86_64/multiarch/init-arch.c
index 29e687344d..35fd19af0e 100644
--- a/sysdeps/x86_64/multiarch/init-arch.c
+++ b/sysdeps/x86_64/multiarch/init-arch.c
@@ -68,7 +68,13 @@ __init_cpu_features (void)
__cpu_features.model += extended_model;
}
else if (__cpu_features.family == 0x06)
- __cpu_features.model += extended_model;
+ {
+ __cpu_features.model += extended_model;
+
+ if (__cpu_features.model == 0x1c)
+ /* Avoid SSSE3 on Atom since it is slow. */
+ __cpu_features.cpuid[COMMON_CPUID_INDEX_1].ecx &= ~(1 << 9);
+ }
}
/* This spells out "AuthenticAMD". */
else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
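Clearing bit 9 of the cached CPUID.1:ECX word is a policy decision made once: every later SSSE3 test, including the strcpy selector further down in this commit, then falls back to SSE2 on Atom without needing its own model check. A hedged sketch of the idea (the cpu_features layout here is hypothetical):

#include <stdint.h>

/* Hypothetical mirror of glibc's cached feature words.  */
struct cpu_features
{
  uint32_t cpuid_1_ecx;        /* CPUID.1:ECX as read at startup */
  unsigned int family, model;
};

#define bit_SSSE3 (1 << 9)     /* SSSE3 feature flag in CPUID.1:ECX */

static void
apply_cpu_quirks (struct cpu_features *f)
{
  /* Atom (family 0x06, model 0x1c) implements SSSE3, but the SSSE3
     string routines are slower there than the SSE2 ones, so hide the
     bit and let every selector fall back automatically.  */
  if (f->family == 0x06 && f->model == 0x1c)
    f->cpuid_1_ecx &= ~bit_SSSE3;
}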
diff --git a/sysdeps/x86_64/multiarch/rtld-rawmemchr.c b/sysdeps/x86_64/multiarch/rtld-rawmemchr.c
new file mode 100644
index 0000000000..53a90675ab
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rtld-rawmemchr.c
@@ -0,0 +1 @@
+#include "../rtld-rawmemchr.c"
diff --git a/sysdeps/x86_64/multiarch/rtld-strlen.S b/sysdeps/x86_64/multiarch/rtld-strlen.S
new file mode 100644
index 0000000000..596e0549ea
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rtld-strlen.S
@@ -0,0 +1 @@
+#include "../rtld-strlen.S"
diff --git a/sysdeps/x86_64/multiarch/strcmp.S b/sysdeps/x86_64/multiarch/strcmp.S
index 37985036aa..1a315737af 100644
--- a/sysdeps/x86_64/multiarch/strcmp.S
+++ b/sysdeps/x86_64/multiarch/strcmp.S
@@ -28,9 +28,9 @@
/* calculate left number to compare */ \
lea -16(%rcx, %r11), %r9; \
cmp %r9, %r11; \
- jb LABEL(strcmp_exitz); \
+ jb LABEL(strcmp_exitz_sse4_2); \
test %r9, %r9; \
- je LABEL(strcmp_exitz); \
+ je LABEL(strcmp_exitz_sse4_2); \
mov %r9, %r11
#define STRCMP_SSE42 __strncmp_sse42
@@ -106,9 +106,9 @@ STRCMP_SSE42:
*/
#ifdef USE_AS_STRNCMP
test %rdx, %rdx
- je LABEL(strcmp_exitz)
+ je LABEL(strcmp_exitz_sse4_2)
cmp $1, %rdx
- je LABEL(Byte0)
+ je LABEL(Byte0_sse4_2)
mov %rdx, %r11
#endif
mov %esi, %ecx
@@ -117,23 +117,21 @@ STRCMP_SSE42:
and $0x3f, %rcx /* rsi alignment in cache line */
and $0x3f, %rax /* rdi alignment in cache line */
cmp $0x30, %ecx
- ja LABEL(crosscache) /* rsi: 16-byte load will cross cache line */
+ ja LABEL(crosscache_sse4_2)/* rsi: 16-byte load will cross cache line */
cmp $0x30, %eax
- ja LABEL(crosscache) /* rdi: 16-byte load will cross cache line */
- movlpd (%rdi), %xmm1
- movlpd (%rsi), %xmm2
- movhpd 8(%rdi), %xmm1
- movhpd 8(%rsi), %xmm2
+ ja LABEL(crosscache_sse4_2)/* rdi: 16-byte load will cross cache line */
+ movdqu (%rdi), %xmm1
+ movdqu (%rsi), %xmm2
pxor %xmm0, %xmm0 /* clear %xmm0 for null char checks */
pcmpeqb %xmm1, %xmm0 /* Any null chars? */
pcmpeqb %xmm2, %xmm1 /* compare first 16 bytes for equality */
psubb %xmm0, %xmm1 /* packed sub of comparison results*/
pmovmskb %xmm1, %edx
sub $0xffff, %edx /* if first 16 bytes are same, edx == 0xffff */
- jnz LABEL(less16bytes) /* If not, find different value or null char */
+ jnz LABEL(less16bytes_sse4_2)/* If not, find different value or null char */
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz) /* finish comparision */
+	jbe	LABEL(strcmp_exitz_sse4_2)/* finish comparison */
#endif
add $16, %rsi /* prepare to search next 16 bytes */
add $16, %rdi /* prepare to search next 16 bytes */
@@ -144,7 +142,7 @@ STRCMP_SSE42:
* below to use.
*/
.p2align 4
-LABEL(crosscache):
+LABEL(crosscache_sse4_2):
and $0xfffffffffffffff0, %rsi /* force %rsi is 16 byte aligned */
and $0xfffffffffffffff0, %rdi /* force %rdi is 16 byte aligned */
mov $0xffff, %edx /* for equivalent offset */
@@ -152,15 +150,15 @@ LABEL(crosscache):
and $0xf, %ecx /* offset of rsi */
and $0xf, %eax /* offset of rdi */
cmp %eax, %ecx
- je LABEL(ashr_0) /* rsi and rdi relative offset same */
- ja LABEL(bigger)
+ je LABEL(ashr_0_sse4_2) /* rsi and rdi relative offset same */
+ ja LABEL(bigger_sse4_2)
mov %edx, %r8d /* r8d is offset flag for exit tail */
xchg %ecx, %eax
xchg %rsi, %rdi
-LABEL(bigger):
+LABEL(bigger_sse4_2):
lea 15(%rax), %r9
sub %rcx, %r9
- lea LABEL(unaligned_table)(%rip), %r10
+ lea LABEL(unaligned_table_sse4_2)(%rip), %r10
movslq (%r10, %r9,4), %r9
lea (%r10, %r9), %r10
jmp *%r10 /* jump to corresponding case */
@@ -171,7 +169,7 @@ LABEL(bigger):
* n(0~15) n(0~15) 15(15+ n-n) ashr_0
*/
.p2align 4
-LABEL(ashr_0):
+LABEL(ashr_0_sse4_2):
movdqa (%rsi), %xmm1
pxor %xmm0, %xmm0 /* clear %xmm0 for null char check */
@@ -186,7 +184,7 @@ LABEL(ashr_0):
* edx must be the same with r9d if in left byte (16-rcx) is equal to
* the start from (16-rax) and no null char was seen.
*/
- jne LABEL(less32bytes) /* mismatch or null char */
+ jne LABEL(less32bytes_sse4_2) /* mismatch or null char */
UPDATE_STRNCMP_COUNTER
mov $16, %rcx
mov $16, %r9
@@ -205,7 +203,7 @@ LABEL(ashr_0_use_sse4_2):
jbe LABEL(ashr_0_use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
movdqa (%rdi,%rdx), %xmm0
@@ -214,17 +212,17 @@ LABEL(ashr_0_use_sse4_2):
jbe LABEL(ashr_0_use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
jmp LABEL(ashr_0_use_sse4_2)
.p2align 4
LABEL(ashr_0_use_sse4_2_exit):
- jnc LABEL(strcmp_exitz)
+ jnc LABEL(strcmp_exitz_sse4_2)
#ifdef USE_AS_STRNCMP
sub %rcx, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
lea -16(%rdx, %rcx), %rcx
movzbl (%rdi, %rcx), %eax
@@ -241,7 +239,7 @@ LABEL(ashr_0_use_sse4_2_exit):
* n(15) n -15 0(15 +(n-15) - n) ashr_1
*/
.p2align 4
-LABEL(ashr_1):
+LABEL(ashr_1_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -253,7 +251,7 @@ LABEL(ashr_1):
shr %cl, %edx /* adjust 0xffff for offset */
shr %cl, %r9d /* adjust for 16-byte offset */
sub %r9d, %edx
- jnz LABEL(less32bytes) /* mismatch or null char seen */
+ jnz LABEL(less32bytes_sse4_2)/* mismatch or null char seen */
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -281,7 +279,7 @@ LABEL(loop_ashr_1_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -294,7 +292,7 @@ LABEL(loop_ashr_1_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_1_use_sse4_2)
@@ -320,7 +318,7 @@ LABEL(nibble_ashr_1_use_sse4_2):
* n(14~15) n -14 1(15 +(n-14) - n) ashr_2
*/
.p2align 4
-LABEL(ashr_2):
+LABEL(ashr_2_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -332,7 +330,7 @@ LABEL(ashr_2):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -360,7 +358,7 @@ LABEL(loop_ashr_2_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -373,7 +371,7 @@ LABEL(loop_ashr_2_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_2_use_sse4_2)
@@ -399,7 +397,7 @@ LABEL(nibble_ashr_2_use_sse4_2):
* n(13~15) n -13 2(15 +(n-13) - n) ashr_3
*/
.p2align 4
-LABEL(ashr_3):
+LABEL(ashr_3_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -411,7 +409,7 @@ LABEL(ashr_3):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -439,7 +437,7 @@ LABEL(loop_ashr_3_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -452,7 +450,7 @@ LABEL(loop_ashr_3_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_3_use_sse4_2)
@@ -478,7 +476,7 @@ LABEL(nibble_ashr_3_use_sse4_2):
* n(12~15) n -12 3(15 +(n-12) - n) ashr_4
*/
.p2align 4
-LABEL(ashr_4):
+LABEL(ashr_4_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -490,7 +488,7 @@ LABEL(ashr_4):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -519,7 +517,7 @@ LABEL(loop_ashr_4_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -532,7 +530,7 @@ LABEL(loop_ashr_4_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_4_use_sse4_2)
@@ -558,7 +556,7 @@ LABEL(nibble_ashr_4_use_sse4_2):
* n(11~15) n - 11 4(15 +(n-11) - n) ashr_5
*/
.p2align 4
-LABEL(ashr_5):
+LABEL(ashr_5_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -570,7 +568,7 @@ LABEL(ashr_5):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -599,7 +597,7 @@ LABEL(loop_ashr_5_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -613,7 +611,7 @@ LABEL(loop_ashr_5_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_5_use_sse4_2)
@@ -639,7 +637,7 @@ LABEL(nibble_ashr_5_use_sse4_2):
* n(10~15) n - 10 5(15 +(n-10) - n) ashr_6
*/
.p2align 4
-LABEL(ashr_6):
+LABEL(ashr_6_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -651,7 +649,7 @@ LABEL(ashr_6):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -680,7 +678,7 @@ LABEL(loop_ashr_6_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -693,7 +691,7 @@ LABEL(loop_ashr_6_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_6_use_sse4_2)
@@ -719,7 +717,7 @@ LABEL(nibble_ashr_6_use_sse4_2):
* n(9~15) n - 9 6(15 +(n - 9) - n) ashr_7
*/
.p2align 4
-LABEL(ashr_7):
+LABEL(ashr_7_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -731,7 +729,7 @@ LABEL(ashr_7):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -760,7 +758,7 @@ LABEL(loop_ashr_7_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -773,7 +771,7 @@ LABEL(loop_ashr_7_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_7_use_sse4_2)
@@ -799,7 +797,7 @@ LABEL(nibble_ashr_7_use_sse4_2):
* n(8~15) n - 8 7(15 +(n - 8) - n) ashr_8
*/
.p2align 4
-LABEL(ashr_8):
+LABEL(ashr_8_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -811,7 +809,7 @@ LABEL(ashr_8):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -840,7 +838,7 @@ LABEL(loop_ashr_8_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -853,7 +851,7 @@ LABEL(loop_ashr_8_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_8_use_sse4_2)
@@ -879,7 +877,7 @@ LABEL(nibble_ashr_8_use_sse4_2):
* n(7~15) n - 7 8(15 +(n - 7) - n) ashr_9
*/
.p2align 4
-LABEL(ashr_9):
+LABEL(ashr_9_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -891,7 +889,7 @@ LABEL(ashr_9):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -921,7 +919,7 @@ LABEL(loop_ashr_9_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -934,7 +932,7 @@ LABEL(loop_ashr_9_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_9_use_sse4_2)
@@ -960,7 +958,7 @@ LABEL(nibble_ashr_9_use_sse4_2):
* n(6~15) n - 6 9(15 +(n - 6) - n) ashr_10
*/
.p2align 4
-LABEL(ashr_10):
+LABEL(ashr_10_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -972,7 +970,7 @@ LABEL(ashr_10):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -1001,7 +999,7 @@ LABEL(loop_ashr_10_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -1014,7 +1012,7 @@ LABEL(loop_ashr_10_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_10_use_sse4_2)
@@ -1040,7 +1038,7 @@ LABEL(nibble_ashr_10_use_sse4_2):
* n(5~15) n - 5 10(15 +(n - 5) - n) ashr_11
*/
.p2align 4
-LABEL(ashr_11):
+LABEL(ashr_11_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -1052,7 +1050,7 @@ LABEL(ashr_11):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -1081,7 +1079,7 @@ LABEL(loop_ashr_11_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -1094,7 +1092,7 @@ LABEL(loop_ashr_11_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_11_use_sse4_2)
@@ -1120,7 +1118,7 @@ LABEL(nibble_ashr_11_use_sse4_2):
* n(4~15) n - 4 11(15 +(n - 4) - n) ashr_12
*/
.p2align 4
-LABEL(ashr_12):
+LABEL(ashr_12_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -1132,7 +1130,7 @@ LABEL(ashr_12):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -1161,7 +1159,7 @@ LABEL(loop_ashr_12_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -1174,7 +1172,7 @@ LABEL(loop_ashr_12_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_12_use_sse4_2)
@@ -1200,7 +1198,7 @@ LABEL(nibble_ashr_12_use_sse4_2):
* n(3~15) n - 3 12(15 +(n - 3) - n) ashr_13
*/
.p2align 4
-LABEL(ashr_13):
+LABEL(ashr_13_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -1212,7 +1210,7 @@ LABEL(ashr_13):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -1242,7 +1240,7 @@ LABEL(loop_ashr_13_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -1255,7 +1253,7 @@ LABEL(loop_ashr_13_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_13_use_sse4_2)
@@ -1281,7 +1279,7 @@ LABEL(nibble_ashr_13_use_sse4_2):
* n(2~15) n - 2 13(15 +(n - 2) - n) ashr_14
*/
.p2align 4
-LABEL(ashr_14):
+LABEL(ashr_14_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -1293,7 +1291,7 @@ LABEL(ashr_14):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
UPDATE_STRNCMP_COUNTER
@@ -1323,7 +1321,7 @@ LABEL(loop_ashr_14_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -1336,7 +1334,7 @@ LABEL(loop_ashr_14_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_14_use_sse4_2)
@@ -1362,7 +1360,7 @@ LABEL(nibble_ashr_14_use_sse4_2):
* n(1~15) n - 1 14(15 +(n - 1) - n) ashr_15
*/
.p2align 4
-LABEL(ashr_15):
+LABEL(ashr_15_sse4_2):
pxor %xmm0, %xmm0
movdqa (%rdi), %xmm2
movdqa (%rsi), %xmm1
@@ -1374,7 +1372,7 @@ LABEL(ashr_15):
shr %cl, %edx
shr %cl, %r9d
sub %r9d, %edx
- jnz LABEL(less32bytes)
+ jnz LABEL(less32bytes_sse4_2)
movdqa (%rdi), %xmm3
@@ -1406,7 +1404,7 @@ LABEL(loop_ashr_15_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
@@ -1419,7 +1417,7 @@ LABEL(loop_ashr_15_use_sse4_2):
jbe LABEL(use_sse4_2_exit)
#ifdef USE_AS_STRNCMP
sub $16, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add $16, %rdx
jmp LABEL(loop_ashr_15_use_sse4_2)
@@ -1441,219 +1439,78 @@ LABEL(nibble_ashr_use_sse4_2_exit):
pcmpistri $0x1a,(%rsi,%rdx), %xmm0
.p2align 4
LABEL(use_sse4_2_exit):
- jnc LABEL(strcmp_exitz)
+ jnc LABEL(strcmp_exitz_sse4_2)
#ifdef USE_AS_STRNCMP
sub %rcx, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
add %rcx, %rdx
lea -16(%rdi, %r9), %rdi
movzbl (%rdi, %rdx), %eax
movzbl (%rsi, %rdx), %edx
test %r8d, %r8d
- jz LABEL(use_sse4_2_ret)
+ jz LABEL(use_sse4_2_ret_sse4_2)
xchg %eax, %edx
-LABEL(use_sse4_2_ret):
+LABEL(use_sse4_2_ret_sse4_2):
sub %edx, %eax
ret
- .p2align 4
-LABEL(aftertail):
- pcmpeqb %xmm3, %xmm1
- psubb %xmm0, %xmm1
- pmovmskb %xmm1, %edx
- not %edx
-
- .p2align 4
-LABEL(exit):
- lea -16(%r9, %rcx), %rax /* locate the exact offset for rdi */
-LABEL(less32bytes):
+LABEL(less32bytes_sse4_2):
lea (%rdi, %rax), %rdi /* locate the exact address for first operand(rdi) */
lea (%rsi, %rcx), %rsi /* locate the exact address for second operand(rsi) */
test %r8d, %r8d
- jz LABEL(ret)
+ jz LABEL(ret_sse4_2)
xchg %rsi, %rdi /* recover original order according to flag(%r8d) */
.p2align 4
-LABEL(ret):
-LABEL(less16bytes):
- /*
- * Check to see if BSF is fast on this processor. If not, use a different
- * exit tail.
- */
+LABEL(ret_sse4_2):
+LABEL(less16bytes_sse4_2):
bsf %rdx, %rdx /* find and store bit index in %rdx */
#ifdef USE_AS_STRNCMP
sub %rdx, %r11
- jbe LABEL(strcmp_exitz)
+ jbe LABEL(strcmp_exitz_sse4_2)
#endif
- xor %ecx, %ecx /* clear %ecx */
- xor %eax, %eax /* clear %eax */
-
- movb (%rsi, %rdx), %cl
- movb (%rdi, %rdx), %al
+ movzbl (%rsi, %rdx), %ecx
+ movzbl (%rdi, %rdx), %eax
sub %ecx, %eax
ret
-LABEL(strcmp_exitz):
+LABEL(strcmp_exitz_sse4_2):
xor %eax, %eax
ret
.p2align 4
-LABEL(Byte0):
- /*
- * never need to handle byte 0 for strncmpy
-#ifdef USE_AS_STRNCMP
- sub $0, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- */
+LABEL(Byte0_sse4_2):
movzx (%rsi), %ecx
movzx (%rdi), %eax
sub %ecx, %eax
ret
-
- .p2align 4
-LABEL(Byte1):
-
-#ifdef USE_AS_STRNCMP
- sub $1, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- movzx 1(%rsi), %ecx
- movzx 1(%rdi), %eax
-
- sub %ecx, %eax
- ret
-
- .p2align 4
-LABEL(Byte2):
-
-#ifdef USE_AS_STRNCMP
- sub $2, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- movzx 2(%rsi), %ecx
- movzx 2(%rdi), %eax
-
- sub %ecx, %eax
- ret
-
- .p2align 4
-LABEL(Byte3):
-
-#ifdef USE_AS_STRNCMP
- sub $3, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- movzx 3(%rsi), %ecx
- movzx 3(%rdi), %eax
-
- sub %ecx, %eax
- ret
-
- .p2align 4
-LABEL(Byte4):
-
-#ifdef USE_AS_STRNCMP
- sub $4, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- movzx 4(%rsi), %ecx
- movzx 4(%rdi), %eax
-
- sub %ecx, %eax
- ret
-
- .p2align 4
-LABEL(Byte5):
-
-#ifdef USE_AS_STRNCMP
- sub $5, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- movzx 5(%rsi), %ecx
- movzx 5(%rdi), %eax
-
- sub %ecx, %eax
- ret
-
- .p2align 4
-LABEL(Byte6):
-
-#ifdef USE_AS_STRNCMP
- sub $6, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- movzx 6(%rsi), %ecx
- movzx 6(%rdi), %eax
-
- sub %ecx, %eax
- ret
-
- .p2align 4
-LABEL(next_8_bytes):
- add $8, %rdi
- add $8, %rsi
-#ifdef USE_AS_STRNCMP
- sub $8, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- test $0x01, %dh
- jnz LABEL(Byte0)
-
- test $0x02, %dh
- jnz LABEL(Byte1)
-
- test $0x04, %dh
- jnz LABEL(Byte2)
-
- test $0x08, %dh
- jnz LABEL(Byte3)
-
- test $0x10, %dh
- jnz LABEL(Byte4)
-
- test $0x20, %dh
- jnz LABEL(Byte5)
-
- test $0x40, %dh
- jnz LABEL(Byte6)
-
-#ifdef USE_AS_STRNCMP
- sub $7, %r11
- jbe LABEL(strcmp_exitz)
-#endif
- movzx 7(%rsi), %ecx
- movzx 7(%rdi), %eax
-
- sub %ecx, %eax
- ret
cfi_endproc
.size STRCMP_SSE42, .-STRCMP_SSE42
/* Put all SSE 4.2 functions together. */
.section .rodata.sse4.2,"a",@progbits
- .p2align 4
-LABEL(unaligned_table):
- .int LABEL(ashr_1) - LABEL(unaligned_table)
- .int LABEL(ashr_2) - LABEL(unaligned_table)
- .int LABEL(ashr_3) - LABEL(unaligned_table)
- .int LABEL(ashr_4) - LABEL(unaligned_table)
- .int LABEL(ashr_5) - LABEL(unaligned_table)
- .int LABEL(ashr_6) - LABEL(unaligned_table)
- .int LABEL(ashr_7) - LABEL(unaligned_table)
- .int LABEL(ashr_8) - LABEL(unaligned_table)
- .int LABEL(ashr_9) - LABEL(unaligned_table)
- .int LABEL(ashr_10) - LABEL(unaligned_table)
- .int LABEL(ashr_11) - LABEL(unaligned_table)
- .int LABEL(ashr_12) - LABEL(unaligned_table)
- .int LABEL(ashr_13) - LABEL(unaligned_table)
- .int LABEL(ashr_14) - LABEL(unaligned_table)
- .int LABEL(ashr_15) - LABEL(unaligned_table)
- .int LABEL(ashr_0) - LABEL(unaligned_table)
+ .p2align 3
+LABEL(unaligned_table_sse4_2):
+ .int LABEL(ashr_1_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_2_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_3_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_4_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_5_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_6_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_7_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_8_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_9_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_10_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_11_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_12_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_13_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_14_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_15_sse4_2) - LABEL(unaligned_table_sse4_2)
+ .int LABEL(ashr_0_sse4_2) - LABEL(unaligned_table_sse4_2)
# undef ENTRY
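The renamed table keeps the existing scheme: each entry is a 32-bit offset of an ashr_N label relative to the table itself, so the .rodata section needs no load-time relocations. GNU C's labels-as-values extension expresses the same self-relative dispatch; a sketch with illustrative labels:

/* Labels-as-values version of the dispatch above, analogous to
   lea table(%rip),%r10; movslq (%r10,%r9,4),%r9; jmp *(%r10,%r9).
   Offsets in the table are relative to a local label, like the
   .int LABEL(ashr_N) - LABEL(unaligned_table_sse4_2) entries.  */
static int
dispatch (unsigned int idx)   /* assumes idx < 3 */
{
  static const int offsets[] = {
    &&case0 - &&base, &&case1 - &&base, &&case2 - &&base,
  };
 base:
  goto *(&&base + offsets[idx]);
 case0: return 0;
 case1: return 1;
 case2: return 2;
}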
@@ -1673,6 +1530,4 @@ LABEL(unaligned_table):
.globl __GI_STRCMP; __GI_STRCMP = STRCMP_SSE2
#endif
-#ifndef USE_AS_STRNCMP
#include "../strcmp.S"
-#endif
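For reference, the SSE4.2 loops renamed above are built around pcmpistri with immediate 0x1a, which per the <nmmintrin.h> mode bits is signed-byte, equal-each comparison with negative polarity: the result index is the first byte pair that differs, and the flags report any NUL. The intrinsic form, as a sketch:

#include <nmmintrin.h>   /* SSE4.2 intrinsics */

/* 0x1a = _SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_NEGATIVE_POLARITY:
   compare 16 byte pairs, invert the equality mask, and return the index
   of the lowest set bit -- the first differing byte pair, or 16 if the
   blocks match.  */
static int
first_diff16 (__m128i a, __m128i b)
{
  return _mm_cmpistri (a, b,
                       _SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH
                       | _SIDD_NEGATIVE_POLARITY);
}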
diff --git a/sysdeps/x86_64/multiarch/strcpy.S b/sysdeps/x86_64/multiarch/strcpy.S
index 25cd01307d..7e400a9140 100644
--- a/sysdeps/x86_64/multiarch/strcpy.S
+++ b/sysdeps/x86_64/multiarch/strcpy.S
@@ -64,16 +64,9 @@ ENTRY(STRCPY)
call __init_cpu_features
1: leaq STRCPY_SSE2(%rip), %rax
testl $(1<<9), __cpu_features+CPUID_OFFSET+COMMON_CPUID_INDEX_1*CPUID_SIZE+CPUID_ECX_OFFSET(%rip)
- jz 3f
-/* Avoid SSSE3 strcpy on Atom since it is slow. */
- cmpl $1, __cpu_features+KIND_OFFSET(%rip)
- jne 2f
- cmpl $6, __cpu_features+FAMILY_OFFSET(%rip)
- jne 2f
- cmpl $28, __cpu_features+MODEL_OFFSET(%rip)
- jz 3f
-2: leaq STRCPY_SSSE3(%rip), %rax
-3: ret
+ jz 2f
+ leaq STRCPY_SSSE3(%rip), %rax
+2: ret
END(STRCPY)
.section .text.ssse3,"ax",@progbits
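With the Atom special case gone (init-arch.c now masks the SSSE3 bit at startup), the selector shrinks to a single feature test. In C, an IFUNC resolver of this shape looks roughly as follows; the implementation names and the cached-ECX variable are hypothetical stand-ins for the assembly above:

typedef char *(*strcpy_fn) (char *, const char *);

extern char *__strcpy_sse2 (char *, const char *);
extern char *__strcpy_ssse3 (char *, const char *);
extern unsigned int cached_cpuid_1_ecx;   /* hypothetical cached CPUID.1:ECX */

/* Runs once at relocation time; since the Atom quirk already cleared
   bit 9 in the cached word, no family/model test is needed here.  */
static strcpy_fn
strcpy_resolver (void)
{
  return (cached_cpuid_1_ecx & (1 << 9)) ? __strcpy_ssse3 : __strcpy_sse2;
}

char *strcpy (char *, const char *)
     __attribute__ ((ifunc ("strcpy_resolver")));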
diff --git a/sysdeps/x86_64/multiarch/strncmp-c.c b/sysdeps/x86_64/multiarch/strncmp-c.c
deleted file mode 100644
index d4f74a418d..0000000000
--- a/sysdeps/x86_64/multiarch/strncmp-c.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifdef SHARED
-#define STRNCMP __strncmp_sse2
-#undef libc_hidden_builtin_def
-#define libc_hidden_builtin_def(name) \
- __hidden_ver1 (__strncmp_sse2, __GI_strncmp, __strncmp_sse2);
-#endif
-
-#include "strncmp.c"
diff --git a/sysdeps/x86_64/rtld-memchr.c b/sysdeps/x86_64/rtld-memchr.c
new file mode 100644
index 0000000000..f63fefbcec
--- /dev/null
+++ b/sysdeps/x86_64/rtld-memchr.c
@@ -0,0 +1 @@
+#include <string/memchr.c>
diff --git a/sysdeps/x86_64/rtld-memcmp.c b/sysdeps/x86_64/rtld-memcmp.c
new file mode 100644
index 0000000000..2ee40328b8
--- /dev/null
+++ b/sysdeps/x86_64/rtld-memcmp.c
@@ -0,0 +1 @@
+#include <string/memcmp.c>
diff --git a/sysdeps/x86_64/rtld-rawmemchr.c b/sysdeps/x86_64/rtld-rawmemchr.c
new file mode 100644
index 0000000000..2b9189393c
--- /dev/null
+++ b/sysdeps/x86_64/rtld-rawmemchr.c
@@ -0,0 +1 @@
+#include <string/rawmemchr.c>
diff --git a/sysdeps/x86_64/rtld-strchr.S b/sysdeps/x86_64/rtld-strchr.S
new file mode 100644
index 0000000000..8934697972
--- /dev/null
+++ b/sysdeps/x86_64/rtld-strchr.S
@@ -0,0 +1,291 @@
+/* strchr (str, ch) -- Return pointer to first occurrence of CH in STR.
+ For AMD x86-64.
+ Copyright (C) 2002, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include "asm-syntax.h"
+#include "bp-sym.h"
+#include "bp-asm.h"
+
+
+ .text
+ENTRY (BP_SYM (strchr))
+
+ /* Before we start with the main loop we process single bytes
+ until the source pointer is aligned. This has two reasons:
+ 1. aligned 64-bit memory access is faster
+ and (more important)
+ 2. we process in the main loop 64 bit in one step although
+ we don't know the end of the string. But accessing at
+ 8-byte alignment guarantees that we never access illegal
+ memory if this would not also be done by the trivial
+ implementation (this is because all processor inherent
+ boundaries are multiples of 8). */
+
+ movq %rdi, %rdx
+ andl $7, %edx /* Mask alignment bits */
+ movq %rdi, %rax /* duplicate destination. */
+ jz 1f /* aligned => start loop */
+ neg %edx
+ addl $8, %edx /* Align to 8 bytes. */
+
+ /* Search the first bytes directly. */
+0: movb (%rax), %cl /* load byte */
+ cmpb %cl,%sil /* compare byte. */
+ je 6f /* target found */
+ testb %cl,%cl /* is byte NUL? */
+ je 7f /* yes => return NULL */
+ incq %rax /* increment pointer */
+ decl %edx
+ jnz 0b
+
+
+1:
+ /* At the moment %rsi contains C. What we need for the
+ algorithm is C in all bytes of the register. Avoid
+	   operations on 16-bit words because these require a
+ prefix byte (and one more cycle). */
+ /* Populate 8 bit data to full 64-bit. */
+ movabs $0x0101010101010101,%r9
+ movzbl %sil,%edx
+ imul %rdx,%r9
+
+ movq $0xfefefefefefefeff, %r8 /* Save magic. */
+
+ /* We exit the loop if adding MAGIC_BITS to LONGWORD fails to
+ change any of the hole bits of LONGWORD.
+
+ 1) Is this safe? Will it catch all the zero bytes?
+ Suppose there is a byte with all zeros. Any carry bits
+ propagating from its left will fall into the hole at its
+ least significant bit and stop. Since there will be no
+ carry from its most significant bit, the LSB of the
+ byte to the left will be unchanged, and the zero will be
+ detected.
+
+ 2) Is this worthwhile? Will it ignore everything except
+	   zero bytes?  Suppose every byte of QUADWORD has a bit set
+ somewhere. There will be a carry into bit 8. If bit 8
+ is set, this will carry into bit 16. If bit 8 is clear,
+ one of bits 9-15 must be set, so there will be a carry
+ into bit 16. Similarly, there will be a carry into bit
+	   24 etc.  If one of bits 54-63 is set, there will be a carry
+ into bit 64 (=carry flag), so all of the hole bits will
+ be changed.
+
+ 3) But wait! Aren't we looking for C, not zero?
+ Good point. So what we do is XOR LONGWORD with a longword,
+ each of whose bytes is C. This turns each byte that is C
+ into a zero. */
+
+ .p2align 4
+4:
+ /* Main Loop is unrolled 4 times. */
+ /* First unroll. */
+ movq (%rax), %rcx /* get double word (= 8 bytes) in question */
+ addq $8,%rax /* adjust pointer for next word */
+ movq %r8, %rdx /* magic value */
+ xorq %r9, %rcx /* XOR with qword c|...|c => bytes of str == c
+ are now 0 */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 3f /* highest byte is NUL => return pointer */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 3f /* found c => return pointer */
+
+ /* The quadword we looked at does not contain the value we're looking
+ for. Let's search now whether we have reached the end of the
+ string. */
+ xorq %r9, %rcx /* restore original dword without reload */
+ movq %r8, %rdx /* magic value */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 7f /* highest byte is NUL => return NULL */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 7f /* found NUL => return NULL */
+
+ /* Second unroll. */
+ movq (%rax), %rcx /* get double word (= 8 bytes) in question */
+ addq $8,%rax /* adjust pointer for next word */
+ movq %r8, %rdx /* magic value */
+ xorq %r9, %rcx /* XOR with qword c|...|c => bytes of str == c
+ are now 0 */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 3f /* highest byte is NUL => return pointer */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 3f /* found c => return pointer */
+
+ /* The quadword we looked at does not contain the value we're looking
+ for. Let's search now whether we have reached the end of the
+ string. */
+ xorq %r9, %rcx /* restore original dword without reload */
+ movq %r8, %rdx /* magic value */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 7f /* highest byte is NUL => return NULL */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 7f /* found NUL => return NULL */
+ /* Third unroll. */
+ movq (%rax), %rcx /* get double word (= 8 bytes) in question */
+ addq $8,%rax /* adjust pointer for next word */
+ movq %r8, %rdx /* magic value */
+ xorq %r9, %rcx /* XOR with qword c|...|c => bytes of str == c
+ are now 0 */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 3f /* highest byte is NUL => return pointer */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 3f /* found c => return pointer */
+
+ /* The quadword we looked at does not contain the value we're looking
+ for. Let's search now whether we have reached the end of the
+ string. */
+ xorq %r9, %rcx /* restore original dword without reload */
+ movq %r8, %rdx /* magic value */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 7f /* highest byte is NUL => return NULL */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 7f /* found NUL => return NULL */
+ /* Fourth unroll. */
+ movq (%rax), %rcx /* get double word (= 8 bytes) in question */
+ addq $8,%rax /* adjust pointer for next word */
+ movq %r8, %rdx /* magic value */
+ xorq %r9, %rcx /* XOR with qword c|...|c => bytes of str == c
+ are now 0 */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 3f /* highest byte is NUL => return pointer */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 3f /* found c => return pointer */
+
+ /* The quadword we looked at does not contain the value we're looking
+ for. Let's search now whether we have reached the end of the
+ string. */
+ xorq %r9, %rcx /* restore original dword without reload */
+ movq %r8, %rdx /* magic value */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 7f /* highest byte is NUL => return NULL */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jz 4b /* no NUL found => restart loop */
+
+
+7: /* Return NULL. */
+ xorl %eax, %eax
+ retq
+
+
+ /* We now scan for the byte in which the character was matched.
+ But we have to take care of the case that a NUL char is
+ found before this in the dword. Note that we XORed %rcx
+ with the byte we're looking for, therefore the tests below look
+ reversed. */
+
+
+ .p2align 4 /* Align, it's a jump target. */
+3: movq %r9,%rdx /* move to %rdx so that we can access bytes */
+ subq $8,%rax /* correct pointer increment. */
+ testb %cl, %cl /* is first byte C? */
+ jz 6f /* yes => return pointer */
+ cmpb %dl, %cl /* is first byte NUL? */
+ je 7b /* yes => return NULL */
+ incq %rax /* increment pointer */
+
+ testb %ch, %ch /* is second byte C? */
+ jz 6f /* yes => return pointer */
+ cmpb %dl, %ch /* is second byte NUL? */
+ je 7b /* yes => return NULL? */
+ incq %rax /* increment pointer */
+
+ shrq $16, %rcx /* make upper bytes accessible */
+ testb %cl, %cl /* is third byte C? */
+ jz 6f /* yes => return pointer */
+ cmpb %dl, %cl /* is third byte NUL? */
+ je 7b /* yes => return NULL */
+ incq %rax /* increment pointer */
+
+ testb %ch, %ch /* is fourth byte C? */
+ jz 6f /* yes => return pointer */
+ cmpb %dl, %ch /* is fourth byte NUL? */
+ je 7b /* yes => return NULL? */
+ incq %rax /* increment pointer */
+
+ shrq $16, %rcx /* make upper bytes accessible */
+ testb %cl, %cl /* is fifth byte C? */
+ jz 6f /* yes => return pointer */
+ cmpb %dl, %cl /* is fifth byte NUL? */
+ je 7b /* yes => return NULL */
+ incq %rax /* increment pointer */
+
+ testb %ch, %ch /* is sixth byte C? */
+ jz 6f /* yes => return pointer */
+ cmpb %dl, %ch /* is sixth byte NUL? */
+ je 7b /* yes => return NULL? */
+ incq %rax /* increment pointer */
+
+ shrq $16, %rcx /* make upper bytes accessible */
+ testb %cl, %cl /* is seventh byte C? */
+ jz 6f /* yes => return pointer */
+ cmpb %dl, %cl /* is seventh byte NUL? */
+ je 7b /* yes => return NULL */
+
+	/* It must be in the eighth byte and it cannot be NUL.  */
+ incq %rax
+
+6:
+ nop
+ retq
+END (BP_SYM (strchr))
+
+weak_alias (BP_SYM (strchr), BP_SYM (index))
+libc_hidden_builtin_def (strchr)
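The long comment in this new file is easier to follow in C: XORing the word with C repeated in every byte turns matches into zero bytes, and the 0xfefefefefefefeff addition trick then flags any zero byte. A word-at-a-time sketch; like the assembly, it relies on 8-byte reads at the string tail never crossing into an unmapped page (the real code guarantees this by aligning first, a prologue omitted here):

#include <stdint.h>
#include <string.h>

#define MAGIC 0xfefefefefefefeffULL

/* Non-zero iff some byte of W is zero -- both branches of the
   assembly's test (jnc, then xorq/orq/incq/jnz) folded together.  */
static inline int
has_zero (uint64_t w)
{
  uint64_t sum = w + MAGIC;
  if (sum >= w)                            /* jnc: no carry out */
    return 1;
  return (((sum ^ w) | MAGIC) + 1) != 0;   /* xorq; orq; incq; jnz */
}

static const char *
strchr_word (const char *s, int c)
{
  uint64_t rep = 0x0101010101010101ULL * (unsigned char) c;
  for (;; s += 8)
    {
      uint64_t w;
      memcpy (&w, s, 8);                       /* the movq load */
      if (has_zero (w ^ rep) || has_zero (w))  /* match for C, or NUL */
        for (int i = 0; i < 8; i++)
          {
            if (s[i] == (char) c)
              return s + i;                    /* C found first */
            if (s[i] == '\0')
              return 0;                        /* NUL found first */
          }
    }
}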
diff --git a/sysdeps/x86_64/rtld-strcmp.S b/sysdeps/x86_64/rtld-strcmp.S
new file mode 100644
index 0000000000..a25535c161
--- /dev/null
+++ b/sysdeps/x86_64/rtld-strcmp.S
@@ -0,0 +1,28 @@
+#include <sysdep.h>
+#include "asm-syntax.h"
+#include "bp-sym.h"
+#include "bp-asm.h"
+
+#ifndef LABEL
+#define LABEL(l) L(l)
+#endif
+
+ .text
+ENTRY (BP_SYM (STRCMP))
+/* Simple version since we can't use SSE registers in ld.so. */
+L(oop): movb (%rdi), %al
+ cmpb (%rsi), %al
+ jne L(neq)
+ incq %rdi
+ incq %rsi
+ testb %al, %al
+ jnz L(oop)
+
+ xorl %eax, %eax
+ ret
+
+L(neq): movl $1, %eax
+ movl $-1, %ecx
+ cmovbl %ecx, %eax
+ ret
+END (BP_SYM (STRCMP))
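The tail of this fallback is a branch-free sign selection: the cmpb that found the difference leaves the carry flag set when the byte from the first string is below the one from the second, and cmovbl turns that into -1 or 1. The equivalent C, as a sketch of the loop above:

/* Byte-at-a-time strcmp, matching the ld.so fallback: no SSE
   registers may be touched inside the dynamic loader.  */
int
rtld_strcmp (const char *a, const char *b)
{
  unsigned char ca, cb;
  do
    {
      ca = (unsigned char) *a++;
      cb = (unsigned char) *b++;
      if (ca != cb)
        return ca < cb ? -1 : 1;   /* the cmovbl sign selection */
    }
  while (ca != '\0');
  return 0;
}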
diff --git a/sysdeps/x86_64/rtld-strlen.S b/sysdeps/x86_64/rtld-strlen.S
new file mode 100644
index 0000000000..fd950edaaa
--- /dev/null
+++ b/sysdeps/x86_64/rtld-strlen.S
@@ -0,0 +1,139 @@
+/* strlen(str) -- determine the length of the string STR.
+ Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+ Based on i486 version contributed by Ulrich Drepper <drepper@redhat.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include "asm-syntax.h"
+#include "bp-sym.h"
+#include "bp-asm.h"
+
+
+ .text
+ENTRY (strlen)
+ movq %rdi, %rcx /* Duplicate source pointer. */
+ andl $7, %ecx /* mask alignment bits */
+ movq %rdi, %rax /* duplicate destination. */
+ jz 1f /* aligned => start loop */
+
+ neg %ecx /* We need to align to 8 bytes. */
+ addl $8,%ecx
+ /* Search the first bytes directly. */
+0: cmpb $0x0,(%rax) /* is byte NUL? */
+ je 2f /* yes => return */
+ incq %rax /* increment pointer */
+ decl %ecx
+ jnz 0b
+
+1: movq $0xfefefefefefefeff,%r8 /* Save magic. */
+
+ .p2align 4 /* Align loop. */
+4: /* Main Loop is unrolled 4 times. */
+ /* First unroll. */
+ movq (%rax), %rcx /* get double word (= 8 bytes) in question */
+ addq $8,%rax /* adjust pointer for next word */
+ movq %r8, %rdx /* magic value */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 3f /* highest byte is NUL => return pointer */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 3f /* found NUL => return pointer */
+
+ /* Second unroll. */
+ movq (%rax), %rcx /* get double word (= 8 bytes) in question */
+ addq $8,%rax /* adjust pointer for next word */
+ movq %r8, %rdx /* magic value */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 3f /* highest byte is NUL => return pointer */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 3f /* found NUL => return pointer */
+
+ /* Third unroll. */
+ movq (%rax), %rcx /* get double word (= 8 bytes) in question */
+ addq $8,%rax /* adjust pointer for next word */
+ movq %r8, %rdx /* magic value */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 3f /* highest byte is NUL => return pointer */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jnz 3f /* found NUL => return pointer */
+
+ /* Fourth unroll. */
+ movq (%rax), %rcx /* get double word (= 8 bytes) in question */
+ addq $8,%rax /* adjust pointer for next word */
+ movq %r8, %rdx /* magic value */
+ addq %rcx, %rdx /* add the magic value to the word. We get
+ carry bits reported for each byte which
+ is *not* 0 */
+ jnc 3f /* highest byte is NUL => return pointer */
+ xorq %rcx, %rdx /* (word+magic)^word */
+ orq %r8, %rdx /* set all non-carry bits */
+ incq %rdx /* add 1: if one carry bit was *not* set
+ the addition will not result in 0. */
+ jz 4b /* no NUL found => continue loop */
+
+ .p2align 4 /* Align, it's a jump target. */
+3: subq $8,%rax /* correct pointer increment. */
+
+ testb %cl, %cl /* is first byte NUL? */
+ jz 2f /* yes => return */
+ incq %rax /* increment pointer */
+
+ testb %ch, %ch /* is second byte NUL? */
+ jz 2f /* yes => return */
+ incq %rax /* increment pointer */
+
+ testl $0x00ff0000, %ecx /* is third byte NUL? */
+ jz 2f /* yes => return pointer */
+ incq %rax /* increment pointer */
+
+ testl $0xff000000, %ecx /* is fourth byte NUL? */
+ jz 2f /* yes => return pointer */
+ incq %rax /* increment pointer */
+
+ shrq $32, %rcx /* look at other half. */
+
+ testb %cl, %cl /* is first byte NUL? */
+ jz 2f /* yes => return */
+ incq %rax /* increment pointer */
+
+ testb %ch, %ch /* is second byte NUL? */
+ jz 2f /* yes => return */
+ incq %rax /* increment pointer */
+
+ testl $0xff0000, %ecx /* is third byte NUL? */
+ jz 2f /* yes => return pointer */
+ incq %rax /* increment pointer */
+2:
+ subq %rdi, %rax /* compute difference to string start */
+ ret
+END (strlen)
+libc_hidden_builtin_def (strlen)
diff --git a/sysdeps/x86_64/strcmp.S b/sysdeps/x86_64/strcmp.S
index 119b88e40b..340a64ba35 100644
--- a/sysdeps/x86_64/strcmp.S
+++ b/sysdeps/x86_64/strcmp.S
@@ -1,8 +1,10 @@
/* Highly optimized version for x86-64.
- Copyright (C) 1999, 2000, 2002, 2003, 2005 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2002, 2003, 2005, 2009
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
Based on i686 version contributed by Ulrich Drepper
<drepper@cygnus.com>, 1999.
+ Updated with SSE2 support contributed by Intel Corporation.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -24,8 +26,35 @@
#include "bp-sym.h"
#include "bp-asm.h"
- .text
-ENTRY (BP_SYM (strcmp))
+#undef UPDATE_STRNCMP_COUNTER
+
+#ifndef LABEL
+#define LABEL(l) L(l)
+#endif
+
+#ifdef USE_AS_STRNCMP
+/* Since the counter, %r11, is unsigned, we branch to strcmp_exitz
+ if the new counter > the old one or is 0. */
+# define UPDATE_STRNCMP_COUNTER \
+ /* calculate left number to compare */ \
+ lea -16(%rcx, %r11), %r9; \
+ cmp %r9, %r11; \
+ jb LABEL(strcmp_exitz); \
+ test %r9, %r9; \
+ je LABEL(strcmp_exitz); \
+ mov %r9, %r11
+
+#else
+# define UPDATE_STRNCMP_COUNTER
+# ifndef STRCMP
+# define STRCMP strcmp
+# endif
+#endif
+
+ .text
+ENTRY (BP_SYM (STRCMP))
+#ifdef NOT_IN_libc
+/* Simple version since we can't use SSE registers in ld.so. */
L(oop): movb (%rdi), %al
cmpb (%rsi), %al
jne L(neq)
@@ -41,5 +70,1914 @@ L(neq): movl $1, %eax
movl $-1, %ecx
cmovbl %ecx, %eax
ret
-END (BP_SYM (strcmp))
-libc_hidden_builtin_def (strcmp)
+END (BP_SYM (STRCMP))
+#else /* NOT_IN_libc */
+/*
+ * This implementation uses SSE to compare up to 16 bytes at a time.
+ */
+#ifdef USE_AS_STRNCMP
+ test %rdx, %rdx
+ je LABEL(strcmp_exitz)
+ cmp $1, %rdx
+ je LABEL(Byte0)
+ mov %rdx, %r11
+#endif
+ mov %esi, %ecx
+ mov %edi, %eax
+/* Use 64bit AND here to avoid long NOP padding. */
+ and $0x3f, %rcx /* rsi alignment in cache line */
+ and $0x3f, %rax /* rdi alignment in cache line */
+ cmp $0x30, %ecx
+ ja LABEL(crosscache) /* rsi: 16-byte load will cross cache line */
+ cmp $0x30, %eax
+ ja LABEL(crosscache) /* rdi: 16-byte load will cross cache line */
+ movlpd (%rdi), %xmm1
+ movlpd (%rsi), %xmm2
+ movhpd 8(%rdi), %xmm1
+ movhpd 8(%rsi), %xmm2
+ pxor %xmm0, %xmm0 /* clear %xmm0 for null char checks */
+ pcmpeqb %xmm1, %xmm0 /* Any null chars? */
+ pcmpeqb %xmm2, %xmm1 /* compare first 16 bytes for equality */
+ psubb %xmm0, %xmm1 /* packed sub of comparison results*/
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx /* if first 16 bytes are same, edx == 0xffff */
+ jnz LABEL(less16bytes) /* If not, find different value or null char */
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+	jbe	LABEL(strcmp_exitz)	/* finish comparison */
+#endif
+ add $16, %rsi /* prepare to search next 16 bytes */
+ add $16, %rdi /* prepare to search next 16 bytes */
+
+ /*
+ * Determine source and destination string offsets from 16-byte alignment.
+ * Use relative offset difference between the two to determine which case
+ * below to use.
+ */
+ .p2align 4
+LABEL(crosscache):
+ and $0xfffffffffffffff0, %rsi /* force %rsi is 16 byte aligned */
+ and $0xfffffffffffffff0, %rdi /* force %rdi is 16 byte aligned */
+ mov $0xffff, %edx /* for equivalent offset */
+ xor %r8d, %r8d
+ and $0xf, %ecx /* offset of rsi */
+ and $0xf, %eax /* offset of rdi */
+ cmp %eax, %ecx
+ je LABEL(ashr_0) /* rsi and rdi relative offset same */
+ ja LABEL(bigger)
+ mov %edx, %r8d /* r8d is offset flag for exit tail */
+ xchg %ecx, %eax
+ xchg %rsi, %rdi
+LABEL(bigger):
+ lea 15(%rax), %r9
+ sub %rcx, %r9
+ lea LABEL(unaligned_table)(%rip), %r10
+ movslq (%r10, %r9,4), %r9
+ lea (%r10, %r9), %r10
+ jmp *%r10 /* jump to corresponding case */
+
+/*
+ * The following cases will be handled by ashr_0
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(0~15) n(0~15) 15(15+ n-n) ashr_0
+ */
+ .p2align 4
+LABEL(ashr_0):
+
+ movdqa (%rsi), %xmm1
+ pxor %xmm0, %xmm0 /* clear %xmm0 for null char check */
+ pcmpeqb %xmm1, %xmm0 /* Any null chars? */
+ pcmpeqb (%rdi), %xmm1 /* compare 16 bytes for equality */
+ psubb %xmm0, %xmm1 /* packed sub of comparison results*/
+ pmovmskb %xmm1, %r9d
+ shr %cl, %edx /* adjust 0xffff for offset */
+ shr %cl, %r9d /* adjust for 16-byte offset */
+ sub %r9d, %edx
+ /*
+ * edx must be the same with r9d if in left byte (16-rcx) is equal to
+ * the start from (16-rax) and no null char was seen.
+ */
+ jne LABEL(less32bytes) /* mismatch or null char */
+ UPDATE_STRNCMP_COUNTER
+ mov $16, %rcx
+ mov $16, %r9
+ pxor %xmm0, %xmm0 /* clear xmm0, may have changed above */
+
+ /*
+ * Now both strings are aligned at 16-byte boundary. Loop over strings
+ * checking 32-bytes per iteration.
+ */
+ .p2align 4
+LABEL(loop_ashr_0):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit) /* mismatch or null char seen */
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+ add $16, %rcx
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+ add $16, %rcx
+ jmp LABEL(loop_ashr_0)
+
+/*
+ * The following cases will be handled by ashr_1
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(15) n -15 0(15 +(n-15) - n) ashr_1
+ */
+ .p2align 4
+LABEL(ashr_1):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0 /* Any null chars? */
+ pslldq $15, %xmm2 /* shift first string to align with second */
+ pcmpeqb %xmm1, %xmm2 /* compare 16 bytes for equality */
+ psubb %xmm0, %xmm2 /* packed sub of comparison results*/
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx /* adjust 0xffff for offset */
+ shr %cl, %r9d /* adjust for 16-byte offset */
+ sub %r9d, %edx
+ jnz LABEL(less32bytes) /* mismatch or null char seen */
+ movdqa (%rdi), %xmm3
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads*/
+ mov $1, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Setup %r10 value allows us to detect crossing a page boundary.
+ * When %r10 goes positive we have crossed a page boundary and
+ * need to do a nibble.
+ */
+ lea 1(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_1):
+ add $16, %r10
+ jg LABEL(nibble_ashr_1) /* cross page boundary */
+
+LABEL(gobble_ashr_1):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4 /* store for next cycle */
+
+ psrldq $1, %xmm3
+ pslldq $15, %xmm2
+ por %xmm3, %xmm2 /* merge into one 16byte value */
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_1) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4 /* store for next cycle */
+
+ psrldq $1, %xmm3
+ pslldq $15, %xmm2
+ por %xmm3, %xmm2 /* merge into one 16byte value */
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_1)
+
+ /*
+ * Nibble avoids loads across page boundary. This is to avoid a potential
+ * access into unmapped memory.
+ */
+ .p2align 4
+LABEL(nibble_ashr_1):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char*/
+ pmovmskb %xmm0, %edx
+ test $0xfffe, %edx
+ jnz LABEL(ashr_1_exittail) /* find null char*/
+
+#ifdef USE_AS_STRNCMP
+ cmp $14, %r11
+ jbe LABEL(ashr_1_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+	sub	$0x1000, %r10		/* subtract 4K from %r10 */
+ jmp LABEL(gobble_ashr_1)
+
+ /*
+ * Once find null char, determine if there is a string mismatch
+ * before the null char.
+ */
+ .p2align 4
+LABEL(ashr_1_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $1, %xmm0
+ psrldq $1, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_2
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(14~15) n -14 1(15 +(n-14) - n) ashr_2
+ */
+ .p2align 4
+LABEL(ashr_2):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $14, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $2, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 2(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_2):
+ add $16, %r10
+ jg LABEL(nibble_ashr_2)
+
+LABEL(gobble_ashr_2):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $2, %xmm3
+ pslldq $14, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_2) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $2, %xmm3
+ pslldq $14, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_2)
+
+ .p2align 4
+LABEL(nibble_ashr_2):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xfffc, %edx
+ jnz LABEL(ashr_2_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $13, %r11
+ jbe LABEL(ashr_2_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_2)
+
+ .p2align 4
+LABEL(ashr_2_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $2, %xmm0
+ psrldq $2, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_3
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(13~15) n - 13 2(15 + (n - 13) - n) ashr_3
+ */
+ .p2align 4
+LABEL(ashr_3):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $13, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $3, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 3(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_3):
+ add $16, %r10
+ jg LABEL(nibble_ashr_3)
+
+LABEL(gobble_ashr_3):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $3, %xmm3
+ pslldq $13, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_3) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $3, %xmm3
+ pslldq $13, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_3)
+
+ .p2align 4
+LABEL(nibble_ashr_3):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xfff8, %edx
+ jnz LABEL(ashr_3_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $12, %r11
+ jbe LABEL(ashr_3_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_3)
+
+ .p2align 4
+LABEL(ashr_3_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $3, %xmm0
+ psrldq $3, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_4
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(12~15) n - 12 3(15 + (n - 12) - n) ashr_4
+ */
+ .p2align 4
+LABEL(ashr_4):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $12, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $4, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 4(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_4):
+ add $16, %r10
+ jg LABEL(nibble_ashr_4)
+
+LABEL(gobble_ashr_4):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $4, %xmm3
+ pslldq $12, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_4) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $4, %xmm3
+ pslldq $12, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_4)
+
+ .p2align 4
+LABEL(nibble_ashr_4):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xfff0, %edx
+ jnz LABEL(ashr_4_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $11, %r11
+ jbe LABEL(ashr_4_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_4)
+
+ .p2align 4
+LABEL(ashr_4_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $4, %xmm0
+ psrldq $4, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_5
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(11~15) n - 11 4(15 +(n-11) - n) ashr_5
+ */
+ .p2align 4
+LABEL(ashr_5):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $11, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $5, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 5(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_5):
+ add $16, %r10
+ jg LABEL(nibble_ashr_5)
+
+LABEL(gobble_ashr_5):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $5, %xmm3
+ pslldq $11, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_5) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $5, %xmm3
+ pslldq $11, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_5)
+
+ .p2align 4
+LABEL(nibble_ashr_5):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xffe0, %edx
+ jnz LABEL(ashr_5_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $10, %r11
+ jbe LABEL(ashr_5_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_5)
+
+ .p2align 4
+LABEL(ashr_5_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $5, %xmm0
+ psrldq $5, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_6
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(10~15) n - 10 5(15 +(n-10) - n) ashr_6
+ */
+ .p2align 4
+LABEL(ashr_6):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $10, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $6, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 6(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_6):
+ add $16, %r10
+ jg LABEL(nibble_ashr_6)
+
+LABEL(gobble_ashr_6):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $6, %xmm3
+ pslldq $10, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_6) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $6, %xmm3
+ pslldq $10, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_6)
+
+ .p2align 4
+LABEL(nibble_ashr_6):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xffc0, %edx
+ jnz LABEL(ashr_6_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $9, %r11
+ jbe LABEL(ashr_6_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_6)
+
+ .p2align 4
+LABEL(ashr_6_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $6, %xmm0
+ psrldq $6, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_7
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(9~15) n - 9 6(15 +(n - 9) - n) ashr_7
+ */
+ .p2align 4
+LABEL(ashr_7):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $9, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $7, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 7(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_7):
+ add $16, %r10
+ jg LABEL(nibble_ashr_7)
+
+LABEL(gobble_ashr_7):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $7, %xmm3
+ pslldq $9, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_7) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $7, %xmm3
+ pslldq $9, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_7)
+
+ .p2align 4
+LABEL(nibble_ashr_7):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xff80, %edx
+ jnz LABEL(ashr_7_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $8, %r11
+ jbe LABEL(ashr_7_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_7)
+
+ .p2align 4
+LABEL(ashr_7_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $7, %xmm0
+ psrldq $7, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_8
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(8~15) n - 8 7(15 +(n - 8) - n) ashr_8
+ */
+ .p2align 4
+LABEL(ashr_8):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $8, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $8, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 8(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_8):
+ add $16, %r10
+ jg LABEL(nibble_ashr_8)
+
+LABEL(gobble_ashr_8):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $8, %xmm3
+ pslldq $8, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_8) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $8, %xmm3
+ pslldq $8, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_8)
+
+ .p2align 4
+LABEL(nibble_ashr_8):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xff00, %edx
+ jnz LABEL(ashr_8_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $7, %r11
+ jbe LABEL(ashr_8_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_8)
+
+ .p2align 4
+LABEL(ashr_8_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $8, %xmm0
+ psrldq $8, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_9
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(7~15) n - 7 8(15 +(n - 7) - n) ashr_9
+ */
+ .p2align 4
+LABEL(ashr_9):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $7, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $9, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 9(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_9):
+ add $16, %r10
+ jg LABEL(nibble_ashr_9)
+
+LABEL(gobble_ashr_9):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $9, %xmm3
+ pslldq $7, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_9) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $9, %xmm3
+ pslldq $7, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3 /* store for next cycle */
+ jmp LABEL(loop_ashr_9)
+
+ .p2align 4
+LABEL(nibble_ashr_9):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xfe00, %edx
+ jnz LABEL(ashr_9_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $6, %r11
+ jbe LABEL(ashr_9_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_9)
+
+ .p2align 4
+LABEL(ashr_9_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $9, %xmm0
+ psrldq $9, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_10
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(6~15) n - 6 9(15 +(n - 6) - n) ashr_10
+ */
+ .p2align 4
+LABEL(ashr_10):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $6, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $10, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 10(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_10):
+ add $16, %r10
+ jg LABEL(nibble_ashr_10)
+
+LABEL(gobble_ashr_10):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $10, %xmm3
+ pslldq $6, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_10) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $10, %xmm3
+ pslldq $6, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_10)
+
+ .p2align 4
+LABEL(nibble_ashr_10):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xfc00, %edx
+ jnz LABEL(ashr_10_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $5, %r11
+ jbe LABEL(ashr_10_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_10)
+
+ .p2align 4
+LABEL(ashr_10_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $10, %xmm0
+ psrldq $10, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_11
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(5~15) n - 5 10(15 +(n - 5) - n) ashr_11
+ */
+ .p2align 4
+LABEL(ashr_11):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $5, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $11, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 11(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_11):
+ add $16, %r10
+ jg LABEL(nibble_ashr_11)
+
+LABEL(gobble_ashr_11):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $11, %xmm3
+ pslldq $5, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_11) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $11, %xmm3
+ pslldq $5, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_11)
+
+ .p2align 4
+LABEL(nibble_ashr_11):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xf800, %edx
+ jnz LABEL(ashr_11_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $4, %r11
+ jbe LABEL(ashr_11_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_11)
+
+ .p2align 4
+LABEL(ashr_11_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $11, %xmm0
+ psrldq $11, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_12
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(4~15) n - 4 11(15 +(n - 4) - n) ashr_12
+ */
+ .p2align 4
+LABEL(ashr_12):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $4, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $12, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 12(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_12):
+ add $16, %r10
+ jg LABEL(nibble_ashr_12)
+
+LABEL(gobble_ashr_12):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $12, %xmm3
+ pslldq $4, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_12) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $12, %xmm3
+ pslldq $4, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_12)
+
+ .p2align 4
+LABEL(nibble_ashr_12):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xf000, %edx
+ jnz LABEL(ashr_12_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $3, %r11
+ jbe LABEL(ashr_12_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_12)
+
+ .p2align 4
+LABEL(ashr_12_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $12, %xmm0
+ psrldq $12, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_13
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(3~15) n - 3 12(15 +(n - 3) - n) ashr_13
+ */
+ .p2align 4
+LABEL(ashr_13):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $3, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $13, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 13(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_13):
+ add $16, %r10
+ jg LABEL(nibble_ashr_13)
+
+LABEL(gobble_ashr_13):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $13, %xmm3
+ pslldq $3, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_13) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $13, %xmm3
+ pslldq $3, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_13)
+
+ .p2align 4
+LABEL(nibble_ashr_13):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xe000, %edx
+ jnz LABEL(ashr_13_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $2, %r11
+ jbe LABEL(ashr_13_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_13)
+
+ .p2align 4
+LABEL(ashr_13_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $13, %xmm0
+ psrldq $13, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_14
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(2~15) n - 2 13(15 +(n - 2) - n) ashr_14
+ */
+ .p2align 4
+LABEL(ashr_14):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $2, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $14, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 14(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_14):
+ add $16, %r10
+ jg LABEL(nibble_ashr_14)
+
+LABEL(gobble_ashr_14):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $14, %xmm3
+ pslldq $2, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_14) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $14, %xmm3
+ pslldq $2, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_14)
+
+ .p2align 4
+LABEL(nibble_ashr_14):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0xc000, %edx
+ jnz LABEL(ashr_14_exittail)
+
+#ifdef USE_AS_STRNCMP
+ cmp $1, %r11
+ jbe LABEL(ashr_14_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_14)
+
+ .p2align 4
+LABEL(ashr_14_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $14, %xmm0
+ psrldq $14, %xmm3
+ jmp LABEL(aftertail)
+
+/*
+ * The following cases will be handled by ashr_15
+ * rcx(offset of rsi) rax(offset of rdi) relative offset corresponding case
+ * n(1~15) n - 1 14(15 +(n - 1) - n) ashr_15
+ */
+ .p2align 4
+LABEL(ashr_15):
+ pxor %xmm0, %xmm0
+ movdqa (%rdi), %xmm2
+ movdqa (%rsi), %xmm1
+ pcmpeqb %xmm1, %xmm0
+ pslldq $1, %xmm2
+ pcmpeqb %xmm1, %xmm2
+ psubb %xmm0, %xmm2
+ pmovmskb %xmm2, %r9d
+ shr %cl, %edx
+ shr %cl, %r9d
+ sub %r9d, %edx
+ jnz LABEL(less32bytes)
+
+ movdqa (%rdi), %xmm3
+
+ UPDATE_STRNCMP_COUNTER
+
+ pxor %xmm0, %xmm0
+ mov $16, %rcx /* index for loads */
+ mov $15, %r9d /* byte position left over from less32bytes case */
+ /*
+ * Set up %r10 for detecting page-boundary crossings: once %r10 goes
+ * positive, the next aligned load from rdi would touch a new 4K page,
+ * so the nibble path must run first.
+ */
+ lea 15(%rdi), %r10
+ and $0xfff, %r10 /* offset into 4K page */
+
+ sub $0x1000, %r10 /* subtract 4K pagesize */
+
+ .p2align 4
+LABEL(loop_ashr_15):
+ add $16, %r10
+ jg LABEL(nibble_ashr_15)
+
+LABEL(gobble_ashr_15):
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $15, %xmm3
+ pslldq $1, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+
+ add $16, %r10
+ jg LABEL(nibble_ashr_15) /* cross page boundary */
+
+ movdqa (%rsi, %rcx), %xmm1
+ movdqa (%rdi, %rcx), %xmm2
+ movdqa %xmm2, %xmm4
+
+ psrldq $15, %xmm3
+ pslldq $1, %xmm2
+ por %xmm3, %xmm2
+
+ pcmpeqb %xmm1, %xmm0
+ pcmpeqb %xmm2, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ sub $0xffff, %edx
+ jnz LABEL(exit)
+
+#ifdef USE_AS_STRNCMP
+ sub $16, %r11
+ jbe LABEL(strcmp_exitz)
+#endif
+
+ add $16, %rcx
+ movdqa %xmm4, %xmm3
+ jmp LABEL(loop_ashr_15)
+
+ .p2align 4
+LABEL(nibble_ashr_15):
+ pcmpeqb %xmm3, %xmm0 /* check nibble for null char */
+ pmovmskb %xmm0, %edx
+ test $0x8000, %edx
+ jnz LABEL(ashr_15_exittail)
+
+#ifdef USE_AS_STRNCMP
+ test %r11, %r11
+ je LABEL(ashr_15_exittail)
+#endif
+
+ pxor %xmm0, %xmm0
+ sub $0x1000, %r10
+ jmp LABEL(gobble_ashr_15)
+
+ .p2align 4
+LABEL(ashr_15_exittail):
+ movdqa (%rsi, %rcx), %xmm1
+ psrldq $15, %xmm3
+ psrldq $15, %xmm0
+
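+/*
+ * aftertail: %xmm1 holds the current rsi chunk, %xmm3 the leftover rdi
+ * bytes and %xmm0 the null mask, all shifted into matching byte
+ * positions by the exittail code.  Inverting the compare/subtract
+ * result yields a mask whose lowest set bit marks the first
+ * mismatching or terminating byte.
+ */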
+ .p2align 4
+LABEL(aftertail):
+ pcmpeqb %xmm3, %xmm1
+ psubb %xmm0, %xmm1
+ pmovmskb %xmm1, %edx
+ not %edx
+
+ .p2align 4
+LABEL(exit):
+ lea -16(%r9, %rcx), %rax /* locate the exact offset for rdi */
+LABEL(less32bytes):
+ lea (%rdi, %rax), %rdi /* locate the exact address of the first operand (rdi) */
+ lea (%rsi, %rcx), %rsi /* locate the exact address of the second operand (rsi) */
+ test %r8d, %r8d
+ jz LABEL(ret)
+ xchg %rsi, %rdi /* restore the original operand order according to the flag in %r8d */
+
+ .p2align 4
+LABEL(ret):
+LABEL(less16bytes):
+ bsf %rdx, %rdx /* index of the first mismatching or null byte */
+
+#ifdef USE_AS_STRNCMP
+ sub %rdx, %r11 /* is the difference at or past the count limit? */
+ jbe LABEL(strcmp_exitz) /* if so, the first n bytes compare equal */
+#endif
+ movzbl (%rsi, %rdx), %ecx /* load the differing byte of each string */
+ movzbl (%rdi, %rdx), %eax
+
+ sub %ecx, %eax /* return the signed byte difference */
+ ret
+
+LABEL(strcmp_exitz):
+ xor %eax, %eax
+ ret
+
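+/*
+ * Byte0: compare just the first byte of the two strings; reached when
+ * only a single byte may be examined (e.g. strncmp with a count of 1).
+ */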
+ .p2align 4
+LABEL(Byte0):
+ movzx (%rsi), %ecx
+ movzx (%rdi), %eax
+
+ sub %ecx, %eax
+ ret
+END (BP_SYM (STRCMP))
+
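+/*
+ * Jump table of 32-bit offsets, each relative to the table itself so
+ * that the code stays position independent, used to dispatch to the
+ * ashr_N case matching the relative 16-byte misalignment of the two
+ * strings.
+ */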
+ .section .rodata,"a",@progbits
+ .p2align 3
+LABEL(unaligned_table):
+ .int LABEL(ashr_1) - LABEL(unaligned_table)
+ .int LABEL(ashr_2) - LABEL(unaligned_table)
+ .int LABEL(ashr_3) - LABEL(unaligned_table)
+ .int LABEL(ashr_4) - LABEL(unaligned_table)
+ .int LABEL(ashr_5) - LABEL(unaligned_table)
+ .int LABEL(ashr_6) - LABEL(unaligned_table)
+ .int LABEL(ashr_7) - LABEL(unaligned_table)
+ .int LABEL(ashr_8) - LABEL(unaligned_table)
+ .int LABEL(ashr_9) - LABEL(unaligned_table)
+ .int LABEL(ashr_10) - LABEL(unaligned_table)
+ .int LABEL(ashr_11) - LABEL(unaligned_table)
+ .int LABEL(ashr_12) - LABEL(unaligned_table)
+ .int LABEL(ashr_13) - LABEL(unaligned_table)
+ .int LABEL(ashr_14) - LABEL(unaligned_table)
+ .int LABEL(ashr_15) - LABEL(unaligned_table)
+ .int LABEL(ashr_0) - LABEL(unaligned_table)
+#endif /* NOT_IN_libc */
+libc_hidden_builtin_def (STRCMP)
diff --git a/sysdeps/x86_64/strncmp.S b/sysdeps/x86_64/strncmp.S
new file mode 100644
index 0000000000..0af34e7f15
--- /dev/null
+++ b/sysdeps/x86_64/strncmp.S
@@ -0,0 +1,4 @@
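+/* strncmp is strcmp.S assembled with the byte-count (%rdx) checks enabled.  */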
+#define STRCMP strncmp
+#define USE_AS_STRNCMP
+#include "strcmp.S"
diff --git a/sysdeps/x86_64/tst-xmmymm.sh b/sysdeps/x86_64/tst-xmmymm.sh
new file mode 100755
index 0000000000..0735276e6d
--- /dev/null
+++ b/sysdeps/x86_64/tst-xmmymm.sh
@@ -0,0 +1,22 @@
+#! /bin/sh
+objpfx="$1"
+
+tmp=$(mktemp ${objpfx}tst-xmmymm.XXXXXX)
+trap 'rm -f "$tmp"' 1 2 3 15
+
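+# List every function in ld.so that writes to an xmm/ymm register: track
+# the current function name from objdump's symbol lines and print each
+# offending function once.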
+objdump -d "${objpfx}ld.so" |
+awk 'BEGIN { last="" } /^[[:xdigit:]]* <[_[:alnum:]]*>:$/ { fct=substr($2, 2, length($2)-3) } /,%[xy]mm[[:digit:]]*$/ { if (last != fct) { print fct; last=fct} }' |
+tee "$tmp"
+
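+# Only _dl_runtime_profile may legitimately use these registers; any
+# other function in the list above is an error.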
+echo "Functions which incorrectly modify xmm/ymm registers:"
+err=1
+egrep -vs '^_dl_runtime_profile$' "$tmp" || err=0
+if test $err -eq 0; then echo "None"; fi
+
+rm "$tmp"
+exit $err