Diffstat (limited to 'sysdeps/arm/armv6t2/strlen.S')
-rw-r--r--  sysdeps/arm/armv6t2/strlen.S  |  50
1 file changed, 11 insertions(+), 39 deletions(-)
diff --git a/sysdeps/arm/armv6t2/strlen.S b/sysdeps/arm/armv6t2/strlen.S
index 4f320de570..a34ef20e9d 100644
--- a/sysdeps/arm/armv6t2/strlen.S
+++ b/sysdeps/arm/armv6t2/strlen.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2010-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,7 +21,7 @@
*/
-#include <arm-features.h> /* This might #define NO_THUMB. */
+#include <arm-features.h>
#include <sysdep.h>
#ifdef __ARMEB__
@@ -32,24 +32,8 @@
#define S2HI lsl
#endif
-#ifndef NO_THUMB
/* This code is best on Thumb. */
.thumb
-#else
-/* Using bne.w explicitly is desirable in Thumb mode because it helps
- align the following label without a nop. In ARM mode there is no
- such difference. */
-.macro bne.w label
- bne \label
-.endm
-
-/* This clobbers the condition codes, which the real Thumb cbnz instruction
- does not do. But it doesn't matter for any of the uses here. */
-.macro cbnz reg, label
- cmp \reg, #0
- bne \label
-.endm
-#endif
/* Parameters and result. */
#define srcin r0
@@ -67,7 +51,7 @@
.text
.p2align 6
ENTRY(strlen)
- sfi_pld srcin, #0
+ pld [srcin, #0]
strd r4, r5, [sp, #-8]!
cfi_adjust_cfa_offset (8)
cfi_rel_offset (r4, 0)
@@ -76,15 +60,14 @@ ENTRY(strlen)
bic src, srcin, #7
mvn const_m1, #0
ands tmp1, srcin, #7 /* (8 - bytes) to alignment. */
- sfi_pld src, #32
+ pld [src, #32]
bne.w .Lmisaligned8
mov const_0, #0
mov result, #-8
.Lloop_aligned:
/* Bytes 0-7. */
- sfi_breg src, \
- ldrd data1a, data1b, [\B]
- sfi_pld src, #64
+ ldrd data1a, data1b, [src]
+ pld [src, #64]
add result, result, #8
.Lstart_realigned:
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
@@ -94,8 +77,7 @@ ENTRY(strlen)
cbnz data1b, .Lnull_found
/* Bytes 8-15. */
- sfi_breg src, \
- ldrd data1a, data1b, [\B, #8]
+ ldrd data1a, data1b, [src, #8]
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
@@ -104,8 +86,7 @@ ENTRY(strlen)
cbnz data1b, .Lnull_found
/* Bytes 16-23. */
- sfi_breg src, \
- ldrd data1a, data1b, [\B, #16]
+ ldrd data1a, data1b, [src, #16]
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
@@ -114,8 +95,7 @@ ENTRY(strlen)
cbnz data1b, .Lnull_found
/* Bytes 24-31. */
- sfi_breg src, \
- ldrd data1a, data1b, [\B, #24]
+ ldrd data1a, data1b, [src, #24]
add src, src, #32
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
@@ -143,24 +123,16 @@ ENTRY(strlen)
.Lmisaligned8:
cfi_restore_state
- sfi_breg src, \
- ldrd data1a, data1b, [\B]
+ ldrd data1a, data1b, [src]
and tmp2, tmp1, #3
rsb result, tmp1, #0
lsl tmp2, tmp2, #3 /* Bytes -> bits. */
tst tmp1, #4
- sfi_pld src, #64
+ pld [src, #64]
S2HI tmp2, const_m1, tmp2
-#ifdef NO_THUMB
- mvn tmp1, tmp2
- orr data1a, data1a, tmp1
- itt ne
- orrne data1b, data1b, tmp1
-#else
orn data1a, data1a, tmp2
itt ne
ornne data1b, data1b, tmp2
-#endif
movne data1a, const_m1
mov const_0, #0
b .Lstart_realigned
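
The NUL-byte test that the retained code relies on is the uadd8/sel pair: uadd8 with
const_m1 (0xFF in every byte lane) sets the GE flag for each lane whose byte is non-zero
(the per-byte add carries out), and sel then yields 0xFF exactly in the lanes that held a
NUL, so a single cbnz spots a terminator anywhere in the word. Below is a minimal C sketch
of that per-lane behaviour; the helper name nul_mask and the sample words are illustrative
only and not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Emulate the uadd8/sel idiom on one 32-bit word: uadd8 data, data, 0xffffffff
       sets GE<lane> when the byte-wise add carries out, i.e. when the byte is
       non-zero; sel const_0, const_m1 then produces 0x00 for non-zero lanes and
       0xff for NUL lanes.  */
    static uint32_t nul_mask(uint32_t word)
    {
        uint32_t mask = 0;
        for (int lane = 0; lane < 4; lane++) {
            uint32_t byte = (word >> (8 * lane)) & 0xffu;
            int ge = (byte + 0xffu) > 0xffu;      /* carry out iff byte != 0 */
            if (!ge)                              /* sel picks const_m1 here */
                mask |= 0xffu << (8 * lane);
        }
        return mask;    /* non-zero iff the word contains a NUL byte */
    }

    int main(void)
    {
        printf("%08x\n", (unsigned) nul_mask(0x41420043));  /* one NUL byte -> 0000ff00 */
        printf("%08x\n", (unsigned) nul_mask(0x64636261));  /* "abcd", no NUL -> 00000000 */
        return 0;
    }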