-rw-r--r--  ChangeLog                              13
-rw-r--r--  dirent/dirent.h                        12
-rw-r--r--  sysdeps/arm/memset.S                   75
-rw-r--r--  sysdeps/unix/sysv/linux/arm/mmap.S     39
-rw-r--r--  sysdeps/unix/sysv/linux/arm/mmap64.S   38
5 files changed, 115 insertions, 62 deletions
diff --git a/ChangeLog b/ChangeLog
index 468a551be3..ac3576f1d0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,16 @@
+2003-09-14 Ulrich Drepper <drepper@redhat.com>
+
+ * dirent/dirent.h: Remove __THROW from scandir.
+
+2003-09-14 Philip Blundell <philb@gnu.org>
+
+ * sysdeps/unix/sysv/linux/arm/mmap.S: Use sys_mmap2 if it's known
+ to be available.
+
+ * sysdeps/unix/sysv/linux/arm/mmap64.S: Optimise code a little.
+
+ * sysdeps/arm/memset.S: Rewrite.
+
2003-09-12 Jakub Jelinek <jakub@redhat.com>
* sysdeps/unix/sysv/linux/s390/bits/typesizes.h: New.
diff --git a/dirent/dirent.h b/dirent/dirent.h
index 9f7c380b40..319fb745a1 100644
--- a/dirent/dirent.h
+++ b/dirent/dirent.h
@@ -239,14 +239,14 @@ extern int dirfd (DIR *__dirp) __THROW;
extern int scandir (__const char *__restrict __dir,
struct dirent ***__restrict __namelist,
int (*__selector) (__const struct dirent *),
- int (*__cmp) (__const void *, __const void *)) __THROW;
+ int (*__cmp) (__const void *, __const void *));
# else
# ifdef __REDIRECT
extern int __REDIRECT (scandir,
(__const char *__restrict __dir,
struct dirent ***__restrict __namelist,
int (*__selector) (__const struct dirent *),
- int (*__cmp) (__const void *, __const void *)) __THROW,
+ int (*__cmp) (__const void *, __const void *)),
scandir64);
# else
# define scandir scandir64
@@ -259,7 +259,7 @@ extern int __REDIRECT (scandir,
extern int scandir64 (__const char *__restrict __dir,
struct dirent64 ***__restrict __namelist,
int (*__selector) (__const struct dirent64 *),
- int (*__cmp) (__const void *, __const void *)) __THROW;
+ int (*__cmp) (__const void *, __const void *));
# endif
/* Function to compare two `struct dirent's alphabetically. */
@@ -269,8 +269,7 @@ extern int alphasort (__const void *__e1, __const void *__e2)
# else
# ifdef __REDIRECT
extern int __REDIRECT (alphasort,
- (__const void *__e1, __const void *__e2)
- __THROW,
+ (__const void *__e1, __const void *__e2) __THROW,
alphasort64) __attribute_pure__;
# else
# define alphasort alphasort64
@@ -290,8 +289,7 @@ extern int versionsort (__const void *__e1, __const void *__e2)
# else
# ifdef __REDIRECT
extern int __REDIRECT (versionsort,
- (__const void *__e1, __const void *__e2)
- __THROW,
+ (__const void *__e1, __const void *__e2) __THROW,
versionsort64) __attribute_pure__;
# else
# define versionsort versionsort64
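Dropping __THROW from scandir (and scandir64) is a correctness fix rather than a cleanup: under C++, __THROW expands to an empty exception specification (throw ()), promising that no exception escapes the call. But scandir invokes the caller-supplied __selector and __cmp callbacks, and user code may legitimately throw; alphasort and versionsort keep __THROW because they run no user code, and those hunks merely reflow the declarations. A minimal usage sketch (not part of the patch) showing that both callbacks are caller code:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

/* Caller-supplied selector: skip dotfiles.  If this were C++ code,
   it could throw, so scandir itself must not promise otherwise.  */
static int
select_visible (const struct dirent *d)
{
  return d->d_name[0] != '.';
}

int
main (void)
{
  struct dirent **namelist;
  int i, n;

  n = scandir (".", &namelist, select_visible, alphasort);
  if (n < 0)
    return 1;
  for (i = 0; i < n; ++i)
    {
      puts (namelist[i]->d_name);
      free (namelist[i]);
    }
  free (namelist);
  return 0;
}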
diff --git a/sysdeps/arm/memset.S b/sysdeps/arm/memset.S
index 0b624139eb..1e2699d077 100644
--- a/sysdeps/arm/memset.S
+++ b/sysdeps/arm/memset.S
@@ -22,47 +22,46 @@
/* void *memset (dstpp, c, len) */
ENTRY(memset)
- mov a4, a1
- cmp a3, $8 @ at least 8 bytes to do?
- blt 2f
- orr a2, a2, a2, lsl $8
- orr a2, a2, a2, lsl $16
+ mov r3, r0
+ cmp r2, #8
+ bcc 2f @ less than 8 bytes to move
+
1:
- tst a4, $3 @ aligned yet?
- strneb a2, [a4], $1
- subne a3, a3, $1
+ tst r3, #3 @ aligned yet?
+ strneb r1, [r3], #1
+ subne r2, r2, #1
bne 1b
- mov ip, a2
+
+ orr r1, r1, r1, lsl $8
+ orr r1, r1, r1, lsl $16
+
1:
- cmp a3, $8 @ 8 bytes still to do?
- blt 2f
- stmia a4!, {a2, ip}
- sub a3, a3, $8
- cmp a3, $8 @ 8 bytes still to do?
- blt 2f
- stmia a4!, {a2, ip}
- sub a3, a3, $8
- cmp a3, $8 @ 8 bytes still to do?
- blt 2f
- stmia a4!, {a2, ip}
- sub a3, a3, $8
- cmp a3, $8 @ 8 bytes still to do?
- stmgeia a4!, {a2, ip}
- subge a3, a3, $8
- bge 1b
+ subs r2, r2, #8
+ strcs r1, [r3], #4 @ store up to 32 bytes per loop iteration
+ strcs r1, [r3], #4
+ subcss r2, r2, #8
+ strcs r1, [r3], #4
+ strcs r1, [r3], #4
+ subcss r2, r2, #8
+ strcs r1, [r3], #4
+ strcs r1, [r3], #4
+ subcss r2, r2, #8
+ strcs r1, [r3], #4
+ strcs r1, [r3], #4
+ bcs 1b
+
+ and r2, r2, #7
2:
- movs a3, a3 @ anything left?
- RETINSTR(moveq,pc,lr) @ nope
- rsb a3, a3, $7
- add pc, pc, a3, lsl $2
- mov r0, r0
- strb a2, [a4], $1
- strb a2, [a4], $1
- strb a2, [a4], $1
- strb a2, [a4], $1
- strb a2, [a4], $1
- strb a2, [a4], $1
- strb a2, [a4], $1
- RETINSTR(mov,pc,lr)
+ subs r2, r2, #1 @ store up to 4 bytes per loop iteration
+ strcsb r1, [r3], #1
+ subcss r2, r2, #1
+ strcsb r1, [r3], #1
+ subcss r2, r2, #1
+ strcsb r1, [r3], #1
+ subcss r2, r2, #1
+ strcsb r1, [r3], #1
+ bcs 2b
+
+ DO_RET(lr)
END(memset)
libc_hidden_builtin_def (memset)
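For readers who do not speak ARM assembly: the rewrite drops the old a1-a4 APCS register names and the computed-jump tail (the add pc, pc, ... dispatch) in favour of conditional stores, so the hot loop writes up to 32 bytes per iteration with a single backward branch. A rough C equivalent of the new algorithm (an illustrative sketch only; the real loop uses conditional execution, not inner branches):

#include <stddef.h>
#include <stdint.h>

void *
memset_sketch (void *dstpp, int c, size_t len)
{
  unsigned char *p = dstpp;
  uint32_t word;

  if (len >= 8)
    {
      /* Align the destination to a 4-byte boundary, one byte at
         a time (the "aligned yet?" loop).  */
      while ((uintptr_t) p & 3)
        {
          *p++ = c;
          len--;
        }

      /* Replicate the fill byte across a 32-bit word (the two
         orr ... lsl instructions).  */
      word = (unsigned char) c;
      word |= word << 8;
      word |= word << 16;

      /* Bulk word stores; the assembly unrolls this four-fold,
         giving up to 32 bytes per loop iteration.  */
      while (len >= 8)
        {
          ((uint32_t *) p)[0] = word;
          ((uint32_t *) p)[1] = word;
          p += 8;
          len -= 8;
        }
    }

  /* Tail: at most 7 bytes remain.  */
  while (len--)
    *p++ = c;

  return dstpp;
}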
diff --git a/sysdeps/unix/sysv/linux/arm/mmap.S b/sysdeps/unix/sysv/linux/arm/mmap.S
index af93c7bce7..7beba6841c 100644
--- a/sysdeps/unix/sysv/linux/arm/mmap.S
+++ b/sysdeps/unix/sysv/linux/arm/mmap.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 1998, 2000 Free Software Foundation, Inc.
+/* Copyright (C) 1998, 2000, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,11 +17,47 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <kernel-features.h>
+
+#define EINVAL 22
.text
ENTRY (__mmap)
+# ifdef __ASSUME_MMAP2_SYSCALL
+ /* This code is actually a couple of cycles slower than the
+ sys_mmap version below, so it might seem like a loss. But the
+ code path inside the kernel is sufficiently much shorter to
+ make it a net gain to use mmap2 when it's known to be
+ available. */
+
+ /* shuffle args */
+ str r5, [sp, #-4]!
+ ldr r5, [sp, #8]
+ str r4, [sp, #-4]!
+ ldr r4, [sp, #8]
+
+ /* convert offset to pages */
+ movs ip, r5, lsl #20
+ bne .Linval
+ mov r5, r5, lsr #12
+
+ /* do the syscall */
+ swi SYS_ify (mmap2)
+
+ /* restore registers */
+2:
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+
+ cmn r0, $4096
+ RETINSTR(movcc, pc, lr)
+ b PLTJMP(syscall_error)
+.Linval:
+ mov r0, #-EINVAL
+ b 2b
+# else
/* Because we can only get five args through the syscall interface, and
mmap() takes six, we need to build a parameter block and pass its
address instead. The 386 port does a similar trick. */
@@ -49,6 +85,7 @@ ENTRY (__mmap)
cmn r0, $4096
RETINSTR(movcc, pc, lr)
b PLTJMP(syscall_error);
+#endif
PSEUDO_END (__mmap)
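The non-__ASSUME_MMAP2_SYSCALL branch keeps the old trick of packing all six arguments into a parameter block for sys_mmap, while the new path calls sys_mmap2 directly; mmap2 takes its file offset in 4096-byte pages, so the wrapper must reject unaligned offsets and convert the rest. Roughly, in C (an illustrative sketch; SYS_mmap2 exists only on 32-bit targets such as ARM, and the real code reports failure through syscall_error rather than errno):

#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

static long
mmap_via_mmap2 (void *addr, size_t len, int prot, int flags,
                int fd, off_t offset)
{
  /* movs ip, r5, lsl #20; bne .Linval -- the low 12 bits of the
     offset must be zero, i.e. the offset must be page-aligned.  */
  if (offset & 0xfff)
    {
      errno = EINVAL;
      return -1;
    }

  /* mov r5, r5, lsr #12; swi SYS_ify (mmap2) -- pass the offset
     as a count of 4096-byte pages.  */
  return syscall (SYS_mmap2, addr, len, prot, flags, fd,
                  (long) (offset >> 12));
}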
diff --git a/sysdeps/unix/sysv/linux/arm/mmap64.S b/sysdeps/unix/sysv/linux/arm/mmap64.S
index 3936e25d26..f8361b5cbf 100644
--- a/sysdeps/unix/sysv/linux/arm/mmap64.S
+++ b/sysdeps/unix/sysv/linux/arm/mmap64.S
@@ -27,38 +27,44 @@
.text
ENTRY (__mmap64)
#ifdef __NR_mmap2
- stmfd sp!, {r4, r5, lr}
- ldr r5, [sp, $16]
- ldr r4, [sp, $12]
- movs ip, r5, lsl $20 @ check that offset is page-aligned
+ ldr ip, [sp, $4] @ offset low part
+ str r5, [sp, #-4]!
+ ldr r5, [sp, $12] @ offset high part
+ str r4, [sp, #-4]!
+ movs r4, ip, lsl $20 @ check that offset is page-aligned
+ mov ip, ip, lsr $12
+ moveqs r4, r5, lsr $12 @ check for overflow
bne .Linval
- ldr ip, [sp, $20]
- mov r5, r5, lsr $12
- orr r5, r5, ip, lsl $20 @ compose page offset
- movs ip, ip, lsr $12
- bne .Linval @ check for overflow
+ ldr r4, [sp, $8] @ load fd
+ orr r5, ip, r5, lsl $20 @ compose page offset
mov ip, r0
swi SYS_ify (mmap2)
cmn r0, $4096
- LOADREGS(ccfd, sp!, {r4, r5, pc})
# ifdef __ASSUME_MMAP2_SYSCALL
- ldmfd sp!, {r4, r5, lr}
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+ RETINSTR(movcc, pc, lr)
b PLTJMP(syscall_error)
# else
+ ldrcc r4, [sp], #4
+ ldrcc r5, [sp], #4
+ RETINSTR(movcc, pc, lr)
cmn r0, $ENOSYS
- ldmnefd sp!, {r4, r5, lr}
- bne PLTJMP(syscall_error)
+ bne .Lerror
/* The current kernel does not support mmap2. Fall back to plain
mmap if the offset is small enough. */
- ldr r5, [sp, $20]
+ ldr r5, [sp, $16]
mov r0, ip @ first arg was clobbered
teq r5, $0
- ldmeqfd sp!, {r4, r5, lr}
+ ldreq r4, [sp], #4
+ ldreq r5, [sp], #4
beq PLTJMP(__mmap)
# endif
.Linval:
mov r0, $-EINVAL
- ldmfd sp!, {r4, r5, lr}
+.Lerror:
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
b PLTJMP(syscall_error)
#else
/* The kernel headers do not support mmap2. Fall back to plain
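The reworked __mmap64 prologue does the same page conversion for a 64-bit offset: the low word must be page-aligned, the page number (offset / 4096) must still fit in 32 bits, and it is composed from both halves before the fd is loaded. The conversion in isolation, as a C sketch (the helper name off64_to_pages is hypothetical, not from the patch):

#include <errno.h>
#include <stdint.h>

static int
off64_to_pages (uint32_t off_low, uint32_t off_high, uint32_t *pages)
{
  /* movs r4, ip, lsl $20 -- the low word must be page-aligned.  */
  if (off_low & 0xfff)
    return -EINVAL;

  /* moveqs r4, r5, lsr $12 -- a high word of 2^12 or more means
     the page number would not fit in 32 bits.  */
  if (off_high >> 12)
    return -EINVAL;

  /* orr r5, ip, r5, lsl $20 -- compose the 32-bit page number
     from both halves of the 64-bit byte offset.  */
  *pages = (off_low >> 12) | (off_high << 20);
  return 0;
}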