Diffstat (limited to 'sysdeps/powerpc/powerpc64/power8/memset.S')
-rw-r--r--  sysdeps/powerpc/powerpc64/power8/memset.S | 84
1 file changed, 77 insertions, 7 deletions
diff --git a/sysdeps/powerpc/powerpc64/power8/memset.S b/sysdeps/powerpc/powerpc64/power8/memset.S
index 11433d89ad..a42232b42a 100644
--- a/sysdeps/powerpc/powerpc64/power8/memset.S
+++ b/sysdeps/powerpc/powerpc64/power8/memset.S
@@ -1,5 +1,5 @@
 /* Optimized memset implementation for PowerPC64/POWER8.
-   Copyright (C) 2014-2016 Free Software Foundation, Inc.
+   Copyright (C) 2014-2018 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -20,14 +20,18 @@
 
 #define MTVSRD_V1_R4	.long 0x7c240166	/* mtvsrd  v1,r4  */
 
-/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
+/* void * [r3] memset (void *s [r3], int c [r4], size_t n [r5]));
    Returns 's'.  */
 
+#ifndef MEMSET
+# define MEMSET memset
+#endif
+
 /* No need to use .machine power8 since mtvsrd is already
    handled by the define. It avoid breakage on binutils
    that does not support this machine specifier.  */
 .machine power7
-EALIGN (memset, 5, 0)
+ENTRY_TOCLESS (MEMSET, 5)
 	CALL_MCOUNT 3
 
 L(_memset):
@@ -373,7 +377,10 @@ L(write_LT_32):
 	subf	r5,r0,r5
 
 2:	bf	30,1f
-	sth	r4,0(r10)
+	/* Use stb instead of sth because it doesn't generate
+	   alignment interrupts on cache-inhibited storage.  */
+	stb	r4,0(r10)
+	stb	r4,1(r10)
 	addi	r10,r10,2
 
 1:	bf	31,L(end_4bytes_alignment)
@@ -433,17 +440,80 @@ L(tail5):
 	/* Handles copies of 0~8 bytes.  */
 	.align	4
L(write_LE_8):
-	bne	cr6,L(tail4)
+	bne	cr6,L(LE7_tail4)
+	/* If input is word aligned, use stw, else use stb.  */
+	andi.	r0,r10,3
+	bne	L(8_unalign)
 
 	stw	r4,0(r10)
 	stw	r4,4(r10)
 	blr
-END_GEN_TB (memset,TB_TOCLESS)
+
+	/* Unaligned input and size is 8.  */
+	.align	4
+L(8_unalign):
+	andi.	r0,r10,1
+	beq	L(8_hwalign)
+	stb	r4,0(r10)
+	sth	r4,1(r10)
+	sth	r4,3(r10)
+	sth	r4,5(r10)
+	stb	r4,7(r10)
+	blr
+
+	/* Halfword aligned input and size is 8.  */
+	.align	4
+L(8_hwalign):
+	sth	r4,0(r10)
+	sth	r4,2(r10)
+	sth	r4,4(r10)
+	sth	r4,6(r10)
+	blr
+
+	.align	4
+	/* Copies 4~7 bytes.  */
+L(LE7_tail4):
+	/* Use stb instead of sth because it doesn't generate
+	   alignment interrupts on cache-inhibited storage.  */
+	bf	29,L(LE7_tail2)
+	stb	r4,0(r10)
+	stb	r4,1(r10)
+	stb	r4,2(r10)
+	stb	r4,3(r10)
+	bf	30,L(LE7_tail5)
+	stb	r4,4(r10)
+	stb	r4,5(r10)
+	bflr	31
+	stb	r4,6(r10)
+	blr
+
+	.align	4
+	/* Copies 2~3 bytes.  */
+L(LE7_tail2):
+	bf	30,1f
+	stb	r4,0(r10)
+	stb	r4,1(r10)
+	bflr	31
+	stb	r4,2(r10)
+	blr
+
+	.align	4
+L(LE7_tail5):
+	bflr	31
+	stb	r4,4(r10)
+	blr
+
+	.align	4
+1:	bflr	31
+	stb	r4,0(r10)
+	blr
+
+END_GEN_TB (MEMSET,TB_TOCLESS)
 libc_hidden_builtin_def (memset)
 
 /* Copied from bzero.S to prevent the linker from inserting a stub
    between bzero and memset.  */
-ENTRY (__bzero)
+ENTRY_TOCLESS (__bzero)
 	CALL_MCOUNT 3
 	mr	r5,r4
 	li	r4,0
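
Editorial note: the common thread of the new tail paths is that every store is naturally aligned -- stw only at word-aligned addresses, sth only at halfword-aligned ones, and stb otherwise -- because misaligned halfword/word stores to cache-inhibited (e.g. memory-mapped device) storage raise alignment interrupts on POWER8. Below is a minimal C sketch of the size-8 dispatch (L(write_LE_8) / L(8_hwalign) / L(8_unalign)); the function name set8_natural_align and the byte-splat constants are illustrative, not glibc code.

#include <stdint.h>

/* Illustrative only: mirrors the stw/sth/stb selection in the patch.
   The pointer casts copy the assembly's access pattern; they are not
   strictly portable C, but every access is naturally aligned.  */
static void
set8_natural_align (unsigned char *p, unsigned char c)
{
  uint16_t h = (uint16_t) (c * 0x0101u);   /* byte splatted to a halfword */
  uint32_t w = c * 0x01010101u;            /* byte splatted to a word */

  if (((uintptr_t) p & 3) == 0)
    {
      /* Word aligned: two stw.  */
      *(uint32_t *) p = w;
      *(uint32_t *) (p + 4) = w;
    }
  else if (((uintptr_t) p & 1) == 0)
    {
      /* Halfword aligned: four sth, as in L(8_hwalign).  */
      *(uint16_t *) p = h;
      *(uint16_t *) (p + 2) = h;
      *(uint16_t *) (p + 4) = h;
      *(uint16_t *) (p + 6) = h;
    }
  else
    {
      /* Odd address, as in L(8_unalign): p+1, p+3 and p+5 are halfword
         aligned, so every store below is still naturally aligned.  */
      p[0] = c;
      *(uint16_t *) (p + 1) = h;
      *(uint16_t *) (p + 3) = h;
      *(uint16_t *) (p + 5) = h;
      p[7] = c;
    }
}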
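Likewise, the new L(LE7_tail*) labels handle remaining lengths of at most 7 by binary decomposition, using only byte stores. Judging from the store offsets, the bf 29/30/31 branches test condition-register bits holding length bits n & 4, n & 2 and n & 1 (set from the low bits of r5 earlier in the file). A rough C equivalent, with the made-up name set_tail_le7:

#include <stddef.h>

/* Illustrative only: byte-store tail for n <= 7, matching the
   bf 29/30/31 branch structure (bit 29 = n & 4, bit 30 = n & 2,
   bit 31 = n & 1).  */
static void
set_tail_le7 (unsigned char *p, unsigned char c, size_t n)
{
  if (n & 4)        /* L(LE7_tail4) falls through to here.  */
    {
      p[0] = c; p[1] = c; p[2] = c; p[3] = c;
      p += 4;
    }
  if (n & 2)        /* L(LE7_tail2) handles this when n < 4.  */
    {
      p[0] = c; p[1] = c;
      p += 2;
    }
  if (n & 1)        /* Final odd byte (bflr 31 paths).  */
    p[0] = c;
}

Because every store in this tail is a single byte, no alignment interrupt can occur regardless of the buffer's alignment or the storage's cache-inhibited attribute.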