summary | refs | log | tree | commit | diff
path: root/sysdeps/powerpc
diff options
context:
space:
mode:
Diffstat (limited to 'sysdeps/powerpc')
-rw-r--r--  sysdeps/powerpc/powerpc32/bsd-_setjmp.S        |  8
-rw-r--r--  sysdeps/powerpc/powerpc32/bsd-setjmp.S         |  4
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_ceil.S         | 19
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_ceilf.S        |  7
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_floor.S        | 19
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_floorf.S       |  7
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_lround.S       | 32
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_rint.S         | 19
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_rintf.S        |  8
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_round.S        | 31
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_roundf.S       | 19
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_trunc.S        | 20
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_truncf.S       |  8
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/setjmp-common.S  |  2
-rw-r--r--  sysdeps/powerpc/powerpc32/memset.S             | 90
-rw-r--r--  sysdeps/powerpc/powerpc32/ppc-mcount.S         |  2
-rw-r--r--  sysdeps/powerpc/powerpc32/setjmp-common.S      |  2
-rw-r--r--  sysdeps/powerpc/powerpc32/sysdep.h             |  2
18 files changed, 97 insertions(+), 202 deletions(-)
diff --git a/sysdeps/powerpc/powerpc32/bsd-_setjmp.S b/sysdeps/powerpc/powerpc32/bsd-_setjmp.S
index 50deda8c90..4c28c2e547 100644
--- a/sysdeps/powerpc/powerpc32/bsd-_setjmp.S
+++ b/sysdeps/powerpc/powerpc32/bsd-_setjmp.S
@@ -26,7 +26,7 @@
/* Build a non-versioned object for rtld-*. */
ENTRY (BP_SYM (_setjmp))
li r4,0 /* Set second argument to 0. */
- b JUMPTARGET(BP_SYM (__sigsetjmp))
+ b BP_SYM (__sigsetjmp@local)
END (BP_SYM (_setjmp))
libc_hidden_def (_setjmp)
#else
@@ -37,7 +37,7 @@ symbol_version (__novmx_setjmp,_setjmp,GLIBC_2.0);
ENTRY (BP_SYM (__novmx_setjmp))
li r4,0 /* Set second argument to 0. */
- b JUMPTARGET(BP_SYM (__novmx__sigsetjmp))
+ b BP_SYM (__novmx__sigsetjmp@local)
END (BP_SYM (__novmx_setjmp))
libc_hidden_def (__novmx_setjmp)
# endif /* defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4) */
@@ -48,12 +48,12 @@ default_symbol_version (__vmx_setjmp,_setjmp,GLIBC_2.3.4)
if HAVE_CLEANUP_JMP_BUF is defined */
ENTRY (BP_SYM (__GI__setjmp))
li r4,0 /* Set second argument to 0. */
- b JUMPTARGET(BP_SYM (__vmx__sigsetjmp))
+ b BP_SYM (__vmx__sigsetjmp@local)
END (BP_SYM (__GI__setjmp))
ENTRY (BP_SYM (__vmx_setjmp))
li r4,0 /* Set second argument to 0. */
- b JUMPTARGET(BP_SYM (__vmx__sigsetjmp))
+ b BP_SYM (__vmx__sigsetjmp@local)
END (BP_SYM (__vmx_setjmp))
libc_hidden_def (__vmx_setjmp)
#endif /* !NOT_IN_libc */
diff --git a/sysdeps/powerpc/powerpc32/bsd-setjmp.S b/sysdeps/powerpc/powerpc32/bsd-setjmp.S
index 159b122ca5..01b195d832 100644
--- a/sysdeps/powerpc/powerpc32/bsd-setjmp.S
+++ b/sysdeps/powerpc/powerpc32/bsd-setjmp.S
@@ -25,7 +25,7 @@
ENTRY (__novmxsetjmp)
li r4,1 /* Set second argument to 1. */
- b JUMPTARGET (__novmx__sigsetjmp)
+ b __novmx__sigsetjmp@local
END (__novmxsetjmp)
strong_alias (__novmxsetjmp, __novmx__setjmp)
symbol_version (__novmxsetjmp, setjmp, GLIBC_2.0)
@@ -34,7 +34,7 @@ symbol_version (__novmxsetjmp, setjmp, GLIBC_2.0)
ENTRY (__vmxsetjmp)
li r4,1 /* Set second argument to 1. */
- b JUMPTARGET (__vmx__sigsetjmp)
+ b __vmx__sigsetjmp@local
END (__vmxsetjmp)
strong_alias (__vmxsetjmp, __vmx__setjmp)
strong_alias (__vmx__setjmp, __setjmp)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_ceil.S b/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
index 22cf76e54c..7924e34648 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
@@ -19,19 +19,10 @@
#include <sysdep.h>
- .section .rodata
- .align 3
- .type TWO52.0,@object
- .size TWO52.0,8
-TWO52.0:
- .long 0x43300000
- .long 0
-
- .section .rodata.cst8,"aM",@progbits,8
- .align 3
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
.LC0: /* 2**52 */
- .long 0x43300000
- .long 0
+ .long 0x59800000
.section ".text"
ENTRY (__ceil)
@@ -42,10 +33,10 @@ ENTRY (__ceil)
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
- lfd fp13,0(r9)
+ lfs fp13,0(r9)
#else
lis r9,.LC0@ha
- lfd fp13,.LC0@l(r9)
+ lfs fp13,.LC0@l(r9)
#endif
fabs fp0,fp1
fsub fp12,fp13,fp13 /* generate 0.0 */
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S b/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
index e7a72186c9..9315d8d2df 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
@@ -19,13 +19,6 @@
#include <sysdep.h>
- .section .rodata
- .align 2
- .type TWO23.0,@object
- .size TWO23.0,4
-TWO23.0:
- .long 0x4b000000
-
.section .rodata.cst4,"aM",@progbits,4
.align 2
.LC0: /* 2**23 */
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_floor.S b/sysdeps/powerpc/powerpc32/fpu/s_floor.S
index 812ea7ced2..c8f59c24a6 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_floor.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_floor.S
@@ -19,19 +19,10 @@
#include <sysdep.h>
- .section .rodata
- .align 3
- .type TWO52.0,@object
- .size TWO52.0,8
-TWO52.0:
- .long 0x43300000
- .long 0
-
- .section .rodata.cst8,"aM",@progbits,8
- .align 3
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
.LC0: /* 2**52 */
- .long 0x43300000
- .long 0
+ .long 0x59800000
.section ".text"
ENTRY (__floor)
@@ -42,10 +33,10 @@ ENTRY (__floor)
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
- lfd fp13,0(r9)
+ lfs fp13,0(r9)
#else
lis r9,.LC0@ha
- lfd fp13,.LC0@l(r9)
+ lfs fp13,.LC0@l(r9)
#endif
fabs fp0,fp1
fsub fp12,fp13,fp13 /* generate 0.0 */
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_floorf.S b/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
index ead41d4657..8ee0644ac9 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
@@ -19,13 +19,6 @@
#include <sysdep.h>
- .section .rodata
- .align 2
- .type TWO23.0,@object
- .size TWO23.0,4
-TWO23.0:
- .long 0x4b000000
-
.section .rodata.cst4,"aM",@progbits,4
.align 2
.LC0: /* 2**23 */
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_lround.S b/sysdeps/powerpc/powerpc32/fpu/s_lround.S
index dcb97e373b..72fd49ba46 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_lround.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_lround.S
@@ -19,27 +19,12 @@
#include <sysdep.h>
- .section .rodata
- .align 3
- .type NEGZERO.0,@object
- .size NEGZERO.0,8
-NEGZERO.0:
- .long 0x00000000
- .long 0
- .type POINTFIVE.0,@object
- .size POINTFIVE.0,8
-POINTFIVE.0:
- .long 0x3fe00000
- .long 0
-
.section .rodata.cst8,"aM",@progbits,8
- .align 3
+ .align 2
.LC0: /* 0.0 */
.long 0x00000000
- .long 0
.LC1: /* 0.5 */
- .long 0x3fe00000
- .long 0
+ .long 0x3f000000
.section ".text"
@@ -60,17 +45,16 @@ ENTRY (__lround)
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
- lfd fp12,0(r9)
+ lfs fp12,0(r9)
#else
lis r9,.LC0@ha
- lfd fp12,.LC0@l(r9)
+ lfs fp12,.LC0@l(r9)
#endif
#ifdef SHARED
- lwz r9,.LC1@got(10)
- lfd fp10,0(r9)
+ lfs fp10,.LC1-.LC0(r9)
#else
lis r9,.LC1@ha
- lfd fp10,.LC1@l(r9)
+ lfs fp10,.LC1@l(r9)
#endif
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
ble- cr6,.L4
@@ -78,8 +62,8 @@ ENTRY (__lround)
.L9:
fctiwz fp2,fp1 /* Convert To Integer DW lround toward 0. */
stfd fp2,-8(r1)
- nop /* Insure the following load is in a different dispatch group */
- nop /* to avoid pipe stall on POWER4&5. */
+ nop /* Ensure the following load is in a different dispatch */
+ nop /* group to avoid pipe stall on POWER4&5. */
nop
lwz r3,-4(r1)
blr
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_rint.S b/sysdeps/powerpc/powerpc32/fpu/s_rint.S
index fa02dbc59c..4abdcedfe8 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_rint.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_rint.S
@@ -22,19 +22,10 @@
#include <sysdep.h>
- .section .rodata
- .align 3
- .type TWO52.0,@object
- .size TWO52.0,8
-TWO52.0:
- .long 0x43300000
- .long 0
-
- .section .rodata.cst8,"aM",@progbits,8
- .align 3
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
.LC0: /* 2**52 */
- .long 0x43300000
- .long 0
+ .long 0x59800000
.section ".text"
ENTRY (__rint)
@@ -44,10 +35,10 @@ ENTRY (__rint)
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
- lfd fp13,0(r9)
+ lfs fp13,0(r9)
#else
lis r9,.LC0@ha
- lfd fp13,.LC0@l(r9)
+ lfs fp13,.LC0@l(r9)
#endif
fabs fp0,fp1
fsub fp12,fp13,fp13 /* generate 0.0 */
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_rintf.S b/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
index 7825951268..d02bd066b8 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
@@ -19,14 +19,6 @@
#include <sysdep.h>
-
- .section .rodata
- .align 2
- .type TWO23.0,@object
- .size TWO23.0,4
-TWO23.0:
- .long 0x4b000000
-
.section .rodata.cst4,"aM",@progbits,4
.align 2
.LC0: /* 2**23 */
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_round.S b/sysdeps/powerpc/powerpc32/fpu/s_round.S
index 39eab232f6..96fc2984fd 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_round.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_round.S
@@ -19,27 +19,12 @@
#include <sysdep.h>
- .section .rodata
- .align 3
- .type TWO52.0,@object
- .size TWO52.0,8
-TWO52.0:
- .long 0x43300000
- .long 0
- .type POINTFIVE.0,@object
- .size POINTFIVE.0,8
-POINTFIVE.0:
- .long 0x3fe00000
- .long 0
-
.section .rodata.cst8,"aM",@progbits,8
- .align 3
+ .align 2
.LC0: /* 2**52 */
- .long 0x43300000
- .long 0
+ .long 0x59800000
.LC1: /* 0.5 */
- .long 0x3fe00000
- .long 0
+ .long 0x3f000000
/* double [fp1] round (double x [fp1])
IEEE 1003.1 round function. IEEE specifies "round to the nearest
@@ -51,6 +36,7 @@ POINTFIVE.0:
"Round toward Zero" mode and round by adding +-0.5 before rounding
to the integer value. */
+ .section ".text"
ENTRY (__round)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
@@ -59,10 +45,10 @@ ENTRY (__round)
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
- lfd fp13,0(r9)
+ lfs fp13,0(r9)
#else
lis r9,.LC0@ha
- lfd fp13,.LC0@l(r9)
+ lfs fp13,.LC0@l(r9)
#endif
fabs fp0,fp1
fsub fp12,fp13,fp13 /* generate 0.0 */
@@ -71,11 +57,10 @@ ENTRY (__round)
bnllr- cr7
mtfsfi 7,1 /* Set rounding mode toward 0. */
#ifdef SHARED
- lwz r9,.LC1@got(10)
- lfd fp10,0(r9)
+ lfs fp10,.LC1-.LC0(r9)
#else
lis r9,.LC1@ha
- lfd fp10,.LC1@l(r9)
+ lfs fp10,.LC1@l(r9)
#endif
ble- cr6,.L4
fadd fp1,fp1,fp10 /* x+= 0.5; */
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_roundf.S b/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
index a9b42f0170..87965dea80 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
@@ -19,19 +19,8 @@
#include <sysdep.h>
- .section .rodata
- .align 2
- .type TWO23.0,@object
- .size TWO23.0,4
-TWO23.0:
- .long 0x4b000000
- .type POINTFIVE.0,@object
- .size POINTFIVE.0,4
-POINTFIVE.0:
- .long 0x3f000000
-
- .section .rodata.cst4,"aM",@progbits,4
- .align 2
+ .section .rodata.cst8,"aM",@progbits,8
+ .align 2
.LC0: /* 2**23 */
.long 0x4b000000
.LC1: /* 0.5 */
@@ -47,6 +36,7 @@ POINTFIVE.0:
"Round toward Zero" mode and round by adding +-0.5 before rounding
to the integer value. */
+ .section ".text"
ENTRY (__roundf )
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
@@ -67,8 +57,7 @@ ENTRY (__roundf )
bnllr- cr7
mtfsfi 7,1 /* Set rounding mode toward 0. */
#ifdef SHARED
- lwz r9,.LC1@got(10)
- lfs fp10,0(r9)
+ lfs fp10,.LC1-.LC0(r9)
#else
lis r9,.LC1@ha
lfs fp10,.LC1@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_trunc.S b/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
index 08acc00cb2..7a3e705a81 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
@@ -19,19 +19,10 @@
#include <sysdep.h>
- .section .rodata
- .align 3
- .type TWO52.0,@object
- .size TWO52.0,8
-TWO52.0:
- .long 0x43300000
- .long 0
-
- .section .rodata.cst8,"aM",@progbits,8
- .align 3
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
.LC0: /* 2**52 */
- .long 0x43300000
- .long 0
+ .long 0x59800000
/* double [fp1] trunc (double x [fp1])
IEEE 1003.1 trunc function. IEEE specifies "trunc to the integer
@@ -40,6 +31,7 @@ TWO52.0:
We set "round toward Zero" mode and trunc by adding +-2**52 then
subtracting +-2**52. */
+ .section ".text"
ENTRY (__trunc)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
@@ -48,10 +40,10 @@ ENTRY (__trunc)
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
- lfd fp13,0(r9)
+ lfs fp13,0(r9)
#else
lis r9,.LC0@ha
- lfd fp13,.LC0@l(r9)
+ lfs fp13,.LC0@l(r9)
#endif
fabs fp0,fp1
fsub fp12,fp13,fp13 /* generate 0.0 */
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_truncf.S b/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
index 3b6fe731b4..5275c69d29 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
@@ -19,13 +19,6 @@
#include <sysdep.h>
- .section .rodata
- .align 2
- .type TWO23.0,@object
- .size TWO23.0,2
-TWO23.0:
- .long 0x4b000000
-
.section .rodata.cst4,"aM",@progbits,4
.align 2
.LC0: /* 2**23 */
@@ -38,6 +31,7 @@ TWO23.0:
We set "round toward Zero" mode and trunc by adding +-2**23 then
subtracting +-2**23. */
+ .section ".text"
ENTRY (__truncf)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
diff --git a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
index 77ee05f487..e0c0606da4 100644
--- a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
@@ -164,5 +164,5 @@ L(aligned_save_vmx):
stvx 31,0,r6
L(no_vmx):
#endif
- b JUMPTARGET (BP_SYM (__sigjmp_save))
+ b BP_SYM (__sigjmp_save@local)
END (BP_SYM (__sigsetjmp))
diff --git a/sysdeps/powerpc/powerpc32/memset.S b/sysdeps/powerpc/powerpc32/memset.S
index 53f1143320..4c0edc8e45 100644
--- a/sysdeps/powerpc/powerpc32/memset.S
+++ b/sysdeps/powerpc/powerpc32/memset.S
@@ -140,7 +140,7 @@ L(nondcbz):
/* We can't use dcbz here as we don't know the cache line size. We can
use "data cache block touch for store", which is safe. */
-L(c3): dcbtst rNEG64, rMEMP
+L(c3): dcbtst rNEG64, rMEMP
stw rCHR, -4(rMEMP)
stw rCHR, -8(rMEMP)
stw rCHR, -12(rMEMP)
@@ -166,7 +166,7 @@ L(cloopdone):
add rMEMP, rMEMP, rALIGN
b L(medium_tail2) /* 72nd instruction from .align */
- .align 5
+ .align 5
nop
/* Clear cache lines of memory in 128-byte chunks.
This code is optimized for processors with 32-byte cache lines.
@@ -200,7 +200,7 @@ L(zloop):
beqlr cr5
b L(medium_tail2)
- .align 5
+ .align 5
L(small):
/* Memset of 4 bytes or less. */
cmplwi cr5, rLEN, 1
@@ -218,7 +218,7 @@ L(small):
blr
/* Memset of 0-31 bytes. */
- .align 5
+ .align 5
L(medium):
cmplwi cr1, rLEN, 16
L(medium_tail2):
@@ -258,70 +258,70 @@ L(medium_28t):
L(checklinesize):
#ifdef SHARED
- mflr rTMP
+ mflr rTMP
/* If the remaining length is less the 32 bytes then don't bother getting
- the cache line size. */
+ the cache line size. */
beq L(medium)
/* Establishes GOT addressability so we can load __cache_line_size
from static. This value was set from the aux vector during startup. */
- bl _GLOBAL_OFFSET_TABLE_@local-4
- mflr rGOT
- lwz rGOT,__cache_line_size@got(rGOT)
- lwz rCLS,0(rGOT)
- mtlr rTMP
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr rGOT
+ lwz rGOT,__cache_line_size@got(rGOT)
+ lwz rCLS,0(rGOT)
+ mtlr rTMP
#else
/* Load __cache_line_size from static. This value was set from the
aux vector during startup. */
- lis rCLS,__cache_line_size@ha
+ lis rCLS,__cache_line_size@ha
/* If the remaining length is less the 32 bytes then don't bother getting
- the cache line size. */
+ the cache line size. */
beq L(medium)
- lwz rCLS,__cache_line_size@l(rCLS)
+ lwz rCLS,__cache_line_size@l(rCLS)
#endif
-/*If the cache line size was not set then goto to L(nondcbz), which is
- safe for any cache line size. */
- cmplwi cr1,rCLS,0
+/* If the cache line size was not set then goto to L(nondcbz), which is
+ safe for any cache line size. */
+ cmplwi cr1,rCLS,0
beq cr1,L(nondcbz)
/* If the cache line size is 32 bytes then goto to L(zloopstart),
- which is coded specificly for 32-byte lines (and 601). */
- cmplwi cr1,rCLS,32
+ which is coded specificly for 32-byte lines (and 601). */
+ cmplwi cr1,rCLS,32
beq cr1,L(zloopstart)
/* Now we know the cache line size and it is not 32-bytes. However
- we may not yet be aligned to the cache line and may have a partial
- line to fill. Touch it 1st to fetch the cache line. */
- dcbtst 0,rMEMP
+ we may not yet be aligned to the cache line and may have a partial
+ line to fill. Touch it 1st to fetch the cache line. */
+ dcbtst 0,rMEMP
- addi rCLM,rCLS,-1
+ addi rCLM,rCLS,-1
L(getCacheAligned):
- cmplwi cr1,rLEN,32
- and. rTMP,rCLM,rMEMP
- blt cr1,L(handletail32)
- beq L(cacheAligned)
+ cmplwi cr1,rLEN,32
+ and. rTMP,rCLM,rMEMP
+ blt cr1,L(handletail32)
+ beq L(cacheAligned)
/* We are not aligned to start of a cache line yet. Store 32-byte
of data and test again. */
- addi rMEMP,rMEMP,32
- addi rLEN,rLEN,-32
- stw rCHR,-32(rMEMP)
- stw rCHR,-28(rMEMP)
- stw rCHR,-24(rMEMP)
- stw rCHR,-20(rMEMP)
- stw rCHR,-16(rMEMP)
- stw rCHR,-12(rMEMP)
- stw rCHR,-8(rMEMP)
- stw rCHR,-4(rMEMP)
- b L(getCacheAligned)
+ addi rMEMP,rMEMP,32
+ addi rLEN,rLEN,-32
+ stw rCHR,-32(rMEMP)
+ stw rCHR,-28(rMEMP)
+ stw rCHR,-24(rMEMP)
+ stw rCHR,-20(rMEMP)
+ stw rCHR,-16(rMEMP)
+ stw rCHR,-12(rMEMP)
+ stw rCHR,-8(rMEMP)
+ stw rCHR,-4(rMEMP)
+ b L(getCacheAligned)
/* Now we are aligned to the cache line and can use dcbz. */
L(cacheAligned):
- cmplw cr1,rLEN,rCLS
- blt cr1,L(handletail32)
- dcbz 0,rMEMP
- subf rLEN,rCLS,rLEN
- add rMEMP,rMEMP,rCLS
- b L(cacheAligned)
+ cmplw cr1,rLEN,rCLS
+ blt cr1,L(handletail32)
+ dcbz 0,rMEMP
+ subf rLEN,rCLS,rLEN
+ add rMEMP,rMEMP,rCLS
+ b L(cacheAligned)
/* We are here because; the cache line size was set, it was not
32-bytes, and the remainder (rLEN) is now less than the actual cache
@@ -329,7 +329,7 @@ L(cacheAligned):
store the remaining bytes. */
L(handletail32):
clrrwi. rALIGN, rLEN, 5
- b L(nondcbz)
+ b L(nondcbz)
END (BP_SYM (memset))
libc_hidden_builtin_def (memset)
diff --git a/sysdeps/powerpc/powerpc32/ppc-mcount.S b/sysdeps/powerpc/powerpc32/ppc-mcount.S
index a72d676bbe..314c8ee703 100644
--- a/sysdeps/powerpc/powerpc32/ppc-mcount.S
+++ b/sysdeps/powerpc/powerpc32/ppc-mcount.S
@@ -62,7 +62,7 @@ ENTRY(_mcount)
stw r10,40(r1)
stw r4, 44(r1)
stw r5, 8(r1)
- bl JUMPTARGET(__mcount_internal)
+ bl __mcount_internal@local
nop
/* Restore the registers... */
lwz r6, 8(r1)
diff --git a/sysdeps/powerpc/powerpc32/setjmp-common.S b/sysdeps/powerpc/powerpc32/setjmp-common.S
index 40f626498c..ad7113f461 100644
--- a/sysdeps/powerpc/powerpc32/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/setjmp-common.S
@@ -55,5 +55,5 @@ ENTRY (BP_SYM (__sigsetjmp))
stw r29,((JB_GPRS+15)*4)(3)
stw r30,((JB_GPRS+16)*4)(3)
stw r31,((JB_GPRS+17)*4)(3)
- b JUMPTARGET (BP_SYM (__sigjmp_save))
+ b BP_SYM (__sigjmp_save@local)
END (BP_SYM (__sigsetjmp))
diff --git a/sysdeps/powerpc/powerpc32/sysdep.h b/sysdeps/powerpc/powerpc32/sysdep.h
index 59761f75b9..775073f325 100644
--- a/sysdeps/powerpc/powerpc32/sysdep.h
+++ b/sysdeps/powerpc/powerpc32/sysdep.h
@@ -124,7 +124,7 @@
#define PSEUDO_RET \
bnslr+; \
- b JUMPTARGET(__syscall_error)
+ b __syscall_error@local
#define ret PSEUDO_RET
#undef PSEUDO_END