path: root/sysdeps/powerpc
author    Jakub Jelinek <jakub@redhat.com>    2005-06-20 15:59:03 +0000
committer Jakub Jelinek <jakub@redhat.com>    2005-06-20 15:59:03 +0000
commit    27424b29289a49958e62450203f33a57dc1465e2 (patch)
tree      7045409bd7d383127ecac0f78325a2a035b3f754 /sysdeps/powerpc
parent    841d8c3466e6472c9cd16ee5bff701ba0380998a (diff)
Updated to fedora-glibc-20050620T1530
Diffstat (limited to 'sysdeps/powerpc')
-rw-r--r--  sysdeps/powerpc/fpu/libm-test-ulps                |  30
-rw-r--r--  sysdeps/powerpc/fpu/s_fabs.S                      |   8
-rw-r--r--  sysdeps/powerpc/fpu/s_fmax.S                      |   8
-rw-r--r--  sysdeps/powerpc/fpu/s_fmin.S                      |   8
-rw-r--r--  sysdeps/powerpc/powerpc32/backtrace.c             |   3
-rw-r--r--  sysdeps/powerpc/powerpc32/configure               |  33
-rw-r--r--  sysdeps/powerpc/powerpc32/configure.in            |  18
-rw-r--r--  sysdeps/powerpc/powerpc32/dl-dtprocnum.h          |   3
-rw-r--r--  sysdeps/powerpc/powerpc32/dl-machine.h            | 124
-rw-r--r--  sysdeps/powerpc/powerpc32/dl-start.S              |   7
-rw-r--r--  sysdeps/powerpc/powerpc32/elf/start.S             |  12
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S  |   9
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_ceil.S            |   8
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_ceilf.S           |  10
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_copysign.S        |   6
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_floor.S           |   8
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_floorf.S          |  10
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_lround.S          |   7
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_rint.S            |   8
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_rintf.S           |  10
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_round.S           |   7
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_roundf.S          |   7
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_trunc.S           |   8
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/s_truncf.S          |  10
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/setjmp-common.S     |  19
-rw-r--r--  sysdeps/powerpc/powerpc32/memset.S                |   7
-rw-r--r--  sysdeps/powerpc/powerpc32/ppc-mcount.S            |  20
-rw-r--r--  sysdeps/powerpc/powerpc32/sysdep.h                |  23
-rw-r--r--  sysdeps/powerpc/powerpc64/backtrace.c             |   3
-rw-r--r--  sysdeps/powerpc/powerpc64/fpu/s_copysign.S        |   6
30 files changed, 339 insertions(+), 101 deletions(-)
diff --git a/sysdeps/powerpc/fpu/libm-test-ulps b/sysdeps/powerpc/fpu/libm-test-ulps
index 6dd3940d22..850a10746c 100644
--- a/sysdeps/powerpc/fpu/libm-test-ulps
+++ b/sysdeps/powerpc/fpu/libm-test-ulps
@@ -226,9 +226,9 @@ idouble: 2
ifloat: 3
Test "Real part of: cpow (2 + 3 i, 4 + 0 i) == -119.0 - 120.0 i":
double: 1
-float: 4
+float: 5
idouble: 1
-ifloat: 4
+ifloat: 5
Test "Imaginary part of: cpow (2 + 3 i, 4 + 0 i) == -119.0 - 120.0 i":
float: 2
ifloat: 2
@@ -351,6 +351,9 @@ double: 1
float: 1
idouble: 1
ifloat: 1
+Test "j0 (2.0) == 0.223890779141235668051827454649948626":
+float: 2
+ifloat: 2
Test "j0 (10.0) == -0.245935764451348335197760862485328754":
double: 2
float: 1
@@ -382,6 +385,9 @@ double: 1
float: 1
idouble: 1
ifloat: 1
+Test "jn (0, 2.0) == 0.223890779141235668051827454649948626":
+float: 2
+ifloat: 2
Test "jn (0, 10.0) == -0.245935764451348335197760862485328754":
double: 2
float: 1
@@ -418,8 +424,8 @@ Test "jn (10, 10.0) == 0.207486106633358857697278723518753428":
float: 1
ifloat: 1
Test "jn (10, 2.0) == 0.251538628271673670963516093751820639e-6":
-float: 3
-ifloat: 3
+float: 4
+ifloat: 4
Test "jn (3, 0.125) == 0.406503832554912875023029337653442868e-4":
double: 1
float: 1
@@ -435,9 +441,9 @@ idouble: 3
ifloat: 1
Test "jn (3, 2.0) == 0.128943249474402051098793332969239835":
double: 1
-float: 1
+float: 2
idouble: 1
-ifloat: 1
+ifloat: 2
# lgamma
Test "lgamma (0.7) == 0.260867246531666514385732417016759578":
@@ -730,9 +736,9 @@ ifloat: 1
Function: Real part of "cpow":
double: 2
-float: 4
+float: 5
idouble: 2
-ifloat: 4
+ifloat: 5
Function: Imaginary part of "cpow":
double: 2
@@ -800,9 +806,9 @@ ifloat: 1
Function: "j0":
double: 2
-float: 1
+float: 2
idouble: 2
-ifloat: 1
+ifloat: 2
Function: "j1":
double: 1
@@ -812,9 +818,9 @@ ifloat: 2
Function: "jn":
double: 3
-float: 3
+float: 4
idouble: 3
-ifloat: 3
+ifloat: 4
Function: "lgamma":
double: 1
diff --git a/sysdeps/powerpc/fpu/s_fabs.S b/sysdeps/powerpc/fpu/s_fabs.S
index 157ef09507..ab9a3a99bb 100644
--- a/sysdeps/powerpc/fpu/s_fabs.S
+++ b/sysdeps/powerpc/fpu/s_fabs.S
@@ -25,13 +25,13 @@ ENTRY(__fabs)
blr
END(__fabs)
-weak_alias(__fabs,fabs)
+weak_alias (__fabs,fabs)
/* It turns out that it's safe to use this code even for single-precision. */
strong_alias(__fabs,__fabsf)
-weak_alias(__fabs,fabsf)
+weak_alias (__fabs,fabsf)
#ifdef NO_LONG_DOUBLE
-weak_alias(__fabs,__fabsl)
-weak_alias(__fabs,fabsl)
+weak_alias (__fabs,__fabsl)
+weak_alias (__fabs,fabsl)
#endif
diff --git a/sysdeps/powerpc/fpu/s_fmax.S b/sysdeps/powerpc/fpu/s_fmax.S
index 5666cdd079..8502c863b2 100644
--- a/sysdeps/powerpc/fpu/s_fmax.S
+++ b/sysdeps/powerpc/fpu/s_fmax.S
@@ -31,13 +31,13 @@ ENTRY(__fmax)
blr
END(__fmax)
-weak_alias(__fmax,fmax)
+weak_alias (__fmax,fmax)
/* It turns out that it's safe to use this code even for single-precision. */
strong_alias(__fmax,__fmaxf)
-weak_alias(__fmax,fmaxf)
+weak_alias (__fmax,fmaxf)
#ifdef NO_LONG_DOUBLE
-weak_alias(__fmax,__fmaxl)
-weak_alias(__fmax,fmaxl)
+weak_alias (__fmax,__fmaxl)
+weak_alias (__fmax,fmaxl)
#endif
diff --git a/sysdeps/powerpc/fpu/s_fmin.S b/sysdeps/powerpc/fpu/s_fmin.S
index 96387d9ae1..5f788d06f3 100644
--- a/sysdeps/powerpc/fpu/s_fmin.S
+++ b/sysdeps/powerpc/fpu/s_fmin.S
@@ -31,13 +31,13 @@ ENTRY(__fmin)
blr
END(__fmin)
-weak_alias(__fmin,fmin)
+weak_alias (__fmin,fmin)
/* It turns out that it's safe to use this code even for single-precision. */
strong_alias(__fmin,__fminf)
-weak_alias(__fmin,fminf)
+weak_alias (__fmin,fminf)
#ifdef NO_LONG_DOUBLE
-weak_alias(__fmin,__fminl)
-weak_alias(__fmin,fminl)
+weak_alias (__fmin,__fminl)
+weak_alias (__fmin,fminl)
#endif
diff --git a/sysdeps/powerpc/powerpc32/backtrace.c b/sysdeps/powerpc/powerpc32/backtrace.c
index 118f0d6175..e7e12544c5 100644
--- a/sysdeps/powerpc/powerpc32/backtrace.c
+++ b/sysdeps/powerpc/powerpc32/backtrace.c
@@ -1,5 +1,5 @@
/* Return backtrace of current program state.
- Copyright (C) 1998, 2000 Free Software Foundation, Inc.
+ Copyright (C) 1998, 2000, 2005 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -64,3 +64,4 @@ __backtrace (void **array, int size)
return count;
}
weak_alias (__backtrace, backtrace)
+libc_hidden_def (__backtrace)
diff --git a/sysdeps/powerpc/powerpc32/configure b/sysdeps/powerpc/powerpc32/configure
new file mode 100644
index 0000000000..9ebac38b57
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/configure
@@ -0,0 +1,33 @@
+# This file is generated from configure.in by Autoconf. DO NOT EDIT!
+ # Local configure fragment for sysdeps/powerpc/powerpc32.
+
+# See whether gas has R_PPC_REL16 relocs.
+echo "$as_me:$LINENO: checking for R_PPC_REL16 gas support" >&5
+echo $ECHO_N "checking for R_PPC_REL16 gas support... $ECHO_C" >&6
+if test "${libc_cv_ppc_rel16+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat > conftest.s <<\EOF
+ .text
+ addis 11,30,_GLOBAL_OFFSET_TABLE_-.@ha
+EOF
+if { ac_try='${CC-cc} -c $CFLAGS conftest.s 1>&5'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ libc_cv_ppc_rel16=yes
+else
+ libc_cv_ppc_rel16=no
+fi
+rm -f conftest*
+fi
+echo "$as_me:$LINENO: result: $libc_cv_ppc_rel16" >&5
+echo "${ECHO_T}$libc_cv_ppc_rel16" >&6
+if test $libc_cv_ppc_rel16 = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_ASM_PPC_REL16 1
+_ACEOF
+
+fi
diff --git a/sysdeps/powerpc/powerpc32/configure.in b/sysdeps/powerpc/powerpc32/configure.in
new file mode 100644
index 0000000000..6d2c41cb9a
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/configure.in
@@ -0,0 +1,18 @@
+GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
+# Local configure fragment for sysdeps/powerpc/powerpc32.
+
+# See whether gas has R_PPC_REL16 relocs.
+AC_CACHE_CHECK(for R_PPC_REL16 gas support, libc_cv_ppc_rel16, [dnl
+cat > conftest.s <<\EOF
+ .text
+ addis 11,30,_GLOBAL_OFFSET_TABLE_-.@ha
+EOF
+if AC_TRY_COMMAND(${CC-cc} -c $CFLAGS conftest.s 1>&AS_MESSAGE_LOG_FD); then
+ libc_cv_ppc_rel16=yes
+else
+ libc_cv_ppc_rel16=no
+fi
+rm -f conftest*])
+if test $libc_cv_ppc_rel16 = yes; then
+ AC_DEFINE(HAVE_ASM_PPC_REL16)
+fi
diff --git a/sysdeps/powerpc/powerpc32/dl-dtprocnum.h b/sysdeps/powerpc/powerpc32/dl-dtprocnum.h
new file mode 100644
index 0000000000..7fe2be7939
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/dl-dtprocnum.h
@@ -0,0 +1,3 @@
+/* Number of extra dynamic section entries for this architecture. By
+ default there are none. */
+#define DT_THISPROCNUM DT_PPC_NUM
diff --git a/sysdeps/powerpc/powerpc32/dl-machine.h b/sysdeps/powerpc/powerpc32/dl-machine.h
index 545c19b300..496fa71ecc 100644
--- a/sysdeps/powerpc/powerpc32/dl-machine.h
+++ b/sysdeps/powerpc/powerpc32/dl-machine.h
@@ -25,6 +25,10 @@
#include <assert.h>
#include <dl-tls.h>
+/* Translate a processor specific dynamic tag to the index
+ in l_info array. */
+#define DT_PPC(x) (DT_PPC_##x - DT_LOPROC + DT_NUM)
+
/* Return nonzero iff ELF header is compatible with the running host. */
static inline int
elf_machine_matches_host (const Elf32_Ehdr *ehdr)
@@ -32,24 +36,38 @@ elf_machine_matches_host (const Elf32_Ehdr *ehdr)
return ehdr->e_machine == EM_PPC;
}
+/* Return the value of the GOT pointer. */
+static inline Elf32_Addr * __attribute__ ((const))
+ppc_got (void)
+{
+ Elf32_Addr *got;
+#ifdef HAVE_ASM_PPC_REL16
+ asm ("bcl 20,31,1f\n"
+ "1: mflr %0\n"
+ " addis %0,%0,_GLOBAL_OFFSET_TABLE_-1b@ha\n"
+ " addi %0,%0,_GLOBAL_OFFSET_TABLE_-1b@l\n"
+ : "=b" (got) : : "lr");
+#else
+ asm (" bl _GLOBAL_OFFSET_TABLE_-4@local"
+ : "=l" (got));
+#endif
+ return got;
+}
/* Return the link-time address of _DYNAMIC, stored as
the first value in the GOT. */
-static inline Elf32_Addr
+static inline Elf32_Addr __attribute__ ((const))
elf_machine_dynamic (void)
{
- Elf32_Addr *got;
- asm (" bl _GLOBAL_OFFSET_TABLE_-4@local"
- : "=l"(got));
- return *got;
+ return *ppc_got ();
}
/* Return the run-time load address of the shared object. */
-static inline Elf32_Addr
+static inline Elf32_Addr __attribute__ ((const))
elf_machine_load_address (void)
{
- unsigned int *got;
- unsigned int *branchaddr;
+ Elf32_Addr *branchaddr;
+ Elf32_Addr runtime_dynamic;
/* This is much harder than you'd expect. Possibly I'm missing something.
The 'obvious' way:
@@ -80,19 +98,17 @@ elf_machine_load_address (void)
the address ourselves. That gives us the following code: */
/* Get address of the 'b _DYNAMIC@local'... */
- asm ("bl 0f ;"
+ asm ("bcl 20,31,0f;"
"b _DYNAMIC@local;"
"0:"
- : "=l"(branchaddr));
-
- /* ... and the address of the GOT. */
- asm (" bl _GLOBAL_OFFSET_TABLE_-4@local"
- : "=l"(got));
+ : "=l" (branchaddr));
/* So now work out the difference between where the branch actually points,
and the offset of that location in memory from the start of the file. */
- return ((Elf32_Addr)branchaddr - *got
- + ((int)(*branchaddr << 6 & 0xffffff00) >> 6));
+ runtime_dynamic = ((Elf32_Addr) branchaddr
+ + ((Elf32_Sword) (*branchaddr << 6 & 0xffffff00) >> 6));
+
+ return runtime_dynamic - elf_machine_dynamic ();
}
#define ELF_MACHINE_BEFORE_RTLD_RELOC(dynamic_info) /* nothing */
@@ -144,13 +160,69 @@ __elf_preferred_address(struct link_map *loader, size_t maplength,
/* The PowerPC never uses REL relocations. */
#define ELF_MACHINE_NO_REL 1
-/* Set up the loaded object described by L so its unrelocated PLT
+/* Set up the loaded object described by MAP so its unrelocated PLT
entries will jump to the on-demand fixup code in dl-runtime.c.
Also install a small trampoline to be used by entries that have
been relocated to an address too far away for a single branch. */
extern int __elf_machine_runtime_setup (struct link_map *map,
int lazy, int profile);
-#define elf_machine_runtime_setup __elf_machine_runtime_setup
+
+static inline int
+elf_machine_runtime_setup (struct link_map *map,
+ int lazy, int profile)
+{
+ if (map->l_info[DT_JMPREL] == 0)
+ return lazy;
+
+ if (map->l_info[DT_PPC(GOT)] == 0)
+ /* Handle old style PLT. */
+ return __elf_machine_runtime_setup (map, lazy, profile);
+
+ /* New style non-exec PLT consisting of an array of addresses. */
+ map->l_info[DT_PPC(GOT)]->d_un.d_ptr += map->l_addr;
+ if (lazy)
+ {
+ Elf32_Addr *plt, *got, glink;
+ Elf32_Word num_plt_entries;
+ void (*dlrr) (void);
+ extern void _dl_runtime_resolve (void);
+ extern void _dl_prof_resolve (void);
+
+ if (__builtin_expect (!profile, 1))
+ dlrr = _dl_runtime_resolve;
+ else
+ {
+ if (GLRO(dl_profile) != NULL
+ &&_dl_name_match_p (GLRO(dl_profile), map))
+ GL(dl_profile_map) = map;
+ dlrr = _dl_prof_resolve;
+ }
+ got = (Elf32_Addr *) map->l_info[DT_PPC(GOT)]->d_un.d_ptr;
+ glink = got[1];
+ got[1] = (Elf32_Addr) dlrr;
+ got[2] = (Elf32_Addr) map;
+
+ /* Relocate everything in .plt by the load address offset. */
+ plt = (Elf32_Addr *) D_PTR (map, l_info[DT_PLTGOT]);
+ num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+ / sizeof (Elf32_Rela));
+
+ /* If a library is prelinked but we have to relocate anyway,
+ we have to be able to undo the prelinking of .plt section.
+ The prelinker saved us at got[1] address of .glink
+ section's start. */
+ if (glink)
+ {
+ glink += map->l_addr;
+ while (num_plt_entries-- != 0)
+ *plt++ = glink, glink += 4;
+ }
+ else
+ while (num_plt_entries-- != 0)
+ *plt++ += map->l_addr;
+ }
+ return lazy;
+}
/* Change the PLT entry whose reloc is 'reloc' to call the actual routine. */
extern Elf32_Addr __elf_machine_fixup_plt (struct link_map *map,
@@ -163,7 +235,12 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
const Elf32_Rela *reloc,
Elf32_Addr *reloc_addr, Elf64_Addr finaladdr)
{
- return __elf_machine_fixup_plt (map, reloc, reloc_addr, finaladdr);
+ if (map->l_info[DT_PPC(GOT)] == 0)
+ /* Handle old style PLT. */
+ return __elf_machine_fixup_plt (map, reloc, reloc_addr, finaladdr);
+
+ *reloc_addr = finaladdr;
+ return finaladdr;
}
/* Return the final value of a plt relocation. */
@@ -286,11 +363,16 @@ elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
break;
#endif /* USE_TLS etc. */
-#ifdef RESOLVE_CONFLICT_FIND_MAP
case R_PPC_JMP_SLOT:
+#ifdef RESOLVE_CONFLICT_FIND_MAP
RESOLVE_CONFLICT_FIND_MAP (map, reloc_addr);
- /* FALLTHROUGH */
#endif
+ if (map->l_info[DT_PPC(GOT)] != 0)
+ {
+ *reloc_addr = value;
+ break;
+ }
+ /* FALLTHROUGH */
default:
__process_machine_rela (map, reloc, sym_map, sym, refsym,
diff --git a/sysdeps/powerpc/powerpc32/dl-start.S b/sysdeps/powerpc/powerpc32/dl-start.S
index d72202d4a4..e1f7f6e24a 100644
--- a/sysdeps/powerpc/powerpc32/dl-start.S
+++ b/sysdeps/powerpc/powerpc32/dl-start.S
@@ -47,8 +47,15 @@ ENTRY(_dl_start_user)
passed by value!). */
/* Put our GOT pointer in r31, */
+#ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r31
+ addis r31,r31,_GLOBAL_OFFSET_TABLE_-1b@ha
+ addi r31,r31,_GLOBAL_OFFSET_TABLE_-1b@l
+#else
bl _GLOBAL_OFFSET_TABLE_-4@local
mflr r31
+#endif
/* the address of _start in r30, */
mr r30,r3
/* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28. */
diff --git a/sysdeps/powerpc/powerpc32/elf/start.S b/sysdeps/powerpc/powerpc32/elf/start.S
index 7827357a6c..bafd2ae001 100644
--- a/sysdeps/powerpc/powerpc32/elf/start.S
+++ b/sysdeps/powerpc/powerpc32/elf/start.S
@@ -52,7 +52,7 @@ L(start_addresses):
ASM_SIZE_DIRECTIVE(L(start_addresses))
.section ".text"
-#ifdef PIC
+#if defined PIC && !defined HAVE_ASM_PPC_REL16
L(start_addressesp):
.long L(start_addresses)-L(branch)
#endif
@@ -73,11 +73,19 @@ L(branch):
mtlr r0
stw r0,0(r1)
/* Set r13 to point at the 'small data area', and put the address of
- start_addresses in r8... */
+ start_addresses in r8. Also load the GOT pointer so that new PLT
+ calls work, like the one to __libc_start_main. */
#ifdef PIC
+# ifdef HAVE_ASM_PPC_REL16
+ addis r30,r13,_GLOBAL_OFFSET_TABLE_-L(branch)@ha
+ addis r8,r13,L(start_addresses)-L(branch)@ha
+ addi r30,r30,_GLOBAL_OFFSET_TABLE_-L(branch)@l
+ lwzu r13,L(start_addresses)-L(branch)@l(r8)
+# else
lwz r8,L(start_addressesp)-L(branch)(r13)
add r8,r13,r8
lwz r13,0(r8)
+# endif
#else
lis r8,L(start_addresses)@ha
lwzu r13,L(start_addresses)@l(r8)
diff --git a/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S b/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
index a2415b9542..73cc8181f9 100644
--- a/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
@@ -34,14 +34,21 @@ ENTRY (BP_SYM (__longjmp))
#ifndef __NO_VMX__
# ifdef PIC
mflr r6
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r5
+ addis r5,r5,_GLOBAL_OFFSET_TABLE_-1b@ha
+ addi r5,r5,_GLOBAL_OFFSET_TABLE_-1b@l
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r5
+# endif
# ifdef SHARED
lwz r5,_rtld_global_ro@got(r5)
mtlr r6
lwz r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
# else
- lwz r5,_rtld_global_ro@got(r5)
+ lwz r5,_dl_hwcap@got(r5)
mtlr r6
lwz r5,0(r5)
# endif
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_ceil.S b/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
index 7924e34648..13afba88f0 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
@@ -29,11 +29,19 @@ ENTRY (__ceil)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ mtlr r11
+ lfs fp13,.LC0-1b@l(r9)
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
lfs fp13,0(r9)
+# endif
#else
lis r9,.LC0@ha
lfs fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S b/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
index 9315d8d2df..f8ca1de08c 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
@@ -20,7 +20,7 @@
#include <sysdep.h>
.section .rodata.cst4,"aM",@progbits,4
- .align 2
+ .align 2
.LC0: /* 2**23 */
.long 0x4b000000
@@ -29,11 +29,19 @@ ENTRY (__ceilf)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ mtlr r11
+ lfs fp13,.LC0-1b@l(r9)
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
lfs fp13,0(r9)
+# endif
#else
lis r9,.LC0@ha
lfs fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_copysign.S b/sysdeps/powerpc/powerpc32/fpu/s_copysign.S
index 933435da3d..ee2c09cba8 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_copysign.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_copysign.S
@@ -38,13 +38,13 @@ L(0): fnabs fp1,fp1
blr
END (__copysign)
-weak_alias(__copysign,copysign)
+weak_alias (__copysign,copysign)
/* It turns out that it's safe to use this code even for single-precision. */
-weak_alias(__copysign,copysignf)
+weak_alias (__copysign,copysignf)
strong_alias(__copysign,__copysignf)
#ifdef NO_LONG_DOUBLE
-weak_alias(__copysign,copysignl)
+weak_alias (__copysign,copysignl)
strong_alias(__copysign,__copysignl)
#endif
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_floor.S b/sysdeps/powerpc/powerpc32/fpu/s_floor.S
index c8f59c24a6..5dfe8f2d9a 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_floor.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_floor.S
@@ -29,11 +29,19 @@ ENTRY (__floor)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ mtlr r11
+ lfs fp13,.LC0-1b@l(r9)
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
lfs fp13,0(r9)
+# endif
#else
lis r9,.LC0@ha
lfs fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_floorf.S b/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
index 8ee0644ac9..31b71ad229 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
@@ -20,7 +20,7 @@
#include <sysdep.h>
.section .rodata.cst4,"aM",@progbits,4
- .align 2
+ .align 2
.LC0: /* 2**23 */
.long 0x4b000000
@@ -29,11 +29,19 @@ ENTRY (__floorf)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ mtlr r11
+ lfs fp13,.LC0-1b@l(r9)
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
lfs fp13,0(r9)
+# endif
#else
lis r9,.LC0@ha
lfs fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_lround.S b/sysdeps/powerpc/powerpc32/fpu/s_lround.S
index 72fd49ba46..a85743164c 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_lround.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_lround.S
@@ -41,9 +41,16 @@
ENTRY (__lround)
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ addi r9,r9,.LC0-1b@l
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
+# endif
mtlr r11
lfs fp12,0(r9)
#else
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_rint.S b/sysdeps/powerpc/powerpc32/fpu/s_rint.S
index 4abdcedfe8..1cfcd78b5c 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_rint.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_rint.S
@@ -31,11 +31,19 @@
ENTRY (__rint)
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ mtlr r11
+ lfs fp13,.LC0-1b@l(r9)
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
lfs fp13,0(r9)
+# endif
#else
lis r9,.LC0@ha
lfs fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_rintf.S b/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
index d02bd066b8..93c02667fe 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
@@ -20,7 +20,7 @@
#include <sysdep.h>
.section .rodata.cst4,"aM",@progbits,4
- .align 2
+ .align 2
.LC0: /* 2**23 */
.long 0x4b000000
@@ -28,11 +28,19 @@
ENTRY (__rintf)
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ mtlr r11
+ lfs fp13,.LC0-1b@l(r9)
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
lfs fp13,0(r9)
+# endif
#else
lis r9,.LC0@ha
lfs fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_round.S b/sysdeps/powerpc/powerpc32/fpu/s_round.S
index 96fc2984fd..53b45916d1 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_round.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_round.S
@@ -41,9 +41,16 @@ ENTRY (__round)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ addi r9,r9,.LC0-1b@l
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
+# endif
mtlr r11
lfs fp13,0(r9)
#else
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_roundf.S b/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
index 87965dea80..39ba08655a 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
@@ -41,9 +41,16 @@ ENTRY (__roundf )
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ addi r9,r9,.LC0-1b@l
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
+# endif
mtlr r11
lfs fp13,0(r9)
#else
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_trunc.S b/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
index 7a3e705a81..827e8cb940 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
@@ -36,11 +36,19 @@ ENTRY (__trunc)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ mtlr r11
+ lfs fp13,.LC0-1b@l(r9)
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
lfs fp13,0(r9)
+# endif
#else
lis r9,.LC0@ha
lfs fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_truncf.S b/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
index 5275c69d29..55e7a74b41 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
@@ -20,7 +20,7 @@
#include <sysdep.h>
.section .rodata.cst4,"aM",@progbits,4
- .align 2
+ .align 2
.LC0: /* 2**23 */
.long 0x4b000000
@@ -36,11 +36,19 @@ ENTRY (__truncf)
mffs fp11 /* Save current FPU rounding mode. */
#ifdef SHARED
mflr r11
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ mtlr r11
+ lfs fp13,.LC0-1b@l(r9)
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r10
lwz r9,.LC0@got(10)
mtlr r11
lfs fp13,0(r9)
+# endif
#else
lis r9,.LC0@ha
lfs fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
index e0c0606da4..cf3f215f2d 100644
--- a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
@@ -74,20 +74,27 @@ ENTRY (BP_SYM (__sigsetjmp))
stw r31,((JB_GPRS+17)*4)(3)
stfd fp31,((JB_FPRS+17*2)*4)(3)
#ifndef __NO_VMX__
-#ifdef PIC
+# ifdef PIC
mflr r6
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r5
+ addis r5,r5,_GLOBAL_OFFSET_TABLE_-1b@ha
+ addi r5,r5,_GLOBAL_OFFSET_TABLE_-1b@l
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr r5
-#ifdef SHARED
+# endif
+# ifdef SHARED
lwz r5,_rtld_global_ro@got(r5)
mtlr r6
lwz r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
-#else
- lwz r5,_rtld_global_ro@got(r5)
+# else
+ lwz r5,_dl_hwcap@got(r5)
mtlr r6
lwz r5,0(r5)
-#endif
-#else
+# endif
+# else
lis r5,_dl_hwcap@ha
lwz r5,_dl_hwcap@l(r5)
#endif
diff --git a/sysdeps/powerpc/powerpc32/memset.S b/sysdeps/powerpc/powerpc32/memset.S
index 4c0edc8e45..f09c294674 100644
--- a/sysdeps/powerpc/powerpc32/memset.S
+++ b/sysdeps/powerpc/powerpc32/memset.S
@@ -264,10 +264,17 @@ L(checklinesize):
beq L(medium)
/* Establishes GOT addressability so we can load __cache_line_size
from static. This value was set from the aux vector during startup. */
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr rGOT
+ addis rGOT,rGOT,__cache_line_size-1b@ha
+ lwz rCLS,__cache_line_size-1b@l(rGOT)
+# else
bl _GLOBAL_OFFSET_TABLE_@local-4
mflr rGOT
lwz rGOT,__cache_line_size@got(rGOT)
lwz rCLS,0(rGOT)
+# endif
mtlr rTMP
#else
/* Load __cache_line_size from static. This value was set from the
diff --git a/sysdeps/powerpc/powerpc32/ppc-mcount.S b/sysdeps/powerpc/powerpc32/ppc-mcount.S
index 314c8ee703..c1a08d379c 100644
--- a/sysdeps/powerpc/powerpc32/ppc-mcount.S
+++ b/sysdeps/powerpc/powerpc32/ppc-mcount.S
@@ -1,5 +1,5 @@
/* PowerPC-specific implementation of profiling support.
- Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1997, 1999, 2005 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,25 +24,19 @@
#include <sysdep.h>
-/* We do profiling as described in the SYSV ELF ABI, _mcount is called
- with the address of a data word in r0 (that is different for every
- routine, initialised to 0, and otherwise unused). The caller has put
- the address the caller will return to in the usual place on the stack,
- 4(r1). _mcount is responsible for ensuring that when it returns no
- argument-passing registers are disturbed, and that the LR is set back
- to (what the caller sees as) 4(r1).
+/* We do profiling as described in the SYSV ELF ABI, except that glibc
+ _mcount manages its own counters. The caller has put the address the
+ caller will return to in the usual place on the stack, 4(r1). _mcount
+ is responsible for ensuring that when it returns no argument-passing
+ registers are disturbed, and that the LR is set back to (what the
+ caller sees as) 4(r1).
This is intended so that the following code can be inserted at the
front of any routine without changing the routine:
.data
- .align 2
- 0: .long 0
- .previous
mflr r0
- lis r11,0b@ha
stw r0,4(r1)
- addi r0,r11,0b@l
bl _mcount
*/
diff --git a/sysdeps/powerpc/powerpc32/sysdep.h b/sysdeps/powerpc/powerpc32/sysdep.h
index 775073f325..552f595a10 100644
--- a/sysdeps/powerpc/powerpc32/sysdep.h
+++ b/sysdeps/powerpc/powerpc32/sysdep.h
@@ -29,31 +29,10 @@
/* The mcount code relies on a the return address being on the stack
to locate our caller and so it can restore it; so store one just
for its benefit. */
-# ifdef PIC
-# define CALL_MCOUNT \
- .pushsection; \
- .section ".data"; \
- .align ALIGNARG(2); \
-0:.long 0; \
- .previous; \
- mflr r0; \
- stw r0,4(r1); \
- bl _GLOBAL_OFFSET_TABLE_@local-4; \
- mflr r11; \
- lwz r0,0b@got(r11); \
- bl JUMPTARGET(_mcount);
-# else /* PIC */
-# define CALL_MCOUNT \
- .section ".data"; \
- .align ALIGNARG(2); \
-0:.long 0; \
- .previous; \
+# define CALL_MCOUNT \
mflr r0; \
- lis r11,0b@ha; \
stw r0,4(r1); \
- addi r0,r11,0b@l; \
bl JUMPTARGET(_mcount);
-# endif /* PIC */
#else /* PROF */
# define CALL_MCOUNT /* Do nothing. */
#endif /* PROF */
diff --git a/sysdeps/powerpc/powerpc64/backtrace.c b/sysdeps/powerpc/powerpc64/backtrace.c
index 8669b6a689..9c8ebbb18d 100644
--- a/sysdeps/powerpc/powerpc64/backtrace.c
+++ b/sysdeps/powerpc/powerpc64/backtrace.c
@@ -1,5 +1,5 @@
/* Return backtrace of current program state.
- Copyright (C) 1998, 2000, 2002 Free Software Foundation, Inc.
+ Copyright (C) 1998, 2000, 2002, 2005 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -67,3 +67,4 @@ __backtrace (void **array, int size)
return count;
}
weak_alias (__backtrace, backtrace)
+libc_hidden_def (__backtrace)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_copysign.S b/sysdeps/powerpc/powerpc64/fpu/s_copysign.S
index a43ed12cf0..f083d2b7ea 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_copysign.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_copysign.S
@@ -39,13 +39,13 @@ L(0): fnabs fp1,fp1
blr
END (__copysign)
-weak_alias(__copysign,copysign)
+weak_alias (__copysign,copysign)
/* It turns out that it's safe to use this code even for single-precision. */
-weak_alias(__copysign,copysignf)
+weak_alias (__copysign,copysignf)
strong_alias(__copysign,__copysignf)
#ifdef NO_LONG_DOUBLE
-weak_alias(__copysign,copysignl)
+weak_alias (__copysign,copysignl)
strong_alias(__copysign,__copysignl)
#endif