Diffstat (limited to 'sysdeps/powerpc/powerpc64')
-rw-r--r--sysdeps/powerpc/powerpc64/970/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/Makefile12
-rw-r--r--sysdeps/powerpc/powerpc64/__longjmp-common.S7
-rw-r--r--sysdeps/powerpc/powerpc64/__longjmp.S2
-rw-r--r--sysdeps/powerpc/powerpc64/a2/memcpy.S10
-rw-r--r--sysdeps/powerpc/powerpc64/addmul_1.S4
-rw-r--r--sysdeps/powerpc/powerpc64/atomic-machine.h36
-rw-r--r--sysdeps/powerpc/powerpc64/backtrace.c21
-rw-r--r--sysdeps/powerpc/powerpc64/be/970/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/Implies-after5
-rw-r--r--sysdeps/powerpc/powerpc64/be/a2/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/cell/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/fpu/Implies (renamed from sysdeps/powerpc/powerpc64/power4/fpu/Implies)0
-rw-r--r--sysdeps/powerpc/powerpc64/be/fpu/multiarch/Implies (renamed from sysdeps/powerpc/powerpc64/power4/fpu/multiarch/Implies)0
-rw-r--r--sysdeps/powerpc/powerpc64/be/multiarch/Implies (renamed from sysdeps/powerpc/powerpc64/power4/multiarch/Implies)0
-rw-r--r--sysdeps/powerpc/powerpc64/be/power4/Implies (renamed from sysdeps/powerpc/powerpc64/power4/Implies)1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power4/fpu/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power4/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power4/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power5+/Implies5
-rw-r--r--sysdeps/powerpc/powerpc64/be/power5+/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/be/power5+/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power5+/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power5/Implies3
-rw-r--r--sysdeps/powerpc/powerpc64/be/power5/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/be/power5/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power5/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power6/Implies3
-rw-r--r--sysdeps/powerpc/powerpc64/be/power6/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/be/power6/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power6/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power6x/Implies3
-rw-r--r--sysdeps/powerpc/powerpc64/be/power6x/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/be/power6x/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power6x/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power7/Implies3
-rw-r--r--sysdeps/powerpc/powerpc64/be/power7/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/be/power7/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power7/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power8/Implies3
-rw-r--r--sysdeps/powerpc/powerpc64/be/power8/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/be/power8/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power8/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power9/Implies3
-rw-r--r--sysdeps/powerpc/powerpc64/be/power9/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/be/power9/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/be/power9/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/bits/wordsize.h3
-rw-r--r--sysdeps/powerpc/powerpc64/bzero.S2
-rw-r--r--sysdeps/powerpc/powerpc64/cell/memcpy.S10
-rw-r--r--sysdeps/powerpc/powerpc64/crti.S4
-rw-r--r--sysdeps/powerpc/powerpc64/crtn.S2
-rw-r--r--sysdeps/powerpc/powerpc64/dl-dtprocnum.h2
-rw-r--r--sysdeps/powerpc/powerpc64/dl-irel.h2
-rw-r--r--sysdeps/powerpc/powerpc64/dl-machine.c31
-rw-r--r--sysdeps/powerpc/powerpc64/dl-machine.h63
-rw-r--r--sysdeps/powerpc/powerpc64/dl-trampoline.S6
-rw-r--r--sysdeps/powerpc/powerpc64/entry.h2
-rw-r--r--sysdeps/powerpc/powerpc64/ffsll.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/Makefile44
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf-power8.S (renamed from sysdeps/powerpc/powerpc64/strtok_r.S)14
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf-ppc64.c21
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf.c32
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot.c3
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf.c3
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-power5+.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-ppc64.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil.c13
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-power5+.S4
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-ppc64.S4
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf.c5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-power6.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-ppc64.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign.c15
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysignf.c5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf-power8.S24
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf-ppc64.c24
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf.c32
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power7.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power8.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite.c29
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef.c16
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-power5+.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-ppc64.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor.c13
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-power5+.S4
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-ppc64.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf.c5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power7.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power8.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf.c31
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff.c16
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power5.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6x.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power7.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power8.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-ppc64.S16
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan.c41
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnanf.c27
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power6x.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power8.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-ppc64.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint.c21
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrintf.c47
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power5+.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power6x.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power8.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-ppc64.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround.c23
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llroundf-ppc64.S31
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_llroundf.c47
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-ppc64.c7
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb.c14
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf.c5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-power5+.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-ppc64.c7
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf.c15
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-power5+.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff.c5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-power5+.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-ppc64.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_round.c13
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-power5+.S4
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-ppc64.S4
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf.c5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf-power8.S24
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf-ppc64.c24
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf.c32
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-power5+.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-ppc64.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc.c13
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-power5+.S4
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-ppc64.S4
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf.c5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/multiarch/w_expf.c1
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_ceil.S32
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_ceilf.S24
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_copysign.S20
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_copysignl.S14
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_fabs.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_fabsl.S9
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_fdim.c5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_floor.S32
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_floorf.S24
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_fma.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_fmax.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_fmin.S5
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_isnan.S4
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_llrint.S28
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_llrintf.S37
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_llround.S18
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_llroundf.S7
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_nearbyint.S23
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_nearbyintf.S15
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_rint.S23
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_rintf.S15
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_round.S23
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_roundf.S15
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_trunc.S32
-rw-r--r--sysdeps/powerpc/powerpc64/fpu/s_truncf.S24
-rw-r--r--sysdeps/powerpc/powerpc64/hp-timing.h2
-rw-r--r--sysdeps/powerpc/powerpc64/le/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/le/Implies-before6
-rw-r--r--sysdeps/powerpc/powerpc64/le/Makefile84
-rw-r--r--sysdeps/powerpc/powerpc64/le/configure75
-rw-r--r--sysdeps/powerpc/powerpc64/le/configure.ac48
-rw-r--r--sysdeps/powerpc/powerpc64/le/fpu/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/le/fpu/e_sqrtf128.c56
-rw-r--r--sysdeps/powerpc/powerpc64/le/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/le/fpu/multiarch/Makefile6
-rw-r--r--sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128-power9.c35
-rw-r--r--sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128-ppc64le.c35
-rw-r--r--sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128.c31
-rw-r--r--sysdeps/powerpc/powerpc64/le/fpu/sfp-machine.h115
-rw-r--r--sysdeps/powerpc/powerpc64/le/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/le/power7/Implies13
-rw-r--r--sysdeps/powerpc/powerpc64/le/power7/fpu/Implies5
-rw-r--r--sysdeps/powerpc/powerpc64/le/power7/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/le/power7/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/le/power8/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/le/power8/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/le/power8/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/le/power8/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/le/power9/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/le/power9/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/le/power9/fpu/e_sqrtf128.c36
-rw-r--r--sysdeps/powerpc/powerpc64/le/power9/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/le/power9/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/lshift.S4
-rw-r--r--sysdeps/powerpc/powerpc64/memcpy.S12
-rw-r--r--sysdeps/powerpc/powerpc64/memset.S12
-rw-r--r--sysdeps/powerpc/powerpc64/mul_1.S4
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/Makefile36
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/bcopy-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/bcopy.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/bzero.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c64
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/init-arch.h2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memchr-power7.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memchr-power8.S26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memchr-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memchr.c5
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcmp-power4.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcmp-power7.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcmp-power8.S26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcmp-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcmp.c19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcpy-a2.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcpy-cell.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcpy-power4.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcpy-power6.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcpy-power7.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcpy-power8-cached.S176
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcpy-ppc64.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memcpy.c25
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memmove-power7.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memmove-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memmove.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/mempcpy-power7.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/mempcpy-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/mempcpy.c16
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memrchr-power7.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memrchr-power8.S26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memrchr-ppc64.c3
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memrchr.c11
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memset-power4.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memset-power6.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memset-power7.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memset-power8.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memset-ppc64.S18
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/memset.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/rawmemchr-power7.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/rawmemchr-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/rawmemchr.c12
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/rtld-memset.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/rtld-strchr.S2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/stpcpy-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/stpcpy-power8.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/stpcpy-ppc64.c6
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/stpcpy.c16
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/stpncpy-power7.S23
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/stpncpy-power8.S26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/stpncpy-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/stpncpy.c19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcasecmp-power7.S22
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcasecmp-power8.S26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcasecmp-ppc64.c21
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcasecmp.c32
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l-power7.S21
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcasestr-power8.S33
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcasestr-ppc64.c34
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcasestr.c37
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcat-power7.c4
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcat-power8.c8
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcat-ppc64.c4
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcat.c16
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strchr-power7.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strchr-power8.S24
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strchr-ppc64.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strchr.c17
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strchrnul-power7.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strchrnul-power8.S24
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strchrnul-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strchrnul.c5
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcmp-power7.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S22
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcmp-power9.S26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcmp-ppc64.S22
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcmp.c22
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcpy-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcpy-power8.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcpy-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcpy.c16
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcspn-power8.S23
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcspn-ppc64.c26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strcspn.c35
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strlen-power7.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strlen-power8.S24
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strlen-ppc64.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strlen.c11
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncase-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncase-power8.S26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncase-ppc64.c21
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncase.c27
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncase_l-power7.c8
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncase_l.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncat-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncat-power8.c31
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncat-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncat.c7
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncmp-power4.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncmp-power7.S21
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncmp-power8.S23
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncmp-power9.S25
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncmp-ppc64.S20
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncmp.c25
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncpy-power7.S23
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncpy-power8.S26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncpy-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strncpy.c18
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strnlen-power7.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strnlen-power8.S26
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strnlen-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strnlen.c19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strrchr-power7.S19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strrchr-power8.S24
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strrchr-ppc64.c8
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strrchr.c15
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strspn-power8.S23
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strspn-ppc64.c25
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strspn.c35
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strstr-power7.S25
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strstr-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/strstr.c12
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcschr-power6.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcschr-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcschr-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcschr.c19
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcscpy-power6.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcscpy-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcscpy-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcscpy.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power6.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power7.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcsrchr-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wcsrchr.c2
-rw-r--r--sysdeps/powerpc/powerpc64/multiarch/wordcopy-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/power4/memcmp.S22
-rw-r--r--sysdeps/powerpc/powerpc64/power4/memcpy.S11
-rw-r--r--sysdeps/powerpc/powerpc64/power4/memset.S13
-rw-r--r--sysdeps/powerpc/powerpc64/power4/strncmp.S10
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/Implies4
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S15
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S7
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S15
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S7
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_llround.S28
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_llroundf.S1
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S15
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S7
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S15
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S7
-rw-r--r--sysdeps/powerpc/powerpc64/power5+/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power5/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/power5/fpu/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power5/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power5/fpu/s_isnan.S4
-rw-r--r--sysdeps/powerpc/powerpc64/power5/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power6/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/power6/fpu/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power6/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power6/fpu/s_copysign.S23
-rw-r--r--sysdeps/powerpc/powerpc64/power6/fpu/s_isnan.S4
-rw-r--r--sysdeps/powerpc/powerpc64/power6/memcpy.S11
-rw-r--r--sysdeps/powerpc/powerpc64/power6/memset.S15
-rw-r--r--sysdeps/powerpc/powerpc64/power6/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power6x/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/power6x/fpu/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power6x/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power6x/fpu/s_isnan.S4
-rw-r--r--sysdeps/powerpc/powerpc64/power6x/fpu/s_llrint.S28
-rw-r--r--sysdeps/powerpc/powerpc64/power6x/fpu/s_llround.S28
-rw-r--r--sysdeps/powerpc/powerpc64/power6x/fpu/s_llroundf.S1
-rw-r--r--sysdeps/powerpc/powerpc64/power6x/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power7/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/power7/add_n.S12
-rw-r--r--sysdeps/powerpc/powerpc64/power7/fpu/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power7/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power7/fpu/s_finite.S4
-rw-r--r--sysdeps/powerpc/powerpc64/power7/fpu/s_isinf.S4
-rw-r--r--sysdeps/powerpc/powerpc64/power7/fpu/s_isnan.S4
-rw-r--r--sysdeps/powerpc/powerpc64/power7/memchr.S22
-rw-r--r--sysdeps/powerpc/powerpc64/power7/memcmp.S32
-rw-r--r--sysdeps/powerpc/powerpc64/power7/memcpy.S76
-rw-r--r--sysdeps/powerpc/powerpc64/power7/memmove.S139
-rw-r--r--sysdeps/powerpc/powerpc64/power7/mempcpy.S11
-rw-r--r--sysdeps/powerpc/powerpc64/power7/memrchr.S10
-rw-r--r--sysdeps/powerpc/powerpc64/power7/memset.S13
-rw-r--r--sysdeps/powerpc/powerpc64/power7/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power7/rawmemchr.S10
-rw-r--r--sysdeps/powerpc/powerpc64/power7/stpncpy.S2
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strcasecmp.S7
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strchr.S10
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strchrnul.S13
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strcmp.S10
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strlen.S10
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strncmp.S10
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strncpy.S48
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strnlen.S10
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strrchr.S11
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strstr-ppc64.c2
-rw-r--r--sysdeps/powerpc/powerpc64/power7/strstr.S32
-rw-r--r--sysdeps/powerpc/powerpc64/power7/sub_n.S2
-rw-r--r--sysdeps/powerpc/powerpc64/power8/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/power8/Makefile3
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/e_expf.S303
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/s_cosf.S509
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/s_finite.S4
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/s_isinf.S4
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/s_isnan.S4
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/s_llrint.S28
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S28
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/s_llroundf.S1
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/s_sinf.S520
-rw-r--r--sysdeps/powerpc/powerpc64/power8/fpu/w_expf.c1
-rw-r--r--sysdeps/powerpc/powerpc64/power8/memchr.S335
-rw-r--r--sysdeps/powerpc/powerpc64/power8/memcmp.S1447
-rw-r--r--sysdeps/powerpc/powerpc64/power8/memrchr.S345
-rw-r--r--sysdeps/powerpc/powerpc64/power8/memset.S84
-rw-r--r--sysdeps/powerpc/powerpc64/power8/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power8/stpcpy.S2
-rw-r--r--sysdeps/powerpc/powerpc64/power8/stpncpy.S6
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strcasecmp.S457
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strcasestr-ppc64.c29
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strcasestr.S538
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strchr.S377
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strchrnul.S23
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strcmp.S40
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strcpy.S167
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strcspn.S20
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strlen.S290
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strncase.S20
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strncmp.S10
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strncpy.S176
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strnlen.S425
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strrchr.S468
-rw-r--r--sysdeps/powerpc/powerpc64/power8/strspn.S202
-rw-r--r--sysdeps/powerpc/powerpc64/power9/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/power9/fpu/Implies2
-rw-r--r--sysdeps/powerpc/powerpc64/power9/fpu/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power9/multiarch/Implies1
-rw-r--r--sysdeps/powerpc/powerpc64/power9/strcmp.S268
-rw-r--r--sysdeps/powerpc/powerpc64/power9/strncmp.S379
-rw-r--r--sysdeps/powerpc/powerpc64/ppc-mcount.S6
-rw-r--r--sysdeps/powerpc/powerpc64/register-dump.h2
-rw-r--r--sysdeps/powerpc/powerpc64/setjmp-bug21895.c51
-rw-r--r--sysdeps/powerpc/powerpc64/setjmp-common.S13
-rw-r--r--sysdeps/powerpc/powerpc64/setjmp.S2
-rw-r--r--sysdeps/powerpc/powerpc64/start.S6
-rw-r--r--sysdeps/powerpc/powerpc64/strchr.S10
-rw-r--r--sysdeps/powerpc/powerpc64/strcmp.S10
-rw-r--r--sysdeps/powerpc/powerpc64/strcspn.S127
-rw-r--r--sysdeps/powerpc/powerpc64/strlen.S10
-rw-r--r--sysdeps/powerpc/powerpc64/strncmp.S10
-rw-r--r--sysdeps/powerpc/powerpc64/strpbrk.S135
-rw-r--r--sysdeps/powerpc/powerpc64/strspn.S144
-rw-r--r--sysdeps/powerpc/powerpc64/strtok.S226
-rw-r--r--sysdeps/powerpc/powerpc64/submul_1.S2
-rw-r--r--sysdeps/powerpc/powerpc64/sysdep.h222
-rw-r--r--sysdeps/powerpc/powerpc64/tls-macros.h6
-rw-r--r--sysdeps/powerpc/powerpc64/tst-audit.h2
-rw-r--r--sysdeps/powerpc/powerpc64/tst-setjmp-bug21895-static.c75
474 files changed, 10986 insertions(+), 2978 deletions(-)
diff --git a/sysdeps/powerpc/powerpc64/970/Implies b/sysdeps/powerpc/powerpc64/970/Implies
deleted file mode 100644
index bedb20b65c..0000000000
--- a/sysdeps/powerpc/powerpc64/970/Implies
+++ /dev/null
@@ -1,2 +0,0 @@
-powerpc/powerpc64/power4/fpu
-powerpc/powerpc64/power4
diff --git a/sysdeps/powerpc/powerpc64/Makefile b/sysdeps/powerpc/powerpc64/Makefile
index 9d15db0328..a0bd0c9504 100644
--- a/sysdeps/powerpc/powerpc64/Makefile
+++ b/sysdeps/powerpc/powerpc64/Makefile
@@ -47,3 +47,15 @@ ifeq ($(subdir),gmon)
CFLAGS-mcount.c += $(no-special-regs)
sysdep_routines += ppc-mcount
endif
+
+ifeq ($(subdir),setjmp)
+tests += tst-setjmp-bug21895-static
+tests-static += tst-setjmp-bug21895-static
+modules-names += setjmp-bug21895
+
+$(objpfx)tst-setjmp-bug21895-static: $(common-objpfx)dlfcn/libdl.a
+$(objpfx)tst-setjmp-bug21895-static.out: $(objpfx)setjmp-bug21895.so
+
+tst-setjmp-bug21895-static-ENV = \
+ LD_LIBRARY_PATH=$(objpfx):$(common-objpfx):$(common-objpfx)setjmp:$(common-objpfx)elf
+endif
diff --git a/sysdeps/powerpc/powerpc64/__longjmp-common.S b/sysdeps/powerpc/powerpc64/__longjmp-common.S
index ff5d4cfa1f..99c17c5797 100644
--- a/sysdeps/powerpc/powerpc64/__longjmp-common.S
+++ b/sysdeps/powerpc/powerpc64/__longjmp-common.S
@@ -1,5 +1,5 @@
/* longjmp for PowerPC64.
- Copyright (C) 1995-2016 Free Software Foundation, Inc.
+ Copyright (C) 1995-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -130,9 +130,6 @@ L(no_vmx):
ld r0,(JB_LR*8)(r3)
ld r14,((JB_GPRS+0)*8)(r3)
lfd fp14,((JB_FPRS+0)*8)(r3)
-#if defined SHARED && !IS_IN (rtld)
- std r2,FRAME_TOC_SAVE(r1) /* Restore the callers TOC save area. */
-#endif
ld r15,((JB_GPRS+1)*8)(r3)
lfd fp15,((JB_FPRS+1)*8)(r3)
ld r16,((JB_GPRS+2)*8)(r3)
@@ -152,7 +149,7 @@ L(no_vmx):
second argument (-4@4), and target address (8@0), respectively. */
LIBC_PROBE (longjmp, 3, 8@3, -4@4, 8@0)
mtlr r0
-/* std r2,FRAME_TOC_SAVE(r1) Restore the TOC save area. */
+ std r2,FRAME_TOC_SAVE(r1) /* Restore the TOC save area. */
ld r21,((JB_GPRS+7)*8)(r3)
lfd fp21,((JB_FPRS+7)*8)(r3)
ld r22,((JB_GPRS+8)*8)(r3)
diff --git a/sysdeps/powerpc/powerpc64/__longjmp.S b/sysdeps/powerpc/powerpc64/__longjmp.S
index 029e089d67..c84b420686 100644
--- a/sysdeps/powerpc/powerpc64/__longjmp.S
+++ b/sysdeps/powerpc/powerpc64/__longjmp.S
@@ -1,5 +1,5 @@
/* AltiVec/VMX (new) version of __longjmp for PowerPC64.
- Copyright (C) 1995-2016 Free Software Foundation, Inc.
+ Copyright (C) 1995-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/a2/memcpy.S b/sysdeps/powerpc/powerpc64/a2/memcpy.S
index 31a0b5dabe..488ab6dde3 100644
--- a/sysdeps/powerpc/powerpc64/a2/memcpy.S
+++ b/sysdeps/powerpc/powerpc64/a2/memcpy.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC A2.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Michael Brutman <brutman@us.ibm.com>.
This file is part of the GNU C Library.
@@ -19,6 +19,10 @@
#include <sysdep.h>
+#ifndef MEMCPY
+# define MEMCPY memcpy
+#endif
+
#define PREFETCH_AHEAD 4 /* no cache lines SRC prefetching ahead */
#define ZERO_AHEAD 2 /* no cache lines DST zeroing ahead */
@@ -30,7 +34,7 @@
.machine a2
-EALIGN (memcpy, 5, 0)
+ENTRY (MEMCPY, 5)
CALL_MCOUNT 3
dcbt 0,r4 /* Prefetch ONE SRC cacheline */
@@ -520,5 +524,5 @@ L(endloop2_128):
b L(lessthancacheline)
-END_GEN_TB (memcpy,TB_TOCLESS)
+END_GEN_TB (MEMCPY,TB_TOCLESS)
libc_hidden_builtin_def (memcpy)
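
a2/memcpy.S (and, later in this patch, cell/memcpy.S) now assembles under a MEMCPY macro instead of the hard-coded memcpy name, so the multiarch build can emit the same code under a CPU-specific symbol. The wrapper files listed in the diffstat (memcpy-a2.S, memcpy-cell.S) are not shown in this section, so the following sketch of the usual glibc wrapper pattern is an assumption about their shape, not their literal contents:

/* Hypothetical multiarch wrapper (symbol name assumed, not taken from
   this patch): override the entry-point name, then pull in the shared
   implementation.  */
#define MEMCPY __memcpy_a2
/* Real wrappers typically also neutralize libc_hidden_builtin_def so the
   included file's trailing libc_hidden_builtin_def (memcpy) does not
   redefine the public symbol.  */
#include <sysdeps/powerpc/powerpc64/a2/memcpy.S>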
diff --git a/sysdeps/powerpc/powerpc64/addmul_1.S b/sysdeps/powerpc/powerpc64/addmul_1.S
index f222e2c6a4..48e3b1b290 100644
--- a/sysdeps/powerpc/powerpc64/addmul_1.S
+++ b/sysdeps/powerpc/powerpc64/addmul_1.S
@@ -1,6 +1,6 @@
/* PowerPC64 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
the result to a second limb vector.
- Copyright (C) 1999-2016 Free Software Foundation, Inc.
+ Copyright (C) 1999-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -34,7 +34,7 @@
#define N r5
#define VL r6
-EALIGN(FUNC, 5, 0)
+ENTRY_TOCLESS (FUNC, 5)
std r31, -8(r1)
rldicl. r0, N, 0, 62
std r30, -16(r1)
diff --git a/sysdeps/powerpc/powerpc64/atomic-machine.h b/sysdeps/powerpc/powerpc64/atomic-machine.h
index 751487a3a7..1f09c52bd2 100644
--- a/sysdeps/powerpc/powerpc64/atomic-machine.h
+++ b/sysdeps/powerpc/powerpc64/atomic-machine.h
@@ -1,5 +1,5 @@
/* Atomic operations. PowerPC64 version.
- Copyright (C) 2003-2016 Free Software Foundation, Inc.
+ Copyright (C) 2003-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -35,6 +35,7 @@
#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0
+#define ATOMIC_EXCHANGE_USES_CAS 1
/* The 32-bit exchange_bool is different on powerpc64 because the subf
does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
@@ -58,23 +59,6 @@
__tmp != 0; \
})
-#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
-({ \
- unsigned int __tmp, __tmp2; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- " clrldi %1,%1,32\n" \
- "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \
- " subf. %0,%1,%0\n" \
- " bne 2f\n" \
- " stwcx. %4,0,%2\n" \
- " bne- 1b\n" \
- "2: " \
- : "=&r" (__tmp), "=r" (__tmp2) \
- : "b" (mem), "1" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp != 0; \
-})
-
/*
* Only powerpc64 processors support Load doubleword and reserve index (ldarx)
* and Store doubleword conditional indexed (stdcx) instructions. So here
@@ -96,22 +80,6 @@
__tmp != 0; \
})
-#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
-({ \
- unsigned long __tmp; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: ldarx %0,0,%1" MUTEX_HINT_REL "\n" \
- " subf. %0,%2,%0\n" \
- " bne 2f\n" \
- " stdcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " \
- : "=&r" (__tmp) \
- : "b" (mem), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp != 0; \
-})
-
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ \
__typeof (*(mem)) __tmp; \
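
Besides the copyright bump, atomic-machine.h gains ATOMIC_EXCHANGE_USES_CAS and drops the release-ordered compare-and-exchange-bool macros, which no longer had users. The acquire forms that remain keep the convention visible in the asm above: the expansion evaluates to nonzero when the value at mem did not match oldval (the store-conditional never ran), and zero on success. As an illustration of that boolean convention only — the glibc macros stay implemented with lwarx/stwcx. and ldarx/stdcx., not with compiler builtins (USE_ATOMIC_COMPILER_BUILTINS is 0) — an equivalent written with a GCC builtin looks like:

/* Illustration of the convention used by the
   __arch_compare_and_exchange_bool_*_acq macros: nonzero on failure
   (value at MEM did not equal OLDVAL), zero on success.  */
#include <stdint.h>

static inline int
cas_bool_acq (uint64_t *mem, uint64_t newval, uint64_t oldval)
{
  uint64_t expected = oldval;
  /* __atomic_compare_exchange_n returns true when the exchange happened.  */
  int ok = __atomic_compare_exchange_n (mem, &expected, newval, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  return !ok;   /* Nonzero means failure, matching the macro's convention.  */
}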
diff --git a/sysdeps/powerpc/powerpc64/backtrace.c b/sysdeps/powerpc/powerpc64/backtrace.c
index edd53bb619..c0c4b48262 100644
--- a/sysdeps/powerpc/powerpc64/backtrace.c
+++ b/sysdeps/powerpc/powerpc64/backtrace.c
@@ -1,5 +1,5 @@
/* Return backtrace of current program state.
- Copyright (C) 1998-2016 Free Software Foundation, Inc.
+ Copyright (C) 1998-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,10 +16,12 @@
License along with the GNU C Library; see the file COPYING.LIB. If
not, see <http://www.gnu.org/licenses/>. */
-#include <execinfo.h>
#include <stddef.h>
#include <string.h>
#include <signal.h>
+#include <stdint.h>
+
+#include <execinfo.h>
#include <libc-vdso.h>
/* This is the stack layout we see with every stack frame.
@@ -37,7 +39,7 @@
struct layout
{
struct layout *next;
- long condition_register;
+ long int condition_register;
void *return_address;
};
@@ -47,16 +49,16 @@ struct layout
dummy frame to make it look like it has a caller. */
struct signal_frame_64 {
#define SIGNAL_FRAMESIZE 128
- char dummy[SIGNAL_FRAMESIZE];
- struct ucontext uc;
+ char dummy[SIGNAL_FRAMESIZE];
+ ucontext_t uc;
/* We don't care about the rest, since the IP value is at 'uc' field. */
};
static inline int
-is_sigtramp_address (unsigned long nip)
+is_sigtramp_address (void *nip)
{
#ifdef SHARED
- if (nip == (unsigned long)__vdso_sigtramp_rt64)
+ if (nip == VDSO_SYMBOL (sigtramp_rt64))
return 1;
#endif
return 0;
@@ -82,10 +84,11 @@ __backtrace (void **array, int size)
/* Check if the symbol is the signal trampoline and get the interrupted
* symbol address from the trampoline saved area. */
- if (is_sigtramp_address ((unsigned long)current->return_address))
+ if (is_sigtramp_address (current->return_address))
{
struct signal_frame_64 *sigframe = (struct signal_frame_64*) current;
- array[++count] = (void*)sigframe->uc.uc_mcontext.gp_regs[PT_NIP];
+ array[++count] = (void*) sigframe->uc.uc_mcontext.gp_regs[PT_NIP];
+ current = (void*) sigframe->uc.uc_mcontext.gp_regs[PT_R1];
}
}
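
The backtrace.c changes keep the walk itself the same — follow the back chain, record each frame's saved LR — and adjust two things: the trampoline check now compares the return address against the VDSO symbol through VDSO_SYMBOL, and once a signal frame is recognized the walk resumes from the interrupted stack pointer (PT_R1) taken from the embedded ucontext_t, so the frames active when the signal arrived are picked up from the right place. A reduced sketch of the plain (non-signal) part of the walk, reusing the struct layout from the hunk above; reading r1 with inline asm here is an illustrative shortcut, not a claim about the surrounding glibc code:

/* Reduced sketch of the powerpc64 frame walk (signal-frame handling
   omitted).  Each frame's first doubleword is the back chain pointer to
   the caller's frame; the saved LR sits two doublewords in.  */
#include <stddef.h>

struct layout
{
  struct layout *next;          /* Back chain: caller's frame.  */
  long int condition_register;
  void *return_address;         /* LR saved by the callee.  */
};

static int
backtrace_sketch (void **array, int size)
{
  struct layout *current;
  int count;

  /* r1 is the stack pointer on powerpc64.  */
  asm volatile ("mr %0,1" : "=r" (current));

  /* Step past this function's own frame before recording anything.  */
  if (current != NULL)
    current = current->next;

  for (count = 0; current != NULL && count < size;
       current = current->next, count++)
    array[count] = current->return_address;

  return count;
}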
diff --git a/sysdeps/powerpc/powerpc64/be/970/Implies b/sysdeps/powerpc/powerpc64/be/970/Implies
new file mode 100644
index 0000000000..ac431fa96e
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/970/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/power4
diff --git a/sysdeps/powerpc/powerpc64/be/Implies b/sysdeps/powerpc/powerpc64/be/Implies
new file mode 100644
index 0000000000..a105a325f7
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64
diff --git a/sysdeps/powerpc/powerpc64/be/Implies-after b/sysdeps/powerpc/powerpc64/be/Implies-after
new file mode 100644
index 0000000000..78dba9510c
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/Implies-after
@@ -0,0 +1,5 @@
+# On PowerPC we use the IBM extended long double format.
+ieee754/ldbl-128ibm
+ieee754/ldbl-opt
+ieee754/dbl-64
+ieee754/flt-32
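
The Implies-after file points big-endian powerpc64 at the IBM extended ("double-double") long double directories. For application code this is mostly visible through <float.h>; the expected values below assume a powerpc64 target built with the default -mlong-double-128 (they differ on other targets, which is exactly what the per-port Implies files select for):

/* Properties of the IBM extended long double format selected above:
   two IEEE doubles back to back, 106 mantissa bits in total.  */
#include <float.h>
#include <stdio.h>

int
main (void)
{
  printf ("sizeof (long double): %zu\n", sizeof (long double));  /* 16 */
  printf ("LDBL_MANT_DIG:        %d\n", LDBL_MANT_DIG);          /* 106 */
  printf ("LDBL_DIG:             %d\n", LDBL_DIG);                /* 31 */
  return 0;
}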
diff --git a/sysdeps/powerpc/powerpc64/be/a2/Implies b/sysdeps/powerpc/powerpc64/be/a2/Implies
new file mode 100644
index 0000000000..6c02123791
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/a2/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/a2
diff --git a/sysdeps/powerpc/powerpc64/be/cell/Implies b/sysdeps/powerpc/powerpc64/be/cell/Implies
new file mode 100644
index 0000000000..d6b89b15d3
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/cell/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/cell
diff --git a/sysdeps/powerpc/powerpc64/power4/fpu/Implies b/sysdeps/powerpc/powerpc64/be/fpu/Implies
index c1f617b7da..c1f617b7da 100644
--- a/sysdeps/powerpc/powerpc64/power4/fpu/Implies
+++ b/sysdeps/powerpc/powerpc64/be/fpu/Implies
diff --git a/sysdeps/powerpc/powerpc64/power4/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/fpu/multiarch/Implies
index 8d6531a174..8d6531a174 100644
--- a/sysdeps/powerpc/powerpc64/power4/fpu/multiarch/Implies
+++ b/sysdeps/powerpc/powerpc64/be/fpu/multiarch/Implies
diff --git a/sysdeps/powerpc/powerpc64/power4/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/multiarch/Implies
index 30edcf7f9d..30edcf7f9d 100644
--- a/sysdeps/powerpc/powerpc64/power4/multiarch/Implies
+++ b/sysdeps/powerpc/powerpc64/be/multiarch/Implies
diff --git a/sysdeps/powerpc/powerpc64/power4/Implies b/sysdeps/powerpc/powerpc64/be/power4/Implies
index a372141bb7..6d067e28ec 100644
--- a/sysdeps/powerpc/powerpc64/power4/Implies
+++ b/sysdeps/powerpc/powerpc64/be/power4/Implies
@@ -1,2 +1,3 @@
+powerpc/powerpc64/power4
powerpc/power4/fpu
powerpc/power4
diff --git a/sysdeps/powerpc/powerpc64/be/power4/fpu/Implies b/sysdeps/powerpc/powerpc64/be/power4/fpu/Implies
new file mode 100644
index 0000000000..c1f617b7da
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power4/fpu/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/fpu
diff --git a/sysdeps/powerpc/powerpc64/be/power4/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power4/fpu/multiarch/Implies
new file mode 100644
index 0000000000..8d6531a174
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power4/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power4/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power4/multiarch/Implies
new file mode 100644
index 0000000000..30edcf7f9d
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power4/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power5+/Implies b/sysdeps/powerpc/powerpc64/be/power5+/Implies
new file mode 100644
index 0000000000..03852149d4
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power5+/Implies
@@ -0,0 +1,5 @@
+powerpc/powerpc64/power5+
+powerpc/power5+/fpu
+powerpc/power5+
+powerpc/powerpc64/be/power5/fpu
+powerpc/powerpc64/be/power5
diff --git a/sysdeps/powerpc/powerpc64/be/power5+/fpu/Implies b/sysdeps/powerpc/powerpc64/be/power5+/fpu/Implies
new file mode 100644
index 0000000000..25f20b1d8e
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power5+/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power5+/fpu
+powerpc/powerpc64/be/power5/fpu
diff --git a/sysdeps/powerpc/powerpc64/be/power5+/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power5+/fpu/multiarch/Implies
new file mode 100644
index 0000000000..5985565e0f
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power5+/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power5/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power5+/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power5+/multiarch/Implies
new file mode 100644
index 0000000000..818920343d
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power5+/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power5/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power5/Implies b/sysdeps/powerpc/powerpc64/be/power5/Implies
new file mode 100644
index 0000000000..e50a23b357
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power5/Implies
@@ -0,0 +1,3 @@
+powerpc/powerpc64/power5
+powerpc/powerpc64/be/power4/fpu
+powerpc/powerpc64/be/power4
diff --git a/sysdeps/powerpc/powerpc64/be/power5/fpu/Implies b/sysdeps/powerpc/powerpc64/be/power5/fpu/Implies
new file mode 100644
index 0000000000..58ab3b7abb
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power5/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power5/fpu
+powerpc/powerpc64/be/power4/fpu/
diff --git a/sysdeps/powerpc/powerpc64/be/power5/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power5/fpu/multiarch/Implies
new file mode 100644
index 0000000000..f2fffcb96f
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power5/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power4/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power5/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power5/multiarch/Implies
new file mode 100644
index 0000000000..a9cda70bcd
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power5/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power4/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power6/Implies b/sysdeps/powerpc/powerpc64/be/power6/Implies
new file mode 100644
index 0000000000..c0e0be57b6
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power6/Implies
@@ -0,0 +1,3 @@
+powerpc/powerpc64/power6
+powerpc/powerpc64/be/power5+/fpu
+powerpc/powerpc64/be/power5+
diff --git a/sysdeps/powerpc/powerpc64/be/power6/fpu/Implies b/sysdeps/powerpc/powerpc64/be/power6/fpu/Implies
new file mode 100644
index 0000000000..a16a96e9eb
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power6/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power6/fpu
+powerpc/powerpc64/be/power5+/fpu
diff --git a/sysdeps/powerpc/powerpc64/be/power6/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power6/fpu/multiarch/Implies
new file mode 100644
index 0000000000..ef0f432215
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power6/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power5+/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power6/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power6/multiarch/Implies
new file mode 100644
index 0000000000..9510043332
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power6/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power5+/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power6x/Implies b/sysdeps/powerpc/powerpc64/be/power6x/Implies
new file mode 100644
index 0000000000..018c999e10
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power6x/Implies
@@ -0,0 +1,3 @@
+powerpc/powerpc64/power6x
+powerpc/powerpc64/be/power6/fpu
+powerpc/powerpc64/be/power6
diff --git a/sysdeps/powerpc/powerpc64/be/power6x/fpu/Implies b/sysdeps/powerpc/powerpc64/be/power6x/fpu/Implies
new file mode 100644
index 0000000000..4d1744c0b4
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power6x/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power6x/fpu
+powerpc/powerpc64/be/power6/fpu
diff --git a/sysdeps/powerpc/powerpc64/be/power6x/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power6x/fpu/multiarch/Implies
new file mode 100644
index 0000000000..6da6f27f08
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power6x/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power6/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power6x/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power6x/multiarch/Implies
new file mode 100644
index 0000000000..86cbf9e528
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power6x/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power6/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power7/Implies b/sysdeps/powerpc/powerpc64/be/power7/Implies
new file mode 100644
index 0000000000..b1853f1873
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power7/Implies
@@ -0,0 +1,3 @@
+powerpc/powerpc64/power7
+powerpc/powerpc64/be/power6/fpu
+powerpc/powerpc64/be/power6
diff --git a/sysdeps/powerpc/powerpc64/be/power7/fpu/Implies b/sysdeps/powerpc/powerpc64/be/power7/fpu/Implies
new file mode 100644
index 0000000000..33830fe120
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power7/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power7/fpu
+powerpc/powerpc64/be/power6/fpu
diff --git a/sysdeps/powerpc/powerpc64/be/power7/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power7/fpu/multiarch/Implies
new file mode 100644
index 0000000000..6da6f27f08
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power7/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power6/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power7/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power7/multiarch/Implies
new file mode 100644
index 0000000000..86cbf9e528
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power7/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power6/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power8/Implies b/sysdeps/powerpc/powerpc64/be/power8/Implies
new file mode 100644
index 0000000000..cdaa47e809
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power8/Implies
@@ -0,0 +1,3 @@
+powerpc/powerpc64/power8
+powerpc/powerpc64/be/power7/fpu
+powerpc/powerpc64/be/power7
diff --git a/sysdeps/powerpc/powerpc64/be/power8/fpu/Implies b/sysdeps/powerpc/powerpc64/be/power8/fpu/Implies
new file mode 100644
index 0000000000..72e9f54efc
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power8/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power8/fpu
+powerpc/powerpc64/be/power7/fpu/
diff --git a/sysdeps/powerpc/powerpc64/be/power8/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power8/fpu/multiarch/Implies
new file mode 100644
index 0000000000..babe3de793
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power8/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power7/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power8/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power8/multiarch/Implies
new file mode 100644
index 0000000000..8ec7fa2c54
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power8/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power7/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power9/Implies b/sysdeps/powerpc/powerpc64/be/power9/Implies
new file mode 100644
index 0000000000..9b40c0f58c
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power9/Implies
@@ -0,0 +1,3 @@
+powerpc/powerpc64/power9
+powerpc/powerpc64/be/power8/fpu
+powerpc/powerpc64/be/power8
diff --git a/sysdeps/powerpc/powerpc64/be/power9/fpu/Implies b/sysdeps/powerpc/powerpc64/be/power9/fpu/Implies
new file mode 100644
index 0000000000..bdec0f9295
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power9/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power9/fpu
+powerpc/powerpc64/be/power8/fpu
diff --git a/sysdeps/powerpc/powerpc64/be/power9/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power9/fpu/multiarch/Implies
new file mode 100644
index 0000000000..93e2a02716
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power9/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power8/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/be/power9/multiarch/Implies b/sysdeps/powerpc/powerpc64/be/power9/multiarch/Implies
new file mode 100644
index 0000000000..8bea6abe00
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/be/power9/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/be/power8/multiarch
diff --git a/sysdeps/powerpc/powerpc64/bits/wordsize.h b/sysdeps/powerpc/powerpc64/bits/wordsize.h
index f31ac9ab08..04ca9debf0 100644
--- a/sysdeps/powerpc/powerpc64/bits/wordsize.h
+++ b/sysdeps/powerpc/powerpc64/bits/wordsize.h
@@ -5,4 +5,7 @@
# define __WORDSIZE_TIME64_COMPAT32 1
#else
# define __WORDSIZE 32
+# define __WORDSIZE_TIME64_COMPAT32 0
+# define __WORDSIZE32_SIZE_ULONG 0
+# define __WORDSIZE32_PTRDIFF_LONG 0
#endif
diff --git a/sysdeps/powerpc/powerpc64/bzero.S b/sysdeps/powerpc/powerpc64/bzero.S
index 56806fcd5a..c66824a931 100644
--- a/sysdeps/powerpc/powerpc64/bzero.S
+++ b/sysdeps/powerpc/powerpc64/bzero.S
@@ -1,5 +1,5 @@
/* Optimized bzero `implementation' for PowerPC64.
- Copyright (C) 1997-2016 Free Software Foundation, Inc.
+ Copyright (C) 1997-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/cell/memcpy.S b/sysdeps/powerpc/powerpc64/cell/memcpy.S
index d5dc9c4a35..3e07003b58 100644
--- a/sysdeps/powerpc/powerpc64/cell/memcpy.S
+++ b/sysdeps/powerpc/powerpc64/cell/memcpy.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for CELL BE PowerPC.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,10 @@
#include <sysdep.h>
+#ifndef MEMCPY
+# define MEMCPY memcpy
+#endif
+
#define PREFETCH_AHEAD 6 /* no cache lines SRC prefetching ahead */
#define ZERO_AHEAD 4 /* no cache lines DST zeroing ahead */
@@ -39,7 +43,7 @@
.align 7
-EALIGN (memcpy, 5, 0)
+ENTRY_TOCLESS (MEMCPY, 5)
CALL_MCOUNT 3
dcbt 0,r4 /* Prefetch ONE SRC cacheline */
@@ -238,5 +242,5 @@ EALIGN (memcpy, 5, 0)
stb r0,0(r6)
1: blr
-END_GEN_TB (memcpy,TB_TOCLESS)
+END_GEN_TB (MEMCPY,TB_TOCLESS)
libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc64/crti.S b/sysdeps/powerpc/powerpc64/crti.S
index e1b68a8634..2242deb3dd 100644
--- a/sysdeps/powerpc/powerpc64/crti.S
+++ b/sysdeps/powerpc/powerpc64/crti.S
@@ -1,5 +1,5 @@
/* Special .init and .fini section support for PowerPC64.
- Copyright (C) 2012-2016 Free Software Foundation, Inc.
+ Copyright (C) 2012-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -62,6 +62,7 @@
#endif
.section ".init", "ax", @progbits
ENTRY_2(_init)
+ .hidden _init
.align ALIGNARG (2)
BODY_LABEL (_init):
LOCALENTRY(_init)
@@ -80,6 +81,7 @@ BODY_LABEL (_init):
.section ".fini", "ax", @progbits
ENTRY_2(_fini)
+ .hidden _fini
.align ALIGNARG (2)
BODY_LABEL (_fini):
LOCALENTRY(_fini)
diff --git a/sysdeps/powerpc/powerpc64/crtn.S b/sysdeps/powerpc/powerpc64/crtn.S
index ae9135ce9f..6067bed6c3 100644
--- a/sysdeps/powerpc/powerpc64/crtn.S
+++ b/sysdeps/powerpc/powerpc64/crtn.S
@@ -1,5 +1,5 @@
/* Special .init and .fini section support for PowerPC64.
- Copyright (C) 2012-2016 Free Software Foundation, Inc.
+ Copyright (C) 2012-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/dl-dtprocnum.h b/sysdeps/powerpc/powerpc64/dl-dtprocnum.h
index e72b71e2f8..d46cadb9c9 100644
--- a/sysdeps/powerpc/powerpc64/dl-dtprocnum.h
+++ b/sysdeps/powerpc/powerpc64/dl-dtprocnum.h
@@ -1,5 +1,5 @@
/* Configuration of lookup functions. PowerPC64 version.
- Copyright (C) 2002-2016 Free Software Foundation, Inc.
+ Copyright (C) 2002-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/dl-irel.h b/sysdeps/powerpc/powerpc64/dl-irel.h
index db76ebbca9..ab13c04358 100644
--- a/sysdeps/powerpc/powerpc64/dl-irel.h
+++ b/sysdeps/powerpc/powerpc64/dl-irel.h
@@ -1,6 +1,6 @@
/* Machine-dependent ELF indirect relocation inline functions.
PowerPC64 version.
- Copyright (C) 2009-2016 Free Software Foundation, Inc.
+ Copyright (C) 2009-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/dl-machine.c b/sysdeps/powerpc/powerpc64/dl-machine.c
index 3b4ed65bc9..959907bf02 100644
--- a/sysdeps/powerpc/powerpc64/dl-machine.c
+++ b/sysdeps/powerpc/powerpc64/dl-machine.c
@@ -1,5 +1,5 @@
/* Machine-dependent ELF dynamic relocation functions. PowerPC64 version.
- Copyright (C) 1995-2016 Free Software Foundation, Inc.
+ Copyright (C) 1995-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,14 +24,17 @@
void
_dl_reloc_overflow (struct link_map *map,
- const char *name,
- Elf64_Addr *const reloc_addr,
- const Elf64_Sym *refsym)
+ const char *name,
+ Elf64_Addr *const reloc_addr,
+ const Elf64_Sym *refsym)
{
- char buffer[128];
+ char buffer[1024];
char *t;
t = stpcpy (buffer, name);
- t = stpcpy (t, " reloc at 0x");
+ /* Notice that _itoa_word() writes characters from the higher address to the
+ lower address, requiring the destination string to reserve all the
+ required size before the call. */
+ t = stpcpy (t, " reloc at 0x0000000000000000");
_itoa_word ((unsigned long) reloc_addr, t, 16, 0);
if (refsym)
{
@@ -45,3 +48,19 @@ _dl_reloc_overflow (struct link_map *map,
t = stpcpy (t, " out of range");
_dl_signal_error (0, map->l_name, NULL, buffer);
}
+
+#if _CALL_ELF == 2
+void
+_dl_error_localentry (struct link_map *map, const Elf64_Sym *refsym)
+{
+ char buffer[1024];
+ char *t;
+ const char *strtab;
+
+ strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
+ t = stpcpy (buffer, "expected localentry:0 `");
+ t = stpcpy (t, strtab + refsym->st_name);
+ t = stpcpy (t, "'");
+ _dl_signal_error (0, map->l_name, NULL, buffer);
+}
+#endif
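
The enlarged buffer and the pre-padded " reloc at 0x0000000000000000" string follow from the comment in the hunk above: _itoa_word writes digits starting at the end of the area it is given and works toward lower addresses, so the caller must reserve the full width before the call, and the significant digits simply overwrite the trailing zeros of the padding. A small stand-alone illustration of that backfill style (fill_hex_backward below is a hypothetical helper written for this example, not glibc's _itoa_word):

/* Backward-filling hex conversion in the style described above: digits
   are written from the high end of the reserved area toward lower
   addresses, significant digits only.  */
#include <stdio.h>
#include <string.h>

static char *
fill_hex_backward (char *buflim, unsigned long value)
{
  static const char hex[] = "0123456789abcdef";
  do
    {
      *--buflim = hex[value & 0xf];
      value >>= 4;
    }
  while (value != 0);
  return buflim;   /* Points at the most significant digit written.  */
}

int
main (void)
{
  /* Reserve all 16 hex digits up front, as the glibc code does with
     " reloc at 0x0000000000000000"; the conversion overwrites only the
     low-order zeros.  */
  char buffer[64];
  char *t = stpcpy (buffer, "foo reloc at 0x0000000000000000");
  fill_hex_backward (t, 0xdeadbeefUL);
  puts (buffer);   /* foo reloc at 0x00000000deadbeef */
  return 0;
}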
diff --git a/sysdeps/powerpc/powerpc64/dl-machine.h b/sysdeps/powerpc/powerpc64/dl-machine.h
index d6f780ec85..99a83d0c82 100644
--- a/sysdeps/powerpc/powerpc64/dl-machine.h
+++ b/sysdeps/powerpc/powerpc64/dl-machine.h
@@ -1,6 +1,6 @@
/* Machine-dependent ELF dynamic relocation inline functions.
PowerPC64 version.
- Copyright 1995-2016 Free Software Foundation, Inc.
+ Copyright 1995-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -27,6 +27,7 @@
#include <dl-tls.h>
#include <sysdep.h>
#include <hwcapinfo.h>
+#include <cpu-features.c>
/* Translate a processor specific dynamic tag to the index
in l_info array. */
@@ -300,13 +301,14 @@ BODY_PREFIX "_dl_start_user:\n" \
/* We define an initialization function to initialize HWCAP/HWCAP2 and
platform data so it can be copied into the TCB later. This is called
very early in _dl_sysdep_start for dynamically linked binaries. */
-#ifdef SHARED
+#if defined(SHARED) && IS_IN (rtld)
# define DL_PLATFORM_INIT dl_platform_init ()
static inline void __attribute__ ((unused))
dl_platform_init (void)
{
__tcb_parse_hwcap_and_convert_at_platform ();
+ init_cpu_features (&GLRO(dl_powerpc_cpu_features));
}
#endif
@@ -440,20 +442,30 @@ elf_machine_runtime_setup (struct link_map *map, int lazy, int profile)
}
#if _CALL_ELF == 2
-/* If the PLT entry whose reloc is 'reloc' resolves to a function in
- the same object, return the target function's local entry point
- offset if usable. */
+extern void attribute_hidden _dl_error_localentry (struct link_map *map,
+ const Elf64_Sym *refsym);
+
+/* If the PLT entry resolves to a function in the same object, return
+ the target function's local entry point offset if usable. */
static inline Elf64_Addr __attribute__ ((always_inline))
ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
- const Elf64_Rela *reloc)
+ const ElfW(Sym) *refsym, const ElfW(Sym) *sym)
{
- const Elf64_Sym *symtab;
- const Elf64_Sym *sym;
-
/* If the target function is in a different object, we cannot
use the local entry point. */
if (sym_map != map)
- return 0;
+ {
+ /* Check that optimized plt call stubs for localentry:0 functions
+ are not being satisfied by a non-zero localentry symbol. */
+ if (map->l_info[DT_PPC64(OPT)]
+ && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_LOCALENTRY) != 0
+ && refsym->st_info == ELFW(ST_INFO) (STB_GLOBAL, STT_FUNC)
+ && (STO_PPC64_LOCAL_MASK & refsym->st_other) == 0
+ && (STO_PPC64_LOCAL_MASK & sym->st_other) != 0)
+ _dl_error_localentry (map, refsym);
+
+ return 0;
+ }
/* If the linker inserted multiple TOCs, we cannot use the
local entry point. */
@@ -461,16 +473,13 @@ ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
&& (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_MULTI_TOC))
return 0;
- /* Otherwise, we can use the local entry point. Retrieve its offset
- from the symbol's ELF st_other field. */
- symtab = (const void *) D_PTR (map, l_info[DT_SYMTAB]);
- sym = &symtab[ELFW(R_SYM) (reloc->r_info)];
-
/* If the target function is an ifunc then the local entry offset is
for the resolver, not the final destination. */
if (__builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
return 0;
+ /* Otherwise, we can use the local entry point. Retrieve its offset
+ from the symbol's ELF st_other field. */
return PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
}
#endif
@@ -479,6 +488,7 @@ ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
routine. */
static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
+ const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf64_Rela *reloc,
Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
@@ -534,7 +544,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
PPC_DCBST (&plt->fd_func);
PPC_ISYNC;
#else
- finaladdr += ppc64_local_entry_offset (map, sym_map, reloc);
+ finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
*reloc_addr = finaladdr;
#endif
@@ -543,6 +553,7 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
static inline void __attribute__ ((always_inline))
elf_machine_plt_conflict (struct link_map *map, lookup_t sym_map,
+ const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
const Elf64_Rela *reloc,
Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
@@ -565,7 +576,7 @@ elf_machine_plt_conflict (struct link_map *map, lookup_t sym_map,
PPC_DCBST (&plt->fd_toc);
PPC_SYNC;
#else
- finaladdr += ppc64_local_entry_offset (map, sym_map, reloc);
+ finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
*reloc_addr = finaladdr;
#endif
}
@@ -604,11 +615,10 @@ elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
#define dont_expect(X) __builtin_expect ((X), 0)
-extern void _dl_reloc_overflow (struct link_map *map,
- const char *name,
- Elf64_Addr *const reloc_addr,
- const Elf64_Sym *refsym)
- attribute_hidden;
+extern void attribute_hidden _dl_reloc_overflow (struct link_map *map,
+ const char *name,
+ Elf64_Addr *const reloc_addr,
+ const Elf64_Sym *refsym);
auto inline void __attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
@@ -698,8 +708,7 @@ elf_machine_rela (struct link_map *map,
/* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
and STT_GNU_IFUNC. */
struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
- Elf64_Addr value = ((sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value)
- + reloc->r_addend);
+ Elf64_Addr value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;
if (sym != NULL
&& __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
@@ -728,9 +737,11 @@ elf_machine_rela (struct link_map *map,
/* Fall thru */
case R_PPC64_JMP_SLOT:
#ifdef RESOLVE_CONFLICT_FIND_MAP
- elf_machine_plt_conflict (map, sym_map, reloc, reloc_addr, value);
+ elf_machine_plt_conflict (map, sym_map, refsym, sym,
+ reloc, reloc_addr, value);
#else
- elf_machine_fixup_plt (map, sym_map, reloc, reloc_addr, value);
+ elf_machine_fixup_plt (map, sym_map, refsym, sym,
+ reloc, reloc_addr, value);
#endif
return;
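
The new refsym/sym parameters feed the localentry:0 consistency check above: when an object was linked with PLT call stubs optimized for localentry:0 functions (PPC64_OPT_LOCALENTRY set in its DT_PPC64_OPT tag), a global function whose link-time symbol advertised a zero local entry offset must not be satisfied at run time by a definition with a non-zero offset, because the optimized stub omits the TOC-pointer restore. A hedged sketch of that predicate in isolation; the fallback #defines are assumptions taken from the usual <elf.h> values:

/* Sketch only: the shape of the localentry:0 check added to
   ppc64_local_entry_offset(), pulled out for illustration.  */
#include <elf.h>
#include <stdbool.h>

#ifndef STO_PPC64_LOCAL_MASK
# define STO_PPC64_LOCAL_MASK  (7 << 5)   /* assumption: matches <elf.h> */
#endif
#ifndef PPC64_OPT_LOCALENTRY
# define PPC64_OPT_LOCALENTRY  0x4        /* assumption: matches <elf.h> */
#endif

bool
localentry0_mismatch (Elf64_Xword dt_ppc64_opt,  /* value of the DT_PPC64_OPT tag */
                      const Elf64_Sym *refsym,   /* symbol the object was linked against */
                      const Elf64_Sym *sym)      /* definition found at run time */
{
  return (dt_ppc64_opt & PPC64_OPT_LOCALENTRY) != 0
         /* The reference is a global function ...  */
         && refsym->st_info == ELF64_ST_INFO (STB_GLOBAL, STT_FUNC)
         /* ... that advertised localentry:0 at link time ...  */
         && (refsym->st_other & STO_PPC64_LOCAL_MASK) == 0
         /* ... but resolves to a definition with a non-zero local entry
            offset; a stub that skips the TOC restore would then be
            unsafe, so the loader calls _dl_error_localentry() instead.  */
         && (sym->st_other & STO_PPC64_LOCAL_MASK) != 0;
}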
diff --git a/sysdeps/powerpc/powerpc64/dl-trampoline.S b/sysdeps/powerpc/powerpc64/dl-trampoline.S
index fc07e453e9..aa141dc44b 100644
--- a/sysdeps/powerpc/powerpc64/dl-trampoline.S
+++ b/sysdeps/powerpc/powerpc64/dl-trampoline.S
@@ -1,5 +1,5 @@
/* PLT trampolines. PPC64 version.
- Copyright (C) 2005-2016 Free Software Foundation, Inc.
+ Copyright (C) 2005-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -33,7 +33,7 @@
a function that makes no calls except for __tls_get_addr and we
might be here resolving the __tls_get_addr call. */
#define INT_PARMS FRAME_MIN_SIZE
-EALIGN(_dl_runtime_resolve, 4, 0)
+ENTRY (_dl_runtime_resolve, 4)
stdu r1,-FRAME_SIZE(r1)
cfi_adjust_cfa_offset (FRAME_SIZE)
std r3,INT_PARMS+0(r1)
@@ -195,7 +195,7 @@ END(_dl_runtime_resolve)
parm1 (r3) and the index (r0) needs to be converted to an offset
(index * 24) in parm2 (r4). */
#ifndef PROF
-EALIGN(_dl_profile_resolve, 4, 0)
+ENTRY (_dl_profile_resolve, 4)
/* Spill r30, r31 to preserve the link_map* and reloc_addr, in case we
need to call _dl_call_pltexit. */
std r31,-8(r1)
diff --git a/sysdeps/powerpc/powerpc64/entry.h b/sysdeps/powerpc/powerpc64/entry.h
index 6baffdbc95..c2f7348cad 100644
--- a/sysdeps/powerpc/powerpc64/entry.h
+++ b/sysdeps/powerpc/powerpc64/entry.h
@@ -1,5 +1,5 @@
/* Finding the entry point and start of text. PowerPC64 version.
- Copyright (C) 2002-2016 Free Software Foundation, Inc.
+ Copyright (C) 2002-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/ffsll.c b/sysdeps/powerpc/powerpc64/ffsll.c
index 786f4e5e9d..c95ad99b36 100644
--- a/sysdeps/powerpc/powerpc64/ffsll.c
+++ b/sysdeps/powerpc/powerpc64/ffsll.c
@@ -1,6 +1,6 @@
/* Find first set bit in a word, counted from least significant end.
For PowerPC.
- Copyright (C) 1991-2016 Free Software Foundation, Inc.
+ Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Torbjorn Granlund (tege@sics.se).
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/Makefile b/sysdeps/powerpc/powerpc64/fpu/multiarch/Makefile
index 0e3eac7190..73f2f69377 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/Makefile
@@ -1,30 +1,35 @@
ifeq ($(subdir),math)
-sysdep_routines += s_isnan-power7 s_isnan-power6x s_isnan-power6 \
- s_isnan-power5 s_isnan-ppc64 s_copysign-power6 \
- s_copysign-ppc64 s_finite-power7 s_finite-ppc64 \
- s_finitef-ppc64 s_isinff-ppc64 s_isinf-power7 \
- s_isinf-ppc64 s_modf-power5+ s_modf-ppc64 \
- s_modff-power5+ s_modff-ppc64 s_isnan-power8 \
- s_isinf-power8 s_finite-power8
+# These functions are built both for libc and libm because they're required
+# by printf. While the libc objects have the prefix s_, the libm ones are
+# prefixed with m_.
+sysdep_calls := s_copysign-power6 s_copysign-ppc64 \
+ s_finite-power8 s_finite-power7 s_finite-ppc64 \
+ s_finitef-ppc64 \
+ s_isinf-power8 s_isinf-ppc64 \
+ s_isinff-ppc64 s_isinf-power7 \
+ s_isnan-power8 s_isnan-power7 s_isnan-power6x s_isnan-power6 \
+ s_isnan-power5 s_isnan-ppc64 \
+ s_modf-power5+ s_modf-ppc64 \
+ s_modff-power5+ s_modff-ppc64
-libm-sysdep_routines += s_isnan-power7 s_isnan-power6x s_isnan-power6 \
- s_isnan-power5 s_isnan-ppc64 s_llround-power6x \
+sysdep_routines += $(sysdep_calls)
+libm-sysdep_routines += s_llround-power6x \
s_llround-power5+ s_llround-ppc64 s_ceil-power5+ \
s_ceil-ppc64 s_ceilf-power5+ s_ceilf-ppc64 \
s_floor-power5+ s_floor-ppc64 s_floorf-power5+ \
s_floorf-ppc64 s_round-power5+ s_round-ppc64 \
s_roundf-power5+ s_roundf-ppc64 s_trunc-power5+ \
s_trunc-ppc64 s_truncf-power5+ s_truncf-ppc64 \
- s_copysign-power6 s_copysign-ppc64 s_llrint-power6x \
- s_llrint-ppc64 s_finite-power7 s_finite-ppc64 \
- s_finitef-ppc64 s_isinff-ppc64 s_isinf-power7 \
- s_isinf-ppc64 s_logb-power7 s_logbf-power7 \
+ s_llrint-power6x s_llrint-ppc64 \
+ s_logb-power7 s_logbf-power7 \
s_logbl-power7 s_logb-ppc64 s_logbf-ppc64 \
- s_logbl-ppc64 s_modf-power5+ s_modf-ppc64 \
- s_modff-power5+ s_modff-ppc64 e_hypot-ppc64 \
+ s_logbl-ppc64 e_hypot-ppc64 \
e_hypot-power7 e_hypotf-ppc64 e_hypotf-power7 \
- s_isnan-power8 s_isinf-power8 s_finite-power8 \
- s_llrint-power8 s_llround-power8
+ s_llrint-power8 s_llround-power8 s_llroundf-ppc64 \
+ e_expf-power8 e_expf-ppc64 \
+ s_sinf-ppc64 s_sinf-power8 \
+ s_cosf-ppc64 s_cosf-power8 \
+ $(sysdep_calls:s_%=m_%)
CFLAGS-s_logbf-power7.c = -mcpu=power7
CFLAGS-s_logbl-power7.c = -mcpu=power7
@@ -33,4 +38,9 @@ CFLAGS-s_modf-power5+.c = -mcpu=power5+
CFLAGS-s_modff-power5+.c = -mcpu=power5+
CFLAGS-e_hypot-power7.c = -mcpu=power7
CFLAGS-e_hypotf-power7.c = -mcpu=power7
+
+# These files quiet sNaNs in a way that is optimized away without
+# -fsignaling-nans.
+CFLAGS-s_modf-ppc64.c += -fsignaling-nans
+CFLAGS-s_modff-ppc64.c += -fsignaling-nans
endif
diff --git a/sysdeps/powerpc/powerpc64/strtok_r.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf-power8.S
index 2df0ce42b3..c9dad4e132 100644
--- a/sysdeps/powerpc/powerpc64/strtok_r.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf-power8.S
@@ -1,5 +1,5 @@
-/* Optimized strtok_r implementation for PowerPC64.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* __ieee754_expf() POWER8 version.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,9 +16,9 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#define USE_AS_STRTOK_R
-#include <sysdeps/powerpc/powerpc64/strtok.S>
+#undef strong_alias
+#define strong_alias(a, b)
-weak_alias (__strtok_r, strtok_r)
-libc_hidden_def (__strtok_r)
-libc_hidden_builtin_def (strtok_r)
+#define __ieee754_expf __ieee754_expf_power8
+
+#include <sysdeps/powerpc/powerpc64/power8/fpu/e_expf.S>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf-ppc64.c
new file mode 100644
index 0000000000..8ab41a8ed4
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf-ppc64.c
@@ -0,0 +1,21 @@
+/* __ieee_expf() PowerPC64 version.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define __expf __ieee754_expf_ppc64
+
+#include <sysdeps/ieee754/flt-32/e_expf.c>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf.c
new file mode 100644
index 0000000000..f752514efa
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_expf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of ieee754_expf.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <math.h>
+#include <math_private.h>
+#include <math_ldbl_opt.h>
+#include "init-arch.h"
+
+extern __typeof (__ieee754_expf) __ieee754_expf_ppc64 attribute_hidden;
+extern __typeof (__ieee754_expf) __ieee754_expf_power8 attribute_hidden;
+
+libc_ifunc (__ieee754_expf,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __ieee754_expf_power8
+ : __ieee754_expf_ppc64);
+
+strong_alias (__ieee754_expf, __expf_finite)
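
The new e_expf.c selects between __ieee754_expf_power8 and __ieee754_expf_ppc64 once, while relocations are processed, through glibc's internal libc_ifunc macro keyed on PPC_FEATURE2_ARCH_2_07. Outside glibc the same dispatch can be written as a GNU indirect function whose resolver reads AT_HWCAP2; a sketch with placeholder implementations (my_expf_* are hypothetical names, not glibc symbols):

/* Sketch only: POWER8-vs-baseline dispatch via a GNU IFUNC.  */
#include <math.h>
#include <sys/auxv.h>                  /* getauxval, AT_HWCAP2 */

#ifndef PPC_FEATURE2_ARCH_2_07
# define PPC_FEATURE2_ARCH_2_07 0x80000000u  /* assumption: matches <bits/hwcap.h> */
#endif

static float my_expf_generic (float x) { return expf (x); }
static float my_expf_power8 (float x) { return expf (x); }  /* would use ISA 2.07 code */

/* The resolver runs once, at relocation time; its return value becomes
   the permanent target of my_expf.  */
static float (*resolve_my_expf (void)) (float)
{
  return (getauxval (AT_HWCAP2) & PPC_FEATURE2_ARCH_2_07)
         ? my_expf_power8 : my_expf_generic;
}

float my_expf (float x) __attribute__ ((ifunc ("resolve_my_expf")));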
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-power7.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-power7.c
index d79ee7ba47..416ef50df6 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-power7.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-power7.c
@@ -1,5 +1,5 @@
/* __ieee_hypot() POWER7 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-ppc64.c
index 8bbcfc3560..f469a24719 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot-ppc64.c
@@ -1,5 +1,5 @@
/* __ieee_hypot() PowerPC64 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot.c
index 30f91ef742..8d05c430cc 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypot.c
@@ -1,5 +1,5 @@
/* Multiple versions of ieee754_hypot.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#include <math.h>
+#include <math_private.h>
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-power7.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-power7.c
index 572dfe6bfb..b40158fd29 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-power7.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-power7.c
@@ -1,5 +1,5 @@
/* __ieee_hypotf() POWER7 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-ppc64.c
index 634bf85da6..54dbaee016 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf-ppc64.c
@@ -1,5 +1,5 @@
/* __ieee_hypot() PowerPC64 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf.c
index 7686b51e2b..9bef1fea3f 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/e_hypotf.c
@@ -1,5 +1,5 @@
/* Multiple versions of ieee754_hypot.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#include <math.h>
+#include <math_private.h>
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-power5+.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-power5+.S
index be59f4bdb8..76651b694c 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-power5+.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-power5+.S
@@ -1,5 +1,5 @@
/* ceil function. PowerPC64/power5+ version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-ppc64.S
index b44d44af33..c75c66ba3b 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil-ppc64.S
@@ -1,5 +1,5 @@
/* ceil function. PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil.c
index 3c20329eb0..5cde4eb46f 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceil.c
@@ -1,5 +1,5 @@
/* Multiple versions of ceil.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-double.h>
extern __typeof (__ceil) __ceil_ppc64 attribute_hidden;
extern __typeof (__ceil) __ceil_power5plus attribute_hidden;
@@ -29,12 +30,4 @@ libc_ifunc (__ceil,
? __ceil_power5plus
: __ceil_ppc64);
-weak_alias (__ceil, ceil)
-
-#ifdef NO_LONG_DOUBLE
-strong_alias (__ceil, __ceill)
-weak_alias (__ceil, ceill)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __ceil, ceill, GLIBC_2_0);
-#endif
+libm_alias_double (__ceil, ceil)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-power5+.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-power5+.S
index d9ad500d9c..b9c9e14fba 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-power5+.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-power5+.S
@@ -1,5 +1,5 @@
/* ceilf function. PowerPC64/power5+ version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-ppc64.S
index ba3a20f0ba..ce5cc49770 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf-ppc64.S
@@ -1,5 +1,5 @@
/* ceilf function. PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf.c
index 4c366c8123..18697e52b3 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_ceilf.c
@@ -1,5 +1,5 @@
/* Multiple versions of ceilf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-float.h>
extern __typeof (__ceilf) __ceilf_ppc64 attribute_hidden;
extern __typeof (__ceilf) __ceilf_power5plus attribute_hidden;
@@ -29,4 +30,4 @@ libc_ifunc (__ceilf,
? __ceilf_power5plus
: __ceilf_ppc64);
-weak_alias (__ceilf, ceilf)
+libm_alias_float (__ceil, ceil)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-power6.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-power6.S
index 9ee298a80e..3ce85ca822 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-power6.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-power6.S
@@ -1,5 +1,5 @@
/* copysign(). PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-ppc64.S
index c253657baa..97fad681e3 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign-ppc64.S
@@ -1,5 +1,5 @@
/* copysign(). PowerPC64 default version.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -17,8 +17,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign.c
index 91c123bc92..74ce097397 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysign.c
@@ -1,5 +1,5 @@
/* Multiple versions of copysign.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -25,6 +25,7 @@
#undef __copysign
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-double.h>
extern __typeof (__redirect_copysign) __copysign_ppc64 attribute_hidden;
extern __typeof (__redirect_copysign) __copysign_power6 attribute_hidden;
@@ -36,16 +37,8 @@ libc_ifunc (__libm_copysign,
: __copysign_ppc64);
strong_alias (__libm_copysign, __copysign)
-weak_alias (__copysign, copysign)
+libm_alias_double (__copysign, copysign)
-#ifdef NO_LONG_DOUBLE
-weak_alias (__copysign,copysignl)
-strong_alias(__copysign,__copysignl)
-#endif
-#if IS_IN (libm)
-# if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __copysign, copysignl, GLIBC_2_0);
-# endif
-#elif LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+#if LONG_DOUBLE_COMPAT (libc, GLIBC_2_0)
compat_symbol (libc, __copysign, copysignl, GLIBC_2_0);
#endif
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysignf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysignf.c
index 84f22809d0..377a3f814c 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysignf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_copysignf.c
@@ -1,5 +1,5 @@
/* Multiple versions of copysignf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,6 +19,7 @@
#include <math.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-float.h>
/* It's safe to use double-precision implementation for single-precision. */
extern __typeof (__copysignf) __copysign_ppc64 attribute_hidden;
@@ -29,4 +30,4 @@ libc_ifunc (__copysignf,
? __copysign_power6
: __copysign_ppc64);
-weak_alias (__copysignf, copysignf)
+libm_alias_float (__copysign, copysign)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf-power8.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf-power8.S
new file mode 100644
index 0000000000..17adc90ad2
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf-power8.S
@@ -0,0 +1,24 @@
+/* cosf function. PowerPC64/power8 version.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __cosf __cosf_power8
+
+#include <sysdeps/powerpc/powerpc64/power8/fpu/s_cosf.S>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf-ppc64.c
new file mode 100644
index 0000000000..34e0553b93
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf-ppc64.c
@@ -0,0 +1,24 @@
+/* cosf function. PowerPC64 default version.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+#define __cosf __cosf_ppc64
+
+#include <sysdeps/powerpc/fpu/s_cosf.c>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf.c
new file mode 100644
index 0000000000..cb12178791
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_cosf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of cosf.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <math.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+#include <libm-alias-float.h>
+
+extern __typeof (__cosf) __cosf_ppc64 attribute_hidden;
+extern __typeof (__cosf) __cosf_power8 attribute_hidden;
+
+libc_ifunc (__cosf,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __cosf_power8
+ : __cosf_ppc64);
+
+libm_alias_float (__cos, cos)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power7.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power7.S
index c0a101feea..d3e9f606bd 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power7.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power7.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64/POWER7 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef hidden_def
#define hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power8.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power8.S
index 44e0f6f944..7a200a5c1a 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power8.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-power8.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64/POWER7 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef hidden_def
#define hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-ppc64.c
index e6d2029164..2a597fc38d 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite-ppc64.c
@@ -1,5 +1,5 @@
/* finite(). PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite.c
index 067edc2ea6..14c9ea7fef 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finite.c
@@ -1,5 +1,5 @@
/* Multiple versions of finite.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,6 +16,17 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#define __finite __redirect___finite
+
+/* The following definitions, although not related to the 'double'
+ version of 'finite', are required to guarantee macro expansions
+ (e.g.: from __finitef to __redirect_finitef) in include/math.h, thus
+ compensating for the unintended macro expansions in
+ math/bits/mathcalls-helper-functions.h. */
+#define __finitef __redirect___finitef
+#define __finitel __redirect___finitel
+#define __finitef128 __redirect___finitef128
+
#include <math.h>
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
@@ -24,13 +35,17 @@
extern __typeof (__finite) __finite_ppc64 attribute_hidden;
extern __typeof (__finite) __finite_power7 attribute_hidden;
extern __typeof (__finite) __finite_power8 attribute_hidden;
+#undef __finite
+#undef __finitef
+#undef __finitel
+#undef __finitef128
-libc_ifunc (__finite,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __finite_power8 :
- (hwcap & PPC_FEATURE_ARCH_2_06)
- ? __finite_power7
- : __finite_ppc64);
+libc_ifunc_redirected (__redirect___finite, __finite,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __finite_power8
+ : (hwcap & PPC_FEATURE_ARCH_2_06)
+ ? __finite_power7
+ : __finite_ppc64);
weak_alias (__finite, finite)
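
The block of #defines ahead of the includes renames what include/math.h declares, so the prototype (and the helper-macro expansions coming from math/bits/mathcalls-helper-functions.h) attach to __redirect___finite and friends; once the names are #undef'd, the real __finite is free for libc_ifunc_redirected to define as an IFUNC. A toy sketch of the declaration-redirect idiom, with a plain dispatching definition standing in for the IFUNC machinery (some_func and "some_header.h" are hypothetical):

/* Toy sketch only, not glibc's actual mechanism.  */
#define some_func __redirect_some_func
/* Stands in for `#include "some_header.h"`: because of the define
   above, the compiler really sees a declaration of
   __redirect_some_func, not of some_func.  */
int some_func (int x);
#undef some_func

static int some_func_power8 (int x)  { return x + 1; }
static int some_func_generic (int x) { return x + 1; }

/* The public name is free again.  glibc's libc_ifunc_redirected would
   define it as an IFUNC at this point; a plain dispatching definition
   shows the same idea without the loader machinery.  */
int
some_func (int x)
{
  static int (*impl) (int);
  if (impl == 0)
    impl = 1 /* stand-in for a hwcap2 test */ ? some_func_power8
                                              : some_func_generic;
  return impl (x);
}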
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef-ppc64.c
index e010880f46..0df571cd2e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef-ppc64.c
@@ -1,5 +1,5 @@
/* finitef(). PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef.c
index e0b4686ced..d2ffdeeb41 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_finitef.c
@@ -1,5 +1,5 @@
/* Multiple versions of finitef.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,6 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#define __finitef __redirect___finitef
#include <math.h>
#include <shlib-compat.h>
#include "init-arch.h"
@@ -24,12 +25,13 @@ extern __typeof (__finitef) __finitef_ppc64 attribute_hidden;
/* The double-precision version also works for single-precision. */
extern __typeof (__finitef) __finite_power7 attribute_hidden;
extern __typeof (__finitef) __finite_power8 attribute_hidden;
+#undef __finitef
-libc_ifunc (__finitef,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __finite_power8 :
- (hwcap & PPC_FEATURE_ARCH_2_06)
- ? __finite_power7
- : __finitef_ppc64);
+libc_ifunc_redirected (__redirect___finitef, __finitef,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __finite_power8
+ : (hwcap & PPC_FEATURE_ARCH_2_06)
+ ? __finite_power7
+ : __finitef_ppc64);
weak_alias (__finitef, finitef)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-power5+.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-power5+.S
index a93f3d9183..0c4c97f7a6 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-power5+.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-power5+.S
@@ -1,5 +1,5 @@
/* floor function. PowerPC64/power5+ version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-ppc64.S
index 89be2a5bee..97ec94d733 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor-ppc64.S
@@ -1,5 +1,5 @@
/* floor function. PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor.c
index 6a743fd7ee..d1a21a1d12 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floor.c
@@ -1,5 +1,5 @@
/* Multiple versions of floor.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-double.h>
extern __typeof (__floor) __floor_ppc64 attribute_hidden;
extern __typeof (__floor) __floor_power5plus attribute_hidden;
@@ -29,12 +30,4 @@ libc_ifunc (__floor,
? __floor_power5plus
: __floor_ppc64);
-weak_alias (__floor, floor)
-
-#ifdef NO_LONG_DOUBLE
-strong_alias (__floor, __floorl)
-weak_alias (__floor, floorl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __floor, floorl, GLIBC_2_0);
-#endif
+libm_alias_double (__floor, floor)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-power5+.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-power5+.S
index 87b9e3caa2..10676569c3 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-power5+.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-power5+.S
@@ -1,5 +1,5 @@
/* floorf function. PowerPC64/power5+ version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-ppc64.S
index f97492d2ef..b207f0cad2 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf-ppc64.S
@@ -1,5 +1,5 @@
/* floorf function. PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,9 +16,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
-
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf.c
index 2ee2048f98..d1bbc14614 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_floorf.c
@@ -1,5 +1,5 @@
/* Multiple versions of floorf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-float.h>
extern __typeof (__floorf) __floorf_ppc64 attribute_hidden;
extern __typeof (__floorf) __floorf_power5plus attribute_hidden;
@@ -29,4 +30,4 @@ libc_ifunc (__floorf,
? __floorf_power5plus
: __floorf_ppc64);
-weak_alias (__floorf, floorf)
+libm_alias_float (__floor, floor)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power7.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power7.S
index aaa163a914..e644f9605f 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power7.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power7.S
@@ -1,5 +1,5 @@
/* isinf(). PowerPC64/POWER7 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef hidden_def
#define hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power8.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power8.S
index 69341ec162..dc7764d915 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power8.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-power8.S
@@ -1,5 +1,5 @@
/* isinf(). PowerPC64/POWER8 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef hidden_def
#define hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-ppc64.c
index c400a73fa9..c1907980db 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf-ppc64.c
@@ -1,5 +1,5 @@
/* isinf(). PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf.c
index 07e159d9c1..44f5d0c129 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinf.c
@@ -1,5 +1,5 @@
/* Multiple versions of isinf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,6 +16,17 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#define __isinf __redirect___isinf
+
+/* The following definitions, although not related to the 'double'
+ version of 'isinf', are required to guarantee macro expansions
+ (e.g.: from __isinff to __redirect_isinff) in include/math.h, thus
+ compensating for the unintended macro expansions in
+ math/bits/mathcalls-helper-functions.h. */
+#define __isinff __redirect___isinff
+#define __isinfl __redirect___isinfl
+#define __isinff128 __redirect___isinff128
+
#include <math.h>
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
@@ -24,13 +35,17 @@
extern __typeof (__isinf) __isinf_ppc64 attribute_hidden;
extern __typeof (__isinf) __isinf_power7 attribute_hidden;
extern __typeof (__isinf) __isinf_power8 attribute_hidden;
-
-libc_ifunc (__isinf,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __isinf_power8 :
- (hwcap & PPC_FEATURE_ARCH_2_06)
- ? __isinf_power7
- : __isinf_ppc64);
+#undef __isinf
+#undef __isinff
+#undef __isinfl
+#undef __isinff128
+
+libc_ifunc_redirected (__redirect___isinf, __isinf,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __isinf_power8
+ : (hwcap & PPC_FEATURE_ARCH_2_06)
+ ? __isinf_power7
+ : __isinf_ppc64);
weak_alias (__isinf, isinf)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff-ppc64.c
index 72775f571d..531f15d822 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff-ppc64.c
@@ -1,5 +1,5 @@
/* isinff(). PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff.c
index 2cb161b54c..7e29a60713 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isinff.c
@@ -1,5 +1,5 @@
/* Multiple versions of isinf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,6 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#define __isinff __redirect___isinff
#include <math.h>
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
@@ -25,12 +26,13 @@ extern __typeof (__isinff) __isinff_ppc64 attribute_hidden;
/* The double-precision version also works for single-precision. */
extern __typeof (__isinff) __isinf_power7 attribute_hidden;
extern __typeof (__isinff) __isinf_power8 attribute_hidden;
+#undef __isinff
-libc_ifunc (__isinff,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __isinf_power8 :
- (hwcap & PPC_FEATURE_ARCH_2_06)
- ? __isinf_power7
- : __isinff_ppc64);
+libc_ifunc_redirected (__redirect___isinff, __isinff,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __isinf_power8
+ : (hwcap & PPC_FEATURE_ARCH_2_06)
+ ? __isinf_power7
+ : __isinff_ppc64);
weak_alias (__isinff, isinff)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power5.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power5.S
index c61b52618e..749cdbc9fe 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power5.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power5.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64/POWER5 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef hidden_def
#define hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6.S
index c77bb345b6..51cbc6b243 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64/POWER6 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef hidden_def
#define hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6x.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6x.S
index 389008efdd..a2d64dfeb7 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6x.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power6x.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64/POWER6X version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef hidden_def
#define hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power7.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power7.S
index 4e388a4ba7..8585808b51 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power7.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power7.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64/POWER7 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef hidden_def
#define hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power8.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power8.S
index 83e3b3eb1a..d501878cb2 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power8.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-power8.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64/POWER7 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef hidden_def
#define hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-ppc64.S
index 9a8451a762..82e14d8b2e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan-ppc64.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC32 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,17 +16,21 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
#undef strong_alias
#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
#define __isnan __isnan_ppc64
-#undef hidden_def
-#define hidden_def(name) \
- .globl __GI___isnan ; .set __GI___isnan,__isnan_ppc64
+#ifdef SHARED
+ #undef hidden_def
+ #define hidden_def(name) \
+ .globl __GI___isnan ; .set __GI___isnan,__isnan_ppc64
+#endif
#include <sysdeps/powerpc/powerpc64/fpu/s_isnan.S>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan.c
index a614f25047..0c155130c6 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnan.c
@@ -1,5 +1,5 @@
/* Multiple versions of isnan.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,6 +16,17 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#define __isnan __redirect___isnan
+
+/* The following definitions, although not related to the 'double'
+ version of 'isnan', are required to guarantee macro expansions
+ (e.g.: from __isnanf to __redirect_isnanf) in include/math.h, thus
+ compensating for the unintended macro expansions in
+ math/bits/mathcalls-helper-functions.h. */
+#define __isnanf __redirect___isnanf
+#define __isnanl __redirect___isnanl
+#define __isnanf128 __redirect___isnanf128
+
#include <math.h>
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
@@ -27,19 +38,23 @@ extern __typeof (__isnan) __isnan_power6 attribute_hidden;
extern __typeof (__isnan) __isnan_power6x attribute_hidden;
extern __typeof (__isnan) __isnan_power7 attribute_hidden;
extern __typeof (__isnan) __isnan_power8 attribute_hidden;
+#undef __isnan
+#undef __isnanf
+#undef __isnanl
+#undef __isnanf128
-libc_ifunc (__isnan,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __isnan_power8 :
- (hwcap & PPC_FEATURE_ARCH_2_06)
- ? __isnan_power7 :
- (hwcap & PPC_FEATURE_POWER6_EXT)
- ? __isnan_power6x :
- (hwcap & PPC_FEATURE_ARCH_2_05)
- ? __isnan_power6 :
- (hwcap & PPC_FEATURE_POWER5)
- ? __isnan_power5
- : __isnan_ppc64);
+libc_ifunc_redirected (__redirect___isnan, __isnan,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __isnan_power8
+ : (hwcap & PPC_FEATURE_ARCH_2_06)
+ ? __isnan_power7
+ : (hwcap & PPC_FEATURE_POWER6_EXT)
+ ? __isnan_power6x
+ : (hwcap & PPC_FEATURE_ARCH_2_05)
+ ? __isnan_power6
+ : (hwcap & PPC_FEATURE_POWER5)
+ ? __isnan_power5
+ : __isnan_ppc64);
weak_alias (__isnan, isnan)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnanf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnanf.c
index acbc131721..e9558d554c 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnanf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_isnanf.c
@@ -1,5 +1,5 @@
/* Multiple versions of isnan.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -27,17 +27,18 @@ extern __typeof (__isnanf) __isnan_power6x attribute_hidden;
extern __typeof (__isnanf) __isnan_power7 attribute_hidden;
extern __typeof (__isnanf) __isnan_power8 attribute_hidden;
-libc_ifunc (__isnanf,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __isnan_power8 :
- (hwcap & PPC_FEATURE_ARCH_2_06)
- ? __isnan_power7 :
- (hwcap & PPC_FEATURE_POWER6_EXT)
- ? __isnan_power6x :
- (hwcap & PPC_FEATURE_ARCH_2_05)
- ? __isnan_power6 :
- (hwcap & PPC_FEATURE_POWER5)
- ? __isnan_power5
- : __isnan_ppc64);
+libc_ifunc_hidden (__isnanf, __isnanf,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __isnan_power8
+ : (hwcap & PPC_FEATURE_ARCH_2_06)
+ ? __isnan_power7
+ : (hwcap & PPC_FEATURE_POWER6_EXT)
+ ? __isnan_power6x
+ : (hwcap & PPC_FEATURE_ARCH_2_05)
+ ? __isnan_power6
+ : (hwcap & PPC_FEATURE_POWER5)
+ ? __isnan_power5
+ : __isnan_ppc64);
+hidden_def (__isnanf)
weak_alias (__isnanf, isnanf)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power6x.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power6x.S
index 7968fb7b78..3de77d5c7a 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power6x.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power6x.S
@@ -1,5 +1,5 @@
/* Round double to long int. PowerPC64/POWER6X default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power8.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power8.S
index b87482e443..619527207f 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power8.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-power8.S
@@ -1,5 +1,5 @@
/* Round double to long int. PowerPC64/POWER6X default version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-ppc64.S
index f9c77abf4a..093726628d 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint-ppc64.S
@@ -1,5 +1,5 @@
/* Round double to long int. PowerPC32 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint.c
index 2a21a510e9..59f8c2bd95 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrint.c
@@ -1,5 +1,5 @@
/* Multiple versions of llrint.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -27,6 +27,7 @@
#undef __lrint
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-double.h>
extern __typeof (__llrint) __llrint_ppc64 attribute_hidden;
extern __typeof (__llrint) __llrint_power6x attribute_hidden;
@@ -39,22 +40,8 @@ libc_ifunc (__llrint,
? __llrint_power6x
: __llrint_ppc64);
-weak_alias (__llrint, llrint)
-#ifdef NO_LONG_DOUBLE
-strong_alias (__llrint, __llrintl)
-weak_alias (__llrint, llrintl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __llrint, llrintl, GLIBC_2_1);
-#endif
+libm_alias_double (__llrint, llrint)
/* long has the same width as long long on PowerPC64. */
strong_alias (__llrint, __lrint)
-weak_alias (__lrint, lrint)
-#ifdef NO_LONG_DOUBLE
-strong_alias (__lrint, __lrintl)
-weak_alias (__lrint, lrintl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __lrint, lrintl, GLIBC_2_1);
-#endif
+libm_alias_double (__lrint, lrint)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrintf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrintf.c
new file mode 100644
index 0000000000..919b5de03a
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llrintf.c
@@ -0,0 +1,47 @@
+/* Multiple versions of llrintf.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+/* Redefine lrintf/__lrintf so that the compiler won't complain about the type
+ mismatch with the IFUNC selector in strong_alias below. */
+#define lrintf __hidden_lrintf
+#define __lrintf __hidden___lrintf
+
+#include <math.h>
+#undef lrintf
+#undef __lrintf
+#include "init-arch.h"
+#include <libm-alias-float.h>
+
+extern __typeof (__llrintf) __llrint_ppc64 attribute_hidden;
+extern __typeof (__llrintf) __llrint_power6x attribute_hidden;
+extern __typeof (__llrintf) __llrint_power8 attribute_hidden;
+
+/* The ppc64 ABI passes float and double parameters in 64bit floating point
+ registers (at least up to a point) as IEEE binary64 format, so effectively
+ of "double" type. Both l[l]rint and l[l]rintf return long type. So these
+ functions have identical signatures and functionality, and can use a
+ single implementation. */
+libc_ifunc (__llrintf,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __llrint_power8 :
+ (hwcap & PPC_FEATURE_POWER6_EXT)
+ ? __llrint_power6x
+ : __llrint_ppc64);
+
+libm_alias_float (__llrint, llrint)
+strong_alias (__llrintf, __lrintf)
+libm_alias_float (__lrint, lrint)
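
The comment in the new file spells out why one implementation can back all four entry points: the argument arrives as binary64 in an FPR for both float and double, and l[l]rint/l[l]rintf all return a 64-bit integer on powerpc64. The C-level equivalent of that reuse is just widening the argument, which cannot change the rounded result (hypothetical wrapper, shown for illustration):

#include <math.h>

static long long
llrintf_via_double (float x)
{
  /* float -> double is exact, so rounding the double under the current
     rounding mode yields the same integer llrintf would return.  */
  return llrint ((double) x);
}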
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power5+.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power5+.S
index 3cb1c0c105..448ec22478 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power5+.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power5+.S
@@ -1,5 +1,5 @@
/* llround(). PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(name, alias)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power6x.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power6x.S
index 21698393fb..9110148419 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power6x.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power6x.S
@@ -1,5 +1,5 @@
/* llround(). PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(name, alias)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power8.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power8.S
index bc3a1e5591..3bbb5924ee 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power8.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-power8.S
@@ -1,5 +1,5 @@
/* llround(). PowerPC64 default version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(name, alias)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-ppc64.S
index 85415a0d51..c098210ade 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround-ppc64.S
@@ -1,5 +1,5 @@
/* llround(). PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef compat_symbol
#define compat_symbol(a,b,c,d)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround.c
index 423bdce727..07ae196178 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llround.c
@@ -1,5 +1,5 @@
/* Multiple versions of llround.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,6 +23,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-double.h>
extern __typeof (__llround) __llround_ppc64 attribute_hidden;
extern __typeof (__llround) __llround_power5plus attribute_hidden;
@@ -38,26 +39,10 @@ libc_ifunc (__llround,
? __llround_power5plus
: __llround_ppc64);
-weak_alias (__llround, llround)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__llround, llroundl)
-strong_alias (__llround, __llroundl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __llround, llroundl, GLIBC_2_1);
-compat_symbol (libm, llround, lroundl, GLIBC_2_1);
-#endif
+libm_alias_double (__llround, llround)
/* long has the same width as long long on PPC64. */
#undef lround
#undef __lround
strong_alias (__llround, __lround)
-weak_alias (__llround, lround)
-#ifdef NO_LONG_DOUBLE
-strong_alias (__llround, __llroundl)
-weak_alias (__llround, llroundl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __lround, lroundl, GLIBC_2_1);
-#endif
+libm_alias_double (__lround, lround)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llroundf-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llroundf-ppc64.S
new file mode 100644
index 0000000000..1af420a1d9
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llroundf-ppc64.S
@@ -0,0 +1,31 @@
+/* llroundf(). PowerPC64 default version.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <shlib-compat.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __llroundf __llroundf_ppc64
+#define __lroundf __lroundf_ppc64
+
+#include <sysdeps/powerpc/powerpc64/fpu/s_llroundf.S>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llroundf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llroundf.c
new file mode 100644
index 0000000000..792ae1dc5e
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_llroundf.c
@@ -0,0 +1,47 @@
+/* Multiple versions of llroundf.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+/* Redefine lroundf/__lroundf so that the compiler won't complain about
+ the type mismatch with the IFUNC selector in strong_alias below. */
+#define lroundf __hidden_lroundf
+#define __lroundf __hidden___lroundf
+
+#include <math.h>
+#undef lroundf
+#undef __lroundf
+#include "init-arch.h"
+#include <libm-alias-float.h>
+
+extern __typeof (__llroundf) __llroundf_ppc64 attribute_hidden;
+extern __typeof (__llroundf) __llround_power6x attribute_hidden;
+extern __typeof (__llroundf) __llround_power8 attribute_hidden;
+
+/* The ppc64 ABI passes float and double parameters in 64bit floating point
+ registers (at least up to a point) as IEEE binary64 format, so effectively
+ of "double" type. Both l[l]round and l[l]roundf return long type. So these
+ functions have identical signatures and functionality, and can use a
+ single implementation. */
+libc_ifunc (__llroundf,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __llround_power8 :
+ (hwcap & PPC_FEATURE_POWER6_EXT)
+ ? __llround_power6x
+ : __llroundf_ppc64);
+
+libm_alias_float (__llround, llround)
+strong_alias (__llroundf, __lroundf)
+libm_alias_float (__lround, lround)
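
The #define/#undef dance at the top of this file (and of s_llrintf.c above) exists because strong_alias declares the alias with the target's type: __lroundf would be declared as returning long long while <math.h> has already declared it as returning long. Both types are 64 bits wide on powerpc64, so the resulting symbol is ABI-correct; only the C-level prototypes clash. Hiding the header prototypes before they are seen removes the conflict. A stripped-down reproduction of the pattern, with a hypothetical implementation name and assuming strong_alias expands to a __typeof plus alias-attribute declaration (as libc-symbols.h defines it):

/* Hide the <math.h> prototypes of the names we are about to alias.  */
#define lroundf hidden_lroundf
#define __lroundf hidden___lroundf
#include <math.h>
#undef lroundf
#undef __lroundf

/* Stand-in for the real IFUNC'd implementation.  */
long long
demo_llroundf (float x)
{
  return __builtin_llroundf (x);
}

/* strong_alias-style declaration: __lroundf takes demo_llroundf's type
   (returning long long).  Without the renaming above this would clash
   with the long-returning prototype from <math.h>.  Defining a symbol
   with this reserved name is for demonstration only.  */
extern __typeof (demo_llroundf) __lroundf
  __attribute__ ((alias ("demo_llroundf")));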
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-power7.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-power7.c
index 769dc2346a..504603a242 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-power7.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-power7.c
@@ -1,5 +1,5 @@
/* logb(). PowerPC64/POWER7 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-ppc64.c
index 1f530b547d..0f1e4fc76b 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb-ppc64.c
@@ -1,5 +1,5 @@
/* logb(). PowerPC32/POWER7 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,11 +18,6 @@
#include <math.h>
-#undef weak_alias
-#define weak_alias(a, b)
-#undef strong_alias
-#define strong_alias(a, b)
-
#define __logb __logb_ppc64
#include <sysdeps/ieee754/dbl-64/s_logb.c>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb.c
index f7bc72d3eb..b4a72c81fd 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logb.c
@@ -1,5 +1,5 @@
/* Multiple versions of logb.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-double.h>
extern __typeof (__logb) __logb_ppc64 attribute_hidden;
extern __typeof (__logb) __logb_power7 attribute_hidden;
@@ -29,13 +30,4 @@ libc_ifunc (__logb,
? __logb_power7
: __logb_ppc64);
-weak_alias (__logb, logb)
-
-#ifdef NO_LONG_DOUBLE
-strong_alias (__logb, __logbl)
-weak_alias (__logb, logbl)
-#endif
-
-#if LONG_DOUBLE_COMPAT (libm, GLIBC_2_0)
-compat_symbol (libm, logb, logbl, GLIBC_2_0);
-#endif
+libm_alias_double (__logb, logb)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-power7.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-power7.c
index 4c80d7ff4d..fee93e2352 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-power7.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-power7.c
@@ -1,5 +1,5 @@
/* logb(). PowerPC64/POWER7 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-ppc64.c
index 42630633b0..fe197b7e9b 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf-ppc64.c
@@ -1,5 +1,5 @@
/* logbf(). PowerPC64 default implementation.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf.c
index 3fc948b10d..48e7d410e1 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbf.c
@@ -1,5 +1,5 @@
/* Multiple versions of logbf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-float.h>
extern __typeof (__logbf) __logbf_ppc64 attribute_hidden;
extern __typeof (__logbf) __logbf_power7 attribute_hidden;
@@ -29,4 +30,4 @@ libc_ifunc (__logbf,
? __logbf_power7
: __logbf_ppc64);
-weak_alias (__logbf, logbf)
+libm_alias_float (__logb, logb)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-power7.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-power7.c
index e04e673056..eb08b5fe5e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-power7.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-power7.c
@@ -1,5 +1,5 @@
/* logb(). PowerPC64/POWER7 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-ppc64.c
index e16a721cfc..62d8c95783 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl-ppc64.c
@@ -1,5 +1,5 @@
/* logbl(). PowerPC64/POWER7 version.
- Copyright (C) 2012-2016 Free Software Foundation, Inc.
+ Copyright (C) 2012-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl.c
index 4b86b39635..f677c2aa69 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_logbl.c
@@ -1,5 +1,5 @@
/* Multiple versions of logbl.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-power5+.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-power5+.c
index 24fe354860..c7a382445a 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-power5+.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-power5+.c
@@ -1,5 +1,5 @@
/* PowerPC/POWER5+ implementation for modf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-ppc64.c
index 9758e88d71..c4aabe8f6e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf-ppc64.c
@@ -1,5 +1,5 @@
/* PowerPC64 default implementation for modf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,11 +19,6 @@
#include <math.h>
#include <math_ldbl_opt.h>
-#undef weak_alias
-#define weak_alias(a,b)
-#undef strong_alias
-#define strong_alias(a,b)
-
#define __modf __modf_ppc64
#include <sysdeps/ieee754/dbl-64/s_modf.c>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf.c
index 2af1959ad5..1fe7408cf3 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modf.c
@@ -1,5 +1,5 @@
/* Multiple versions of modf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-double.h>
extern __typeof (__modf) __modf_ppc64 attribute_hidden;
extern __typeof (__modf) __modf_power5plus attribute_hidden;
@@ -29,16 +30,8 @@ libc_ifunc (__modf,
? __modf_power5plus
: __modf_ppc64);
-weak_alias (__modf, modf)
+libm_alias_double (__modf, modf)
-#ifdef NO_LONG_DOUBLE
-strong_alias (__modf, __modfl)
-weak_alias (__modf, modfl)
-#endif
-#if IS_IN (libm)
-# if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __modf, modfl, GLIBC_2_0);
-# endif
-#elif LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+#if LONG_DOUBLE_COMPAT (libc, GLIBC_2_0)
compat_symbol (libc, __modf, modfl, GLIBC_2_0);
#endif
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-power5+.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-power5+.c
index cbac75a97b..1c23444a21 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-power5+.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-power5+.c
@@ -1,5 +1,5 @@
/* PowerPC/POWER5+ implementation for modff.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-ppc64.c
index 521bc0841d..0b1b289efd 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff-ppc64.c
@@ -1,5 +1,5 @@
/* PowerPC64 default implementation for modff.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff.c
index 1163d1c1cf..efa96a799b 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_modff.c
@@ -1,5 +1,5 @@
/* Multiple versions of modff.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,7 @@
#include <math.h>
#include "init-arch.h"
+#include <libm-alias-float.h>
extern __typeof (__modff) __modff_ppc64 attribute_hidden;
extern __typeof (__modff) __modff_power5plus attribute_hidden;
@@ -27,4 +28,4 @@ libc_ifunc (__modff,
? __modff_power5plus
: __modff_ppc64);
-weak_alias (__modff, modff)
+libm_alias_float (__modf, modf)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-power5+.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-power5+.S
index edcc60242f..67c001005a 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-power5+.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-power5+.S
@@ -1,5 +1,5 @@
/* round function. PowerPC64/power5+ version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-ppc64.S
index 3a6bb790d7..43cf13ab38 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round-ppc64.S
@@ -1,5 +1,5 @@
/* round function. PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round.c
index 0408dc9e0b..bfa4d8119e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_round.c
@@ -1,5 +1,5 @@
/* Multiple versions of round.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-double.h>
extern __typeof (__round) __round_ppc64 attribute_hidden;
extern __typeof (__round) __round_power5plus attribute_hidden;
@@ -29,12 +30,4 @@ libc_ifunc (__round,
? __round_power5plus
: __round_ppc64);
-weak_alias (__round, round)
-
-#ifdef NO_LONG_DOUBLE
-strong_alias (__round, __roundl)
-weak_alias (__round, roundl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __round, roundl, GLIBC_2_0);
-#endif
+libm_alias_double (__round, round)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-power5+.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-power5+.S
index 970cf7056c..6daf4c9f12 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-power5+.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-power5+.S
@@ -1,5 +1,5 @@
/* roundf function. PowerPC64/power5+ version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-ppc64.S
index ef05017e50..9500b278cf 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf-ppc64.S
@@ -1,5 +1,5 @@
/* roundf function. PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf.c
index 6d33611a88..3d77156846 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_roundf.c
@@ -1,5 +1,5 @@
/* Multiple versions of roundf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-float.h>
extern __typeof (__roundf) __roundf_ppc64 attribute_hidden;
extern __typeof (__roundf) __roundf_power5plus attribute_hidden;
@@ -29,4 +30,4 @@ libc_ifunc (__roundf,
? __roundf_power5plus
: __roundf_ppc64);
-weak_alias (__roundf, roundf)
+libm_alias_float (__round, round)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf-power8.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf-power8.S
new file mode 100644
index 0000000000..6cc058e93c
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf-power8.S
@@ -0,0 +1,24 @@
+/* sinf(). PowerPC64/POWER8 version.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+#define __sinf __sinf_power8
+
+#include <sysdeps/powerpc/powerpc64/power8/fpu/s_sinf.S>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf-ppc64.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf-ppc64.c
new file mode 100644
index 0000000000..4f0a09e040
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf-ppc64.c
@@ -0,0 +1,24 @@
+/* sinf(). PowerPC64 default version.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+#define __sinf __sinf_ppc64
+
+#include <sysdeps/powerpc/fpu/s_sinf.c>
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf.c
new file mode 100644
index 0000000000..f1d9a97eea
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_sinf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of sinf.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <math.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+#include <libm-alias-float.h>
+
+extern __typeof (__sinf) __sinf_ppc64 attribute_hidden;
+extern __typeof (__sinf) __sinf_power8 attribute_hidden;
+
+libc_ifunc (__sinf,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __sinf_power8
+ : __sinf_ppc64);
+
+libm_alias_float (__sin, sin)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-power5+.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-power5+.S
index 47c9191bc7..c3cb5d3f21 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-power5+.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-power5+.S
@@ -1,5 +1,5 @@
/* trunc function. PowerPC64/power5+ version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-ppc64.S
index b9c25e18cb..1cfe0531e8 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc-ppc64.S
@@ -1,5 +1,5 @@
/* trunc function. PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc.c
index 9d6144726d..989ffc9cf4 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc.c
@@ -1,5 +1,5 @@
/* Multiple versions of trunc.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-double.h>
extern __typeof (__trunc) __trunc_ppc64 attribute_hidden;
extern __typeof (__trunc) __trunc_power5plus attribute_hidden;
@@ -29,12 +30,4 @@ libc_ifunc (__trunc,
? __trunc_power5plus
: __trunc_ppc64);
-weak_alias (__trunc, trunc)
-
-#ifdef NO_LONG_DOUBLE
-strong_alias (__trunc, __truncl)
-weak_alias (__trunc, truncl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __trunc, truncl, GLIBC_2_0);
-#endif
+libm_alias_double (__trunc, trunc)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-power5+.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-power5+.S
index 96f08a9631..b40f00dd8e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-power5+.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-power5+.S
@@ -1,5 +1,5 @@
/* truncf function. PowerPC64/power5+ version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-ppc64.S b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-ppc64.S
index f413b0f74a..b05bbfd1e5 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf-ppc64.S
@@ -1,5 +1,5 @@
/* truncf function. PowerPC64 default version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,8 +16,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#undef weak_alias
#define weak_alias(a,b)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf.c
index f2d99109ae..fc5351b0ca 100644
--- a/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf.c
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf.c
@@ -1,5 +1,5 @@
/* Multiple versions of truncf.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#include <math_ldbl_opt.h>
#include <shlib-compat.h>
#include "init-arch.h"
+#include <libm-alias-float.h>
extern __typeof (__truncf) __truncf_ppc64 attribute_hidden;
extern __typeof (__truncf) __truncf_power5plus attribute_hidden;
@@ -29,4 +30,4 @@ libc_ifunc (__truncf,
? __truncf_power5plus
: __truncf_ppc64);
-weak_alias (__truncf, truncf)
+libm_alias_float (__trunc, trunc)
diff --git a/sysdeps/powerpc/powerpc64/fpu/multiarch/w_expf.c b/sysdeps/powerpc/powerpc64/fpu/multiarch/w_expf.c
new file mode 100644
index 0000000000..b5fe164520
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/fpu/multiarch/w_expf.c
@@ -0,0 +1 @@
+#include <sysdeps/../math/w_expf.c>
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_ceil.S b/sysdeps/powerpc/powerpc64/fpu/s_ceil.S
index 77fe0f3c96..252d94f51e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_ceil.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_ceil.S
@@ -1,5 +1,5 @@
/* ceil function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,28 +18,31 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.section ".toc","aw"
.LC0: /* 2**52 */
.tc FD_43300000_0[TC],0x4330000000000000
.section ".text"
-EALIGN (__ceil, 4, 0)
+ENTRY (__ceil, 4)
CALL_MCOUNT 0
- mffs fp11 /* Save current FPU rounding mode. */
lfd fp13,.LC0@toc(2)
fabs fp0,fp1
fsub fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
+ mffs fp11 /* Save current FPU rounding mode and
+ "inexact" state. */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr- cr7
+ bnl- cr7,.L10
mtfsfi 7,2 /* Set rounding mode toward +inf. */
ble- cr6,.L4
fadd fp1,fp1,fp13 /* x+= TWO52; */
fsub fp1,fp1,fp13 /* x-= TWO52; */
fabs fp1,fp1 /* if (x == 0.0) */
/* x = 0.0; */
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
blr
.L4:
bge- cr6,.L9 /* if (x < 0.0) */
@@ -48,16 +51,15 @@ EALIGN (__ceil, 4, 0)
fnabs fp1,fp1 /* if (x == 0.0) */
/* x = -0.0; */
.L9:
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
+ blr
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadd fp1,fp1,fp1
blr
END (__ceil)
-weak_alias (__ceil, ceil)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__ceil, ceill)
-strong_alias (__ceil, __ceill)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __ceil, ceill, GLIBC_2_0)
-#endif
+libm_alias_double (__ceil, ceil)
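
The reworked __ceil keeps the classic add-and-subtract-2^52 trick but makes three visible changes: the mffs is scheduled after the compares, the full FPSCR image is restored (mtfsf 0xff rather than 0x01) so a spurious "inexact" raised by the scaling does not leak out, and large or NaN inputs now branch to .L10, where x+x quiets a signalling NaN instead of returning it unchanged. An approximate C rendering of that control flow (illustration only; the real code keeps the sign of zero with fabs/fnabs and manipulates the FPSCR directly):

#include <fenv.h>
#include <math.h>

static double
ceil_sketch (double x)
{
  const double TWO52 = 0x1p52;

  if (!(fabs (x) < TWO52))        /* |x| >= 2^52, infinity, or NaN.  */
    return isnan (x) ? x + x : x; /* x + x turns an sNaN into a qNaN.  */

  fenv_t env;
  fegetenv (&env);                /* Save rounding mode and flags.  */
  fesetround (FE_UPWARD);
  double r;
  if (x > 0.0)
    r = fabs ((x + TWO52) - TWO52);    /* Rounds up to ceil (x).  */
  else if (x < 0.0)
    r = -fabs ((x - TWO52) + TWO52);   /* Rounds up; forces -0.0 for x in (-1, 0).  */
  else
    r = x;                             /* +/-0.0 returned unchanged.  */
  fesetenv (&env);                /* Restore mode, drop "inexact".  */
  return r;
}

The same pattern (with TWO23 and round-toward-minus-infinity where appropriate) applies to the __ceilf, __floor and __floorf changes below.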
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_ceilf.S b/sysdeps/powerpc/powerpc64/fpu/s_ceilf.S
index 91d4a25ae1..3c62077c14 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_ceilf.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_ceilf.S
@@ -1,5 +1,5 @@
/* float ceil function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.section ".toc","aw"
.p2align 3
@@ -25,22 +26,24 @@
.long 0x0
.section ".text"
-EALIGN (__ceilf, 4, 0)
+ENTRY (__ceilf, 4)
CALL_MCOUNT 0
- mffs fp11 /* Save current FPU rounding mode. */
lfs fp13,.LC0@toc(2)
fabs fp0,fp1
fsubs fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO23) */
+ mffs fp11 /* Save current FPU rounding mode and
+ "inexact" state. */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr- cr7
+ bnl- cr7,.L10
mtfsfi 7,2 /* Set rounding mode toward +inf. */
ble- cr6,.L4
fadds fp1,fp1,fp13 /* x+= TWO23; */
fsubs fp1,fp1,fp13 /* x-= TWO23; */
fabs fp1,fp1 /* if (x == 0.0) */
/* x = 0.0; */
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
blr
.L4:
bge- cr6,.L9 /* if (x < 0.0) */
@@ -49,9 +52,16 @@ EALIGN (__ceilf, 4, 0)
fnabs fp1,fp1 /* if (x == 0.0) */
/* x = -0.0; */
.L9:
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
+ blr
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadds fp1,fp1,fp1
blr
END (__ceilf)
-weak_alias (__ceilf, ceilf)
+libm_alias_float (__ceil, ceil)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_copysign.S b/sysdeps/powerpc/powerpc64/fpu/s_copysign.S
index 26fe79e3c4..e40cbc7b8b 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_copysign.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_copysign.S
@@ -1,5 +1,5 @@
/* Copy a sign bit between floating-point values. PowerPC64 version.
- Copyright (C) 1997-2016 Free Software Foundation, Inc.
+ Copyright (C) 1997-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,8 +21,10 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-float.h>
+#include <libm-alias-double.h>
-ENTRY(__copysign)
+ENTRY_TOCLESS (__copysign)
CALL_MCOUNT 0
/* double [f1] copysign (double [f1] x, double [f2] y);
copysign(x,y) returns a value with the magnitude of x and
@@ -40,20 +42,12 @@ L(0): fnabs fp1,fp1
blr
END (__copysign)
-weak_alias (__copysign,copysign)
+libm_alias_double (__copysign, copysign)
/* It turns out that it's safe to use this code even for single-precision. */
-weak_alias (__copysign,copysignf)
strong_alias(__copysign,__copysignf)
+libm_alias_float (__copysign, copysign)
-#ifdef NO_LONG_DOUBLE
-weak_alias (__copysign,copysignl)
-strong_alias(__copysign,__copysignl)
-#endif
-#if IS_IN (libm)
-# if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __copysign, copysignl, GLIBC_2_0)
-# endif
-#elif LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+#if LONG_DOUBLE_COMPAT (libc, GLIBC_2_0)
compat_symbol (libc, __copysign, copysignl, GLIBC_2_0)
#endif
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_copysignl.S b/sysdeps/powerpc/powerpc64/fpu/s_copysignl.S
index 1e1b9d43e6..1dcfa33b2e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_copysignl.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_copysignl.S
@@ -1,6 +1,6 @@
/* Copy a sign bit between floating-point values.
IBM extended format long double version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,7 +20,7 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
-ENTRY(__copysignl)
+ENTRY_TOCLESS (__copysignl)
/* long double [f1,f2] copysign (long double [f1,f2] x, long double [f3,f4] y);
copysign(x,y) returns a value with the magnitude of x and
with the sign bit of y. */
@@ -30,16 +30,14 @@ ENTRY(__copysignl)
blt L(0)
fmr fp0,fp1
fabs fp1,fp1
- fcmpu cr1,fp0,fp1
- beqlr cr1
- fneg fp2,fp2
+ fneg fp3,fp2
+ fsel fp2,fp0,fp2,fp3
blr
L(0):
fmr fp0,fp1
fnabs fp1,fp1
- fcmpu cr1,fp0,fp1
- beqlr cr1
- fneg fp2,fp2
+ fneg fp3,fp2
+ fsel fp2,fp0,fp3,fp2
blr
END (__copysignl)
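
The __copysignl change (and the matching __fabsl change below) drops the fcmpu/beqlr/fneg compare-and-branch in favour of an unconditional fneg plus fsel. fsel FRT,FRA,FRC,FRB writes FRC when FRA >= 0.0 and FRB otherwise (NaN selects FRB, and -0 counts as >= 0), so the low half of the IBM long double is negated exactly when the high half's sign was flipped, without a branch. A C-level sketch of the "result positive" path, with made-up names:

/* fsel-style select: c when a >= 0.0 (including -0.0), b otherwise
   (including NaN a, which is not >= 0).  */
static inline double
fsel_sketch (double a, double c, double b)
{
  return a >= 0.0 ? c : b;
}

/* copysignl, sign-of-y-positive path: hi/lo are the two doubles that
   make up an IBM long double.  */
static void
copysignl_pos_sketch (double *hi, double *lo)
{
  double old_hi = *hi;
  *hi = __builtin_fabs (*hi);
  /* Flip the low part iff fabs flipped the high part's sign.  */
  *lo = fsel_sketch (old_hi, *lo, -*lo);
}

The branchless form avoids a data-dependent branch on the sign of the input, which is typically cheaper on the out-of-order POWER pipelines.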
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_fabs.S b/sysdeps/powerpc/powerpc64/fpu/s_fabs.S
deleted file mode 100644
index 53d21301ee..0000000000
--- a/sysdeps/powerpc/powerpc64/fpu/s_fabs.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <math_ldbl_opt.h>
-#include <sysdeps/powerpc/fpu/s_fabs.S>
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __fabs, fabsl, GLIBC_2_0)
-#endif
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_fabsl.S b/sysdeps/powerpc/powerpc64/fpu/s_fabsl.S
index 0462d20630..04936949ac 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_fabsl.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_fabsl.S
@@ -1,6 +1,6 @@
/* Copy a sign bit between floating-point values.
IBM extended format long double version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,15 +20,14 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
-ENTRY(__fabsl)
+ENTRY_TOCLESS (__fabsl)
/* long double [f1,f2] fabs (long double [f1,f2] x);
fabs(x,y) returns a value with the magnitude of x and
with the sign bit of y. */
fmr fp0,fp1
fabs fp1,fp1
- fcmpu cr1,fp0,fp1
- beqlr cr1
- fneg fp2,fp2
+ fneg fp3,fp2
+ fsel fp2,fp0,fp2,fp3
blr
END (__fabsl)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_fdim.c b/sysdeps/powerpc/powerpc64/fpu/s_fdim.c
deleted file mode 100644
index e34b51ee54..0000000000
--- a/sysdeps/powerpc/powerpc64/fpu/s_fdim.c
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <math_ldbl_opt.h>
-#include <sysdeps/powerpc/fpu/s_fdim.c>
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __fdim, fdiml, GLIBC_2_1);
-#endif
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_floor.S b/sysdeps/powerpc/powerpc64/fpu/s_floor.S
index afa08b70c4..f0d57c47dd 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_floor.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_floor.S
@@ -1,5 +1,5 @@
/* Floor function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,28 +18,31 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.section ".toc","aw"
.LC0: /* 2**52 */
.tc FD_43300000_0[TC],0x4330000000000000
.section ".text"
-EALIGN (__floor, 4, 0)
+ENTRY (__floor, 4)
CALL_MCOUNT 0
- mffs fp11 /* Save current FPU rounding mode. */
lfd fp13,.LC0@toc(2)
fabs fp0,fp1
fsub fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
+ mffs fp11 /* Save current FPU rounding mode and
+ "inexact" state. */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr- cr7
+ bnl- cr7,.L10
mtfsfi 7,3 /* Set rounding mode toward -inf. */
ble- cr6,.L4
fadd fp1,fp1,fp13 /* x+= TWO52; */
fsub fp1,fp1,fp13 /* x-= TWO52; */
fabs fp1,fp1 /* if (x == 0.0) */
/* x = 0.0; */
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
blr
.L4:
bge- cr6,.L9 /* if (x < 0.0) */
@@ -48,16 +51,15 @@ EALIGN (__floor, 4, 0)
fnabs fp1,fp1 /* if (x == 0.0) */
/* x = -0.0; */
.L9:
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
+ blr
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadd fp1,fp1,fp1
blr
END (__floor)
-weak_alias (__floor, floor)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__floor, floorl)
-strong_alias (__floor, __floorl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __floor, floorl, GLIBC_2_0)
-#endif
+libm_alias_double (__floor, floor)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_floorf.S b/sysdeps/powerpc/powerpc64/fpu/s_floorf.S
index 80080ca0b6..b8c70fa7de 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_floorf.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_floorf.S
@@ -1,5 +1,5 @@
/* float Floor function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.section ".toc","aw"
.p2align 3
@@ -25,22 +26,24 @@
.long 0x0
.section ".text"
-EALIGN (__floorf, 4, 0)
+ENTRY (__floorf, 4)
CALL_MCOUNT 0
- mffs fp11 /* Save current FPU rounding mode. */
lfs fp13,.LC0@toc(2)
fabs fp0,fp1
fsubs fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO23) */
+ mffs fp11 /* Save current FPU rounding mode and
+ "inexact" state. */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr- cr7
+ bnl- cr7,.L10
mtfsfi 7,3 /* Set rounding mode toward -inf. */
ble- cr6,.L4
fadds fp1,fp1,fp13 /* x+= TWO23; */
fsubs fp1,fp1,fp13 /* x-= TWO23; */
fabs fp1,fp1 /* if (x == 0.0) */
/* x = 0.0; */
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
blr
.L4:
bge- cr6,.L9 /* if (x < 0.0) */
@@ -49,9 +52,16 @@ EALIGN (__floorf, 4, 0)
fnabs fp1,fp1 /* if (x == 0.0) */
/* x = -0.0; */
.L9:
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
+ blr
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadds fp1,fp1,fp1
blr
END (__floorf)
-weak_alias (__floorf, floorf)
+libm_alias_float (__floor, floor)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_fma.S b/sysdeps/powerpc/powerpc64/fpu/s_fma.S
deleted file mode 100644
index d40695c633..0000000000
--- a/sysdeps/powerpc/powerpc64/fpu/s_fma.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <math_ldbl_opt.h>
-#include <sysdeps/powerpc/fpu/s_fma.S>
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __fma, fmal, GLIBC_2_1)
-#endif
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_fmax.S b/sysdeps/powerpc/powerpc64/fpu/s_fmax.S
deleted file mode 100644
index 69735761ab..0000000000
--- a/sysdeps/powerpc/powerpc64/fpu/s_fmax.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <math_ldbl_opt.h>
-#include <sysdeps/powerpc/fpu/s_fmax.S>
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __fmax, fmaxl, GLIBC_2_1)
-#endif
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_fmin.S b/sysdeps/powerpc/powerpc64/fpu/s_fmin.S
deleted file mode 100644
index 6d4a0a946c..0000000000
--- a/sysdeps/powerpc/powerpc64/fpu/s_fmin.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <math_ldbl_opt.h>
-#include <sysdeps/powerpc/fpu/s_fmin.S>
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __fmin, fminl, GLIBC_2_1)
-#endif
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_isnan.S b/sysdeps/powerpc/powerpc64/fpu/s_isnan.S
index 8169ae9cff..12d23969ee 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_isnan.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_isnan.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64 version.
- Copyright (C) 2008-2016 Free Software Foundation, Inc.
+ Copyright (C) 2008-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,7 +21,7 @@
/* int __isnan(x) */
.machine power4
-EALIGN (__isnan, 4, 0)
+ENTRY_TOCLESS (__isnan, 4)
CALL_MCOUNT 0
mffs fp0
mtfsb0 4*cr6+lt /* reset_fpscr_bit (FPSCR_VE) */
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_llrint.S b/sysdeps/powerpc/powerpc64/fpu/s_llrint.S
index 2a1fee7c70..d3cc9b821e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_llrint.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_llrint.S
@@ -1,5 +1,5 @@
/* Round double to long int. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,9 +18,11 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-float.h>
+#include <libm-alias-double.h>
/* long long int[r3] __llrint (double x[fp1]) */
-ENTRY (__llrint)
+ENTRY_TOCLESS (__llrint)
CALL_MCOUNT 0
fctid fp13,fp1
stfd fp13,-16(r1)
@@ -32,16 +34,12 @@ ENTRY (__llrint)
END (__llrint)
strong_alias (__llrint, __lrint)
-weak_alias (__llrint, llrint)
-weak_alias (__lrint, lrint)
-
-#ifdef NO_LONG_DOUBLE
-strong_alias (__llrint, __llrintl)
-weak_alias (__llrint, llrintl)
-strong_alias (__lrint, __lrintl)
-weak_alias (__lrint, lrintl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __llrint, llrintl, GLIBC_2_1)
-compat_symbol (libm, __lrint, lrintl, GLIBC_2_1)
-#endif
+libm_alias_double (__llrint, llrint)
+libm_alias_double (__lrint, lrint)
+/* The double version also works for single-precision as both float and
+ double parameters are passed in 64bit FPRs and both versions are expected
+ to return [long] long type. */
+strong_alias (__llrint, __llrintf)
+libm_alias_float (__llrint, llrint)
+strong_alias (__lrint, __lrintf)
+libm_alias_float (__lrint, lrint)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_llrintf.S b/sysdeps/powerpc/powerpc64/fpu/s_llrintf.S
index ac257ede53..ba7752311d 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_llrintf.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_llrintf.S
@@ -1,36 +1 @@
-/* Round double to long int. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-
-/* long long int[r3] __llrintf (float x[fp1]) */
-ENTRY (__llrintf)
- CALL_MCOUNT 0
- fctid fp13,fp1
- stfd fp13,-16(r1)
- nop /* Insure the following load is in a different dispatch group */
- nop /* to avoid pipe stall on POWER4&5. */
- nop
- ld r3,-16(r1)
- blr
- END (__llrintf)
-
-strong_alias (__llrintf, __lrintf)
-weak_alias (__llrintf, llrintf)
-weak_alias (__lrintf, lrintf)
-
+/* __llrintf is in s_llrint.S. */
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_llround.S b/sysdeps/powerpc/powerpc64/fpu/s_llround.S
index 19d9c7de45..8efb29859b 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_llround.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_llround.S
@@ -1,5 +1,5 @@
/* llround function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,7 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.section ".toc","aw"
.LC0: /* 2^52 */
@@ -81,16 +82,5 @@ ENTRY (__llround)
END (__llround)
strong_alias (__llround, __lround)
-weak_alias (__llround, llround)
-weak_alias (__lround, lround)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__llround, llroundl)
-strong_alias (__llround, __llroundl)
-weak_alias (__lround, lroundl)
-strong_alias (__lround, __lroundl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __llround, llroundl, GLIBC_2_1)
-compat_symbol (libm, __lround, lroundl, GLIBC_2_1)
-#endif
+libm_alias_double (__llround, llround)
+libm_alias_double (__lround, lround)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_llroundf.S b/sysdeps/powerpc/powerpc64/fpu/s_llroundf.S
index 50bbdb1bb3..613469602e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_llroundf.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_llroundf.S
@@ -1,5 +1,5 @@
/* llroundf function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.section ".toc","aw"
.LC0: /* 2^23 */
@@ -83,6 +84,6 @@ ENTRY (__llroundf)
END (__llroundf)
strong_alias (__llroundf, __lroundf)
-weak_alias (__llroundf, llroundf)
-weak_alias (__lroundf, lroundf)
+libm_alias_float (__llround, llround)
+libm_alias_float (__lround, lround)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_nearbyint.S b/sysdeps/powerpc/powerpc64/fpu/s_nearbyint.S
index 93d24652e1..7d89e78f88 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_nearbyint.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_nearbyint.S
@@ -1,5 +1,5 @@
/* Round to int floating-point values. PowerPC64 version.
- Copyright (C) 2011-2016 Free Software Foundation, Inc.
+ Copyright (C) 2011-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Adhemerval Zanella <azanella@br.ibm.com>, 2011
@@ -22,6 +22,7 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
/* double [fp1] nearbyint(double [fp1] x) */
@@ -31,12 +32,12 @@
.tc FD_43300000_0[TC],0x4330000000000000
.section ".text"
-EALIGN (__nearbyint, 4, 0)
+ENTRY (__nearbyint, 4)
CALL_MCOUNT 0
fabs fp0,fp1
lfd fp13,.LC0@toc(2)
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
- bgelr cr7
+ bge cr7,.L10
fsub fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp1,fp12 /* if (x > 0.0) */
ble cr7, L(lessthanzero)
@@ -56,14 +57,12 @@ L(lessthanzero):
fnabs fp1,fp1 /* if (x == 0.0) */
mtfsf 0xff,fp11 /* Restore FE_INEXACT state. */
blr /* x = -0.0; */
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadd fp1,fp1,fp1
+ blr
END (__nearbyint)
-weak_alias (__nearbyint, nearbyint)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__nearbyint, nearbyint)
-strong_alias (__nearbyint, __nearbyintl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __nearbyint, nearbyintl, GLIBC_2_1)
-#endif
+libm_alias_double (__nearbyint, nearbyint)
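
A rough C model of the new .L10 path (illustrative only): once |x| is at
least 2^52 the value is already integral, but a signaling NaN still has to be
quieted before it is returned, which the self-addition does while raising the
required invalid-operation exception.

  /* Sketch of the large-magnitude/NaN path added above.  */
  double
  nearbyint_large (double x)
  {
    if (x == x)        /* not a NaN: already integral, return unchanged */
      return x;
    return x + x;      /* sNaN + sNaN raises FE_INVALID and yields a qNaN */
  }
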
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_nearbyintf.S b/sysdeps/powerpc/powerpc64/fpu/s_nearbyintf.S
index 3ba53784a4..4b62ba1e8d 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_nearbyintf.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_nearbyintf.S
@@ -1,5 +1,5 @@
/* Round to int floating-point values. PowerPC64 version.
- Copyright (C) 2011-2016 Free Software Foundation, Inc.
+ Copyright (C) 2011-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Adhemerval Zanella <azanella@br.ibm.com>, 2011
@@ -21,6 +21,7 @@
when it's coded in C. */
#include <sysdep.h>
+#include <libm-alias-float.h>
/* float [fp1] nearbyintf(float [fp1]) */
@@ -32,12 +33,12 @@
.long 0x0
.section ".text"
-EALIGN (__nearbyintf, 4, 0)
+ENTRY (__nearbyintf, 4)
CALL_MCOUNT 0
fabs fp0,fp1
lfs fp13,.LC0@toc(2)
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
- bgelr cr7
+ bge cr7,.L10
fsubs fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp1,fp12 /* if (x > 0.0) */
ble cr7, L(lessthanzero)
@@ -57,6 +58,12 @@ L(lessthanzero):
fnabs fp1,fp1 /* if (x == 0.0) */
mtfsf 0xff,fp11 /* Restore FE_INEXACT state. */
blr /* x = -0.0; */
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadds fp1,fp1,fp1
+ blr
END (__nearbyintf)
-weak_alias (__nearbyintf, nearbyintf)
+libm_alias_float (__nearbyint, nearbyint)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_rint.S b/sysdeps/powerpc/powerpc64/fpu/s_rint.S
index 60c3deeb2e..a05e6dc8ca 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_rint.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_rint.S
@@ -1,5 +1,5 @@
/* Round to int floating-point values. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,20 +21,21 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.section ".toc","aw"
.LC0: /* 2**52 */
.tc FD_43300000_0[TC],0x4330000000000000
.section ".text"
-EALIGN (__rint, 4, 0)
+ENTRY (__rint, 4)
CALL_MCOUNT 0
lfd fp13,.LC0@toc(2)
fabs fp0,fp1
fsub fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr cr7
+ bnl cr7,.L10
bng cr6,.L4
fadd fp1,fp1,fp13 /* x+= TWO52; */
fsub fp1,fp1,fp13 /* x-= TWO52; */
@@ -46,14 +47,12 @@ EALIGN (__rint, 4, 0)
fadd fp1,fp1,fp13 /* x+= TWO52; */
fnabs fp1,fp1 /* if (x == 0.0) */
blr /* x = -0.0; */
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadd fp1,fp1,fp1
+ blr
END (__rint)
-weak_alias (__rint, rint)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__rint, rintl)
-strong_alias (__rint, __rintl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __rint, rintl, GLIBC_2_0)
-#endif
+libm_alias_double (__rint, rint)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_rintf.S b/sysdeps/powerpc/powerpc64/fpu/s_rintf.S
index 0b274b008a..7ac532cc8e 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_rintf.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_rintf.S
@@ -1,5 +1,5 @@
/* Round float to int floating-point values. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.section ".toc","aw"
.p2align 3
@@ -25,14 +26,14 @@
.long 0x0
.section ".text"
-EALIGN (__rintf, 4, 0)
+ENTRY (__rintf, 4)
CALL_MCOUNT 0
lfs fp13,.LC0@toc(2)
fabs fp0,fp1
fsubs fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO23) */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr cr7
+ bnl cr7,.L10
bng cr6,.L4
fadds fp1,fp1,fp13 /* x+= TWO23; */
fsubs fp1,fp1,fp13 /* x-= TWO23; */
@@ -44,7 +45,13 @@ EALIGN (__rintf, 4, 0)
fadds fp1,fp1,fp13 /* x+= TWO23; */
fnabs fp1,fp1 /* if (x == 0.0) */
blr /* x = -0.0; */
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadds fp1,fp1,fp1
+ blr
END (__rintf)
-weak_alias (__rintf, rintf)
+libm_alias_float (__rint, rint)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_round.S b/sysdeps/powerpc/powerpc64/fpu/s_round.S
index 19713b37e2..d5aa47c596 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_round.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_round.S
@@ -1,5 +1,5 @@
/* round function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,7 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.section ".toc","aw"
.LC0: /* 2**52 */
@@ -36,7 +37,7 @@
"Round toward Zero" mode and round by adding +-0.5 before rounding
to the integer value. */
-EALIGN (__round, 4, 0)
+ENTRY (__round, 4)
CALL_MCOUNT 0
lfd fp13,.LC0@toc(2)
fabs fp0,fp1
@@ -45,7 +46,7 @@ EALIGN (__round, 4, 0)
mffs fp11 /* Save current FPU rounding mode and
"inexact" state. */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr- cr7
+ bnl- cr7,.L10
mtfsfi 7,1 /* Set rounding mode toward 0. */
lfd fp10,.LC1@toc(2)
ble- cr6,.L4
@@ -68,14 +69,12 @@ EALIGN (__round, 4, 0)
mtfsf 0xff,fp11 /* Restore previous rounding mode and
"inexact" state. */
blr
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadd fp1,fp1,fp1
+ blr
END (__round)
-weak_alias (__round, round)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__round, roundl)
-strong_alias (__round, __roundl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __round, roundl, GLIBC_2_1)
-#endif
+libm_alias_double (__round, round)
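
The comment in the hunk above describes the rounding trick; this is a hedged
C outline of the same idea for |x| < 2^52 (the function name and structure
are illustrative, not the library code):

  #include <fenv.h>

  /* Round half away from zero: bias by +/-0.5 under round-toward-zero,
     then use the 2^52 add/subtract trick to drop the fraction bits.  */
  static double
  round_small (double x)
  {
    const double two52 = 0x1p52;
    int save = fegetround ();
    double r;
    fesetround (FE_TOWARDZERO);
    if (x > 0.0)
      r = ((x + 0.5) + two52) - two52;
    else if (x < 0.0)
      r = -(((-x + 0.5) + two52) - two52);
    else
      r = x;                        /* +/-0.0 is returned unchanged */
    fesetround (save);
    return r;
  }
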
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_roundf.S b/sysdeps/powerpc/powerpc64/fpu/s_roundf.S
index 8841d83497..a8db632db6 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_roundf.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_roundf.S
@@ -1,5 +1,5 @@
/* roundf function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.section ".toc","aw"
.p2align 3
@@ -37,7 +38,7 @@
"Round toward Zero" mode and round by adding +-0.5 before rounding
to the integer value. */
-EALIGN (__roundf, 4, 0)
+ENTRY (__roundf, 4)
CALL_MCOUNT 0
lfs fp13,.LC0@toc(2)
fabs fp0,fp1
@@ -46,7 +47,7 @@ EALIGN (__roundf, 4, 0)
mffs fp11 /* Save current FPU rounding mode and
"inexact" state. */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr- cr7
+ bnl- cr7,.L10
mtfsfi 7,1 /* Set rounding mode toward 0. */
lfs fp10,.LC1@toc(2)
ble- cr6,.L4
@@ -69,7 +70,13 @@ EALIGN (__roundf, 4, 0)
mtfsf 0xff,fp11 /* Restore previous rounding mode and
"inexact" state. */
blr
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadds fp1,fp1,fp1
+ blr
END (__roundf)
-weak_alias (__roundf, roundf)
+libm_alias_float (__round, round)
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_trunc.S b/sysdeps/powerpc/powerpc64/fpu/s_trunc.S
index 4ad939ebc4..eecc72764a 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_trunc.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_trunc.S
@@ -1,5 +1,5 @@
/* trunc function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,7 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.section ".toc","aw"
.LC0: /* 2**52 */
@@ -31,22 +32,24 @@
We set "round toward Zero" mode and trunc by adding +-2**52 then
subtracting +-2**52. */
-EALIGN (__trunc, 4, 0)
+ENTRY (__trunc, 4)
CALL_MCOUNT 0
- mffs fp11 /* Save current FPU rounding mode. */
lfd fp13,.LC0@toc(2)
fabs fp0,fp1
fsub fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
+ mffs fp11 /* Save current FPU rounding mode and
+ "inexact" state. */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr- cr7
+ bnl- cr7,.L10
mtfsfi 7,1 /* Set rounding toward 0 mode. */
ble- cr6,.L4
fadd fp1,fp1,fp13 /* x+= TWO52; */
fsub fp1,fp1,fp13 /* x-= TWO52; */
fabs fp1,fp1 /* if (x == 0.0) */
/* x = 0.0; */
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
blr
.L4:
bge- cr6,.L9 /* if (x < 0.0) */
@@ -55,16 +58,15 @@ EALIGN (__trunc, 4, 0)
fnabs fp1,fp1 /* if (x == 0.0) */
/* x = -0.0; */
.L9:
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
+ blr
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadd fp1,fp1,fp1
blr
END (__trunc)
-weak_alias (__trunc, trunc)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__trunc, truncl)
-strong_alias (__trunc, __truncl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __trunc, truncl, GLIBC_2_1)
-#endif
+libm_alias_double (__trunc, trunc)
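
The s_trunc.S hunk above widens the FPSCR save/restore from the rounding-mode
field alone (mtfsf 0x01) to the full register (mtfsf 0xff), so the
accumulated "inexact" flag survives the temporary rounding-mode change.  A
C-level sketch of the equivalent behaviour, assuming feholdexcept/fesetenv as
a stand-in for the mffs / mtfsf 0xff pair:

  #include <fenv.h>

  /* Truncate |x| < 2^52 without leaving a spurious FE_INEXACT behind.  */
  static double
  trunc_small (double x)
  {
    const double two52 = 0x1p52;
    fenv_t env;
    double r;
    feholdexcept (&env);            /* save rounding mode and flags */
    fesetround (FE_TOWARDZERO);
    if (x > 0.0)
      r = (x + two52) - two52;
    else if (x < 0.0)
      r = -((-x + two52) - two52);
    else
      r = x;                        /* +/-0.0 unchanged */
    fesetenv (&env);                /* restore, discarding new flags */
    return r;
  }
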
diff --git a/sysdeps/powerpc/powerpc64/fpu/s_truncf.S b/sysdeps/powerpc/powerpc64/fpu/s_truncf.S
index 3a990550d6..7796e6b26d 100644
--- a/sysdeps/powerpc/powerpc64/fpu/s_truncf.S
+++ b/sysdeps/powerpc/powerpc64/fpu/s_truncf.S
@@ -1,5 +1,5 @@
/* truncf function. PowerPC64 version.
- Copyright (C) 2004-2016 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.section ".toc","aw"
.p2align 3
@@ -32,22 +33,24 @@
We set "round toward Zero" mode and trunc by adding +-2**23 then
subtracting +-2**23. */
-EALIGN (__truncf, 4, 0)
+ENTRY (__truncf, 4)
CALL_MCOUNT 0
- mffs fp11 /* Save current FPU rounding mode. */
lfs fp13,.LC0@toc(2)
fabs fp0,fp1
fsubs fp12,fp13,fp13 /* generate 0.0 */
fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO23) */
+ mffs fp11 /* Save current FPU rounding mode and
+ "inexact" state. */
fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
- bnllr- cr7
+ bnl- cr7,.L10
mtfsfi 7,1 /* Set rounding toward 0 mode. */
ble- cr6,.L4
fadds fp1,fp1,fp13 /* x+= TWO23; */
fsubs fp1,fp1,fp13 /* x-= TWO23; */
fabs fp1,fp1 /* if (x == 0.0) */
/* x = 0.0; */
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
blr
.L4:
bge- cr6,.L9 /* if (x < 0.0) */
@@ -56,9 +59,16 @@ EALIGN (__truncf, 4, 0)
fnabs fp1,fp1 /* if (x == 0.0) */
/* x = -0.0; */
.L9:
- mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ mtfsf 0xff,fp11 /* Restore previous rounding mode and
+ "inexact" state. */
+ blr
+.L10:
+ /* Ensure sNaN input is converted to qNaN. */
+ fcmpu cr7,fp1,fp1
+ beqlr cr7
+ fadds fp1,fp1,fp1
blr
END (__truncf)
-weak_alias (__truncf, truncf)
+libm_alias_float (__trunc, trunc)
diff --git a/sysdeps/powerpc/powerpc64/hp-timing.h b/sysdeps/powerpc/powerpc64/hp-timing.h
index 1d9889b6ca..c0aa3642f6 100644
--- a/sysdeps/powerpc/powerpc64/hp-timing.h
+++ b/sysdeps/powerpc/powerpc64/hp-timing.h
@@ -1,5 +1,5 @@
/* High precision, low overhead timing functions. powerpc64 version.
- Copyright (C) 2005-2016 Free Software Foundation, Inc.
+ Copyright (C) 2005-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
diff --git a/sysdeps/powerpc/powerpc64/le/Implies b/sysdeps/powerpc/powerpc64/le/Implies
new file mode 100644
index 0000000000..a105a325f7
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64
diff --git a/sysdeps/powerpc/powerpc64/le/Implies-before b/sysdeps/powerpc/powerpc64/le/Implies-before
new file mode 100644
index 0000000000..7c20db4e97
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/Implies-before
@@ -0,0 +1,6 @@
+# On PowerPC we use the IBM extended long double format.
+ieee754/ldbl-128ibm
+ieee754/ldbl-opt
+ieee754/dbl-64
+ieee754/flt-32
+ieee754/float128
diff --git a/sysdeps/powerpc/powerpc64/le/Makefile b/sysdeps/powerpc/powerpc64/le/Makefile
new file mode 100644
index 0000000000..f59db1ca3c
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/Makefile
@@ -0,0 +1,84 @@
+# When building float128 support we need to ensure -mfloat128 is
+# passed to every object file that uses the type.
+
+# libgcc requires __tcb_parse_hwcap_and_convert_at_platform when built with
+# a binary128 type.  That symbol is provided by the loader for dynamically
+# linked executables, which forces the loader to be linked after libgcc.
+f128-loader-link = $(as-needed) $(elf-objpfx)ld.so $(no-as-needed)
+
+ifeq ($(subdir),math)
+# sqrtf128 requires emulation before POWER9.
+CPPFLAGS += -I../soft-fp
+
+# float128 requires adding a handful of extra flags.
+$(foreach suf,$(all-object-suffixes),%f128$(suf)): CFLAGS += -mfloat128
+$(foreach suf,$(all-object-suffixes),%f128_r$(suf)): CFLAGS += -mfloat128
+$(foreach suf,$(all-object-suffixes),$(objpfx)test-float128%$(suf)): CFLAGS += -mfloat128
+$(foreach suf,$(all-object-suffixes),$(objpfx)test-float64x%$(suf)): CFLAGS += -mfloat128
+$(foreach suf,$(all-object-suffixes),$(objpfx)test-ifloat128%$(suf)): CFLAGS += -mfloat128
+$(foreach suf,$(all-object-suffixes),$(objpfx)test-ifloat64x%$(suf)): CFLAGS += -mfloat128
+# Pairs of types with _Float128 / _Float64x as the wider type but not
+# the narrower one.
+f128-pairs = float32-float64x float32-float128 float64-float64x \
+ float64-float128 float32x-float64x float32x-float128
+$(foreach suf,$(all-object-suffixes),$(foreach pair,$(f128-pairs),$(objpfx)test-$(pair)%$(suf))): CFLAGS += -mfloat128
+CFLAGS-libm-test-support-float128.c += -mfloat128
+CFLAGS-libm-test-support-float64x.c += -mfloat128
+CFLAGS-test-math-iscanonical.cc += -mfloat128
+CFLAGS-test-math-iseqsig.cc += -mfloat128
+CFLAGS-test-math-issignaling.cc += -mfloat128
+CFLAGS-test-math-iszero.cc += -mfloat128
+$(foreach test, \
+ test-float128% test-ifloat128% test-float64x% test-ifloat64x% \
+ $(foreach pair,$(f128-pairs),test-$(pair)%) \
+ test-math-iscanonical test-math-iseqsig test-math-issignaling \
+ test-math-iszero, \
+ $(objpfx)$(test)): \
+ gnulib-tests += $(f128-loader-link)
+endif
+
+# Append flags to string <-> _Float128 routines.
+ifneq ($(filter $(subdir),wcsmbs stdlib),)
+$(foreach suf,$(all-object-suffixes),%f128$(suf)): CFLAGS += -mfloat128
+$(foreach suf,$(all-object-suffixes),%f128_l$(suf)): CFLAGS += -mfloat128
+$(foreach suf,$(all-object-suffixes),%f128_nan$(suf)): CFLAGS += -mfloat128
+$(foreach suf,$(all-object-suffixes),%float1282mpn$(suf)): CFLAGS += -mfloat128
+$(foreach suf,$(all-object-suffixes),%mpn2float128$(suf)): CFLAGS += -mfloat128
+CFLAGS-bug-strtod.c += -mfloat128
+CFLAGS-bug-strtod2.c += -mfloat128
+CFLAGS-tst-strtod-round.c += -mfloat128
+CFLAGS-tst-wcstod-round.c += -mfloat128
+CFLAGS-tst-strtod-nan-locale.c += -mfloat128
+CFLAGS-tst-wcstod-nan-locale.c += -mfloat128
+CFLAGS-tst-strtod6.c += -mfloat128
+CFLAGS-tst-strfrom.c += -mfloat128
+CFLAGS-tst-strfrom-locale.c += -mfloat128
+CFLAGS-strfrom-skeleton.c += -mfloat128
+CFLAGS-tst-strtod-nan-sign.c += -mfloat128
+CFLAGS-tst-wcstod-nan-sign.c += -mfloat128
+$(foreach test,bug-strtod bug-strtod2 tst-strtod-round \
+tst-wcstod-round tst-strtod6 tst-strfrom tst-strfrom-locale \
+tst-strtod-nan-locale tst-wcstod-nan-locale \
+strfrom-skeleton tst-strtod-nan-sign tst-wcstod-nan-sign, \
+$(objpfx)$(test)): gnulib-tests += $(f128-loader-link)
+
+# When building glibc with support for _Float128, the powers of ten tables in
+# fpioconst.c and in the string conversion functions must be extended. Some
+# Makefiles (e.g. wcsmbs/Makefile) override CFLAGS defined by the Makefiles in
+# sysdeps.  This is avoided by using sysdep-CFLAGS instead of CFLAGS.
+sysdep-CFLAGS += $(sysdep-CFLAGS-$(<F))
+sysdep-CFLAGS-fpioconst.c += -mfloat128
+sysdep-CFLAGS-strtod_l.c += -mfloat128
+sysdep-CFLAGS-strtof_l.c += -mfloat128
+sysdep-CFLAGS-strtold_l.c += -mfloat128
+sysdep-CFLAGS-wcstod_l.c += -mfloat128
+sysdep-CFLAGS-wcstof_l.c += -mfloat128
+sysdep-CFLAGS-wcstold_l.c += -mfloat128
+endif
+
+# Append flags to printf routines.
+ifeq ($(subdir),stdio-common)
+CFLAGS-printf_fp.c = -mfloat128
+CFLAGS-printf_fphex.c = -mfloat128
+CFLAGS-printf_size.c = -mfloat128
+endif
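
Illustrative only: the per-object and per-test -mfloat128 additions above are
needed because, with the compilers this Makefile targets, a translation unit
that so much as names the _Float128 type will not build without the flag.  A
minimal example (the file name is made up):

  /* f128-example.c -- compile with: gcc -mfloat128 -c f128-example.c  */
  _Float128
  twice_f128 (_Float128 x)
  {
    return x * 2.0f128;
  }
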
diff --git a/sysdeps/powerpc/powerpc64/le/configure b/sysdeps/powerpc/powerpc64/le/configure
new file mode 100644
index 0000000000..66bb5dcc1a
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/configure
@@ -0,0 +1,75 @@
+# This file is generated from configure.ac by Autoconf. DO NOT EDIT!
+ # Local configure fragment for sysdeps/powerpc/powerpc64le.
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC supports binary128 floating point type" >&5
+$as_echo_n "checking if $CC supports binary128 floating point type... " >&6; }
+if ${libc_cv_compiler_powerpc64le_binary128_ok+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ save_CFLAGS="$CFLAGS"
+CFLAGS="$CFLAGS -Werror -mfloat128"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+__float128 a, b, c, d, e;
+int i;
+
+__float128
+foobar (__float128 x)
+{
+ a = __builtin_nansq ("0");
+ b = __builtin_huge_valq ();
+ c = __builtin_infq ();
+ d = __builtin_fabsq (x);
+ e = __builtin_nanq ("0");
+ i = __builtin_signbit (x);
+ return __builtin_copysignq (x, x);
+}
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ libc_cv_compiler_powerpc64le_binary128_ok=yes
+else
+ libc_cv_compiler_powerpc64le_binary128_ok=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+CFLAGS="$save_CFLAGS"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_compiler_powerpc64le_binary128_ok" >&5
+$as_echo "$libc_cv_compiler_powerpc64le_binary128_ok" >&6; }
+if test "$libc_cv_compiler_powerpc64le_binary128_ok" != "yes"; then :
+ critic_missing="$critic_missing binary128 floating point type (GCC >= 6.2) is required on powerpc64le."
+fi
+
+OLD_CFLAGS="$CFLAGS"
+CFLAGS="$CFLAGS $libc_cv_cc_submachine"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the target machine is at least POWER8" >&5
+$as_echo_n "checking if the target machine is at least POWER8... " >&6; }
+if ${libc_cv_target_power8_ok+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#ifndef _ARCH_PWR8
+#error invalid target architecture
+#endif
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ libc_cv_target_power8_ok=yes
+else
+ libc_cv_target_power8_ok=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_target_power8_ok" >&5
+$as_echo "$libc_cv_target_power8_ok" >&6; }
+if test "$libc_cv_target_power8_ok" != "yes"; then :
+ critic_missing="$critic_missing POWER8 or newer is required on powerpc64le."
+fi
+CFLAGS="$OLD_CFLAGS"
+
+test -n "$critic_missing" && as_fn_error $? "*** $critic_missing" "$LINENO" 5
diff --git a/sysdeps/powerpc/powerpc64/le/configure.ac b/sysdeps/powerpc/powerpc64/le/configure.ac
new file mode 100644
index 0000000000..20a49d89e8
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/configure.ac
@@ -0,0 +1,48 @@
+GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
+# Local configure fragment for sysdeps/powerpc/powerpc64le.
+
+dnl Require binary128 floating point support on powerpc64le (available in
+dnl GCC 6.2).
+AC_CACHE_CHECK([if $CC supports binary128 floating point type],
+ libc_cv_compiler_powerpc64le_binary128_ok, [dnl
+save_CFLAGS="$CFLAGS"
+CFLAGS="$CFLAGS -Werror -mfloat128"
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+__float128 a, b, c, d, e;
+int i;
+
+__float128
+foobar (__float128 x)
+{
+ a = __builtin_nansq ("0");
+ b = __builtin_huge_valq ();
+ c = __builtin_infq ();
+ d = __builtin_fabsq (x);
+ e = __builtin_nanq ("0");
+ i = __builtin_signbit (x);
+ return __builtin_copysignq (x, x);
+}
+]])],
+ [libc_cv_compiler_powerpc64le_binary128_ok=yes],
+ [libc_cv_compiler_powerpc64le_binary128_ok=no])
+CFLAGS="$save_CFLAGS"])
+AS_IF([test "$libc_cv_compiler_powerpc64le_binary128_ok" != "yes"],
+ [critic_missing="$critic_missing binary128 floating point type (GCC >= 6.2) is required on powerpc64le."])
+
+dnl Require at least POWER8 on powerpc64le
+OLD_CFLAGS="$CFLAGS"
+CFLAGS="$CFLAGS $libc_cv_cc_submachine"
+AC_CACHE_CHECK([if the target machine is at least POWER8],
+ libc_cv_target_power8_ok, [
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+#ifndef _ARCH_PWR8
+#error invalid target architecture
+#endif
+]])],
+ [libc_cv_target_power8_ok=yes],
+ [libc_cv_target_power8_ok=no])])
+AS_IF([test "$libc_cv_target_power8_ok" != "yes"],
+ [critic_missing="$critic_missing POWER8 or newer is required on powerpc64le."])
+CFLAGS="$OLD_CFLAGS"
+
+test -n "$critic_missing" && AC_MSG_ERROR([*** $critic_missing])
diff --git a/sysdeps/powerpc/powerpc64/le/fpu/Implies b/sysdeps/powerpc/powerpc64/le/fpu/Implies
new file mode 100644
index 0000000000..c1f617b7da
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/fpu/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/fpu
diff --git a/sysdeps/powerpc/powerpc64/le/fpu/e_sqrtf128.c b/sysdeps/powerpc/powerpc64/le/fpu/e_sqrtf128.c
new file mode 100644
index 0000000000..f7c8ea33d3
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/fpu/e_sqrtf128.c
@@ -0,0 +1,56 @@
+/* soft-fp sqrt for _Float128
+ Return sqrt(a)
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <math.h>
+
+/* Unavoidable hacks since TFmode is assumed to be binary128 when
+ -mabi=ibmlongdouble is used. */
+#if __HAVE_FLOAT128_UNLIKE_LDBL
+# define TFtype KFtype
+# define TF KF
+#endif
+
+#include <soft-fp.h>
+#include <quad.h>
+
+__float128
+__ieee754_sqrtf128 (__float128 a)
+{
+ FP_DECL_EX;
+ FP_DECL_Q (A);
+ FP_DECL_Q (R);
+ __float128 r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_Q (A, a);
+ FP_SQRT_Q (R, A);
+ FP_PACK_Q (r, R);
+ FP_HANDLE_EXCEPTIONS;
+ return r;
+}
+strong_alias (__ieee754_sqrtf128, __sqrtf128_finite)
diff --git a/sysdeps/powerpc/powerpc64/le/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/Implies
new file mode 100644
index 0000000000..8d6531a174
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/le/fpu/multiarch/Makefile b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/Makefile
new file mode 100644
index 0000000000..a32f3d8b81
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/Makefile
@@ -0,0 +1,6 @@
+ifeq ($(subdir),math)
+libm-sysdep_routines += w_sqrtf128-power9 w_sqrtf128-ppc64le
+
+CFLAGS-w_sqrtf128-ppc64le.c += -mfloat128
+CFLAGS-w_sqrtf128-power9.c += -mfloat128 -mcpu=power9
+endif
diff --git a/sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128-power9.c b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128-power9.c
new file mode 100644
index 0000000000..55d93b9256
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128-power9.c
@@ -0,0 +1,35 @@
+/* POWER9 sqrt for _Float128
+ Copyright (C) 2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <math-type-macros-float128.h>
+
+#define __sqrtf128 __sqrtf128_power9
+
+#undef declare_mgen_alias
+#define declare_mgen_alias(a, b)
+
+#include <w_sqrt_template.c>
diff --git a/sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128-ppc64le.c b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128-ppc64le.c
new file mode 100644
index 0000000000..72f09b1017
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128-ppc64le.c
@@ -0,0 +1,35 @@
+/* PPC64LE sqrt for _Float128
+ Copyright (C) 2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <math-type-macros-float128.h>
+
+#define __sqrtf128 __sqrtf128_ppc64le
+
+#undef declare_mgen_alias
+#define declare_mgen_alias(a, b)
+
+#include <w_sqrt_template.c>
diff --git a/sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128.c b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128.c
new file mode 100644
index 0000000000..a44bf4f5cc
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/fpu/multiarch/w_sqrtf128.c
@@ -0,0 +1,31 @@
+/* Multiple versions of __sqrtf128.
+ Copyright (C) 2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define NO_MATH_REDIRECT
+#include <math.h>
+#include "init-arch.h"
+#include <math-type-macros-float128.h>
+
+extern __typeof (__sqrtf128) __sqrtf128_ppc64le attribute_hidden;
+extern __typeof (__sqrtf128) __sqrtf128_power9 attribute_hidden;
+
+libc_ifunc (__sqrtf128,
+ (hwcap2 & PPC_FEATURE2_ARCH_3_00)
+ ? __sqrtf128_power9
+ : __sqrtf128_ppc64le);
+declare_mgen_alias (__sqrt, sqrt)
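
Illustrative only: the libc_ifunc invocation above resolves __sqrtf128 once,
at relocation time; written as an ordinary function, the selection it encodes
is simply the following (the helper name is made up):

  /* Pick the POWER9 hardware version when ISA 3.0 is reported by hwcap2,
     otherwise the generic soft-fp based implementation.  */
  typedef _Float128 (*sqrtf128_fn) (_Float128);

  static sqrtf128_fn
  pick_sqrtf128 (unsigned long int hwcap2)
  {
    return (hwcap2 & PPC_FEATURE2_ARCH_3_00)
           ? __sqrtf128_power9
           : __sqrtf128_ppc64le;
  }
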
diff --git a/sysdeps/powerpc/powerpc64/le/fpu/sfp-machine.h b/sysdeps/powerpc/powerpc64/le/fpu/sfp-machine.h
new file mode 100644
index 0000000000..fac5dd0347
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/fpu/sfp-machine.h
@@ -0,0 +1,115 @@
+#define _FP_W_TYPE_SIZE 64
+#define _FP_W_TYPE unsigned long long
+#define _FP_WS_TYPE signed long long
+#define _FP_I_TYPE long long
+
+typedef int TItype __attribute__ ((mode (TI)));
+typedef unsigned int UTItype __attribute__ ((mode (TI)));
+
+#define TI_BITS (__CHAR_BIT__ * (int)sizeof(TItype))
+
+/* The type of the result of a floating point comparison. This must
+ match `__libgcc_cmp_return__' in GCC for the target. */
+typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
+#define CMPtype __gcc_CMPtype
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y)
+
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1)
+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1
+
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 1
+#define _FP_QNANNEGATEDP 0
+
+/* Someone please check this. */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ else \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+#define _FP_TININESS_AFTER_ROUNDING 0
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+#define __BYTE_ORDER __LITTLE_ENDIAN
+
+/* Only provide exception support if we have hardware floating point using
+ floating point registers and we can execute the mtfsf instruction. This
+ is the case when the emulation routines for IEEE 128-bit floating point
+ are used on pre-ISA 3.0 machines, which lack hardware support for that
+ type. */
+
+#ifdef __FLOAT128__
+#define ISA_BIT(x) (1LL << (63 - x))
+
+/* Use the same bits of the FPSCR. */
+# define FP_EX_INVALID ISA_BIT(34)
+# define FP_EX_OVERFLOW ISA_BIT(35)
+# define FP_EX_UNDERFLOW ISA_BIT(36)
+# define FP_EX_DIVZERO ISA_BIT(37)
+# define FP_EX_INEXACT ISA_BIT(38)
+# define FP_EX_ALL (FP_EX_INVALID | FP_EX_OVERFLOW \
+ | FP_EX_UNDERFLOW | FP_EX_DIVZERO \
+ | FP_EX_INEXACT)
+
+void __sfp_handle_exceptions (int);
+
+# define FP_HANDLE_EXCEPTIONS \
+ do { \
+ if (__builtin_expect (_fex, 0)) \
+ __sfp_handle_exceptions (_fex); \
+ } while (0);
+
+/* The FP_EX_* bits track whether the exception has occurred. This macro
+ must set the FP_EX_* bits of those exceptions which are configured to
+ trap. The FPSCR bit which indicates this is 22 ISA bits above the
+ respective FP_EX_* bit. Note, the ISA labels bits from msb to lsb,
+ so 22 ISA bits above is 22 bits below when counted from the lsb. */
+# define FP_TRAPPING_EXCEPTIONS ((_fpscr.i << 22) & FP_EX_ALL)
+
+# define FP_RND_NEAREST 0x0
+# define FP_RND_ZERO 0x1
+# define FP_RND_PINF 0x2
+# define FP_RND_MINF 0x3
+# define FP_RND_MASK 0x3
+
+# define _FP_DECL_EX \
+ union { unsigned long long i; double d; } _fpscr __attribute__ ((unused)) = \
+ { .i = FP_RND_NEAREST }
+
+#define FP_INIT_ROUNDMODE \
+ do { \
+ __asm__ __volatile__ ("mffs %0" \
+ : "=f" (_fpscr.d)); \
+ } while (0)
+
+# define FP_ROUNDMODE (_fpscr.i & FP_RND_MASK)
+#endif /* __FLOAT128__ */
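
The FP_TRAPPING_EXCEPTIONS definition above relies on a fixed 22-bit distance
between each enable bit and its status bit; a small worked check (the ISA bit
numbers are quoted here only for illustration):

  /* VE, the invalid-operation enable, is ISA bit 56, i.e. bit 63-56 = 7
     counted from the lsb; VX, the status bit behind FP_EX_INVALID, is ISA
     bit 34, i.e. bit 63-34 = 29.  Since 29 - 7 = 22, shifting the FPSCR
     image left by 22 lines every enable bit up with its FP_EX_* mask.  */
  #define ISA_BIT(x) (1LL << (63 - (x)))

  _Static_assert ((ISA_BIT (56) << 22) == ISA_BIT (34),
                  "VE shifted left by 22 must land on FP_EX_INVALID");
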
diff --git a/sysdeps/powerpc/powerpc64/le/multiarch/Implies b/sysdeps/powerpc/powerpc64/le/multiarch/Implies
new file mode 100644
index 0000000000..30edcf7f9d
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/multiarch
diff --git a/sysdeps/powerpc/powerpc64/le/power7/Implies b/sysdeps/powerpc/powerpc64/le/power7/Implies
new file mode 100644
index 0000000000..5763968694
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power7/Implies
@@ -0,0 +1,13 @@
+powerpc/powerpc64/power7
+powerpc/powerpc64/power6/fpu
+powerpc/powerpc64/power6
+powerpc/powerpc64/power5+/fpu
+powerpc/powerpc64/power5+
+powerpc/power5+/fpu
+powerpc/power5+
+powerpc/powerpc64/power5/fpu
+powerpc/powerpc64/power5
+powerpc/powerpc64/power4
+powerpc/power4/fpu
+powerpc/power4
+powerpc/powerpc64/le
diff --git a/sysdeps/powerpc/powerpc64/le/power7/fpu/Implies b/sysdeps/powerpc/powerpc64/le/power7/fpu/Implies
new file mode 100644
index 0000000000..27eb299ef0
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power7/fpu/Implies
@@ -0,0 +1,5 @@
+powerpc/powerpc64/power7/fpu
+powerpc/powerpc64/power6/fpu
+powerpc/powerpc64/power5+/fpu
+powerpc/powerpc64/power5/fpu
+powerpc/powerpc64/le/fpu
diff --git a/sysdeps/powerpc/powerpc64/le/power7/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/le/power7/fpu/multiarch/Implies
new file mode 100644
index 0000000000..f9b3a92c33
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power7/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/le/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/le/power7/multiarch/Implies b/sysdeps/powerpc/powerpc64/le/power7/multiarch/Implies
new file mode 100644
index 0000000000..87b32a21ee
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power7/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/le/multiarch
diff --git a/sysdeps/powerpc/powerpc64/le/power8/Implies b/sysdeps/powerpc/powerpc64/le/power8/Implies
new file mode 100644
index 0000000000..6c3fc78ce1
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power8/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power8
+powerpc/powerpc64/le/power7
diff --git a/sysdeps/powerpc/powerpc64/le/power8/fpu/Implies b/sysdeps/powerpc/powerpc64/le/power8/fpu/Implies
new file mode 100644
index 0000000000..be3c5157b9
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power8/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power8/fpu
+powerpc/powerpc64/le/power7/fpu
diff --git a/sysdeps/powerpc/powerpc64/le/power8/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/le/power8/fpu/multiarch/Implies
new file mode 100644
index 0000000000..36c05ff34a
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power8/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/le/power7/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/le/power8/multiarch/Implies b/sysdeps/powerpc/powerpc64/le/power8/multiarch/Implies
new file mode 100644
index 0000000000..0057194b12
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power8/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/le/power7/multiarch
diff --git a/sysdeps/powerpc/powerpc64/le/power9/Implies b/sysdeps/powerpc/powerpc64/le/power9/Implies
new file mode 100644
index 0000000000..8c0cfd7c2e
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power9/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power9
+powerpc/powerpc64/le/power8
diff --git a/sysdeps/powerpc/powerpc64/le/power9/fpu/Implies b/sysdeps/powerpc/powerpc64/le/power9/fpu/Implies
new file mode 100644
index 0000000000..8b7c0b4af6
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power9/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/power9/fpu
+powerpc/powerpc64/le/power8/fpu
diff --git a/sysdeps/powerpc/powerpc64/le/power9/fpu/e_sqrtf128.c b/sysdeps/powerpc/powerpc64/le/power9/fpu/e_sqrtf128.c
new file mode 100644
index 0000000000..76ab451dbb
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power9/fpu/e_sqrtf128.c
@@ -0,0 +1,36 @@
+/* POWER9 sqrt for _Float128
+ Return sqrt(a)
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+__float128
+__ieee754_sqrtf128 (__float128 a)
+{
+ __float128 z;
+ asm ("xssqrtqp %0,%1" : "=v" (z) : "v" (a));
+ return z;
+}
+strong_alias (__ieee754_sqrtf128, __sqrtf128_finite)
diff --git a/sysdeps/powerpc/powerpc64/le/power9/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/le/power9/fpu/multiarch/Implies
new file mode 100644
index 0000000000..dce5bca13e
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power9/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/le/power8/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/le/power9/multiarch/Implies b/sysdeps/powerpc/powerpc64/le/power9/multiarch/Implies
new file mode 100644
index 0000000000..98efd68e54
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power9/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/le/power8/multiarch
diff --git a/sysdeps/powerpc/powerpc64/lshift.S b/sysdeps/powerpc/powerpc64/lshift.S
index 8ce7a7644d..8b6396ee6c 100644
--- a/sysdeps/powerpc/powerpc64/lshift.S
+++ b/sysdeps/powerpc/powerpc64/lshift.S
@@ -1,5 +1,5 @@
/* PowerPC64 mpn_lshift -- rp[] = up[] << cnt
- Copyright (C) 2003-2016 Free Software Foundation, Inc.
+ Copyright (C) 2003-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -28,7 +28,7 @@
#define U1 r31
#define RETVAL r5
-EALIGN(__mpn_lshift, 5, 0)
+ENTRY_TOCLESS (__mpn_lshift, 5)
std U1, -8(r1)
std U0, -16(r1)
subfic TNC, CNT, 64
diff --git a/sysdeps/powerpc/powerpc64/memcpy.S b/sysdeps/powerpc/powerpc64/memcpy.S
index 4c44a124e0..ebde694a4f 100644
--- a/sysdeps/powerpc/powerpc64/memcpy.S
+++ b/sysdeps/powerpc/powerpc64/memcpy.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC64.
- Copyright (C) 2003-2016 Free Software Foundation, Inc.
+ Copyright (C) 2003-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,7 +18,7 @@
#include <sysdep.h>
-/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+/* void * [r3] memcpy (void *dst [r3], void *src [r4], size_t len [r5]);
Returns 'dst'.
Memcpy handles short copies (< 32-bytes) using a binary move blocks
@@ -33,7 +33,11 @@
possible when both source and destination are doubleword aligned.
Each case has an optimized unrolled loop. */
-EALIGN (memcpy, 5, 0)
+#ifndef MEMCPY
+# define MEMCPY memcpy
+#endif
+
+ENTRY_TOCLESS (MEMCPY, 5)
CALL_MCOUNT 3
cmpldi cr1,5,31
@@ -389,5 +393,5 @@ EALIGN (memcpy, 5, 0)
ld 31,-8(1)
ld 3,-16(1)
blr
-END_GEN_TB (memcpy,TB_TOCLESS)
+END_GEN_TB (MEMCPY,TB_TOCLESS)
libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc64/memset.S b/sysdeps/powerpc/powerpc64/memset.S
index 1106e77410..a36df7caa8 100644
--- a/sysdeps/powerpc/powerpc64/memset.S
+++ b/sysdeps/powerpc/powerpc64/memset.S
@@ -1,5 +1,5 @@
/* Optimized memset implementation for PowerPC64.
- Copyright (C) 1997-2016 Free Software Foundation, Inc.
+ Copyright (C) 1997-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,14 +24,18 @@
.section ".text"
.align 2
-/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
+/* void * [r3] memset (void *s [r3], int c [r4], size_t n [r5]));
Returns 's'.
The memset is done in three sizes: byte (8 bits), word (32 bits),
cache line (256 bits). There is a special case for setting cache lines
to 0, to take advantage of the dcbz instruction. */
-EALIGN (memset, 5, 0)
+#ifndef MEMSET
+# define MEMSET memset
+#endif
+
+ENTRY (MEMSET, 5)
CALL_MCOUNT 3
#define rTMP r0
@@ -244,7 +248,7 @@ L(medium_27f):
L(medium_28t):
std rCHR, -8(rMEMP)
blr
-END_GEN_TB (memset,TB_TOCLESS)
+END_GEN_TB (MEMSET,TB_TOCLESS)
libc_hidden_builtin_def (memset)
#ifndef NO_BZERO_IMPL
diff --git a/sysdeps/powerpc/powerpc64/mul_1.S b/sysdeps/powerpc/powerpc64/mul_1.S
index d1934dfc0f..953ded8028 100644
--- a/sysdeps/powerpc/powerpc64/mul_1.S
+++ b/sysdeps/powerpc/powerpc64/mul_1.S
@@ -1,6 +1,6 @@
/* PowerPC64 __mpn_mul_1 -- Multiply a limb vector with a limb and store
the result in a second limb vector.
- Copyright (C) 1999-2016 Free Software Foundation, Inc.
+ Copyright (C) 1999-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,7 +24,7 @@
#define N r5
#define VL r6
-EALIGN(__mpn_mul_1, 5, 0)
+ENTRY_TOCLESS (__mpn_mul_1, 5)
std r27, -40(r1)
std r26, -48(r1)
li r12, 0
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index 3b0e3a0117..4df6b45c4c 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -1,25 +1,35 @@
ifeq ($(subdir),string)
-sysdep_routines += memcpy-power7 memcpy-a2 memcpy-power6 memcpy-cell \
- memcpy-power4 memcpy-ppc64 memcmp-power7 memcmp-power4 \
- memcmp-ppc64 memset-power7 memset-power6 memset-power4 \
+sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
+ memcpy-cell memcpy-power4 memcpy-ppc64 \
+ memcmp-power8 memcmp-power7 memcmp-power4 memcmp-ppc64 \
+ memset-power7 memset-power6 memset-power4 \
memset-ppc64 memset-power8 \
- mempcpy-power7 mempcpy-ppc64 memchr-power7 memchr-ppc64 \
- memrchr-power7 memrchr-ppc64 rawmemchr-power7 \
- rawmemchr-ppc64 strlen-power7 strlen-ppc64 strnlen-power7 \
- strnlen-ppc64 strcasecmp-power7 strcasecmp_l-power7 \
+ mempcpy-power7 mempcpy-ppc64 \
+ memchr-power8 memchr-power7 memchr-ppc64 \
+ memrchr-power8 memrchr-power7 memrchr-ppc64 \
+ rawmemchr-power7 rawmemchr-ppc64 \
+ strlen-power7 strlen-ppc64 \
+ strnlen-power8 strnlen-power7 strnlen-ppc64 \
+ strcasecmp-power7 strcasecmp_l-power7 \
strncase-power7 strncase_l-power7 \
- strncmp-power8 strncmp-power7 strncmp-power4 strncmp-ppc64 \
- strchr-power7 strchr-ppc64 \
- strchrnul-power7 strchrnul-ppc64 \
+ strncmp-power9 strncmp-power8 strncmp-power7 \
+ strncmp-power4 strncmp-ppc64 \
+ strchr-power8 strchr-power7 strchr-ppc64 \
+ strchrnul-power8 strchrnul-power7 strchrnul-ppc64 \
strcpy-power8 strcpy-power7 strcpy-ppc64 stpcpy-power8 \
stpcpy-power7 stpcpy-ppc64 \
- strrchr-power7 strrchr-ppc64 strncat-power7 strncat-ppc64 \
+ strrchr-power8 strrchr-power7 strrchr-ppc64 \
+ strncat-power8 strncat-power7 strncat-ppc64 \
strncpy-power7 strncpy-ppc64 \
stpncpy-power8 stpncpy-power7 stpncpy-ppc64 \
- strcmp-power8 strcmp-power7 strcmp-ppc64 \
+ strcmp-power9 strcmp-power8 strcmp-power7 strcmp-ppc64 \
strcat-power8 strcat-power7 strcat-ppc64 \
memmove-power7 memmove-ppc64 wordcopy-ppc64 bcopy-ppc64 \
- strncpy-power8 strstr-power7 strstr-ppc64
+ strncpy-power8 strstr-power7 strstr-ppc64 \
+ strspn-power8 strspn-ppc64 strcspn-power8 strcspn-ppc64 \
+ strlen-power8 strcasestr-power8 strcasestr-ppc64 \
+ strcasecmp-ppc64 strcasecmp-power8 strncase-ppc64 \
+ strncase-power8
CFLAGS-strncase-power7.c += -mcpu=power7 -funroll-loops
CFLAGS-strncase_l-power7.c += -mcpu=power7 -funroll-loops
diff --git a/sysdeps/powerpc/powerpc64/multiarch/bcopy-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/bcopy-ppc64.c
index ddd9bfde5a..f28be7bf37 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/bcopy-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/bcopy-ppc64.c
@@ -1,5 +1,5 @@
/* PowerPC64 default bcopy.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/bcopy.c b/sysdeps/powerpc/powerpc64/multiarch/bcopy.c
index ff271a0859..1c4a229b1f 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/bcopy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/bcopy.c
@@ -1,5 +1,5 @@
/* PowerPC64 multiarch bcopy.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/bzero.c b/sysdeps/powerpc/powerpc64/multiarch/bzero.c
index 56073fc587..f8cb05bea8 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/bzero.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/bzero.c
@@ -1,5 +1,5 @@
/* Multiple versions of bzero. PowerPC64 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index 11a8215ee4..38a21e478e 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -1,5 +1,5 @@
/* Enumerate available IFUNC implementations of a function. PowerPC64 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -51,6 +51,8 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
#ifdef SHARED
/* Support sysdeps/powerpc/powerpc64/multiarch/memcpy.c. */
IFUNC_IMPL (i, name, memcpy,
+ IFUNC_IMPL_ADD (array, i, memcpy, hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __memcpy_power8_cached)
IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_HAS_VSX,
__memcpy_power7)
IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_ARCH_2_06,
@@ -101,6 +103,8 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strlen.c. */
IFUNC_IMPL (i, name, strlen,
+ IFUNC_IMPL_ADD (array, i, strlen, hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strlen_power8)
IFUNC_IMPL_ADD (array, i, strlen, hwcap & PPC_FEATURE_HAS_VSX,
__strlen_power7)
IFUNC_IMPL_ADD (array, i, strlen, 1,
@@ -108,6 +112,8 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strncmp.c. */
IFUNC_IMPL (i, name, strncmp,
+ IFUNC_IMPL_ADD (array, i, strncmp, hwcap2 & PPC_FEATURE2_ARCH_3_00,
+ __strncmp_power9)
IFUNC_IMPL_ADD (array, i, strncmp, hwcap2 & PPC_FEATURE2_ARCH_2_07,
__strncmp_power8)
IFUNC_IMPL_ADD (array, i, strncmp, hwcap & PPC_FEATURE_HAS_VSX,
@@ -120,6 +126,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strchr.c. */
IFUNC_IMPL (i, name, strchr,
IFUNC_IMPL_ADD (array, i, strchr,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strchr_power8)
+ IFUNC_IMPL_ADD (array, i, strchr,
hwcap & PPC_FEATURE_HAS_VSX,
__strchr_power7)
IFUNC_IMPL_ADD (array, i, strchr, 1,
@@ -128,6 +137,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strchrnul.c. */
IFUNC_IMPL (i, name, strchrnul,
IFUNC_IMPL_ADD (array, i, strchrnul,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strchrnul_power8)
+ IFUNC_IMPL_ADD (array, i, strchrnul,
hwcap & PPC_FEATURE_HAS_VSX,
__strchrnul_power7)
IFUNC_IMPL_ADD (array, i, strchrnul, 1,
@@ -136,6 +148,8 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/memcmp.c. */
IFUNC_IMPL (i, name, memcmp,
+ IFUNC_IMPL_ADD (array, i, memcmp, hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __memcmp_power8)
IFUNC_IMPL_ADD (array, i, memcmp, hwcap & PPC_FEATURE_HAS_VSX,
__memcmp_power7)
IFUNC_IMPL_ADD (array, i, memcmp, hwcap & PPC_FEATURE_POWER4,
@@ -171,6 +185,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/memchr.c. */
IFUNC_IMPL (i, name, memchr,
IFUNC_IMPL_ADD (array, i, memchr,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __memchr_power8)
+ IFUNC_IMPL_ADD (array, i, memchr,
hwcap & PPC_FEATURE_HAS_VSX,
__memchr_power7)
IFUNC_IMPL_ADD (array, i, memchr, 1,
@@ -179,6 +196,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/memrchr.c. */
IFUNC_IMPL (i, name, memrchr,
IFUNC_IMPL_ADD (array, i, memrchr,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __memrchr_power8)
+ IFUNC_IMPL_ADD (array, i, memrchr,
hwcap & PPC_FEATURE_HAS_VSX,
__memrchr_power7)
IFUNC_IMPL_ADD (array, i, memrchr, 1,
@@ -194,6 +214,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strnlen.c. */
IFUNC_IMPL (i, name, strnlen,
+ IFUNC_IMPL_ADD (array, i, strnlen,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strnlen_power8)
IFUNC_IMPL_ADD (array, i, strnlen, hwcap & PPC_FEATURE_HAS_VSX,
__strnlen_power7)
IFUNC_IMPL_ADD (array, i, strnlen, 1,
@@ -202,6 +225,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strcasecmp.c. */
IFUNC_IMPL (i, name, strcasecmp,
IFUNC_IMPL_ADD (array, i, strcasecmp,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strcasecmp_power8)
+ IFUNC_IMPL_ADD (array, i, strcasecmp,
hwcap & PPC_FEATURE_HAS_VSX,
__strcasecmp_power7)
IFUNC_IMPL_ADD (array, i, strcasecmp, 1, __strcasecmp_ppc))
@@ -217,6 +243,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strncase.c. */
IFUNC_IMPL (i, name, strncasecmp,
IFUNC_IMPL_ADD (array, i, strncasecmp,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strncasecmp_power8)
+ IFUNC_IMPL_ADD (array, i, strncasecmp,
hwcap & PPC_FEATURE_HAS_VSX,
__strncasecmp_power7)
IFUNC_IMPL_ADD (array, i, strncasecmp, 1, __strncasecmp_ppc))
@@ -265,6 +294,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strrchr.c. */
IFUNC_IMPL (i, name, strrchr,
IFUNC_IMPL_ADD (array, i, strrchr,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strrchr_power8)
+ IFUNC_IMPL_ADD (array, i, strrchr,
hwcap & PPC_FEATURE_HAS_VSX,
__strrchr_power7)
IFUNC_IMPL_ADD (array, i, strrchr, 1,
@@ -273,6 +305,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strncat.c. */
IFUNC_IMPL (i, name, strncat,
IFUNC_IMPL_ADD (array, i, strncat,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strncat_power8)
+ IFUNC_IMPL_ADD (array, i, strncat,
hwcap & PPC_FEATURE_HAS_VSX,
__strncat_power7)
IFUNC_IMPL_ADD (array, i, strncat, 1,
@@ -303,6 +338,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strcmp.c. */
IFUNC_IMPL (i, name, strcmp,
IFUNC_IMPL_ADD (array, i, strcmp,
+ hwcap2 & PPC_FEATURE2_ARCH_3_00,
+ __strcmp_power9)
+ IFUNC_IMPL_ADD (array, i, strcmp,
hwcap2 & PPC_FEATURE2_ARCH_2_07,
__strcmp_power8)
IFUNC_IMPL_ADD (array, i, strcmp,
@@ -322,6 +360,22 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
IFUNC_IMPL_ADD (array, i, strcat, 1,
__strcat_ppc))
+ /* Support sysdeps/powerpc/powerpc64/multiarch/strspn.c. */
+ IFUNC_IMPL (i, name, strspn,
+ IFUNC_IMPL_ADD (array, i, strspn,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strspn_power8)
+ IFUNC_IMPL_ADD (array, i, strspn, 1,
+ __strspn_ppc))
+
+ /* Support sysdeps/powerpc/powerpc64/multiarch/strcspn.c. */
+ IFUNC_IMPL (i, name, strcspn,
+ IFUNC_IMPL_ADD (array, i, strcspn,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strcspn_power8)
+ IFUNC_IMPL_ADD (array, i, strcspn, 1,
+ __strcspn_ppc))
+
/* Support sysdeps/powerpc/powerpc64/multiarch/strstr.c. */
IFUNC_IMPL (i, name, strstr,
IFUNC_IMPL_ADD (array, i, strstr,
@@ -331,5 +385,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
__strstr_ppc))
+ /* Support sysdeps/powerpc/powerpc64/multiarch/strcasestr.c. */
+ IFUNC_IMPL (i, name, strcasestr,
+ IFUNC_IMPL_ADD (array, i, strcasestr,
+ hwcap2 & PPC_FEATURE2_ARCH_2_07,
+ __strcasestr_power8)
+ IFUNC_IMPL_ADD (array, i, strcasestr, 1,
+ __strcasestr_ppc))
+
return i;
}
diff --git a/sysdeps/powerpc/powerpc64/multiarch/init-arch.h b/sysdeps/powerpc/powerpc64/multiarch/init-arch.h
index 98089b20f3..207add351a 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/init-arch.h
+++ b/sysdeps/powerpc/powerpc64/multiarch/init-arch.h
@@ -1,5 +1,5 @@
/* This file is part of the GNU C Library.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memchr-power7.S b/sysdeps/powerpc/powerpc64/multiarch/memchr-power7.S
index df947f5f89..33b565f1e5 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memchr-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memchr-power7.S
@@ -1,5 +1,5 @@
/* Optimized memchr implementation for PowerPC64/POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,22 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__memchr_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__memchr_power7): \
- cfi_startproc; \
- LOCALENTRY(__memchr_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__memchr_power7) \
- END_2(__memchr_power7)
+#define MEMCHR __memchr_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memchr-power8.S b/sysdeps/powerpc/powerpc64/multiarch/memchr-power8.S
new file mode 100644
index 0000000000..cb1f0face9
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/memchr-power8.S
@@ -0,0 +1,26 @@
+/* Optimized memchr implementation for PowerPC64/POWER8.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define MEMCHR __memchr_power8
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+#undef weak_alias
+#define weak_alias(name,alias)
+
+#include <sysdeps/powerpc/powerpc64/power8/memchr.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memchr-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/memchr-ppc64.c
index 896824c5ce..48cbe601eb 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memchr-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memchr-ppc64.c
@@ -1,5 +1,5 @@
/* PowerPC64 default implementation of memchr.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memchr.c b/sysdeps/powerpc/powerpc64/multiarch/memchr.c
index 71f89c4e88..cd71db972f 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memchr.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memchr.c
@@ -1,5 +1,5 @@
/* Multiple versions of memchr.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,10 +23,13 @@
extern __typeof (__memchr) __memchr_ppc attribute_hidden;
extern __typeof (__memchr) __memchr_power7 attribute_hidden;
+extern __typeof (__memchr) __memchr_power8 attribute_hidden;
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
libc_ifunc (__memchr,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __memchr_power8 :
(hwcap & PPC_FEATURE_HAS_VSX)
? __memchr_power7
: __memchr_ppc);
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcmp-power4.S b/sysdeps/powerpc/powerpc64/multiarch/memcmp-power4.S
index 1d74e37feb..26879a2b0f 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcmp-power4.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcmp-power4.S
@@ -1,5 +1,5 @@
/* Optimized memcmp implementation for PowerPC64/POWER4.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memcmp_power4) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memcmp_power4): \
- cfi_startproc; \
- LOCALENTRY(__memcmp_power4)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__memcmp_power4) \
- END_2(__memcmp_power4)
+#define MEMCMP __memcmp_power4
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcmp-power7.S b/sysdeps/powerpc/powerpc64/multiarch/memcmp-power7.S
index 9993e7266b..9715d11d8d 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcmp-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcmp-power7.S
@@ -1,5 +1,5 @@
/* Optimized memcmp implementation for PowerPC64/POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memcmp_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memcmp_power7): \
- cfi_startproc; \
- LOCALENTRY(__memcmp_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__memcmp_power7) \
- END_2(__memcmp_power7)
+#define MEMCMP __memcmp_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcmp-power8.S b/sysdeps/powerpc/powerpc64/multiarch/memcmp-power8.S
new file mode 100644
index 0000000000..c4da81c64e
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcmp-power8.S
@@ -0,0 +1,26 @@
+/* Optimized memcmp implementation for PowerPC64/POWER8.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define MEMCMP __memcmp_power8
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+#undef weak_alias
+#define weak_alias(name,alias)
+
+#include <sysdeps/powerpc/powerpc64/power8/memcmp.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcmp-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/memcmp-ppc64.c
index d88f908d12..8180ed122b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcmp-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcmp-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcmp.c b/sysdeps/powerpc/powerpc64/multiarch/memcmp.c
index e8cf6ae23d..2c7a083a65 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcmp.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcmp.c
@@ -1,5 +1,5 @@
/* Multiple versions of memcmp. PowerPC64 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,7 @@
/* Define multiple versions only for definition in libc. */
#if IS_IN (libc)
+# define memcmp __redirect_memcmp
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
@@ -25,15 +26,19 @@
extern __typeof (memcmp) __memcmp_ppc attribute_hidden;
extern __typeof (memcmp) __memcmp_power4 attribute_hidden;
extern __typeof (memcmp) __memcmp_power7 attribute_hidden;
+extern __typeof (memcmp) __memcmp_power8 attribute_hidden;
+# undef memcmp
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
-libc_ifunc (memcmp,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __memcmp_power7 :
- (hwcap & PPC_FEATURE_POWER4)
- ? __memcmp_power4
- : __memcmp_ppc);
+libc_ifunc_redirected (__redirect_memcmp, memcmp,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __memcmp_power8 :
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __memcmp_power7
+ : (hwcap & PPC_FEATURE_POWER4)
+ ? __memcmp_power4
+ : __memcmp_ppc);
#else
#include <string/memcmp.c>
#endif
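
Note: the memcmp.c hunk above switches to the redirect-then-undef pattern:
string.h is made to declare __redirect_memcmp so its prototype and attributes
do not collide with the ifunc definition of memcmp that libc_ifunc_redirected
emits. A self-contained sketch of what such an ifunc binding amounts to,
using the plain GCC attribute instead of glibc's wrapper macro; every name
below is illustrative:

    #include <stddef.h>
    #include <string.h>

    typedef int demo_memcmp_t (const void *, const void *, size_t);

    static int
    demo_memcmp_generic (const void *a, const void *b, size_t n)
    {
      return memcmp (a, b, n);
    }

    /* Runs during relocation; a real resolver would check AT_HWCAP2
       before AT_HWCAP, newest ISA level first.  */
    static demo_memcmp_t *
    demo_memcmp_resolver (void)
    {
      return demo_memcmp_generic;
    }

    demo_memcmp_t demo_memcmp __attribute__ ((ifunc ("demo_memcmp_resolver")));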
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-a2.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-a2.S
index bbe1b32033..91eddcebe9 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcpy-a2.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-a2.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC A2.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memcpy_a2) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memcpy_a2): \
- cfi_startproc; \
- LOCALENTRY(__memcpy_a2)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memcpy_a2,mask) \
- END_2(__memcpy_a2)
+#define MEMCPY __memcpy_a2
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-cell.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-cell.S
index 11bab911d4..a93f5146fb 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcpy-cell.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-cell.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC/CELL.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memcpy_cell) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memcpy_cell): \
- cfi_startproc; \
- LOCALENTRY(__memcpy_cell)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memcpy_cell,mask) \
- END_2(__memcpy_cell)
+#define MEMCPY __memcpy_cell
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power4.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power4.S
index e414819208..bc6844535b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power4.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power4.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC64/POWER4.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memcpy_power4) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memcpy_power4): \
- cfi_startproc; \
- LOCALENTRY(__memcpy_power4)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memcpy_power4,mask) \
- END_2(__memcpy_power4)
+#define MEMCPY __memcpy_power4
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power6.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power6.S
index 819f015d32..9d4c9927d9 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power6.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power6.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC/POWER6.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memcpy_power6) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memcpy_power6): \
- cfi_startproc; \
- LOCALENTRY(__memcpy_power6)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memcpy_power6,mask) \
- END_2(__memcpy_power6)
+#define MEMCPY __memcpy_power6
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power7.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power7.S
index d48b54f4c1..b24452e29c 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power7.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC/POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memcpy_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memcpy_power7): \
- cfi_startproc; \
- LOCALENTRY(__memcpy_power7)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memcpy_power7,mask) \
- END_2(__memcpy_power7)
+#define MEMCPY __memcpy_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-power8-cached.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power8-cached.S
new file mode 100644
index 0000000000..6b69e57212
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-power8-cached.S
@@ -0,0 +1,176 @@
+/* Optimized memcpy implementation for cached memory on PowerPC64/POWER8.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+ Returns 'dst'. */
+
+ .machine power8
+ENTRY_TOCLESS (__memcpy_power8_cached, 5)
+ CALL_MCOUNT 3
+
+ cmpldi cr7,r5,15
+ bgt cr7,L(ge_16)
+ andi. r9,r5,0x1
+ mr r9,r3
+ beq cr0,1f
+ lbz r10,0(r4)
+ addi r9,r3,1
+ addi r4,r4,1
+ stb r10,0(r3)
+1:
+ andi. r10,r5,0x2
+ beq cr0,2f
+ lhz r10,0(r4)
+ addi r9,r9,2
+ addi r4,r4,2
+ sth r10,-2(r9)
+2:
+ andi. r10,r5,0x4
+ beq cr0,3f
+ lwz r10,0(r4)
+ addi r9,r9,4
+ addi r4,r4,4
+ stw r10,-4(r9)
+3:
+ andi. r10,r5,0x8
+ beqlr cr0
+ ld r10,0(r4)
+ std r10,0(r9)
+ blr
+
+ .align 4
+L(ge_16):
+ cmpldi cr7,r5,32
+ ble cr7,L(ge_16_le_32)
+ cmpldi cr7,r5,64
+ ble cr7,L(gt_32_le_64)
+
+ /* Align dst to 16 bytes. */
+ andi. r9,r3,0xf
+ mr r12,r3
+ beq cr0,L(dst_is_align_16)
+ lxvd2x v0,0,r4
+ subfic r12,r9,16
+ subf r5,r12,r5
+ add r4,r4,r12
+ add r12,r3,r12
+ stxvd2x v0,0,r3
+L(dst_is_align_16):
+ cmpldi cr7,r5,127
+ ble cr7,L(tail_copy)
+ mr r9,r12
+ srdi r10,r5,7
+ li r11,16
+ li r6,32
+ li r7,48
+ mtctr r10
+ clrrdi r0,r5,7
+
+ /* Main loop, copy 128 bytes each time. */
+ .align 4
+L(copy_128):
+ lxvd2x v10,0,r4
+ lxvd2x v11,r4,r11
+ addi r8,r4,64
+ addi r10,r9,64
+ lxvd2x v12,r4,r6
+ lxvd2x v0,r4,r7
+ addi r4,r4,128
+ stxvd2x v10,0,r9
+ stxvd2x v11,r9,r11
+ stxvd2x v12,r9,r6
+ stxvd2x v0,r9,r7
+ addi r9,r9,128
+ lxvd2x v10,0,r8
+ lxvd2x v11,r8,r11
+ lxvd2x v12,r8,r6
+ lxvd2x v0,r8,r7
+ stxvd2x v10,0,r10
+ stxvd2x v11,r10,r11
+ stxvd2x v12,r10,r6
+ stxvd2x v0,r10,r7
+ bdnz L(copy_128)
+
+ add r12,r12,r0
+ rldicl r5,r5,0,57
+L(tail_copy):
+ cmpldi cr7,r5,63
+ ble cr7,L(tail_le_64)
+ li r8,16
+ li r10,32
+ lxvd2x v10,0,r4
+ li r9,48
+ addi r5,r5,-64
+ lxvd2x v11,r4,r8
+ lxvd2x v12,r4,r10
+ lxvd2x v0,r4,r9
+ addi r4,r4,64
+ stxvd2x v10,0,r12
+ stxvd2x v11,r12,r8
+ stxvd2x v12,r12,r10
+ stxvd2x v0,r12,r9
+ addi r12,r12,64
+
+L(tail_le_64):
+ cmpldi cr7,r5,32
+ bgt cr7,L(tail_gt_32_le_64)
+ cmpdi cr7,r5,0
+ beqlr cr7
+ addi r5,r5,-32
+ li r9,16
+ add r8,r4,r5
+ add r10,r12,r5
+ lxvd2x v12,r4,r5
+ lxvd2x v0,r8,r9
+ stxvd2x v12,r12,r5
+ stxvd2x v0,r10,r9
+ blr
+
+ .align 4
+L(ge_16_le_32):
+ addi r5,r5,-16
+ lxvd2x v0,0,r4
+ lxvd2x v1,r4,r5
+ stxvd2x v0,0,r3
+ stxvd2x v1,r3,r5
+ blr
+
+ .align 4
+L(gt_32_le_64):
+ mr r12,r3
+
+ .align 4
+L(tail_gt_32_le_64):
+ li r9,16
+ lxvd2x v0,0,r4
+ addi r5,r5,-32
+ lxvd2x v1,r4,r9
+ add r8,r4,r5
+ lxvd2x v2,r4,r5
+ add r10,r12,r5
+ lxvd2x v3,r8,r9
+ stxvd2x v0,0,r12
+ stxvd2x v1,r12,r9
+ stxvd2x v2,r12,r5
+ stxvd2x v3,r10,r9
+ blr
+
+END_GEN_TB (__memcpy_power8_cached,TB_TOCLESS)
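
Note: the new __memcpy_power8_cached splits copies into size classes: lengths
below 16 are handled with 1/2/4/8-byte pieces selected by the low bits of the
length, lengths up to 64 with a few possibly overlapping 16-byte VSX
loads/stores, and anything larger by aligning the destination to 16 bytes and
streaming 128 bytes per loop iteration before finishing the tail. A rough C
outline of that structure (sketch only; the large case here simply defers to
memcpy instead of the unrolled lxvd2x/stxvd2x loop):

    #include <stddef.h>
    #include <string.h>

    static void *
    memcpy_cached_sketch (void *dst, const void *src, size_t n)
    {
      unsigned char *d = dst;
      const unsigned char *s = src;

      if (n < 16)
        {
          /* Copy 1-, 2-, 4- and 8-byte pieces selected by the low bits of n.  */
          if (n & 1) { *d++ = *s++; }
          if (n & 2) { memcpy (d, s, 2); d += 2; s += 2; }
          if (n & 4) { memcpy (d, s, 4); d += 4; s += 4; }
          if (n & 8) memcpy (d, s, 8);
          return dst;
        }
      if (n <= 32)
        {
          /* Head and tail 16-byte copies that may overlap in the middle.  */
          memcpy (d, s, 16);
          memcpy (d + n - 16, s + n - 16, 16);
          return dst;
        }
      /* The assembly aligns dst to 16 bytes, copies 128 bytes per loop
         iteration with vector loads/stores, then drains the remainder
         through the 64- and 32-byte tail paths.  */
      memcpy (d, s, n);
      return dst;
    }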
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy-ppc64.S b/sysdeps/powerpc/powerpc64/multiarch/memcpy-ppc64.S
index 6e2b64fa7d..55036676b3 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcpy-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy-ppc64.S
@@ -1,5 +1,5 @@
/* Default memcpy implementation for PowerPC64.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,24 +16,8 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#if defined SHARED && IS_IN (libc)
-# undef EALIGN
-# define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memcpy_ppc) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memcpy_ppc): \
- cfi_startproc; \
- LOCALENTRY(__memcpy_ppc)
-
-# undef END_GEN_TB
-# define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memcpy_ppc,mask) \
- END_2(__memcpy_ppc)
+# define MEMCPY __memcpy_ppc
# undef libc_hidden_builtin_def
# define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
index 8549515c25..44dea594f3 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memcpy.c
@@ -1,5 +1,5 @@
/* Multiple versions of memcpy. PowerPC64 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -35,18 +35,21 @@ extern __typeof (__redirect_memcpy) __memcpy_cell attribute_hidden;
extern __typeof (__redirect_memcpy) __memcpy_power6 attribute_hidden;
extern __typeof (__redirect_memcpy) __memcpy_a2 attribute_hidden;
extern __typeof (__redirect_memcpy) __memcpy_power7 attribute_hidden;
+extern __typeof (__redirect_memcpy) __memcpy_power8_cached attribute_hidden;
libc_ifunc (__libc_memcpy,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __memcpy_power7 :
- (hwcap & PPC_FEATURE_ARCH_2_06)
- ? __memcpy_a2 :
- (hwcap & PPC_FEATURE_ARCH_2_05)
- ? __memcpy_power6 :
- (hwcap & PPC_FEATURE_CELL_BE)
- ? __memcpy_cell :
- (hwcap & PPC_FEATURE_POWER4)
- ? __memcpy_power4
+ ((hwcap2 & PPC_FEATURE2_ARCH_2_07) && use_cached_memopt)
+ ? __memcpy_power8_cached :
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __memcpy_power7 :
+ (hwcap & PPC_FEATURE_ARCH_2_06)
+ ? __memcpy_a2 :
+ (hwcap & PPC_FEATURE_ARCH_2_05)
+ ? __memcpy_power6 :
+ (hwcap & PPC_FEATURE_CELL_BE)
+ ? __memcpy_cell :
+ (hwcap & PPC_FEATURE_POWER4)
+ ? __memcpy_power4
: __memcpy_ppc);
#undef memcpy
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memmove-power7.S b/sysdeps/powerpc/powerpc64/multiarch/memmove-power7.S
index 5f35f16d9b..0b251d0f5f 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memmove-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memmove-power7.S
@@ -1,5 +1,5 @@
/* Optimized memmove implementation for PowerPC64/POWER7.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memmove_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memmove_power7): \
- cfi_startproc; \
- LOCALENTRY(__memmove_power7)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memmove_power7,mask) \
- END_2(__memmove_power7)
+#define MEMMOVE __memmove_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memmove-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/memmove-ppc64.c
index fc9bed3c25..a185190360 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memmove-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memmove-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memmove.c b/sysdeps/powerpc/powerpc64/multiarch/memmove.c
index 2775c2c8d4..39987155cc 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memmove.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memmove.c
@@ -1,5 +1,5 @@
/* Multiple versions of memmove. PowerPC64 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/mempcpy-power7.S b/sysdeps/powerpc/powerpc64/multiarch/mempcpy-power7.S
index de5caae4cb..bee44f0364 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/mempcpy-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/mempcpy-power7.S
@@ -1,5 +1,5 @@
/* Optimized mempcpy implementation for PowerPC/POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__mempcpy_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__mempcpy_power7): \
- cfi_startproc; \
- LOCALENTRY(__mempcpy_power7)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__mempcpy_power7,mask) \
- END_2(__mempcpy_power7)
+#define MEMPCPY __mempcpy_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/mempcpy-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/mempcpy-ppc64.c
index 27c6e877b2..a03748e8c6 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/mempcpy-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/mempcpy-ppc64.c
@@ -1,5 +1,5 @@
/* PowerPC64 default implementation of mempcpy.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/mempcpy.c b/sysdeps/powerpc/powerpc64/multiarch/mempcpy.c
index 3c77b5f1bd..35e2368fa6 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/mempcpy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/mempcpy.c
@@ -1,5 +1,5 @@
/* Multiple versions of mempcpy.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,23 +17,27 @@
<http://www.gnu.org/licenses/>. */
#if IS_IN (libc)
+# define mempcpy __redirect_mempcpy
+# define __mempcpy __redirect___mempcpy
# define NO_MEMPCPY_STPCPY_REDIRECT
+# define __NO_STRING_INLINES
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
extern __typeof (__mempcpy) __mempcpy_ppc attribute_hidden;
extern __typeof (__mempcpy) __mempcpy_power7 attribute_hidden;
+# undef mempcpy
+# undef __mempcpy
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
-libc_ifunc (__mempcpy,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __mempcpy_power7
- : __mempcpy_ppc);
+libc_ifunc_redirected (__redirect___mempcpy, __mempcpy,
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __mempcpy_power7
+ : __mempcpy_ppc);
weak_alias (__mempcpy, mempcpy)
-libc_hidden_def (mempcpy)
#else
# include <string/mempcpy.c>
#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memrchr-power7.S b/sysdeps/powerpc/powerpc64/multiarch/memrchr-power7.S
index 0e41b6c9f5..afaae46dec 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memrchr-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memrchr-power7.S
@@ -1,5 +1,5 @@
/* Optimized memrchr implementation for PowerPC64/POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,22 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__memrchr_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__memrchr_power7): \
- cfi_startproc; \
- LOCALENTRY(__memrchr_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__memrchr_power7) \
- END_2(__memrchr_power7)
+#define MEMRCHR __memrchr_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memrchr-power8.S b/sysdeps/powerpc/powerpc64/multiarch/memrchr-power8.S
new file mode 100644
index 0000000000..453e2b620e
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/memrchr-power8.S
@@ -0,0 +1,26 @@
+/* Optimized memrchr implementation for PowerPC64/POWER8.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define MEMRCHR __memrchr_power8
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+#undef weak_alias
+#define weak_alias(name,alias)
+
+#include <sysdeps/powerpc/powerpc64/power8/memrchr.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memrchr-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/memrchr-ppc64.c
index 3db538f120..e6d60c3fc3 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memrchr-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memrchr-ppc64.c
@@ -1,5 +1,5 @@
/* PowerPC64 default implementation of memrchr.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -15,5 +15,4 @@
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-
#include <sysdeps/powerpc/powerpc32/power4/multiarch/memrchr-ppc32.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memrchr.c b/sysdeps/powerpc/powerpc64/multiarch/memrchr.c
index 7603343947..48be9515e4 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memrchr.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memrchr.c
@@ -1,5 +1,5 @@
/* Multiple versions of memrchr.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,13 +23,16 @@
extern __typeof (__memrchr) __memrchr_ppc attribute_hidden;
extern __typeof (__memrchr) __memrchr_power7 attribute_hidden;
+extern __typeof (__memrchr) __memrchr_power8 attribute_hidden;
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
libc_ifunc (__memrchr,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __memrchr_power7
- : __memrchr_ppc);
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __memrchr_power8 :
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __memrchr_power7
+ : __memrchr_ppc);
weak_alias (__memrchr, memrchr)
#else
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memset-power4.S b/sysdeps/powerpc/powerpc64/multiarch/memset-power4.S
index 34707382f1..78d6de9e19 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memset-power4.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memset-power4.S
@@ -1,5 +1,5 @@
/* Optimized memset implementation for PowerPC64/POWER4.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memset_power4) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memset_power4): \
- cfi_startproc; \
- LOCALENTRY(__memset_power4)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memset_power4,mask) \
- END_2(__memset_power4)
+#define MEMSET __memset_power4
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memset-power6.S b/sysdeps/powerpc/powerpc64/multiarch/memset-power6.S
index c11db16890..79d93c7615 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memset-power6.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memset-power6.S
@@ -1,5 +1,5 @@
/* Optimized memset implementation for PowerPC64/POWER6.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memset_power6) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memset_power6): \
- cfi_startproc; \
- LOCALENTRY(__memset_power6)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memset_power6,mask) \
- END_2(__memset_power6)
+#define MEMSET __memset_power6
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memset-power7.S b/sysdeps/powerpc/powerpc64/multiarch/memset-power7.S
index 61191815dc..02d1855186 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memset-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memset-power7.S
@@ -1,5 +1,5 @@
/* Optimized memset implementation for PowerPC64/POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memset_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memset_power7): \
- cfi_startproc; \
- LOCALENTRY(__memset_power7)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memset_power7,mask) \
- END_2(__memset_power7)
+#define MEMSET __memset_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memset-power8.S b/sysdeps/powerpc/powerpc64/multiarch/memset-power8.S
index ceafa7e83b..932eeefca1 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memset-power8.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memset-power8.S
@@ -1,5 +1,5 @@
/* Optimized memset implementation for PowerPC64/POWER8.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memset_power8) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memset_power8): \
- cfi_startproc; \
- LOCALENTRY(__memset_power8)
-
-#undef END_GEN_TB
-#define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memset_power8,mask) \
- END_2(__memset_power8)
+#define MEMSET __memset_power8
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memset-ppc64.S b/sysdeps/powerpc/powerpc64/multiarch/memset-ppc64.S
index 7b3aa3acd1..61f4bc5089 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memset-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/memset-ppc64.S
@@ -1,5 +1,5 @@
/* Default memset/bzero implementation for PowerPC64.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -30,21 +30,7 @@ END_GEN_TB (__bzero_ppc,TB_TOCLESS)
#if defined SHARED && IS_IN (libc)
-# undef EALIGN
-# define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__memset_ppc) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__memset_ppc): \
- cfi_startproc; \
- LOCALENTRY(__memset_ppc)
-
-# undef END_GEN_TB
-# define END_GEN_TB(name, mask) \
- cfi_endproc; \
- TRACEBACK_MASK(__memset_ppc,mask) \
- END_2(__memset_ppc)
+# define MEMSET __memset_ppc
# undef libc_hidden_builtin_def
# define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/memset.c b/sysdeps/powerpc/powerpc64/multiarch/memset.c
index 8f0489e65b..1a7c46fecf 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/memset.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/memset.c
@@ -1,5 +1,5 @@
/* Multiple versions of memset.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/rawmemchr-power7.S b/sysdeps/powerpc/powerpc64/multiarch/rawmemchr-power7.S
index f1515bccce..a268376729 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/rawmemchr-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/rawmemchr-power7.S
@@ -1,5 +1,5 @@
/* Optimized rawmemchr implementation for PowerPC64/POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,21 +16,6 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__rawmemchr_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__rawmemchr_power7): \
- cfi_startproc; \
- LOCALENTRY(__rawmemchr_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__rawmemchr_power7) \
- END_2(__rawmemchr_power7)
+#define RAWMEMCHR __rawmemchr_power7
#include <sysdeps/powerpc/powerpc64/power7/rawmemchr.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/rawmemchr-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/rawmemchr-ppc64.c
index 066a838dc7..e5daa9225b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/rawmemchr-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/rawmemchr-ppc64.c
@@ -1,5 +1,5 @@
/* PowerPC64 default implementation of rawmemchr.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/rawmemchr.c b/sysdeps/powerpc/powerpc64/multiarch/rawmemchr.c
index f06030eeeb..02bac49b53 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/rawmemchr.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/rawmemchr.c
@@ -1,5 +1,5 @@
/* Multiple versions of rawmemchr.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,19 +17,21 @@
<http://www.gnu.org/licenses/>. */
#if IS_IN (libc)
+# define __rawmemchr __redirect___rawmemchr
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
extern __typeof (__rawmemchr) __rawmemchr_ppc attribute_hidden;
extern __typeof (__rawmemchr) __rawmemchr_power7 attribute_hidden;
+# undef __rawmemchr
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
-libc_ifunc (__rawmemchr,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __rawmemchr_power7
- : __rawmemchr_ppc);
+libc_ifunc_redirected (__redirect___rawmemchr, __rawmemchr,
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __rawmemchr_power7
+ : __rawmemchr_ppc);
weak_alias (__rawmemchr, rawmemchr)
#else
diff --git a/sysdeps/powerpc/powerpc64/multiarch/rtld-memset.c b/sysdeps/powerpc/powerpc64/multiarch/rtld-memset.c
index 5e66a21224..611eff4bf6 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/rtld-memset.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/rtld-memset.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/rtld-strchr.S b/sysdeps/powerpc/powerpc64/multiarch/rtld-strchr.S
index 06e8fb5d17..9ec081aebd 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/rtld-strchr.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/rtld-strchr.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/stpcpy-power7.c b/sysdeps/powerpc/powerpc64/multiarch/stpcpy-power7.c
index fb383799f5..44e9f15e9b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/stpcpy-power7.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/stpcpy-power7.c
@@ -1,5 +1,5 @@
/* Multiarch stpcpy for POWER7/PPC64.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/stpcpy-power8.S b/sysdeps/powerpc/powerpc64/multiarch/stpcpy-power8.S
index 8aeaa7c6ad..7718869370 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/stpcpy-power8.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/stpcpy-power8.S
@@ -1,5 +1,5 @@
/* Optimized stpcpy implementation for POWER8/PPC64.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__stpcpy_power8) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__stpcpy_power8): \
- cfi_startproc; \
- LOCALENTRY(__stpcpy_power8)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__stpcpy_power8) \
- END_2(__stpcpy_power8)
+#define STPCPY __stpcpy_power8
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/stpcpy-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/stpcpy-ppc64.c
index d607d8a6de..8359b7b4ca 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/stpcpy-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/stpcpy-ppc64.c
@@ -1,5 +1,5 @@
/* Multiarch stpcpy for PPC64.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -27,9 +27,7 @@ extern __typeof (stpcpy) __stpcpy_ppc attribute_hidden;
#define strlen __strlen_ppc
#undef weak_alias
-#define weak_alias(name, aliasname) \
- extern __typeof (__stpcpy_ppc) aliasname \
- __attribute__ ((weak, alias ("__stpcpy_ppc")));
+#define weak_alias(name, aliasname)
#undef libc_hidden_def
#define libc_hidden_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/stpcpy.c b/sysdeps/powerpc/powerpc64/multiarch/stpcpy.c
index bbc169180b..34c8896441 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/stpcpy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/stpcpy.c
@@ -1,5 +1,5 @@
/* Multiple versions of stpcpy. PowerPC64 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#if defined SHARED && IS_IN (libc)
+# define __NO_STRING_INLINES
# define NO_MEMPCPY_STPCPY_REDIRECT
# include <string.h>
# include <shlib-compat.h>
@@ -26,14 +27,15 @@ extern __typeof (__stpcpy) __stpcpy_ppc attribute_hidden;
extern __typeof (__stpcpy) __stpcpy_power7 attribute_hidden;
extern __typeof (__stpcpy) __stpcpy_power8 attribute_hidden;
-libc_ifunc (__stpcpy,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __stpcpy_power8 :
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __stpcpy_power7
- : __stpcpy_ppc);
+libc_ifunc_hidden (__stpcpy, __stpcpy,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __stpcpy_power8
+ : (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __stpcpy_power7
+ : __stpcpy_ppc);
weak_alias (__stpcpy, stpcpy)
+libc_hidden_def (__stpcpy)
libc_hidden_def (stpcpy)
#else
# include <string/stpcpy.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/stpncpy-power7.S b/sysdeps/powerpc/powerpc64/multiarch/stpncpy-power7.S
index fa1572ce01..6af2557c5d 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/stpncpy-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/stpncpy-power7.S
@@ -1,5 +1,5 @@
/* Optimized stpncpy implementation for POWER7.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,29 +16,16 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#define USE_AS_STPNCPY
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__stpncpy_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__stpncpy_power7): \
- cfi_startproc; \
- LOCALENTRY(__stpncpy_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__stpncpy_power7) \
- END_2(__stpncpy_power7)
+#define STPNCPY __stpncpy_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
#define MEMSET __memset_power7
+#ifdef SHARED
+#define MEMSET_is_local
+#endif
#include <sysdeps/powerpc/powerpc64/power7/stpncpy.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/stpncpy-power8.S b/sysdeps/powerpc/powerpc64/multiarch/stpncpy-power8.S
index 804e499023..4fea202a54 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/stpncpy-power8.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/stpncpy-power8.S
@@ -1,5 +1,5 @@
/* Optimized stpncpy implementation for POWER8.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,24 +16,14 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
+#define STPNCPY __stpncpy_power8
-#define USE_AS_STPNCPY
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__stpncpy_power8) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__stpncpy_power8): \
- cfi_startproc; \
- LOCALENTRY(__stpncpy_power8)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__stpncpy_power8) \
- END_2(__stpncpy_power8)
+#define MEMSET __memset_power8
+#ifdef SHARED
+#define MEMSET_is_local
+#endif
#include <sysdeps/powerpc/powerpc64/power8/stpncpy.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/stpncpy-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/stpncpy-ppc64.c
index 593ae2324e..8f9dc08599 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/stpncpy-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/stpncpy-ppc64.c
@@ -1,5 +1,5 @@
/* Default stpncpy implementation for PowerPC64.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/stpncpy.c b/sysdeps/powerpc/powerpc64/multiarch/stpncpy.c
index b1484b1e36..28e3c97b72 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/stpncpy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/stpncpy.c
@@ -1,5 +1,5 @@
/* Multiple versions of stpncpy. PowerPC64 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,8 @@
<http://www.gnu.org/licenses/>. */
#if IS_IN (libc)
+# define stpncpy __redirect_stpncpy
+# define __stpncpy __redirect___stpncpy
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
@@ -24,13 +26,14 @@
extern __typeof (__stpncpy) __stpncpy_ppc attribute_hidden;
extern __typeof (__stpncpy) __stpncpy_power7 attribute_hidden;
extern __typeof (__stpncpy) __stpncpy_power8 attribute_hidden;
+# undef stpncpy
+# undef __stpncpy
-libc_ifunc (__stpncpy,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __stpncpy_power8 :
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __stpncpy_power7
- : __stpncpy_ppc);
-
+libc_ifunc_redirected (__redirect___stpncpy, __stpncpy,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __stpncpy_power8
+ : (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __stpncpy_power7
+ : __stpncpy_ppc);
weak_alias (__stpncpy, stpncpy)
#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-power7.S
index 013dc62867..bec2f30703 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-power7.S
@@ -1,5 +1,5 @@
-/* Optimized strcasecmp implementation foOWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Optimized strcasecmp implementation for POWER7.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__strcasecmp_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__strcasecmp_power7): \
- cfi_startproc; \
- LOCALENTRY(__strcasecmp_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strcasecmp_power7) \
- END_2(__strcasecmp_power7)
-
+#define __strcasecmp __strcasecmp_power7
#undef weak_alias
#define weak_alias(name, alias)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-power8.S
new file mode 100644
index 0000000000..29453ff80d
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-power8.S
@@ -0,0 +1,26 @@
+/* Optimized strcasecmp implementation for POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define __strcasecmp __strcasecmp_power8
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strcasecmp.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-ppc64.c
new file mode 100644
index 0000000000..b477255196
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp-ppc64.c
@@ -0,0 +1,21 @@
+/* Multiarch strcasecmp for PPC64.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define strcasecmp __strcasecmp_ppc
+
+#include <string/strcasecmp.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp.c b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp.c
index 1f22336d49..1a6661e49c 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp.c
@@ -1,5 +1,5 @@
-/* Multiple versions of strcasecmp.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Multiple versions of strcasecmp
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,25 +16,21 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#if IS_IN (libc)
-# include <string.h>
-# define strcasecmp __strcasecmp_ppc
-extern __typeof (__strcasecmp) __strcasecmp_ppc attribute_hidden;
-extern __typeof (__strcasecmp) __strcasecmp_power7 attribute_hidden;
-#endif
+#include <string.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
-#include <string/strcasecmp.c>
-#undef strcasecmp
+extern __typeof (__strcasecmp) __libc_strcasecmp;
-#if IS_IN (libc)
-# include <shlib-compat.h>
-# include "init-arch.h"
+extern __typeof (__strcasecmp) __strcasecmp_ppc attribute_hidden;
+extern __typeof (__strcasecmp) __strcasecmp_power7 attribute_hidden;
+extern __typeof (__strcasecmp) __strcasecmp_power8 attribute_hidden;
-extern __typeof (__strcasecmp) __libc_strcasecmp;
libc_ifunc (__libc_strcasecmp,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strcasecmp_power7
- : __strcasecmp_ppc);
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strcasecmp_power8:
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strcasecmp_power7
+ : __strcasecmp_ppc);
weak_alias (__libc_strcasecmp, strcasecmp)
-#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l-power7.S
index d4706e1755..07e716950a 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l-power7.S
@@ -1,5 +1,5 @@
/* Optimized strcasecmp_l implementation for POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,22 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__strcasecmp_l_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__strcasecmp_l_power7): \
- cfi_startproc; \
- LOCALENTRY(__strcasecmp_l_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strcasecmp_l_power7) \
- END_2(__strcasecmp_l_power7)
+#define __strcasecmp_l __strcasecmp_l_power7
#undef weak_alias
#define weak_alias(name, alias)
@@ -40,5 +25,7 @@
#define libc_hidden_builtin_def(name)
#define USE_IN_EXTENDED_LOCALE_MODEL
+#define __STRCMP __strcasecmp_l
+#define STRCMP strcasecmp_l
#include <sysdeps/powerpc/powerpc64/power7/strcasecmp.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l.c b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l.c
index d05b1c8a86..3edccc4333 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcasecmp_l.c
@@ -1,5 +1,5 @@
/* Multiple versions of strcasecmp_l.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcasestr-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strcasestr-power8.S
new file mode 100644
index 0000000000..985f4e4e7d
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcasestr-power8.S
@@ -0,0 +1,33 @@
+/* Optimized strcasestr implementation for POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define STRCASESTR __strcasestr_power8
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+/* The following definitions are used in strcasestr optimization. */
+
+/* strlen is used to calculate len of r4. */
+#define STRLEN __strlen_power8
+/* strnlen is used to check if len of r3 is more than r4. */
+#define STRNLEN __strnlen_power8
+/* strchr is used to check if first char of r4 is present in r3. */
+#define STRCHR __strchr_power8
+
+#include <sysdeps/powerpc/powerpc64/power8/strcasestr.S>
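
An illustrative aside, not part of the patch: the STRLEN/STRNLEN/STRCHR
hooks defined above correspond to the pre-checks a straightforward
strcasestr performs before scanning.  A rough, unoptimized C sketch of
those checks (simple_strcasestr is a hypothetical name, not the POWER8
routine):

#include <ctype.h>
#include <string.h>
#include <strings.h>   /* strncasecmp */

char *
simple_strcasestr (const char *haystack, const char *needle)
{
  size_t nlen = strlen (needle);           /* length of the needle (r4) */
  if (nlen == 0)
    return (char *) haystack;
  if (strnlen (haystack, nlen) < nlen)     /* haystack (r3) shorter than needle */
    return NULL;
  for (; *haystack != '\0'; ++haystack)
    /* Cheap first-character test (the role STRCHR plays), then the full
       case-insensitive comparison.  */
    if (tolower ((unsigned char) *haystack) == tolower ((unsigned char) *needle)
        && strncasecmp (haystack, needle, nlen) == 0)
      return (char *) haystack;
  return NULL;
}
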
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcasestr-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strcasestr-ppc64.c
new file mode 100644
index 0000000000..0805b6f939
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcasestr-ppc64.c
@@ -0,0 +1,34 @@
+/* PowerPC64 default implementation of strcasestr.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+
+#define STRCASESTR __strcasestr_ppc
+#if IS_IN (libc) && defined(SHARED)
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name) \
+ __hidden_ver1(__strcasestr_ppc, __GI_strcasestr, __strcasestr_ppc);
+#endif
+
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+extern __typeof (strcasestr) __strcasestr_ppc attribute_hidden;
+
+#include <string/strcasestr.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcasestr.c b/sysdeps/powerpc/powerpc64/multiarch/strcasestr.c
new file mode 100644
index 0000000000..dc46bfd474
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcasestr.c
@@ -0,0 +1,37 @@
+/* Multiple versions of strcasestr.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if IS_IN (libc)
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (__strcasestr) __strcasestr_ppc attribute_hidden;
+extern __typeof (__strcasestr) __strcasestr_power8 attribute_hidden;
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+ ifunc symbol properly. */
+libc_ifunc (__strcasestr,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strcasestr_power8
+ : __strcasestr_ppc);
+
+weak_alias (__strcasestr, strcasestr)
+#else
+#include <string/strcasestr.c>
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcat-power7.c b/sysdeps/powerpc/powerpc64/multiarch/strcat-power7.c
index 58563162ed..31966827ed 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcat-power7.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcat-power7.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -27,4 +27,4 @@ extern typeof (strlen) __strlen_power7;
#define strcpy __strcpy_power7
#define strlen __strlen_power7
-#include <sysdeps/powerpc/strcat.c>
+#include <string/strcat.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcat-power8.c b/sysdeps/powerpc/powerpc64/multiarch/strcat-power8.c
index be17631411..996a3b6a28 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcat-power8.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcat-power8.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2015-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,8 +23,8 @@
#define libc_hidden_def(name)
extern typeof (strcpy) __strcpy_power8;
-extern typeof (strlen) __strlen_power7;
+extern typeof (strlen) __strlen_power8;
#define strcpy __strcpy_power8
-#define strlen __strlen_power7
-#include <sysdeps/powerpc/strcat.c>
+#define strlen __strlen_power8
+#include <string/strcat.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcat-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strcat-ppc64.c
index 3f8953eebe..fc3ef2a2bf 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcat-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcat-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -26,4 +26,4 @@
extern __typeof (strcat) __strcat_ppc attribute_hidden;
-#include <sysdeps/powerpc/strcat.c>
+#include <string/strcat.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcat.c b/sysdeps/powerpc/powerpc64/multiarch/strcat.c
index a2894ae027..48035ed3c0 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcat.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcat.c
@@ -1,5 +1,5 @@
/* Multiple versions of strcat. PowerPC64 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#if IS_IN (libc)
+# define strcat __redirect_strcat
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
@@ -24,11 +25,12 @@
extern __typeof (strcat) __strcat_ppc attribute_hidden;
extern __typeof (strcat) __strcat_power7 attribute_hidden;
extern __typeof (strcat) __strcat_power8 attribute_hidden;
+# undef strcat
-libc_ifunc (strcat,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __strcat_power8 :
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strcat_power7
- : __strcat_ppc);
+libc_ifunc_redirected (__redirect_strcat, strcat,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strcat_power8
+ : (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strcat_power7
+ : __strcat_ppc);
#endif
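
An illustrative aside, not part of the patch: libc_ifunc_redirected above
installs an IFUNC resolver that picks a strcat variant from the AT_HWCAP2
bits once, at load time.  A minimal standalone analogue using the GCC
"ifunc" attribute (the my_strcat_* names are hypothetical stand-ins, and
their plain-C bodies stand in for the optimized assembly):

#include <string.h>
#include <sys/auxv.h>                        /* getauxval, AT_HWCAP2 */

#ifndef PPC_FEATURE2_ARCH_2_07
# define PPC_FEATURE2_ARCH_2_07 0x80000000   /* value from <asm/cputable.h> */
#endif

static char *my_strcat_generic (char *dst, const char *src)
{ return strcat (dst, src); }

static char *my_strcat_power8 (char *dst, const char *src)
{ return strcat (dst, src); }                /* stand-in for the POWER8 body */

typedef char *(*strcat_fn) (char *, const char *);

/* Resolver: runs once when the binary is relocated, like the libc_ifunc
   resolvers in this patch.  */
static strcat_fn
my_strcat_resolver (void)
{
  unsigned long hwcap2 = getauxval (AT_HWCAP2);
  return (hwcap2 & PPC_FEATURE2_ARCH_2_07)
         ? my_strcat_power8 : my_strcat_generic;
}

char *my_strcat (char *dst, const char *src)
     __attribute__ ((ifunc ("my_strcat_resolver")));
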
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strchr-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strchr-power7.S
index 47f4fe734c..f91b809984 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strchr-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strchr-power7.S
@@ -1,5 +1,5 @@
/* Optimized strchr implementation for POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,22 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__strchr_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__strchr_power7): \
- cfi_startproc; \
- LOCALENTRY(__strchr_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strchr_power7) \
- END_2(__strchr_power7)
+#define STRCHR __strchr_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strchr-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strchr-power8.S
new file mode 100644
index 0000000000..16a484dbda
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strchr-power8.S
@@ -0,0 +1,24 @@
+/* Optimized strchr implementation for POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define STRCHR __strchr_power8
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strchr.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strchr-ppc64.S b/sysdeps/powerpc/powerpc64/multiarch/strchr-ppc64.S
index 8641d262cb..cdbb9c151e 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strchr-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strchr-ppc64.S
@@ -1,5 +1,5 @@
/* PowerPC64 default implementation of strchr.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,8 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#ifdef SHARED
-# undef ENTRY
-# define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__strchr_ppc) \
- .align ALIGNARG(2); \
- BODY_LABEL(__strchr_ppc): \
- cfi_startproc; \
- LOCALENTRY(__strchr_ppc)
-
-# undef END
-# define END(name) \
- cfi_endproc; \
- TRACEBACK(__strchr_ppc) \
- END_2(__strchr_ppc)
+# define STRCHR __strchr_ppc
# undef libc_hidden_builtin_def
# define libc_hidden_builtin_def(name) \
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strchr.c b/sysdeps/powerpc/powerpc64/multiarch/strchr.c
index 2cfde632cb..6528469ee3 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strchr.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strchr.c
@@ -1,5 +1,5 @@
/* Multiple versions of strchr.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,18 +18,25 @@
/* Define multiple versions only for definition in libc. */
#if defined SHARED && IS_IN (libc)
+# define strchr __redirect_strchr
+/* Omit the strchr inline definitions because it would redefine strchr. */
+# define __NO_STRING_INLINES
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
extern __typeof (strchr) __strchr_ppc attribute_hidden;
extern __typeof (strchr) __strchr_power7 attribute_hidden;
+extern __typeof (strchr) __strchr_power8 attribute_hidden;
+# undef strchr
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
-libc_ifunc (strchr,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strchr_power7
- : __strchr_ppc);
+libc_ifunc_redirected (__redirect_strchr, strchr,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strchr_power8 :
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strchr_power7
+ : __strchr_ppc);
weak_alias (strchr, index)
#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strchrnul-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strchrnul-power7.S
index 977286da48..fee140926b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strchrnul-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strchrnul-power7.S
@@ -1,5 +1,5 @@
/* Optimized strchrnul implementation for POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,22 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__strchrnul_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__strchrnul_power7): \
- cfi_startproc; \
- LOCALENTRY(__strchrnul_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strchrnul_power7) \
- END_2(__strchrnul_power7)
+#define STRCHRNUL __strchrnul_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strchrnul-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strchrnul-power8.S
new file mode 100644
index 0000000000..e17e918d00
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strchrnul-power8.S
@@ -0,0 +1,24 @@
+/* Optimized strchrnul implementation for POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define STRCHRNUL __strchrnul_power8
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strchrnul.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strchrnul-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strchrnul-ppc64.c
index 2ec52432da..59c6bc37f7 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strchrnul-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strchrnul-ppc64.c
@@ -1,5 +1,5 @@
/* PowerPC64 default implementation of strchrnul.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strchrnul.c b/sysdeps/powerpc/powerpc64/multiarch/strchrnul.c
index 682aa0fef7..ead31f791c 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strchrnul.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strchrnul.c
@@ -1,5 +1,5 @@
/* Multiple versions of strchrnul.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,10 +23,13 @@
extern __typeof (__strchrnul) __strchrnul_ppc attribute_hidden;
extern __typeof (__strchrnul) __strchrnul_power7 attribute_hidden;
+extern __typeof (__strchrnul) __strchrnul_power8 attribute_hidden;
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
libc_ifunc (__strchrnul,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strchrnul_power8 :
(hwcap & PPC_FEATURE_HAS_VSX)
? __strchrnul_power7
: __strchrnul_ppc);
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power7.S
index d44114bcd9..814d472680 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power7.S
@@ -1,5 +1,5 @@
/* Optimized strcmp implementation for POWER7.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__strcmp_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strcmp_power7): \
- cfi_startproc; \
- LOCALENTRY(__strcmp_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strcmp_power7) \
- END_2(__strcmp_power7)
+#define STRCMP __strcmp_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S
index e35251ca25..68803ffc1b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power8.S
@@ -1,5 +1,5 @@
/* Optimized strcmp implementation for POWER8/PPC64.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,25 +16,11 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__strcmp_power8) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strcmp_power8): \
- cfi_startproc; \
- LOCALENTRY(__strcmp_power8)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strcmp_power8) \
- END_2(__strcmp_power8)
+#if IS_IN (libc)
+#define STRCMP __strcmp_power8
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
#include <sysdeps/powerpc/powerpc64/power8/strcmp.S>
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power9.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power9.S
new file mode 100644
index 0000000000..8b569d38be
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power9.S
@@ -0,0 +1,26 @@
+/* Optimized strcmp implementation for POWER9/PPC64.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if IS_IN (libc)
+#define STRCMP __strcmp_power9
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power9/strcmp.S>
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-ppc64.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-ppc64.S
index a9515112d8..43e1c6f697 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcmp-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-ppc64.S
@@ -1,5 +1,5 @@
/* Default strcmp implementation for PowerPC64.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,28 +16,12 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#if defined SHARED && IS_IN (libc)
-# undef EALIGN
-# define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__strcmp_ppc) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strcmp_ppc): \
- cfi_startproc; \
- LOCALENTRY(__strcmp_ppc)
-
-# undef END
-# define END(name) \
- cfi_endproc; \
- TRACEBACK(__strcmp_ppc) \
- END_2(__strcmp_ppc)
+# define STRCMP __strcmp_ppc
# undef libc_hidden_builtin_def
# define libc_hidden_builtin_def(name) \
.globl __GI_strcmp; __GI_strcmp = __strcmp_ppc
-#endif
+#endif /* SHARED && IS_IN */
#include <sysdeps/powerpc/powerpc64/strcmp.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp.c b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
index aee888a4b7..b669053166 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
@@ -1,5 +1,5 @@
/* Multiple versions of strcmp. PowerPC64 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,9 @@
<http://www.gnu.org/licenses/>. */
#if defined SHARED && IS_IN (libc)
+# define strcmp __redirect_strcmp
+/* Omit the strcmp inline definitions because it would redefine strcmp. */
+# define __NO_STRING_INLINES
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
@@ -24,11 +27,16 @@
extern __typeof (strcmp) __strcmp_ppc attribute_hidden;
extern __typeof (strcmp) __strcmp_power7 attribute_hidden;
extern __typeof (strcmp) __strcmp_power8 attribute_hidden;
+extern __typeof (strcmp) __strcmp_power9 attribute_hidden;
-libc_ifunc (strcmp,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __strcmp_power8 :
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strcmp_power7
- : __strcmp_ppc);
+# undef strcmp
+
+libc_ifunc_redirected (__redirect_strcmp, strcmp,
+ (hwcap2 & PPC_FEATURE2_ARCH_3_00)
+ ? __strcmp_power9 :
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strcmp_power8
+ : (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strcmp_power7
+ : __strcmp_ppc);
#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcpy-power7.c b/sysdeps/powerpc/powerpc64/multiarch/strcpy-power7.c
index f75abb95a4..0a719519a6 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcpy-power7.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcpy-power7.c
@@ -1,5 +1,5 @@
/* Multiarch strcpy for POWER7/PPC64.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcpy-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strcpy-power8.S
index 213c77a69b..5b68fc970e 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcpy-power8.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcpy-power8.S
@@ -1,5 +1,5 @@
/* Optimized strcpy implementation for POWER8/PPC64.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__strcpy_power8) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strcpy_power8): \
- cfi_startproc; \
- LOCALENTRY(__strcpy_power8)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strcpy_power8) \
- END_2(__strcpy_power8)
+#define STRCPY __strcpy_power8
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcpy-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strcpy-ppc64.c
index 1d75d2aaf2..22831974ab 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcpy-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcpy-ppc64.c
@@ -1,5 +1,5 @@
/* Multiarch strcpy for PPC64.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcpy.c b/sysdeps/powerpc/powerpc64/multiarch/strcpy.c
index d2c38589e7..b18a92a62a 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcpy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcpy.c
@@ -1,5 +1,5 @@
/* Multiple versions of strcpy. PowerPC64 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,7 @@
<http://www.gnu.org/licenses/>. */
#if defined SHARED && IS_IN (libc)
+# define strcpy __redirect_strcpy
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
@@ -24,11 +25,12 @@
extern __typeof (strcpy) __strcpy_ppc attribute_hidden;
extern __typeof (strcpy) __strcpy_power7 attribute_hidden;
extern __typeof (strcpy) __strcpy_power8 attribute_hidden;
+#undef strcpy
-libc_ifunc (strcpy,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __strcpy_power8 :
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strcpy_power7
- : __strcpy_ppc);
+libc_ifunc_redirected (__redirect_strcpy, strcpy,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strcpy_power8
+ : (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strcpy_power7
+ : __strcpy_ppc);
#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcspn-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strcspn-power8.S
new file mode 100644
index 0000000000..23bf1c1e5f
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcspn-power8.S
@@ -0,0 +1,23 @@
+/* Optimized strcspn implementation for POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define STRSPN __strcspn_power8
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strcspn.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcspn-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strcspn-ppc64.c
new file mode 100644
index 0000000000..03eac0d183
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcspn-ppc64.c
@@ -0,0 +1,26 @@
+/* Default strcspn implementation for PowerPC64.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define STRCSPN __strcspn_ppc
+
+#ifdef SHARED
+# undef libc_hidden_def
+# define libc_hidden_def(name)
+#endif
+
+#include <string/strcspn.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcspn.c b/sysdeps/powerpc/powerpc64/multiarch/strcspn.c
new file mode 100644
index 0000000000..308aab5970
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcspn.c
@@ -0,0 +1,35 @@
+/* Multiple versions of strcspn. PowerPC64 version.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+#undef strcspn
+extern __typeof (strcspn) __libc_strcspn;
+
+extern __typeof (strcspn) __strcspn_ppc attribute_hidden;
+extern __typeof (strcspn) __strcspn_power8 attribute_hidden;
+
+libc_ifunc (__libc_strcspn,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strcspn_power8
+ : __strcspn_ppc);
+
+weak_alias (__libc_strcspn, strcspn)
+libc_hidden_builtin_def (strcspn)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strlen-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strlen-power7.S
index ee60e434aa..5f4591ff6c 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strlen-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strlen-power7.S
@@ -1,5 +1,5 @@
/* Optimized strlen implementation for POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,22 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__strlen_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__strlen_power7): \
- cfi_startproc; \
- LOCALENTRY(__strlen_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strlen_power7) \
- END_2(__strlen_power7)
+#define STRLEN __strlen_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strlen-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strlen-power8.S
new file mode 100644
index 0000000000..fb8ffbf970
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strlen-power8.S
@@ -0,0 +1,24 @@
+/* Optimized strlen implementation for POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define STRLEN __strlen_power8
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strlen.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strlen-ppc64.S b/sysdeps/powerpc/powerpc64/multiarch/strlen-ppc64.S
index 2a4327a054..3e9e4814c9 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strlen-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strlen-ppc64.S
@@ -1,5 +1,5 @@
/* Default strlen implementation for PowerPC64.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,8 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#if defined SHARED && IS_IN (libc)
-# undef ENTRY
-# define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__strlen_ppc) \
- .align ALIGNARG(2); \
- BODY_LABEL(__strlen_ppc): \
- cfi_startproc; \
- LOCALENTRY(__strlen_ppc)
-
-# undef END
-# define END(name) \
- cfi_endproc; \
- TRACEBACK(__strlen_ppc) \
- END_2(__strlen_ppc)
+# define STRLEN __strlen_ppc
# undef libc_hidden_builtin_def
# define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strlen.c b/sysdeps/powerpc/powerpc64/multiarch/strlen.c
index 94501fdf2f..74810dab99 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strlen.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strlen.c
@@ -1,5 +1,5 @@
/* Multiple versions of strlen. PowerPC64 version.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -29,11 +29,14 @@ extern __typeof (__redirect_strlen) __libc_strlen;
extern __typeof (__redirect_strlen) __strlen_ppc attribute_hidden;
extern __typeof (__redirect_strlen) __strlen_power7 attribute_hidden;
+extern __typeof (__redirect_strlen) __strlen_power8 attribute_hidden;
libc_ifunc (__libc_strlen,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strlen_power7
- : __strlen_ppc);
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strlen_power8 :
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strlen_power7
+ : __strlen_ppc);
#undef strlen
strong_alias (__libc_strlen, strlen)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncase-power7.c b/sysdeps/powerpc/powerpc64/multiarch/strncase-power7.c
index 1e147aedb2..27ca40c40a 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncase-power7.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncase-power7.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncase-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strncase-power8.S
new file mode 100644
index 0000000000..c0a0901a28
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncase-power8.S
@@ -0,0 +1,26 @@
+/* Optimized strncasecmp implementation for POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define __strncasecmp __strncasecmp_power8
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strncase.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncase-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strncase-ppc64.c
new file mode 100644
index 0000000000..31ed9510c6
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncase-ppc64.c
@@ -0,0 +1,21 @@
+/* Multiarch strncasecmp for PPC64.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define strncasecmp __strncasecmp_ppc
+
+#include <string/strncase.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncase.c b/sysdeps/powerpc/powerpc64/multiarch/strncase.c
index 2729fcea83..8cf7154c59 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncase.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncase.c
@@ -1,5 +1,5 @@
/* Multiple versions of strncasecmp
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,26 +16,21 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#if IS_IN (libc)
-# include <string.h>
-# define strncasecmp __strncasecmp_ppc
-extern __typeof (__strncasecmp) __strncasecmp_ppc attribute_hidden;
-extern __typeof (__strncasecmp) __strncasecmp_power7 attribute_hidden;
-#endif
+#include <string.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
-#include <string/strncase.c>
-#undef strncasecmp
+extern __typeof (__strncasecmp) __libc_strncasecmp;
-#if IS_IN (libc)
-# include <shlib-compat.h>
-# include "init-arch.h"
+extern __typeof (__strncasecmp) __strncasecmp_ppc attribute_hidden;
+extern __typeof (__strncasecmp) __strncasecmp_power7 attribute_hidden;
+extern __typeof (__strncasecmp) __strncasecmp_power8 attribute_hidden;
-/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
- ifunc symbol properly. */
-extern __typeof (__strncasecmp) __libc_strncasecmp;
libc_ifunc (__libc_strncasecmp,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strncasecmp_power8:
(hwcap & PPC_FEATURE_HAS_VSX)
? __strncasecmp_power7
: __strncasecmp_ppc);
+
weak_alias (__libc_strncasecmp, strncasecmp)
-#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncase_l-power7.c b/sysdeps/powerpc/powerpc64/multiarch/strncase_l-power7.c
index 21fba18836..ae2c4a3554 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncase_l-power7.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncase_l-power7.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,8 +18,10 @@
#include <string.h>
#define __strncasecmp_l __strncasecmp_l_power7
-#define USE_IN_EXTENDED_LOCALE_MODEL 1
+
+#undef libc_hidden_def
+#define libc_hidden_def(name)
extern __typeof (strncasecmp_l) __strncasecmp_l_power7 attribute_hidden;
-#include <string/strncase.c>
+#include <string/strncase_l.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncase_l.c b/sysdeps/powerpc/powerpc64/multiarch/strncase_l.c
index 33a1d8b639..d3c4bf18c1 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncase_l.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncase_l.c
@@ -1,5 +1,5 @@
/* Multiple versions of strncasecmp_l
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncat-power7.c b/sysdeps/powerpc/powerpc64/multiarch/strncat-power7.c
index bbc31e1abb..a393c2ed60 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncat-power7.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncat-power7.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2015-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncat-power8.c b/sysdeps/powerpc/powerpc64/multiarch/strncat-power8.c
new file mode 100644
index 0000000000..7842a507a0
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncat-power8.c
@@ -0,0 +1,31 @@
+/* Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+
+#define STRNCAT __strncat_power8
+
+extern __typeof (strncat) __strncat_power8 attribute_hidden;
+extern __typeof (strlen) __strlen_power8 attribute_hidden;
+extern __typeof (strnlen) __strnlen_power8 attribute_hidden;
+extern __typeof (memcpy) __memcpy_power7 attribute_hidden;
+
+#define strlen __strlen_power8
+#define __strnlen __strnlen_power8
+#define memcpy __memcpy_power7
+
+#include <string/strncat.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncat-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strncat-ppc64.c
index 5b364c8341..dd0f166514 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncat-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncat-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncat.c b/sysdeps/powerpc/powerpc64/multiarch/strncat.c
index 0f6f29cccc..a2e5038e53 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncat.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncat.c
@@ -1,5 +1,5 @@
/* Multiple versions of strncat. PowerPC64 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,9 +23,12 @@
extern __typeof (strncat) __strncat_ppc attribute_hidden;
extern __typeof (strncat) __strncat_power7 attribute_hidden;
+extern __typeof (strncat) __strncat_power8 attribute_hidden;
libc_ifunc (strncat,
- (hwcap & PPC_FEATURE_HAS_VSX)
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strncat_power8
+ : (hwcap & PPC_FEATURE_HAS_VSX)
? __strncat_power7
: __strncat_ppc);
#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp-power4.S b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power4.S
index d1aebbbc93..860f2ec18a 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncmp-power4.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power4.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -15,23 +15,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name,alignt,words) \
- .section ".text"; \
- ENTRY_2(__strncmp_power4) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strncmp_power4): \
- cfi_startproc; \
- LOCALENTRY(__strncmp_power4)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strncmp_power4) \
- END_2(__strncmp_power4)
+#define STRNCMP __strncmp_power4
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power7.S
index 3edc4fce62..8d4108e1ba 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncmp-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power7.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -15,24 +15,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name,alignt,words) \
- .section ".text"; \
- ENTRY_2(__strncmp_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strncmp_power7): \
- cfi_startproc; \
- LOCALENTRY(__strncmp_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strncmp_power7) \
- END_2(__strncmp_power7)
-
+#define STRNCMP __strncmp_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power8.S
index 771986ec77..e8a5fb804f 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncmp-power8.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power8.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2015-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -15,26 +15,11 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name,alignt,words) \
- .section ".text"; \
- ENTRY_2(__strncmp_power8) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strncmp_power8): \
- cfi_startproc; \
- LOCALENTRY(__strncmp_power8)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strncmp_power8) \
- END_2(__strncmp_power8)
-
+#if IS_IN (libc)
+#define STRNCMP __strncmp_power8
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
#include <sysdeps/powerpc/powerpc64/power8/strncmp.S>
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp-power9.S b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power9.S
new file mode 100644
index 0000000000..3356f72527
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power9.S
@@ -0,0 +1,25 @@
+/* Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if IS_IN (libc)
+#define STRNCMP __strncmp_power9
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power9/strncmp.S>
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp-ppc64.S b/sysdeps/powerpc/powerpc64/multiarch/strncmp-ppc64.S
index 30b139aa78..1b5704a079 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncmp-ppc64.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncmp-ppc64.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -15,24 +15,8 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
#if defined SHARED && IS_IN (libc)
-#undef EALIGN
-#define EALIGN(name,alignt,words) \
- .section ".text"; \
- ENTRY_2(__strncmp_ppc) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strncmp_ppc): \
- cfi_startproc; \
- LOCALENTRY(__strncmp_ppc)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strncmp_ppc) \
- END_2(__strncmp_ppc)
+# define STRNCMP __strncmp_ppc
# undef libc_hidden_builtin_def
# define libc_hidden_builtin_def(name) \
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp.c b/sysdeps/powerpc/powerpc64/multiarch/strncmp.c
index 1eb6e517af..c4a40d1ec7 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncmp.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncmp.c
@@ -1,5 +1,5 @@
/* Multiple versions of strncmp.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,9 @@
/* Define multiple versions only for definition in libc. */
#if defined SHARED && IS_IN (libc)
+# define strncmp __redirect_strncmp
+/* Omit the strncmp inline definitions because it would redefine strncmp. */
+# define __NO_STRING_INLINES
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
@@ -26,15 +29,19 @@ extern __typeof (strncmp) __strncmp_ppc attribute_hidden;
extern __typeof (strncmp) __strncmp_power4 attribute_hidden;
extern __typeof (strncmp) __strncmp_power7 attribute_hidden;
extern __typeof (strncmp) __strncmp_power8 attribute_hidden;
+extern __typeof (strncmp) __strncmp_power9 attribute_hidden;
+# undef strncmp
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
-libc_ifunc (strncmp,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __strncmp_power8 :
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strncmp_power7 :
- (hwcap & PPC_FEATURE_POWER4)
- ? __strncmp_power4
- : __strncmp_ppc);
+libc_ifunc_redirected (__redirect_strncmp, strncmp,
+ (hwcap2 & PPC_FEATURE2_ARCH_3_00)
+ ? __strncmp_power9 :
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strncmp_power8
+ : (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strncmp_power7
+ : (hwcap & PPC_FEATURE_POWER4)
+ ? __strncmp_power4
+ : __strncmp_ppc);
#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncpy-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strncpy-power7.S
index 14efab3604..a044c11327 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncpy-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncpy-power7.S
@@ -1,5 +1,5 @@
/* Optimized strncpy implementation for POWER7.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,27 +16,14 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__strncpy_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strncpy_power7): \
- cfi_startproc; \
- LOCALENTRY(__strncpy_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strncpy_power7) \
- END_2(__strncpy_power7)
+#define STRNCPY __strncpy_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
#define MEMSET __memset_power7
+#ifdef SHARED
+#define MEMSET_is_local
+#endif
#include <sysdeps/powerpc/powerpc64/power7/strncpy.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncpy-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strncpy-power8.S
index 3263bbd70a..10b8453f58 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncpy-power8.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncpy-power8.S
@@ -1,5 +1,5 @@
/* Optimized strncpy implementation for POWER8.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,25 +16,15 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__strncpy_power8) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strncpy_power8): \
- cfi_startproc; \
- LOCALENTRY(__strncpy_power8)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strncpy_power8) \
- END_2(__strncpy_power8)
+#define STRNCPY __strncpy_power8
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
+/* memset is used to pad the end of the string. */
+#define MEMSET __memset_power8
+#ifdef SHARED
+#define MEMSET_is_local
+#endif
+
#include <sysdeps/powerpc/powerpc64/power8/strncpy.S>
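
An illustrative aside, not part of the patch: the MEMSET hook above exists
because strncpy must NUL-pad the destination out to n bytes, and the POWER8
assembly delegates that padding to __memset_power8.  A plain C sketch of the
same semantics (simple_strncpy is a hypothetical name):

#include <string.h>

char *
simple_strncpy (char *dst, const char *src, size_t n)
{
  size_t copied = strnlen (src, n);          /* stop at NUL or at n bytes */
  memcpy (dst, src, copied);
  memset (dst + copied, '\0', n - copied);   /* pad the remainder with NULs */
  return dst;
}
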
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncpy-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strncpy-ppc64.c
index 6f5907eb9f..7efbe10905 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncpy-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncpy-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncpy.c b/sysdeps/powerpc/powerpc64/multiarch/strncpy.c
index 0176514c1e..41e5ea8446 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncpy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncpy.c
@@ -1,5 +1,5 @@
/* Multiple versions of strncpy.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,9 @@
/* Define multiple versions only for definition in libc. */
#if IS_IN (libc)
+# define strncpy __redirect_strncpy
+/* Omit the strncpy inline definitions because it would redefine strncpy. */
+# define __NO_STRING_INLINES
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
@@ -25,14 +28,15 @@
extern __typeof (strncpy) __strncpy_ppc attribute_hidden;
extern __typeof (strncpy) __strncpy_power7 attribute_hidden;
extern __typeof (strncpy) __strncpy_power8 attribute_hidden;
+# undef strncpy
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
-libc_ifunc (strncpy,
- (hwcap2 & PPC_FEATURE2_ARCH_2_07)
- ? __strncpy_power8 :
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strncpy_power7
- : __strncpy_ppc);
+libc_ifunc_redirected (__redirect_strncpy, strncpy,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strncpy_power8
+ : (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strncpy_power7
+ : __strncpy_ppc);
#endif
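
The strncpy.c hunk above follows glibc's ifunc-redirect idiom: the public name is #defined to a redirect alias before <string.h> is included (so the header's declaration and the string inlines do not clash with the ifunc definition), then #undef'ed before libc_ifunc_redirected emits the selector. As a rough, self-contained illustration of what such an hwcap-driven selection amounts to, here is a sketch using GCC's ifunc attribute; the my_strncpy name, the placeholder bodies, and the use of getauxval are assumptions for illustration only -- glibc's internal macro attaches the ifunc to the real symbol and hands hwcap/hwcap2 to the resolver directly rather than calling getauxval.

/* Illustrative sketch only -- not the glibc macro or its routines.  */
#include <stddef.h>
#include <sys/auxv.h>   /* getauxval, AT_HWCAP*, PPC_FEATURE* on powerpc.  */

/* Placeholder bodies standing in for the hand-written asm variants
   (__strncpy_power8/_power7/_ppc) selected in the hunk above.  */
static char *
my_strncpy_generic (char *dest, const char *src, size_t n)
{
  size_t i = 0;
  for (; i < n && src[i] != '\0'; i++)
    dest[i] = src[i];
  for (; i < n; i++)
    dest[i] = '\0';
  return dest;
}

static char *
my_strncpy_power8 (char *dest, const char *src, size_t n)
{
  return my_strncpy_generic (dest, src, n);   /* placeholder body  */
}

static char *
my_strncpy_power7 (char *dest, const char *src, size_t n)
{
  return my_strncpy_generic (dest, src, n);   /* placeholder body  */
}

typedef char *strncpy_fn (char *, const char *, size_t);

/* The resolver runs once, when the object is relocated; every later
   call to my_strncpy goes straight to the routine it returned.  */
static strncpy_fn *
my_strncpy_resolver (void)
{
  unsigned long hwcap  = getauxval (AT_HWCAP);
  unsigned long hwcap2 = getauxval (AT_HWCAP2);

  if (hwcap2 & PPC_FEATURE2_ARCH_2_07)   /* POWER8 and newer  */
    return my_strncpy_power8;
  if (hwcap & PPC_FEATURE_HAS_VSX)       /* POWER7            */
    return my_strncpy_power7;
  return my_strncpy_generic;
}

char *my_strncpy (char *dest, const char *src, size_t n)
  __attribute__ ((ifunc ("my_strncpy_resolver")));
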
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strnlen-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strnlen-power7.S
index ae4e6c05bb..565937b534 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strnlen-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strnlen-power7.S
@@ -1,5 +1,5 @@
/* Optimized strnlen version for POWER7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,22 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__strnlen_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__strnlen_power7): \
- cfi_startproc; \
- LOCALENTRY(__strnlen_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strnlen_power7) \
- END_2(__strnlen_power7)
+#define STRNLEN __strnlen_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strnlen-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strnlen-power8.S
new file mode 100644
index 0000000000..ed5b67e607
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strnlen-power8.S
@@ -0,0 +1,26 @@
+/* Optimized strnlen version for POWER8.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define __strnlen __strnlen_power8
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#include <sysdeps/powerpc/powerpc64/power8/strnlen.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strnlen-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strnlen-ppc64.c
index de17a38f83..ce710e218d 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strnlen-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strnlen-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strnlen.c b/sysdeps/powerpc/powerpc64/multiarch/strnlen.c
index c4907e9ec8..298bfa0dc1 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strnlen.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strnlen.c
@@ -1,5 +1,5 @@
/* Multiple versions of strnlen.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,19 +17,24 @@
<http://www.gnu.org/licenses/>. */
#if IS_IN (libc)
+# define strnlen __redirect_strnlen
+# define __strnlen __redirect___strnlen
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
extern __typeof (__strnlen) __strnlen_ppc attribute_hidden;
extern __typeof (__strnlen) __strnlen_power7 attribute_hidden;
-
-libc_ifunc (__strnlen,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strnlen_power7
- : __strnlen_ppc);
+extern __typeof (__strnlen) __strnlen_power8 attribute_hidden;
+# undef strnlen
+# undef __strnlen
+libc_ifunc_redirected (__redirect___strnlen, __strnlen,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strnlen_power8 :
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strnlen_power7
+ : __strnlen_ppc);
weak_alias (__strnlen, strnlen)
-libc_hidden_def (strnlen)
#else
#include <string/strnlen.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strrchr-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strrchr-power7.S
index 464e74a5e8..949803031e 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strrchr-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strrchr-power7.S
@@ -1,5 +1,5 @@
/* Optimized strrchr implementation for POWER7.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,22 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef ENTRY
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(__strrchr_power7) \
- .align ALIGNARG(2); \
- BODY_LABEL(__strrchr_power7): \
- cfi_startproc; \
- LOCALENTRY(__strrchr_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strrchr_power7) \
- END_2(__strrchr_power7)
+#define STRRCHR __strrchr_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strrchr-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strrchr-power8.S
new file mode 100644
index 0000000000..342d02a38c
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strrchr-power8.S
@@ -0,0 +1,24 @@
+/* Optimized strrchr implementation for POWER8.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define STRRCHR __strrchr_power8
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strrchr.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strrchr-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strrchr-ppc64.c
index 151bd35ea6..bcf049a2ed 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strrchr-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strrchr-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,10 +18,10 @@
#include <string.h>
#define STRRCHR __strrchr_ppc
+
#undef weak_alias
-#define weak_alias(name, aliasname) \
- extern __typeof (__strrchr_ppc) aliasname \
- __attribute__ ((weak, alias ("__strrchr_ppc")));
+#define weak_alias(name, aliasname)
+
#if IS_IN (libc) && defined(SHARED)
# undef libc_hidden_builtin_def
# define libc_hidden_builtin_def(name) \
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strrchr.c b/sysdeps/powerpc/powerpc64/multiarch/strrchr.c
index 45742bc910..d46f7c0403 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strrchr.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strrchr.c
@@ -1,5 +1,5 @@
/* Multiple versions of strrchr. PowerPC64 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,18 +18,23 @@
/* Define multiple versions only for definition in libc. */
#if IS_IN (libc)
+# define strrchr __redirect_strrchr
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
extern __typeof (strrchr) __strrchr_ppc attribute_hidden;
extern __typeof (strrchr) __strrchr_power7 attribute_hidden;
+extern __typeof (strrchr) __strrchr_power8 attribute_hidden;
+#undef strrchr
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
-libc_ifunc (strrchr,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strrchr_power7
- : __strrchr_ppc);
+libc_ifunc_redirected (__redirect_strrchr, strrchr,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strrchr_power8 :
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strrchr_power7
+ : __strrchr_ppc);
weak_alias (strrchr, rindex)
#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strspn-power8.S b/sysdeps/powerpc/powerpc64/multiarch/strspn-power8.S
new file mode 100644
index 0000000000..bc9f493cf7
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strspn-power8.S
@@ -0,0 +1,23 @@
+/* Optimized strspn implementation for POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define STRSPN __strspn_power8
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/power8/strspn.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strspn-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strspn-ppc64.c
new file mode 100644
index 0000000000..05a240b252
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strspn-ppc64.c
@@ -0,0 +1,25 @@
+/* Default strspn implementation for PowerPC64.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define STRSPN __strspn_ppc
+#ifdef SHARED
+#undef libc_hidden_def
+#define libc_hidden_def(name)
+#endif
+
+#include <string/strspn.c>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strspn.c b/sysdeps/powerpc/powerpc64/multiarch/strspn.c
new file mode 100644
index 0000000000..61251610e7
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strspn.c
@@ -0,0 +1,35 @@
+/* Multiple versions of strspn. PowerPC64 version.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+#undef strspn
+extern __typeof (strspn) __libc_strspn;
+
+extern __typeof (strspn) __strspn_ppc attribute_hidden;
+extern __typeof (strspn) __strspn_power8 attribute_hidden;
+
+libc_ifunc (__libc_strspn,
+ (hwcap2 & PPC_FEATURE2_ARCH_2_07)
+ ? __strspn_power8
+ : __strspn_ppc);
+
+weak_alias (__libc_strspn, strspn)
+libc_hidden_builtin_def (strspn)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strstr-power7.S b/sysdeps/powerpc/powerpc64/multiarch/strstr-power7.S
index 68b93b07e7..a24ab585f4 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strstr-power7.S
+++ b/sysdeps/powerpc/powerpc64/multiarch/strstr-power7.S
@@ -1,5 +1,5 @@
/* Optimized strstr implementation for POWER7.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -16,23 +16,7 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <sysdep.h>
-
-#undef EALIGN
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(__strstr_power7) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
- BODY_LABEL(__strstr_power7): \
- cfi_startproc; \
- LOCALENTRY(__strstr_power7)
-
-#undef END
-#define END(name) \
- cfi_endproc; \
- TRACEBACK(__strstr_power7) \
- END_2(__strstr_power7)
+#define STRSTR __strstr_power7
#undef libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)
@@ -40,5 +24,10 @@
#define STRLEN __strlen_power7
#define STRNLEN __strnlen_power7
#define STRCHR __strchr_power7
+#ifdef SHARED
+#define STRLEN_is_local
+#define STRNLEN_is_local
+#define STRCHR_is_local
+#endif
#include <sysdeps/powerpc/powerpc64/power7/strstr.S>
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strstr-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/strstr-ppc64.c
index 2ca62c4b74..971bb18f7a 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strstr-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strstr-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2015-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strstr.c b/sysdeps/powerpc/powerpc64/multiarch/strstr.c
index 7efc4b0913..264b5d88d5 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strstr.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strstr.c
@@ -1,5 +1,5 @@
/* Multiple versions of strstr. PowerPC64 version.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,17 +18,19 @@
/* Define multiple versions only for definition in libc. */
#if IS_IN (libc)
+# define strstr __redirect_strstr
# include <string.h>
# include <shlib-compat.h>
# include "init-arch.h"
extern __typeof (strstr) __strstr_ppc attribute_hidden;
extern __typeof (strstr) __strstr_power7 attribute_hidden;
+# undef strstr
/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
ifunc symbol properly. */
-libc_ifunc (strstr,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __strstr_power7
- : __strstr_ppc);
+libc_ifunc_redirected (__redirect_strstr, strstr,
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strstr_power7
+ : __strstr_ppc);
#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcschr-power6.c b/sysdeps/powerpc/powerpc64/multiarch/wcschr-power6.c
index b78fdd3533..52f562b613 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcschr-power6.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcschr-power6.c
@@ -1,5 +1,5 @@
/* wcschr.c - Wide Character Search for powerpc64/power6.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcschr-power7.c b/sysdeps/powerpc/powerpc64/multiarch/wcschr-power7.c
index 3374ba26ef..0acad04706 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcschr-power7.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcschr-power7.c
@@ -1,5 +1,5 @@
/* wcschr.c - Wide Character Search for powerpc64/power7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcschr-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/wcschr-ppc64.c
index 5192fe8704..f72865241b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcschr-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcschr-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcschr.c b/sysdeps/powerpc/powerpc64/multiarch/wcschr.c
index 44c9b971ce..9976a63afa 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcschr.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcschr.c
@@ -1,5 +1,5 @@
/* Multiple versions of wcschr
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,8 @@
<http://www.gnu.org/licenses/>. */
#if IS_IN (libc)
+# define wcschr __redirect_wcschr
+# define __wcschr __redirect___wcschr
# include <wchar.h>
# include <shlib-compat.h>
# include "init-arch.h"
@@ -24,15 +26,16 @@
extern __typeof (wcschr) __wcschr_ppc attribute_hidden;
extern __typeof (wcschr) __wcschr_power6 attribute_hidden;
extern __typeof (wcschr) __wcschr_power7 attribute_hidden;
+# undef wcschr
+# undef __wcschr
-libc_ifunc (__wcschr,
- (hwcap & PPC_FEATURE_HAS_VSX)
- ? __wcschr_power7 :
- (hwcap & PPC_FEATURE_ARCH_2_05)
- ? __wcschr_power6
- : __wcschr_ppc);
+libc_ifunc_redirected (__redirect___wcschr, __wcschr,
+ (hwcap & PPC_FEATURE_HAS_VSX)
+ ? __wcschr_power7
+ : (hwcap & PPC_FEATURE_ARCH_2_05)
+ ? __wcschr_power6
+ : __wcschr_ppc);
weak_alias (__wcschr, wcschr)
-libc_hidden_builtin_def (wcschr)
#else
#undef libc_hidden_def
#define libc_hidden_def(a)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcscpy-power6.c b/sysdeps/powerpc/powerpc64/multiarch/wcscpy-power6.c
index cc655942c2..ef0f7ccdc7 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcscpy-power6.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcscpy-power6.c
@@ -1,5 +1,5 @@
 /* wcscpy.c - Wide Character Copy for powerpc64/power6.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcscpy-power7.c b/sysdeps/powerpc/powerpc64/multiarch/wcscpy-power7.c
index 7aec88dc94..2712f58156 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcscpy-power7.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcscpy-power7.c
@@ -1,5 +1,5 @@
 /* wcscpy.c - Wide Character Copy for powerpc64/power7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcscpy-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/wcscpy-ppc64.c
index f48a65f0ef..4bcf792544 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcscpy-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcscpy-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcscpy.c b/sysdeps/powerpc/powerpc64/multiarch/wcscpy.c
index 5c919f56ff..76fc3569b8 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcscpy.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcscpy.c
@@ -1,5 +1,5 @@
/* Multiple versions of wcscpy.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power6.c b/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power6.c
index 184dc941d7..05414b00e3 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power6.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power6.c
@@ -1,5 +1,5 @@
/* wcsrchr.c - Wide Character Search for powerpc64/power6.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power7.c b/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power7.c
index 927d5c8cc3..35a56386d2 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power7.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-power7.c
@@ -1,5 +1,5 @@
/* wcsrchr.c - Wide Character Search for powerpc64/power7.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-ppc64.c
index bb796726dd..708575042b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcsrchr-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wcsrchr.c b/sysdeps/powerpc/powerpc64/multiarch/wcsrchr.c
index dc4b796723..06e4b61d11 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wcsrchr.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wcsrchr.c
@@ -1,5 +1,5 @@
/* Multiple versions of wcsrchr.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/multiarch/wordcopy-ppc64.c b/sysdeps/powerpc/powerpc64/multiarch/wordcopy-ppc64.c
index 7a0694be39..486003f0c9 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/wordcopy-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/wordcopy-ppc64.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/power4/memcmp.S b/sysdeps/powerpc/powerpc64/power4/memcmp.S
index c1a77c64b0..e5319f101f 100644
--- a/sysdeps/powerpc/powerpc64/power4/memcmp.S
+++ b/sysdeps/powerpc/powerpc64/power4/memcmp.S
@@ -1,5 +1,5 @@
/* Optimized memcmp implementation for PowerPC64.
- Copyright (C) 2003-2016 Free Software Foundation, Inc.
+ Copyright (C) 2003-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -22,8 +22,12 @@
const char *s2 [r4],
size_t size [r5]) */
+#ifndef MEMCMP
+# define MEMCMP memcmp
+#endif
+
.machine power4
-EALIGN (memcmp, 4, 0)
+ENTRY_TOCLESS (MEMCMP, 4)
CALL_MCOUNT 3
#define rRTN r3
@@ -52,8 +56,8 @@ EALIGN (memcmp, 4, 0)
byte loop. */
blt cr1, L(bytealigned)
std rWORD8, -8(r1)
- cfi_offset(rWORD8, -8)
std rWORD7, -16(r1)
+ cfi_offset(rWORD8, -8)
cfi_offset(rWORD7, -16)
bne L(unaligned)
/* At this point we know both strings have the same alignment and the
@@ -728,18 +732,18 @@ L(unaligned):
the actual start of rSTR2. */
clrrdi rSTR2, rSTR2, 3
std rWORD2_SHIFT, -48(r1)
- cfi_offset(rWORD2_SHIFT, -48)
/* Compute the left/right shift counts for the unaligned rSTR2,
compensating for the logical (DW aligned) start of rSTR1. */
clrldi rSHL, rWORD8_SHIFT, 61
clrrdi rSTR1, rSTR1, 3
std rWORD4_SHIFT, -56(r1)
- cfi_offset(rWORD4_SHIFT, -56)
sldi rSHL, rSHL, 3
cmpld cr5, rWORD8_SHIFT, rSTR2
add rN, rN, r12
sldi rWORD6, r12, 3
std rWORD6_SHIFT, -64(r1)
+ cfi_offset(rWORD2_SHIFT, -48)
+ cfi_offset(rWORD4_SHIFT, -56)
cfi_offset(rWORD6_SHIFT, -64)
subfic rSHR, rSHL, 64
srdi r0, rN, 5 /* Divide by 32 */
@@ -833,15 +837,15 @@ L(duPs4):
.align 4
L(DWunaligned):
std rWORD8_SHIFT, -40(r1)
- cfi_offset(rWORD8_SHIFT, -40)
clrrdi rSTR2, rSTR2, 3
std rWORD2_SHIFT, -48(r1)
- cfi_offset(rWORD2_SHIFT, -48)
srdi r0, rN, 5 /* Divide by 32 */
std rWORD4_SHIFT, -56(r1)
- cfi_offset(rWORD4_SHIFT, -56)
andi. r12, rN, 24 /* Get the DW remainder */
std rWORD6_SHIFT, -64(r1)
+ cfi_offset(rWORD8_SHIFT, -40)
+ cfi_offset(rWORD2_SHIFT, -48)
+ cfi_offset(rWORD4_SHIFT, -56)
cfi_offset(rWORD6_SHIFT, -64)
sldi rSHL, rSHL, 3
#ifdef __LITTLE_ENDIAN__
@@ -1360,6 +1364,6 @@ L(duzeroLength):
li rRTN, 0
blr
-END (memcmp)
+END (MEMCMP)
libc_hidden_builtin_def (memcmp)
weak_alias (memcmp, bcmp)
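
The new #ifndef MEMCMP hook lets a multiarch variant rename this routine by defining a single macro before including the file, instead of redefining the EALIGN/END boilerplate the way the old strncpy/strnlen wrappers above used to. Such a wrapper is expected to reduce to a handful of preprocessor lines roughly like the following (a sketch; the wrapper file name and the __memcmp_power4 symbol are assumptions here, not something this hunk adds):

/* Hypothetical multiarch/memcmp-power4.S built on the MEMCMP hook.  */
#define MEMCMP __memcmp_power4            /* rename the entry point       */

#undef  weak_alias
#define weak_alias(name, alias)           /* bcmp alias stays with ifunc  */
#undef  libc_hidden_builtin_def
#define libc_hidden_builtin_def(name)     /* likewise the hidden alias    */

#include <sysdeps/powerpc/powerpc64/power4/memcmp.S>
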
diff --git a/sysdeps/powerpc/powerpc64/power4/memcpy.S b/sysdeps/powerpc/powerpc64/power4/memcpy.S
index 844f08f627..ce074943f2 100644
--- a/sysdeps/powerpc/powerpc64/power4/memcpy.S
+++ b/sysdeps/powerpc/powerpc64/power4/memcpy.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC64.
- Copyright (C) 2003-2016 Free Software Foundation, Inc.
+ Copyright (C) 2003-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,7 +18,7 @@
#include <sysdep.h>
-/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+/* void * [r3] memcpy (void *dst [r3], void *src [r4], size_t len [r5]);
Returns 'dst'.
Memcpy handles short copies (< 32-bytes) using a binary move blocks
@@ -33,8 +33,11 @@
possible when both source and destination are doubleword aligned.
 Each case has an optimized unrolled loop. */
+#ifndef MEMCPY
+# define MEMCPY memcpy
+#endif
.machine power4
-EALIGN (memcpy, 5, 0)
+ENTRY_TOCLESS (MEMCPY, 5)
CALL_MCOUNT 3
cmpldi cr1,5,31
@@ -470,5 +473,5 @@ EALIGN (memcpy, 5, 0)
ld 31,-8(1)
ld 3,-16(1)
blr
-END_GEN_TB (memcpy,TB_TOCLESS)
+END_GEN_TB (MEMCPY,TB_TOCLESS)
libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc64/power4/memset.S b/sysdeps/powerpc/powerpc64/power4/memset.S
index bc249c31ab..a8f0dfaa1a 100644
--- a/sysdeps/powerpc/powerpc64/power4/memset.S
+++ b/sysdeps/powerpc/powerpc64/power4/memset.S
@@ -1,5 +1,5 @@
/* Optimized memset implementation for PowerPC64.
- Copyright (C) 1997-2016 Free Software Foundation, Inc.
+ Copyright (C) 1997-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,15 +18,18 @@
#include <sysdep.h>
-/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
+/* void * [r3] memset (void *s [r3], int c [r4], size_t n [r5]));
Returns 's'.
The memset is done in three sizes: byte (8 bits), word (32 bits),
cache line (256 bits). There is a special case for setting cache lines
to 0, to take advantage of the dcbz instruction. */
+#ifndef MEMSET
+# define MEMSET memset
+#endif
.machine power4
-EALIGN (memset, 5, 0)
+ENTRY_TOCLESS (MEMSET, 5)
CALL_MCOUNT 3
#define rTMP r0
@@ -232,12 +235,12 @@ L(medium_27f):
L(medium_28t):
std rCHR, -8(rMEMP)
blr
-END_GEN_TB (memset,TB_TOCLESS)
+END_GEN_TB (MEMSET,TB_TOCLESS)
libc_hidden_builtin_def (memset)
/* Copied from bzero.S to prevent the linker from inserting a stub
between bzero and memset. */
-ENTRY (__bzero)
+ENTRY_TOCLESS (__bzero)
CALL_MCOUNT 3
mr r5,r4
li r4,0
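
The __bzero entry kept immediately behind memset exists so the linker never has to insert a call stub between the two; the instructions above only move the length into memset's third argument and zero the fill value before branching into the memset body. In C terms the entry amounts to no more than this (illustrative equivalent only, not code from the patch):

/* C equivalent of the __bzero prologue above: fill byte 0, length as
   the third argument, then behave exactly like memset.  */
#include <string.h>

void
bzero_equivalent (void *s, size_t n)
{
  memset (s, 0, n);
}
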
diff --git a/sysdeps/powerpc/powerpc64/power4/strncmp.S b/sysdeps/powerpc/powerpc64/power4/strncmp.S
index e45ee6dce2..73629e4191 100644
--- a/sysdeps/powerpc/powerpc64/power4/strncmp.S
+++ b/sysdeps/powerpc/powerpc64/power4/strncmp.S
@@ -1,5 +1,5 @@
/* Optimized strcmp implementation for PowerPC64.
- Copyright (C) 2003-2016 Free Software Foundation, Inc.
+ Copyright (C) 2003-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,11 +18,15 @@
#include <sysdep.h>
+#ifndef STRNCMP
+# define STRNCMP strncmp
+#endif
+
/* See strlen.s for comments on how the end-of-string testing works. */
/* int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5]) */
-EALIGN (strncmp, 4, 0)
+ENTRY_TOCLESS (STRNCMP, 4)
CALL_MCOUNT 3
#define rTMP2 r0
@@ -217,5 +221,5 @@ L(u4): sub rRTN, rWORD1, rWORD2
L(ux):
li rRTN, 0
blr
-END (strncmp)
+END (STRNCMP)
libc_hidden_builtin_def (strncmp)
diff --git a/sysdeps/powerpc/powerpc64/power5+/Implies b/sysdeps/powerpc/powerpc64/power5+/Implies
deleted file mode 100644
index 565bc94471..0000000000
--- a/sysdeps/powerpc/powerpc64/power5+/Implies
+++ /dev/null
@@ -1,4 +0,0 @@
-powerpc/power5+/fpu
-powerpc/power5+
-powerpc/powerpc64/power5/fpu
-powerpc/powerpc64/power5
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/Implies b/sysdeps/powerpc/powerpc64/power5+/fpu/Implies
deleted file mode 100644
index f00c50fb49..0000000000
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power5/fpu
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/power5+/fpu/multiarch/Implies
deleted file mode 100644
index c0e67848e2..0000000000
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power5/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S
index a12fa29ab2..e500932573 100644
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S
@@ -1,5 +1,5 @@
/* ceil function. PowerPC64/power5+ version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,20 +18,13 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.machine "power5"
-EALIGN (__ceil, 4, 0)
+ENTRY_TOCLESS (__ceil, 4)
CALL_MCOUNT 0
frip fp1, fp1
blr
END (__ceil)
-weak_alias (__ceil, ceil)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__ceil, ceill)
-strong_alias (__ceil, __ceill)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __ceil, ceill, GLIBC_2_0)
-#endif
+libm_alias_double (__ceil, ceil)
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S
index b1664c8352..d0b2118c2a 100644
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S
@@ -1,5 +1,5 @@
/* ceilf function. PowerPC64/power5+ version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,14 +17,15 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.machine "power5"
-EALIGN (__ceilf, 4, 0)
+ENTRY_TOCLESS (__ceilf, 4)
CALL_MCOUNT 0
frip fp1, fp1 /* The rounding instructions are double. */
 frsp fp1, fp1 /* But we need to set overflow for float. */
blr
END (__ceilf)
-weak_alias (__ceilf, ceilf)
+libm_alias_float (__ceil, ceil)
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S
index c1fec4c9f2..9b9eb1529d 100644
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S
@@ -1,5 +1,5 @@
/* floor function. PowerPC64/power5+ version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,20 +18,13 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.machine "power5"
-EALIGN (__floor, 4, 0)
+ENTRY_TOCLESS (__floor, 4)
CALL_MCOUNT 0
frim fp1, fp1
blr
END (__floor)
-weak_alias (__floor, floor)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__floor, floorl)
-strong_alias (__floor, __floorl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
-compat_symbol (libm, __floor, floorl, GLIBC_2_0)
-#endif
+libm_alias_double (__floor, floor)
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S
index 4108cfa06d..790edbd9a4 100644
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S
@@ -1,5 +1,5 @@
/* floorf function. PowerPC64/power5+ version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,14 +17,15 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.machine "power5"
-EALIGN (__floorf, 4, 0)
+ENTRY_TOCLESS (__floorf, 4)
CALL_MCOUNT 0
frim fp1, fp1 /* The rounding instructions are double. */
 frsp fp1, fp1 /* But we need to set overflow for float. */
blr
END (__floorf)
-weak_alias (__floorf, floorf)
+libm_alias_float (__floor, floor)
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_llround.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_llround.S
index 3388c35e6c..9fc4ec4a04 100644
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/s_llround.S
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_llround.S
@@ -1,5 +1,5 @@
/* llround function. POWER5+, PowerPC64 version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,8 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-float.h>
+#include <libm-alias-double.h>
/* long long [r3] llround (float x [fp1])
IEEE 1003.1 llround function. IEEE specifies "round to the nearest
@@ -30,7 +32,7 @@
round to zero instruction. */
.machine "power5"
-EALIGN (__llround, 4, 0)
+ENTRY_TOCLESS (__llround, 4)
CALL_MCOUNT 0
frin fp2, fp1 /* Round to nearest +-0.5. */
fctidz fp3, fp2 /* Convert To Integer DW round toward 0. */
@@ -43,16 +45,12 @@ EALIGN (__llround, 4, 0)
END (__llround)
strong_alias (__llround, __lround)
-weak_alias (__llround, llround)
-weak_alias (__lround, lround)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__llround, llroundl)
-strong_alias (__llround, __llroundl)
-weak_alias (__lround, lroundl)
-strong_alias (__lround, __lroundl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __llround, llroundl, GLIBC_2_1)
-compat_symbol (libm, __lround, lroundl, GLIBC_2_1)
-#endif
+libm_alias_double (__llround, llround)
+libm_alias_double (__lround, lround)
+/* The double version also works for single-precision as both float and
+ double parameters are passed in 64bit FPRs and both versions are expected
+ to return [long] long type. */
+strong_alias (__llround, __llroundf)
+libm_alias_float (__llround, llround)
+strong_alias (__lround, __lroundf)
+libm_alias_float (__lround, lround)
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_llroundf.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_llroundf.S
new file mode 100644
index 0000000000..9ea6bd105b
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_llroundf.S
@@ -0,0 +1 @@
+/* __lroundf is in s_llround.S. */
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S
index a73311bbff..ed4df257d3 100644
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S
@@ -1,5 +1,5 @@
/* round function. PowerPC64/power5+ version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,20 +18,13 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.machine "power5"
-EALIGN (__round, 4, 0)
+ENTRY_TOCLESS (__round, 4)
CALL_MCOUNT 0
frin fp1, fp1
blr
END (__round)
-weak_alias (__round, round)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__round, roundl)
-strong_alias (__round, __roundl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __round, roundl, GLIBC_2_1)
-#endif
+libm_alias_double (__round, round)
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S
index 7df18ecb6b..32df46059a 100644
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S
@@ -1,5 +1,5 @@
/* roundf function. PowerPC64/power5+ version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,14 +17,15 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.machine "power5"
-EALIGN (__roundf, 4, 0)
+ENTRY_TOCLESS (__roundf, 4)
CALL_MCOUNT 0
frin fp1, fp1 /* The rounding instructions are double. */
 frsp fp1, fp1 /* But we need to set overflow for float. */
blr
END (__roundf)
-weak_alias (__roundf, roundf)
+libm_alias_float (__round, round)
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S
index b8971801dd..a45766582a 100644
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S
@@ -1,5 +1,5 @@
/* trunc function. PowerPC64/power5+ version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,20 +18,13 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-double.h>
.machine "power5"
-EALIGN (__trunc, 4, 0)
+ENTRY_TOCLESS (__trunc, 4)
CALL_MCOUNT 0
friz fp1, fp1
blr
END (__trunc)
-weak_alias (__trunc, trunc)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__trunc, truncl)
-strong_alias (__trunc, __truncl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __trunc, truncl, GLIBC_2_1)
-#endif
+libm_alias_double (__trunc, trunc)
diff --git a/sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S b/sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S
index 6e18ae1ae8..f06f54f163 100644
--- a/sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S
+++ b/sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S
@@ -1,5 +1,5 @@
/* truncf function. PowerPC64/power5+ version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,14 +17,15 @@
<http://www.gnu.org/licenses/>. */
#include <sysdep.h>
+#include <libm-alias-float.h>
.machine "power5"
-EALIGN (__truncf, 4, 0)
+ENTRY_TOCLESS (__truncf, 4)
CALL_MCOUNT 0
friz fp1, fp1 /* The rounding instructions are double. */
 frsp fp1, fp1 /* But we need to set overflow for float. */
blr
END (__truncf)
-weak_alias (__truncf, truncf)
+libm_alias_float (__trunc, trunc)
diff --git a/sysdeps/powerpc/powerpc64/power5+/multiarch/Implies b/sysdeps/powerpc/powerpc64/power5+/multiarch/Implies
deleted file mode 100644
index 0851b19fa2..0000000000
--- a/sysdeps/powerpc/powerpc64/power5+/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power5/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power5/Implies b/sysdeps/powerpc/powerpc64/power5/Implies
deleted file mode 100644
index bedb20b65c..0000000000
--- a/sysdeps/powerpc/powerpc64/power5/Implies
+++ /dev/null
@@ -1,2 +0,0 @@
-powerpc/powerpc64/power4/fpu
-powerpc/powerpc64/power4
diff --git a/sysdeps/powerpc/powerpc64/power5/fpu/Implies b/sysdeps/powerpc/powerpc64/power5/fpu/Implies
deleted file mode 100644
index 6b8c23efa6..0000000000
--- a/sysdeps/powerpc/powerpc64/power5/fpu/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power4/fpu/
diff --git a/sysdeps/powerpc/powerpc64/power5/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/power5/fpu/multiarch/Implies
deleted file mode 100644
index 3740d050a6..0000000000
--- a/sysdeps/powerpc/powerpc64/power5/fpu/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power4/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power5/fpu/s_isnan.S b/sysdeps/powerpc/powerpc64/power5/fpu/s_isnan.S
index 57b5613d41..83b5657b75 100644
--- a/sysdeps/powerpc/powerpc64/power5/fpu/s_isnan.S
+++ b/sysdeps/powerpc/powerpc64/power5/fpu/s_isnan.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64 version.
- Copyright (C) 2008-2016 Free Software Foundation, Inc.
+ Copyright (C) 2008-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,7 +21,7 @@
/* int __isnan(x) */
.machine power5
-EALIGN (__isnan, 4, 0)
+ENTRY_TOCLESS (__isnan, 4)
CALL_MCOUNT 0
stfd fp1,-8(r1) /* copy FPR to GPR */
lis r0,0x7ff0
diff --git a/sysdeps/powerpc/powerpc64/power5/multiarch/Implies b/sysdeps/powerpc/powerpc64/power5/multiarch/Implies
deleted file mode 100644
index 9a3cbb0938..0000000000
--- a/sysdeps/powerpc/powerpc64/power5/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power4/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power6/Implies b/sysdeps/powerpc/powerpc64/power6/Implies
deleted file mode 100644
index 4c782d4122..0000000000
--- a/sysdeps/powerpc/powerpc64/power6/Implies
+++ /dev/null
@@ -1,2 +0,0 @@
-powerpc/powerpc64/power5+/fpu
-powerpc/powerpc64/power5+
diff --git a/sysdeps/powerpc/powerpc64/power6/fpu/Implies b/sysdeps/powerpc/powerpc64/power6/fpu/Implies
deleted file mode 100644
index f09854edb6..0000000000
--- a/sysdeps/powerpc/powerpc64/power6/fpu/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power5+/fpu
diff --git a/sysdeps/powerpc/powerpc64/power6/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/power6/fpu/multiarch/Implies
deleted file mode 100644
index fca8a4ef0f..0000000000
--- a/sysdeps/powerpc/powerpc64/power6/fpu/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power5+/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power6/fpu/s_copysign.S b/sysdeps/powerpc/powerpc64/power6/fpu/s_copysign.S
index 01346f473b..03e8bedb67 100644
--- a/sysdeps/powerpc/powerpc64/power6/fpu/s_copysign.S
+++ b/sysdeps/powerpc/powerpc64/power6/fpu/s_copysign.S
@@ -1,5 +1,5 @@
/* copysign(). PowerPC64/POWER6 version.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -19,6 +19,8 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-float.h>
+#include <libm-alias-double.h>
/* double [f1] copysign (double [f1] x, double [f2] y);
copysign(x,y) returns a value with the magnitude of x and
@@ -27,32 +29,21 @@
.section ".text"
.type __copysign, @function
.machine power6
-EALIGN (__copysign, 4, 0)
+ENTRY_TOCLESS (__copysign, 4)
CALL_MCOUNT 0
fcpsgn fp1,fp2,fp1
blr
END (__copysign)
hidden_def (__copysign)
-weak_alias (__copysign, copysign)
+libm_alias_double (__copysign, copysign)
/* It turns out that the 'double' version will also always work for
single-precision. */
strong_alias (__copysign, __copysignf)
hidden_def (__copysignf)
-weak_alias (__copysignf, copysignf)
+libm_alias_float (__copysign, copysign)
-#ifdef NO_LONG_DOUBLE
-strong_alias (__copysign, __copysignl)
-weak_alias (__copysign, copysignl)
-#endif
-
-#if IS_IN (libm)
-# if LONG_DOUBLE_COMPAT (libm, GLIBC_2_0)
-compat_symbol (libm, copysign, copysignl, GLIBC_2_0)
-# endif
-#else
-# if LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+#if LONG_DOUBLE_COMPAT (libc, GLIBC_2_0)
compat_symbol (libc, copysign, copysignl, GLIBC_2_0);
-# endif
#endif
diff --git a/sysdeps/powerpc/powerpc64/power6/fpu/s_isnan.S b/sysdeps/powerpc/powerpc64/power6/fpu/s_isnan.S
index 35f34c89b8..6296c944b5 100644
--- a/sysdeps/powerpc/powerpc64/power6/fpu/s_isnan.S
+++ b/sysdeps/powerpc/powerpc64/power6/fpu/s_isnan.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64 version.
- Copyright (C) 2008-2016 Free Software Foundation, Inc.
+ Copyright (C) 2008-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,7 +21,7 @@
/* int __isnan(x) */
.machine power6
-EALIGN (__isnan, 4, 0)
+ENTRY_TOCLESS (__isnan, 4)
CALL_MCOUNT 0
stfd fp1,-8(r1) /* copy FPR to GPR */
ori r1,r1,0
diff --git a/sysdeps/powerpc/powerpc64/power6/memcpy.S b/sysdeps/powerpc/powerpc64/power6/memcpy.S
index 9711810caf..9356867568 100644
--- a/sysdeps/powerpc/powerpc64/power6/memcpy.S
+++ b/sysdeps/powerpc/powerpc64/power6/memcpy.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC64.
- Copyright (C) 2003-2016 Free Software Foundation, Inc.
+ Copyright (C) 2003-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,7 +18,7 @@
#include <sysdep.h>
-/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+/* void * [r3] memcpy (void *dst [r3], void *src [r4], size_t len [r5]);
Returns 'dst'.
Memcpy handles short copies (< 32-bytes) using a binary move blocks
@@ -40,8 +40,11 @@
the source but may take a risk and only require word alignment
for the destination. */
+#ifndef MEMCPY
+# define MEMCPY memcpy
+#endif
.machine "power6"
-EALIGN (memcpy, 7, 0)
+ENTRY_TOCLESS (MEMCPY, 7)
CALL_MCOUNT 3
cmpldi cr1,5,31
@@ -1492,5 +1495,5 @@ L(du_done):
ld 31,-8(1)
ld 3,-16(1)
blr
-END_GEN_TB (memcpy,TB_TOCLESS)
+END_GEN_TB (MEMCPY,TB_TOCLESS)
libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc64/power6/memset.S b/sysdeps/powerpc/powerpc64/power6/memset.S
index c2d1c4e600..3901e33da2 100644
--- a/sysdeps/powerpc/powerpc64/power6/memset.S
+++ b/sysdeps/powerpc/powerpc64/power6/memset.S
@@ -1,5 +1,5 @@
/* Optimized 64-bit memset implementation for POWER6.
- Copyright (C) 1997-2016 Free Software Foundation, Inc.
+ Copyright (C) 1997-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,15 +18,18 @@
#include <sysdep.h>
-/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
+/* void * [r3] memset (void *s [r3], int c [r4], size_t n [r5]));
Returns 's'.
The memset is done in three sizes: byte (8 bits), word (32 bits),
cache line (256 bits). There is a special case for setting cache lines
to 0, to take advantage of the dcbz instruction. */
+#ifndef MEMSET
+# define MEMSET memset
+#endif
.machine power6
-EALIGN (memset, 7, 0)
+ENTRY_TOCLESS (MEMSET, 7)
CALL_MCOUNT 3
#define rTMP r0
@@ -251,7 +254,7 @@ L(cacheAlignedx):
/* A simple loop for the longer (>640 bytes) lengths. This form limits
the branch miss-predicted to exactly 1 at loop exit.*/
L(cacheAligned512):
- cmpli cr1,rLEN,128
+ cmpldi cr1,rLEN,128
blt cr1,L(cacheAligned1)
dcbz 0,rMEMP
addi rLEN,rLEN,-128
@@ -376,12 +379,12 @@ L(medium_27f):
L(medium_28t):
std rCHR, -8(rMEMP)
blr
-END_GEN_TB (memset,TB_TOCLESS)
+END_GEN_TB (MEMSET,TB_TOCLESS)
libc_hidden_builtin_def (memset)
/* Copied from bzero.S to prevent the linker from inserting a stub
between bzero and memset. */
-ENTRY (__bzero)
+ENTRY_TOCLESS (__bzero)
CALL_MCOUNT 3
mr r5,r4
li r4,0
diff --git a/sysdeps/powerpc/powerpc64/power6/multiarch/Implies b/sysdeps/powerpc/powerpc64/power6/multiarch/Implies
deleted file mode 100644
index 2ebe304fa6..0000000000
--- a/sysdeps/powerpc/powerpc64/power6/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power5+/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power6x/Implies b/sysdeps/powerpc/powerpc64/power6x/Implies
deleted file mode 100644
index 9d68f39d22..0000000000
--- a/sysdeps/powerpc/powerpc64/power6x/Implies
+++ /dev/null
@@ -1,2 +0,0 @@
-powerpc/powerpc64/power6/fpu
-powerpc/powerpc64/power6
diff --git a/sysdeps/powerpc/powerpc64/power6x/fpu/Implies b/sysdeps/powerpc/powerpc64/power6x/fpu/Implies
deleted file mode 100644
index 30fa17646e..0000000000
--- a/sysdeps/powerpc/powerpc64/power6x/fpu/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power6/fpu
diff --git a/sysdeps/powerpc/powerpc64/power6x/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/power6x/fpu/multiarch/Implies
deleted file mode 100644
index 410d289a6d..0000000000
--- a/sysdeps/powerpc/powerpc64/power6x/fpu/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power6/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power6x/fpu/s_isnan.S b/sysdeps/powerpc/powerpc64/power6x/fpu/s_isnan.S
index 1f3ee64331..96d0695ed2 100644
--- a/sysdeps/powerpc/powerpc64/power6x/fpu/s_isnan.S
+++ b/sysdeps/powerpc/powerpc64/power6x/fpu/s_isnan.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64 version.
- Copyright (C) 2008-2016 Free Software Foundation, Inc.
+ Copyright (C) 2008-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,7 +21,7 @@
/* int __isnan(x) */
.machine power6
-EALIGN (__isnan, 4, 0)
+ENTRY_TOCLESS (__isnan, 4)
CALL_MCOUNT 0
mftgpr r4,fp1 /* copy FPR to GPR */
lis r0,0x7ff0
diff --git a/sysdeps/powerpc/powerpc64/power6x/fpu/s_llrint.S b/sysdeps/powerpc/powerpc64/power6x/fpu/s_llrint.S
index 0aae09140e..36980be13f 100644
--- a/sysdeps/powerpc/powerpc64/power6x/fpu/s_llrint.S
+++ b/sysdeps/powerpc/powerpc64/power6x/fpu/s_llrint.S
@@ -1,5 +1,5 @@
/* Round double to long int. POWER6x PowerPC64 version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,10 +18,12 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-float.h>
+#include <libm-alias-double.h>
.machine "power6"
/* long long int[r3] __llrint (double x[fp1]) */
-ENTRY (__llrint)
+ENTRY_TOCLESS (__llrint)
CALL_MCOUNT 0
fctid fp13,fp1
mftgpr r3,fp13
@@ -29,16 +31,12 @@ ENTRY (__llrint)
END (__llrint)
strong_alias (__llrint, __lrint)
-weak_alias (__llrint, llrint)
-weak_alias (__lrint, lrint)
-
-#ifdef NO_LONG_DOUBLE
-strong_alias (__llrint, __llrintl)
-weak_alias (__llrint, llrintl)
-strong_alias (__lrint, __lrintl)
-weak_alias (__lrint, lrintl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __llrint, llrintl, GLIBC_2_1)
-compat_symbol (libm, __lrint, lrintl, GLIBC_2_1)
-#endif
+libm_alias_double (__llrint, llrint)
+libm_alias_double (__lrint, lrint)
+/* The double version also works for single-precision as both float and
+ double parameters are passed in 64bit FPRs and both versions are expected
+ to return [long] long type. */
+strong_alias (__llrint, __llrintf)
+libm_alias_float (__llrint, llrint)
+strong_alias (__lrint, __lrintf)
+libm_alias_float (__lrint, lrint)
diff --git a/sysdeps/powerpc/powerpc64/power6x/fpu/s_llround.S b/sysdeps/powerpc/powerpc64/power6x/fpu/s_llround.S
index 39480c15a8..605f55ed5f 100644
--- a/sysdeps/powerpc/powerpc64/power6x/fpu/s_llround.S
+++ b/sysdeps/powerpc/powerpc64/power6x/fpu/s_llround.S
@@ -1,5 +1,5 @@
/* llround function. POWER6x PowerPC64 version.
- Copyright (C) 2006-2016 Free Software Foundation, Inc.
+ Copyright (C) 2006-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,8 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-float.h>
+#include <libm-alias-double.h>
/* long long [r3] llround (float x [fp1])
IEEE 1003.1 llround function. IEEE specifies "round to the nearest
@@ -30,7 +32,7 @@
round to zero instruction. */
.machine "power6"
-ENTRY (__llround)
+ENTRY_TOCLESS (__llround)
CALL_MCOUNT 0
frin fp2,fp1 /* Round to nearest +-0.5. */
fctidz fp3,fp2 /* Convert To Integer DW round toward 0. */
@@ -39,16 +41,12 @@ ENTRY (__llround)
END (__llround)
strong_alias (__llround, __lround)
-weak_alias (__llround, llround)
-weak_alias (__lround, lround)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__llround, llroundl)
-strong_alias (__llround, __llroundl)
-weak_alias (__lround, lroundl)
-strong_alias (__lround, __lroundl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __llround, llroundl, GLIBC_2_1)
-compat_symbol (libm, __lround, lroundl, GLIBC_2_1)
-#endif
+libm_alias_double (__llround, llround)
+libm_alias_double (__lround, lround)
+/* The double version also works for single-precision as both float and
+ double parameters are passed in 64bit FPRs and both versions are expected
+ to return [long] long type. */
+strong_alias (__llround, __llroundf)
+libm_alias_float (__llround, llround)
+strong_alias (__lround, __lroundf)
+libm_alias_float (__lround, lround)
diff --git a/sysdeps/powerpc/powerpc64/power6x/fpu/s_llroundf.S b/sysdeps/powerpc/powerpc64/power6x/fpu/s_llroundf.S
new file mode 100644
index 0000000000..9ea6bd105b
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power6x/fpu/s_llroundf.S
@@ -0,0 +1 @@
+/* __lroundf is in s_llround.S. */
diff --git a/sysdeps/powerpc/powerpc64/power6x/multiarch/Implies b/sysdeps/powerpc/powerpc64/power6x/multiarch/Implies
deleted file mode 100644
index bf5d6171a5..0000000000
--- a/sysdeps/powerpc/powerpc64/power6x/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power6/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power7/Implies b/sysdeps/powerpc/powerpc64/power7/Implies
deleted file mode 100644
index 9d68f39d22..0000000000
--- a/sysdeps/powerpc/powerpc64/power7/Implies
+++ /dev/null
@@ -1,2 +0,0 @@
-powerpc/powerpc64/power6/fpu
-powerpc/powerpc64/power6
diff --git a/sysdeps/powerpc/powerpc64/power7/add_n.S b/sysdeps/powerpc/powerpc64/power7/add_n.S
index 2397d3abca..1306e5aa2f 100644
--- a/sysdeps/powerpc/powerpc64/power7/add_n.S
+++ b/sysdeps/powerpc/powerpc64/power7/add_n.S
@@ -1,6 +1,6 @@
 /* PowerPC64 mpn_add_n/mpn_sub_n -- mpn addition and
subtraction.
- Copyright (C) 2003-2016 Free Software Foundation, Inc.
+ Copyright (C) 2003-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -36,19 +36,19 @@
#define VP r5
#define N r6
-EALIGN(FUNC, 5, 0)
+ENTRY_TOCLESS (FUNC, 5)
#ifdef USE_AS_SUB
- addic r0, r0, 0
+ addic r0, r1, -1
#else
- addic r0, r1, -1
+ addic r0, r0, 0
#endif
andi. r7, N, 1
beq L(bx0)
ld r7, 0(UP)
- ld r9, r0(VP)
+ ld r9, 0(VP)
ADDSUBC r11, r9, r7
- std r11, r0(RP)
+ std r11, 0(RP)
cmpldi N, N, 1
beq N, L(end)
addi UP, UP, 8
diff --git a/sysdeps/powerpc/powerpc64/power7/fpu/Implies b/sysdeps/powerpc/powerpc64/power7/fpu/Implies
deleted file mode 100644
index 30fa17646e..0000000000
--- a/sysdeps/powerpc/powerpc64/power7/fpu/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power6/fpu
diff --git a/sysdeps/powerpc/powerpc64/power7/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/power7/fpu/multiarch/Implies
deleted file mode 100644
index 410d289a6d..0000000000
--- a/sysdeps/powerpc/powerpc64/power7/fpu/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power6/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power7/fpu/s_finite.S b/sysdeps/powerpc/powerpc64/power7/fpu/s_finite.S
index 5366fd6d0b..78e3bdef80 100644
--- a/sysdeps/powerpc/powerpc64/power7/fpu/s_finite.S
+++ b/sysdeps/powerpc/powerpc64/power7/fpu/s_finite.S
@@ -1,5 +1,5 @@
/* finite(). PowerPC64/POWER7 version.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -27,7 +27,7 @@
.section ".text"
.type __finite, @function
.machine power7
-EALIGN (__finite, 4, 0)
+ENTRY (__finite, 4)
CALL_MCOUNT 0
lfd fp0,.LC0@toc(r2)
ftdiv cr7,fp1,fp0
diff --git a/sysdeps/powerpc/powerpc64/power7/fpu/s_isinf.S b/sysdeps/powerpc/powerpc64/power7/fpu/s_isinf.S
index 89dfad1235..0fbb99d64d 100644
--- a/sysdeps/powerpc/powerpc64/power7/fpu/s_isinf.S
+++ b/sysdeps/powerpc/powerpc64/power7/fpu/s_isinf.S
@@ -1,5 +1,5 @@
/* isinf(). PowerPC64/POWER7 version.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -27,7 +27,7 @@
.section ".text"
.type __isinf, @function
.machine power7
-EALIGN (__isinf, 4, 0)
+ENTRY (__isinf, 4)
CALL_MCOUNT 0
lfd fp0,.LC0@toc(r2)
ftdiv cr7,fp1,fp0
diff --git a/sysdeps/powerpc/powerpc64/power7/fpu/s_isnan.S b/sysdeps/powerpc/powerpc64/power7/fpu/s_isnan.S
index bb65c3a153..3f675d7e58 100644
--- a/sysdeps/powerpc/powerpc64/power7/fpu/s_isnan.S
+++ b/sysdeps/powerpc/powerpc64/power7/fpu/s_isnan.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64/POWER7 version.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -27,7 +27,7 @@
.section ".text"
.type __isnan, @function
.machine power7
-EALIGN (__isnan, 4, 0)
+ENTRY (__isnan, 4)
CALL_MCOUNT 0
lfd fp0,.LC0@toc(r2)
ftdiv cr7,fp1,fp0
diff --git a/sysdeps/powerpc/powerpc64/power7/memchr.S b/sysdeps/powerpc/powerpc64/power7/memchr.S
index 03f0d7c2dd..9ab5ab2099 100644
--- a/sysdeps/powerpc/powerpc64/power7/memchr.S
+++ b/sysdeps/powerpc/powerpc64/power7/memchr.S
@@ -1,5 +1,5 @@
/* Optimized memchr implementation for PowerPC64/POWER7 using cmpb insn.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -20,13 +20,27 @@
#include <sysdep.h>
/* int [r3] memchr (char *s [r3], int byte [r4], int size [r5]) */
+
+#ifndef MEMCHR
+# define MEMCHR __memchr
+#endif
.machine power7
-ENTRY (__memchr)
+ENTRY_TOCLESS (MEMCHR)
CALL_MCOUNT 3
dcbt 0,r3
clrrdi r8,r3,3
insrdi r4,r4,8,48
- add r7,r3,r5 /* Calculate the last acceptable address. */
+
+ /* Calculate the last acceptable address and check for possible
+	   addition overflow by using saturated math:
+	   r7 = r3 + r5
+	   r7 |= -(r7 < r3)  */
+ add r7,r3,r5
+ subfc r6,r3,r7
+ subfe r9,r9,r9
+ extsw r6,r9
+ or r7,r7,r6
+
insrdi r4,r4,16,32
cmpldi r5,32
li r9, -1
@@ -180,6 +194,6 @@ L(small_range):
bne cr6,L(done)
blr
-END (__memchr)
+END (MEMCHR)
weak_alias (__memchr, memchr)
libc_hidden_builtin_def (memchr)
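
The overflow check added to memchr above computes the last acceptable address with branch-free saturation: if s + size wraps around the address space, the subfc/subfe/extsw/or sequence forces the limit to all-ones. A minimal C sketch of the same idea (an illustration only, not glibc code; the helper name is made up):

#include <stdint.h>

/* Return s + n, saturated to the top of the address space on wrap-around,
   mirroring the add/subfc/subfe/extsw/or sequence in the patch.  */
static inline uintptr_t
saturated_end (uintptr_t s, size_t n)
{
  uintptr_t end = s + n;
  /* -(end < s) is all-ones exactly when the addition overflowed.  */
  end |= -(uintptr_t) (end < s);
  return end;
}
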
diff --git a/sysdeps/powerpc/powerpc64/power7/memcmp.S b/sysdeps/powerpc/powerpc64/power7/memcmp.S
index 4be29008c7..91acdfbd92 100644
--- a/sysdeps/powerpc/powerpc64/power7/memcmp.S
+++ b/sysdeps/powerpc/powerpc64/power7/memcmp.S
@@ -1,5 +1,5 @@
/* Optimized memcmp implementation for POWER7/PowerPC64.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,9 +21,11 @@
/* int [r3] memcmp (const char *s1 [r3],
const char *s2 [r4],
size_t size [r5]) */
-
+#ifndef MEMCMP
+# define MEMCMP memcmp
+#endif
.machine power7
-EALIGN (memcmp, 4, 0)
+ENTRY_TOCLESS (MEMCMP, 4)
CALL_MCOUNT 3
#define rRTN r3
@@ -82,17 +84,17 @@ EALIGN (memcmp, 4, 0)
byte loop. */
blt cr1, L(bytealigned)
std rWORD8, rWORD8SAVE(r1)
- cfi_offset(rWORD8, rWORD8SAVE)
std rWORD7, rWORD7SAVE(r1)
- cfi_offset(rWORD7, rWORD7SAVE)
std rOFF8, rOFF8SAVE(r1)
- cfi_offset(rWORD7, rOFF8SAVE)
std rOFF16, rOFF16SAVE(r1)
- cfi_offset(rWORD7, rOFF16SAVE)
std rOFF24, rOFF24SAVE(r1)
- cfi_offset(rWORD7, rOFF24SAVE)
std rOFF32, rOFF32SAVE(r1)
- cfi_offset(rWORD7, rOFF32SAVE)
+ cfi_offset(rWORD8, rWORD8SAVE)
+ cfi_offset(rWORD7, rWORD7SAVE)
+ cfi_offset(rOFF8, rOFF8SAVE)
+ cfi_offset(rOFF16, rOFF16SAVE)
+ cfi_offset(rOFF24, rOFF24SAVE)
+ cfi_offset(rOFF32, rOFF32SAVE)
li rOFF8,8
li rOFF16,16
@@ -601,18 +603,18 @@ L(unaligned):
the actual start of rSTR2. */
clrrdi rSTR2, rSTR2, 3
std rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
- cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
/* Compute the left/right shift counts for the unaligned rSTR2,
compensating for the logical (DW aligned) start of rSTR1. */
clrldi rSHL, rWORD8_SHIFT, 61
clrrdi rSTR1, rSTR1, 3
std rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
- cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
sldi rSHL, rSHL, 3
cmpld cr5, rWORD8_SHIFT, rSTR2
add rN, rN, r12
sldi rWORD6, r12, 3
std rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
+ cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
subfic rSHR, rSHL, 64
srdi r0, rN, 5 /* Divide by 32 */
@@ -689,15 +691,15 @@ L(duPs4):
.align 4
L(DWunaligned):
std rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
- cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
clrrdi rSTR2, rSTR2, 3
std rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
- cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
srdi r0, rN, 5 /* Divide by 32 */
std rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
- cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
andi. r12, rN, 24 /* Get the DW remainder */
std rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
+ cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
+ cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
sldi rSHL, rSHL, 3
LD rWORD6, 0, rSTR2
@@ -1054,6 +1056,6 @@ L(duzeroLength):
li rRTN, 0
blr
-END (memcmp)
+END (MEMCMP)
libc_hidden_builtin_def (memcmp)
weak_alias (memcmp, bcmp)
diff --git a/sysdeps/powerpc/powerpc64/power7/memcpy.S b/sysdeps/powerpc/powerpc64/power7/memcpy.S
index 6bb5f13257..3d8629ca65 100644
--- a/sysdeps/powerpc/powerpc64/power7/memcpy.S
+++ b/sysdeps/powerpc/powerpc64/power7/memcpy.S
@@ -1,5 +1,5 @@
/* Optimized memcpy implementation for PowerPC64/POWER7.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -20,15 +20,19 @@
#include <sysdep.h>
-/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+/* void * [r3] memcpy (void *dst [r3], void *src [r4], size_t len [r5]);
Returns 'dst'. */
+#ifndef MEMCPY
+# define MEMCPY memcpy
+#endif
+
#define dst 11 /* Use r11 so r3 kept unchanged. */
#define src 4
#define cnt 5
.machine power7
-EALIGN (memcpy, 5, 0)
+ENTRY_TOCLESS (MEMCPY, 5)
CALL_MCOUNT 3
cmpldi cr1,cnt,31
@@ -87,63 +91,63 @@ L(aligned_copy):
srdi 12,cnt,7
cmpdi 12,0
beq L(aligned_tail)
- lxvd2x 6,0,src
- lxvd2x 7,src,6
+ lvx 6,0,src
+ lvx 7,src,6
mtctr 12
b L(aligned_128loop)
.align 4
L(aligned_128head):
/* for the 2nd + iteration of this loop. */
- lxvd2x 6,0,src
- lxvd2x 7,src,6
+ lvx 6,0,src
+ lvx 7,src,6
L(aligned_128loop):
- lxvd2x 8,src,7
- lxvd2x 9,src,8
- stxvd2x 6,0,dst
+ lvx 8,src,7
+ lvx 9,src,8
+ stvx 6,0,dst
addi src,src,64
- stxvd2x 7,dst,6
- stxvd2x 8,dst,7
- stxvd2x 9,dst,8
- lxvd2x 6,0,src
- lxvd2x 7,src,6
+ stvx 7,dst,6
+ stvx 8,dst,7
+ stvx 9,dst,8
+ lvx 6,0,src
+ lvx 7,src,6
addi dst,dst,64
- lxvd2x 8,src,7
- lxvd2x 9,src,8
+ lvx 8,src,7
+ lvx 9,src,8
addi src,src,64
- stxvd2x 6,0,dst
- stxvd2x 7,dst,6
- stxvd2x 8,dst,7
- stxvd2x 9,dst,8
+ stvx 6,0,dst
+ stvx 7,dst,6
+ stvx 8,dst,7
+ stvx 9,dst,8
addi dst,dst,64
bdnz L(aligned_128head)
L(aligned_tail):
mtocrf 0x01,cnt
bf 25,32f
- lxvd2x 6,0,src
- lxvd2x 7,src,6
- lxvd2x 8,src,7
- lxvd2x 9,src,8
+ lvx 6,0,src
+ lvx 7,src,6
+ lvx 8,src,7
+ lvx 9,src,8
addi src,src,64
- stxvd2x 6,0,dst
- stxvd2x 7,dst,6
- stxvd2x 8,dst,7
- stxvd2x 9,dst,8
+ stvx 6,0,dst
+ stvx 7,dst,6
+ stvx 8,dst,7
+ stvx 9,dst,8
addi dst,dst,64
32:
bf 26,16f
- lxvd2x 6,0,src
- lxvd2x 7,src,6
+ lvx 6,0,src
+ lvx 7,src,6
addi src,src,32
- stxvd2x 6,0,dst
- stxvd2x 7,dst,6
+ stvx 6,0,dst
+ stvx 7,dst,6
addi dst,dst,32
16:
bf 27,8f
- lxvd2x 6,0,src
+ lvx 6,0,src
addi src,src,16
- stxvd2x 6,0,dst
+ stvx 6,0,dst
addi dst,dst,16
8:
bf 28,4f
@@ -422,5 +426,5 @@ L(end_unaligned_loop):
/* Return original DST pointer. */
blr
-END_GEN_TB (memcpy,TB_TOCLESS)
+END_GEN_TB (MEMCPY,TB_TOCLESS)
libc_hidden_builtin_def (memcpy)
diff --git a/sysdeps/powerpc/powerpc64/power7/memmove.S b/sysdeps/powerpc/powerpc64/power7/memmove.S
index e263ba9608..b7f3dc28d1 100644
--- a/sysdeps/powerpc/powerpc64/power7/memmove.S
+++ b/sysdeps/powerpc/powerpc64/power7/memmove.S
@@ -1,5 +1,5 @@
/* Optimized memmove implementation for PowerPC64/POWER7.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -27,8 +27,11 @@
    If source and destination overlap, an optimized backwards memcpy is used
instead. */
+#ifndef MEMMOVE
+# define MEMMOVE memmove
+#endif
.machine power7
-EALIGN (memmove, 5, 0)
+ENTRY_TOCLESS (MEMMOVE, 5)
CALL_MCOUNT 3
L(_memmove):
@@ -89,63 +92,63 @@ L(aligned_copy):
srdi 12,r5,7
cmpdi 12,0
beq L(aligned_tail)
- lxvd2x 6,0,r4
- lxvd2x 7,r4,6
+ lvx 6,0,r4
+ lvx 7,r4,6
mtctr 12
b L(aligned_128loop)
.align 4
L(aligned_128head):
/* for the 2nd + iteration of this loop. */
- lxvd2x 6,0,r4
- lxvd2x 7,r4,6
+ lvx 6,0,r4
+ lvx 7,r4,6
L(aligned_128loop):
- lxvd2x 8,r4,7
- lxvd2x 9,r4,8
- stxvd2x 6,0,r11
+ lvx 8,r4,7
+ lvx 9,r4,8
+ stvx 6,0,r11
addi r4,r4,64
- stxvd2x 7,r11,6
- stxvd2x 8,r11,7
- stxvd2x 9,r11,8
- lxvd2x 6,0,r4
- lxvd2x 7,r4,6
+ stvx 7,r11,6
+ stvx 8,r11,7
+ stvx 9,r11,8
+ lvx 6,0,r4
+ lvx 7,r4,6
addi r11,r11,64
- lxvd2x 8,r4,7
- lxvd2x 9,r4,8
+ lvx 8,r4,7
+ lvx 9,r4,8
addi r4,r4,64
- stxvd2x 6,0,r11
- stxvd2x 7,r11,6
- stxvd2x 8,r11,7
- stxvd2x 9,r11,8
+ stvx 6,0,r11
+ stvx 7,r11,6
+ stvx 8,r11,7
+ stvx 9,r11,8
addi r11,r11,64
bdnz L(aligned_128head)
L(aligned_tail):
mtocrf 0x01,r5
bf 25,32f
- lxvd2x 6,0,r4
- lxvd2x 7,r4,6
- lxvd2x 8,r4,7
- lxvd2x 9,r4,8
+ lvx 6,0,r4
+ lvx 7,r4,6
+ lvx 8,r4,7
+ lvx 9,r4,8
addi r4,r4,64
- stxvd2x 6,0,r11
- stxvd2x 7,r11,6
- stxvd2x 8,r11,7
- stxvd2x 9,r11,8
+ stvx 6,0,r11
+ stvx 7,r11,6
+ stvx 8,r11,7
+ stvx 9,r11,8
addi r11,r11,64
32:
bf 26,16f
- lxvd2x 6,0,r4
- lxvd2x 7,r4,6
+ lvx 6,0,r4
+ lvx 7,r4,6
addi r4,r4,32
- stxvd2x 6,0,r11
- stxvd2x 7,r11,6
+ stvx 6,0,r11
+ stvx 7,r11,6
addi r11,r11,32
16:
bf 27,8f
- lxvd2x 6,0,r4
+ lvx 6,0,r4
addi r4,r4,16
- stxvd2x 6,0,r11
+ stvx 6,0,r11
addi r11,r11,16
8:
bf 28,4f
@@ -485,63 +488,63 @@ L(aligned_copy_bwd):
srdi r12,r5,7
cmpdi r12,0
beq L(aligned_tail_bwd)
- lxvd2x v6,r4,r6
- lxvd2x v7,r4,r7
+ lvx v6,r4,r6
+ lvx v7,r4,r7
mtctr 12
b L(aligned_128loop_bwd)
.align 4
L(aligned_128head_bwd):
/* for the 2nd + iteration of this loop. */
- lxvd2x v6,r4,r6
- lxvd2x v7,r4,r7
+ lvx v6,r4,r6
+ lvx v7,r4,r7
L(aligned_128loop_bwd):
- lxvd2x v8,r4,r8
- lxvd2x v9,r4,r9
- stxvd2x v6,r11,r6
+ lvx v8,r4,r8
+ lvx v9,r4,r9
+ stvx v6,r11,r6
subi r4,r4,64
- stxvd2x v7,r11,r7
- stxvd2x v8,r11,r8
- stxvd2x v9,r11,r9
- lxvd2x v6,r4,r6
- lxvd2x v7,r4,7
+ stvx v7,r11,r7
+ stvx v8,r11,r8
+ stvx v9,r11,r9
+ lvx v6,r4,r6
+ lvx v7,r4,7
subi r11,r11,64
- lxvd2x v8,r4,r8
- lxvd2x v9,r4,r9
+ lvx v8,r4,r8
+ lvx v9,r4,r9
subi r4,r4,64
- stxvd2x v6,r11,r6
- stxvd2x v7,r11,r7
- stxvd2x v8,r11,r8
- stxvd2x v9,r11,r9
+ stvx v6,r11,r6
+ stvx v7,r11,r7
+ stvx v8,r11,r8
+ stvx v9,r11,r9
subi r11,r11,64
bdnz L(aligned_128head_bwd)
L(aligned_tail_bwd):
mtocrf 0x01,r5
bf 25,32f
- lxvd2x v6,r4,r6
- lxvd2x v7,r4,r7
- lxvd2x v8,r4,r8
- lxvd2x v9,r4,r9
+ lvx v6,r4,r6
+ lvx v7,r4,r7
+ lvx v8,r4,r8
+ lvx v9,r4,r9
subi r4,r4,64
- stxvd2x v6,r11,r6
- stxvd2x v7,r11,r7
- stxvd2x v8,r11,r8
- stxvd2x v9,r11,r9
+ stvx v6,r11,r6
+ stvx v7,r11,r7
+ stvx v8,r11,r8
+ stvx v9,r11,r9
subi r11,r11,64
32:
bf 26,16f
- lxvd2x v6,r4,r6
- lxvd2x v7,r4,r7
+ lvx v6,r4,r6
+ lvx v7,r4,r7
subi r4,r4,32
- stxvd2x v6,r11,r6
- stxvd2x v7,r11,r7
+ stvx v6,r11,r6
+ stvx v7,r11,r7
subi r11,r11,32
16:
bf 27,8f
- lxvd2x v6,r4,r6
+ lvx v6,r4,r6
subi r4,r4,16
- stxvd2x v6,r11,r6
+ stvx v6,r11,r6
subi r11,r11,16
8:
bf 28,4f
@@ -816,14 +819,14 @@ L(end_unaligned_loop_bwd):
stb r8,-7(r11)
/* Return original DST pointer. */
blr
-END_GEN_TB (memmove, TB_TOCLESS)
+END_GEN_TB (MEMMOVE, TB_TOCLESS)
libc_hidden_builtin_def (memmove)
/* void bcopy(const void *src [r3], void *dest [r4], size_t n [r5])
Implemented in this file to avoid linker create a stub function call
in the branch to '_memmove'. */
-ENTRY (__bcopy)
+ENTRY_TOCLESS (__bcopy)
mr r6,r3
mr r3,r4
mr r4,r6
diff --git a/sysdeps/powerpc/powerpc64/power7/mempcpy.S b/sysdeps/powerpc/powerpc64/power7/mempcpy.S
index 98ed1f07f9..7f5a4745ef 100644
--- a/sysdeps/powerpc/powerpc64/power7/mempcpy.S
+++ b/sysdeps/powerpc/powerpc64/power7/mempcpy.S
@@ -1,5 +1,5 @@
/* Optimized mempcpy implementation for POWER7.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -20,11 +20,14 @@
#include <sysdep.h>
-/* __ptr_t [r3] __mempcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+/* void * [r3] __mempcpy (void *dst [r3], void *src [r4], size_t len [r5]);
Returns 'dst' + 'len'. */
+#ifndef MEMPCPY
+# define MEMPCPY __mempcpy
+#endif
.machine power7
-EALIGN (__mempcpy, 5, 0)
+ENTRY_TOCLESS (MEMPCPY, 5)
CALL_MCOUNT 3
cmpldi cr1,5,31
@@ -463,7 +466,7 @@ L(end_unaligned_loop):
add 3,3,5
blr
-END_GEN_TB (__mempcpy,TB_TOCLESS)
+END_GEN_TB (MEMPCPY,TB_TOCLESS)
libc_hidden_def (__mempcpy)
weak_alias (__mempcpy, mempcpy)
libc_hidden_builtin_def (mempcpy)
diff --git a/sysdeps/powerpc/powerpc64/power7/memrchr.S b/sysdeps/powerpc/powerpc64/power7/memrchr.S
index 042c46d804..583d513c4c 100644
--- a/sysdeps/powerpc/powerpc64/power7/memrchr.S
+++ b/sysdeps/powerpc/powerpc64/power7/memrchr.S
@@ -1,5 +1,5 @@
/* Optimized memrchr implementation for PowerPC64/POWER7 using cmpb insn.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -20,8 +20,12 @@
#include <sysdep.h>
/* int [r3] memrchr (char *s [r3], int byte [r4], int size [r5]) */
+
+#ifndef MEMRCHR
+# define MEMRCHR __memrchr
+#endif
.machine power7
-ENTRY (__memrchr)
+ENTRY_TOCLESS (MEMRCHR)
CALL_MCOUNT 3
add r7,r3,r5 /* Calculate the last acceptable address. */
neg r0,r7
@@ -192,6 +196,6 @@ L(loop_small):
bne L(loop_small)
blr
-END (__memrchr)
+END (MEMRCHR)
weak_alias (__memrchr, memrchr)
libc_hidden_builtin_def (memrchr)
diff --git a/sysdeps/powerpc/powerpc64/power7/memset.S b/sysdeps/powerpc/powerpc64/power7/memset.S
index cf831861fe..acd0adfb78 100644
--- a/sysdeps/powerpc/powerpc64/power7/memset.S
+++ b/sysdeps/powerpc/powerpc64/power7/memset.S
@@ -1,5 +1,5 @@
/* Optimized memset implementation for PowerPC64/POWER7.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -19,11 +19,14 @@
#include <sysdep.h>
-/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
+/* void * [r3] memset (void *s [r3], int c [r4], size_t n [r5]);
Returns 's'. */
+#ifndef MEMSET
+# define MEMSET memset
+#endif
.machine power7
-EALIGN (memset, 5, 0)
+ENTRY_TOCLESS (MEMSET, 5)
CALL_MCOUNT 3
L(_memset):
@@ -380,12 +383,12 @@ L(small):
stw 4,4(10)
blr
-END_GEN_TB (memset,TB_TOCLESS)
+END_GEN_TB (MEMSET,TB_TOCLESS)
libc_hidden_builtin_def (memset)
/* Copied from bzero.S to prevent the linker from inserting a stub
between bzero and memset. */
-ENTRY (__bzero)
+ENTRY_TOCLESS (__bzero)
CALL_MCOUNT 3
mr r5,r4
li r4,0
diff --git a/sysdeps/powerpc/powerpc64/power7/multiarch/Implies b/sysdeps/powerpc/powerpc64/power7/multiarch/Implies
deleted file mode 100644
index bf5d6171a5..0000000000
--- a/sysdeps/powerpc/powerpc64/power7/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power6/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power7/rawmemchr.S b/sysdeps/powerpc/powerpc64/power7/rawmemchr.S
index 32223d0baa..6ada0ebb30 100644
--- a/sysdeps/powerpc/powerpc64/power7/rawmemchr.S
+++ b/sysdeps/powerpc/powerpc64/power7/rawmemchr.S
@@ -1,5 +1,5 @@
/* Optimized rawmemchr implementation for PowerPC64/POWER7 using cmpb insn.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -20,8 +20,12 @@
#include <sysdep.h>
/* int [r3] rawmemchr (void *s [r3], int c [r4]) */
+
+#ifndef RAWMEMCHR
+# define RAWMEMCHR __rawmemchr
+#endif
.machine power7
-ENTRY (__rawmemchr)
+ENTRY_TOCLESS (RAWMEMCHR)
CALL_MCOUNT 2
dcbt 0,r3
clrrdi r8,r3,3 /* Align the address to doubleword boundary. */
@@ -106,6 +110,6 @@ L(done):
srdi r0,r0,3 /* Convert leading zeros to bytes. */
add r3,r8,r0 /* Return address of the matching char. */
blr
-END (__rawmemchr)
+END (RAWMEMCHR)
weak_alias (__rawmemchr,rawmemchr)
libc_hidden_builtin_def (__rawmemchr)
diff --git a/sysdeps/powerpc/powerpc64/power7/stpncpy.S b/sysdeps/powerpc/powerpc64/power7/stpncpy.S
index 54cfae9218..279ce83973 100644
--- a/sysdeps/powerpc/powerpc64/power7/stpncpy.S
+++ b/sysdeps/powerpc/powerpc64/power7/stpncpy.S
@@ -1,5 +1,5 @@
/* Optimized stpncpy implementation for PowerPC64/POWER7.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/power7/strcasecmp.S b/sysdeps/powerpc/powerpc64/power7/strcasecmp.S
index d1c29166a9..1d4bc61137 100644
--- a/sysdeps/powerpc/powerpc64/power7/strcasecmp.S
+++ b/sysdeps/powerpc/powerpc64/power7/strcasecmp.S
@@ -1,5 +1,5 @@
/* Optimized strcasecmp implementation for PowerPC64.
- Copyright (C) 2011-2016 Free Software Foundation, Inc.
+ Copyright (C) 2011-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,17 +24,18 @@
or if defined USE_IN_EXTENDED_LOCALE_MODEL:
int [r3] strcasecmp_l (const char *s1 [r3], const char *s2 [r4],
- __locale_t loc [r5]) */
+ locale_t loc [r5]) */
#ifndef STRCMP
# define __STRCMP __strcasecmp
# define STRCMP strcasecmp
#endif
-ENTRY (__STRCMP)
#ifndef USE_IN_EXTENDED_LOCALE_MODEL
+ENTRY (__STRCMP)
CALL_MCOUNT 2
#else
+ENTRY_TOCLESS (__STRCMP)
CALL_MCOUNT 3
#endif
diff --git a/sysdeps/powerpc/powerpc64/power7/strchr.S b/sysdeps/powerpc/powerpc64/power7/strchr.S
index 1ad4bd3a2a..da648b2749 100644
--- a/sysdeps/powerpc/powerpc64/power7/strchr.S
+++ b/sysdeps/powerpc/powerpc64/power7/strchr.S
@@ -1,5 +1,5 @@
/* Optimized strchr implementation for PowerPC64/POWER7 using cmpb insn.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -19,9 +19,13 @@
#include <sysdep.h>
+#ifndef STRCHR
+# define STRCHR strchr
+#endif
+
/* int [r3] strchr (char *s [r3], int c [r4]) */
.machine power7
-ENTRY (strchr)
+ENTRY_TOCLESS (STRCHR)
CALL_MCOUNT 2
dcbt 0,r3
clrrdi r8,r3,3 /* Align the address to doubleword boundary. */
@@ -221,6 +225,6 @@ L(done_null):
srdi r0,r0,3 /* Convert leading zeros to bytes. */
add r3,r8,r0 /* Return address of the matching null byte. */
blr
-END (strchr)
+END (STRCHR)
weak_alias (strchr, index)
libc_hidden_builtin_def (strchr)
diff --git a/sysdeps/powerpc/powerpc64/power7/strchrnul.S b/sysdeps/powerpc/powerpc64/power7/strchrnul.S
index 020c0459b8..f137174701 100644
--- a/sysdeps/powerpc/powerpc64/power7/strchrnul.S
+++ b/sysdeps/powerpc/powerpc64/power7/strchrnul.S
@@ -1,5 +1,5 @@
/* Optimized strchrnul implementation for PowerPC64/POWER7 using cmpb insn.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -19,9 +19,12 @@
#include <sysdep.h>
+#ifndef STRCHRNUL
+# define STRCHRNUL __strchrnul
+#endif
/* int [r3] strchrnul (char *s [r3], int c [r4]) */
.machine power7
-ENTRY (__strchrnul)
+ENTRY_TOCLESS (STRCHRNUL)
CALL_MCOUNT 2
dcbt 0,r3
clrrdi r8,r3,3 /* Align the address to doubleword boundary. */
@@ -123,6 +126,6 @@ L(done):
srdi r0,r0,3 /* Convert leading zeros to bytes. */
add r3,r8,r0 /* Return address of matching c/null byte. */
blr
-END (__strchrnul)
-weak_alias (__strchrnul,strchrnul)
-libc_hidden_builtin_def (__strchrnul)
+END (STRCHRNUL)
+weak_alias (STRCHRNUL, strchrnul)
+libc_hidden_builtin_def (STRCHRNUL)
diff --git a/sysdeps/powerpc/powerpc64/power7/strcmp.S b/sysdeps/powerpc/powerpc64/power7/strcmp.S
index 5bbae0d3b2..c3d5ec1090 100644
--- a/sysdeps/powerpc/powerpc64/power7/strcmp.S
+++ b/sysdeps/powerpc/powerpc64/power7/strcmp.S
@@ -1,5 +1,5 @@
/* Optimized strcmp implementation for Power7 using 'cmpb' instruction
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,10 +23,14 @@
#include <sysdep.h>
+#ifndef STRCMP
+# define STRCMP strcmp
+#endif
+
/* int [r3] strcmp (const char *s1 [r3], const char *s2 [r4]) */
.machine power7
-EALIGN (strcmp, 4, 0)
+ENTRY_TOCLESS (STRCMP, 4)
CALL_MCOUNT 2
or r9, r3, r4
@@ -160,5 +164,5 @@ L(diffOfNULL):
extsw r3, r10 /* sign extend result */
blr /* return */
-END (strcmp)
+END (STRCMP)
libc_hidden_builtin_def (strcmp)
diff --git a/sysdeps/powerpc/powerpc64/power7/strlen.S b/sysdeps/powerpc/powerpc64/power7/strlen.S
index 4b2180f65f..9758089a15 100644
--- a/sysdeps/powerpc/powerpc64/power7/strlen.S
+++ b/sysdeps/powerpc/powerpc64/power7/strlen.S
@@ -1,5 +1,5 @@
/* Optimized strlen implementation for PowerPC64/POWER7 using cmpb insn.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -20,8 +20,12 @@
#include <sysdep.h>
/* int [r3] strlen (char *s [r3]) */
+
+#ifndef STRLEN
+# define STRLEN strlen
+#endif
.machine power7
-ENTRY (strlen)
+ENTRY_TOCLESS (STRLEN)
CALL_MCOUNT 1
dcbt 0,r3
clrrdi r4,r3,3 /* Align the address to doubleword boundary. */
@@ -99,5 +103,5 @@ L(done):
srdi r0,r0,3 /* Convert leading/trailing zeros to bytes. */
add r3,r5,r0 /* Compute final length. */
blr
-END (strlen)
+END (STRLEN)
libc_hidden_builtin_def (strlen)
diff --git a/sysdeps/powerpc/powerpc64/power7/strncmp.S b/sysdeps/powerpc/powerpc64/power7/strncmp.S
index 0de9b97173..0c7429d19f 100644
--- a/sysdeps/powerpc/powerpc64/power7/strncmp.S
+++ b/sysdeps/powerpc/powerpc64/power7/strncmp.S
@@ -1,5 +1,5 @@
/* Optimized strcmp implementation for POWER7/PowerPC64.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,13 +18,17 @@
#include <sysdep.h>
+#ifndef STRNCMP
+# define STRNCMP strncmp
+#endif
+
/* See strlen.s for comments on how the end-of-string testing works. */
/* int [r3] strncmp (const char *s1 [r3],
const char *s2 [r4],
size_t size [r5]) */
-EALIGN (strncmp,5,0)
+ENTRY_TOCLESS (STRNCMP, 5)
CALL_MCOUNT 3
#define rTMP2 r0
@@ -219,5 +223,5 @@ L(u4): sub rRTN,rWORD1,rWORD2
L(ux):
li rRTN,0
blr
-END (strncmp)
+END (STRNCMP)
libc_hidden_builtin_def (strncmp)
diff --git a/sysdeps/powerpc/powerpc64/power7/strncpy.S b/sysdeps/powerpc/powerpc64/power7/strncpy.S
index 1b4a3d9181..1965f82a2b 100644
--- a/sysdeps/powerpc/powerpc64/power7/strncpy.S
+++ b/sysdeps/powerpc/powerpc64/power7/strncpy.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -40,17 +40,26 @@
memset. */
#ifdef USE_AS_STPNCPY
-# define FUNC_NAME __stpncpy
+# ifndef STPNCPY
+# define FUNC_NAME __stpncpy
+# else
+# define FUNC_NAME STPNCPY
+# endif
#else
-# define FUNC_NAME strncpy
-#endif
+# ifndef STRNCPY
+# define FUNC_NAME strncpy
+# else
+# define FUNC_NAME STRNCPY
+# endif
+#endif /* !USE_AS_STPNCPY */
-#define FRAMESIZE (FRAME_MIN_SIZE+32)
+#define FRAMESIZE (FRAME_MIN_SIZE+16)
#ifndef MEMSET
/* For builds with no IFUNC support, local calls should be made to internal
GLIBC symbol (created by libc_hidden_builtin_def). */
# ifdef SHARED
+# define MEMSET_is_local
# define MEMSET __GI_memset
# else
# define MEMSET memset
@@ -58,17 +67,20 @@
#endif
.machine power7
-EALIGN(FUNC_NAME, 4, 0)
+#ifdef MEMSET_is_local
+ENTRY_TOCLESS (FUNC_NAME, 4)
+#else
+ENTRY (FUNC_NAME, 4)
+#endif
CALL_MCOUNT 3
- mflr r0 /* load link register LR to r0 */
or r10, r3, r4 /* to verify source and destination */
rldicl. r8, r10, 0, 61 /* is double word aligned .. ? */
std r19, -8(r1) /* save callers register , r19 */
std r18, -16(r1) /* save callers register , r18 */
- std r0, 16(r1) /* store the link register */
- stdu r1, -FRAMESIZE(r1) /* create the stack frame */
+ cfi_offset(r19, -8)
+ cfi_offset(r18, -16)
mr r9, r3 /* save r3 into r9 for use */
mr r18, r3 /* save r3 for retCode of strncpy */
@@ -211,11 +223,23 @@ L(zeroFill):
cmpdi cr7, r8, 0 /* compare if length is zero */
beq cr7, L(update3return)
+ mflr r0 /* load link register LR to r0 */
+ std r0, 16(r1) /* store the link register */
+ stdu r1, -FRAMESIZE(r1) /* create the stack frame */
+ cfi_adjust_cfa_offset(FRAMESIZE)
+ cfi_offset(lr, 16)
mr r3, r19 /* fill buffer with */
li r4, 0 /* zero fill buffer */
mr r5, r8 /* how many bytes to fill buffer with */
bl MEMSET /* call optimized memset */
+#ifndef MEMSET_is_local
nop
+#endif
+ ld r0, FRAMESIZE+16(r1) /* read the saved link register */
+ addi r1, r1, FRAMESIZE /* restore stack pointer */
+ cfi_adjust_cfa_offset(-FRAMESIZE)
+ mtlr r0
+ cfi_restore(lr)
L(update3return):
#ifdef USE_AS_STPNCPY
@@ -226,11 +250,8 @@ L(hop2return):
#ifndef USE_AS_STPNCPY
mr r3, r18 /* set return value */
#endif
- addi r1, r1, FRAMESIZE /* restore stack pointer */
- ld r0, 16(r1) /* read the saved link register */
ld r18, -16(r1) /* restore callers save register, r18 */
ld r19, -8(r1) /* restore callers save register, r19 */
- mtlr r0 /* branch to link register */
blr /* return */
.p2align 4
@@ -264,16 +285,13 @@ L(oneBYone):
.p2align 4
L(done):
- addi r1, r1, FRAMESIZE /* restore stack pointer */
#ifdef USE_AS_STPNCPY
mr r3, r19 /* set the return value */
#else
mr r3, r18 /* set the return value */
#endif
- ld r0, 16(r1) /* read the saved link register */
ld r18, -16(r1) /* restore callers save register, r18 */
ld r19, -8(r1) /* restore callers save register, r19 */
- mtlr r0 /* branch to link register */
blr /* return */
L(update1):
diff --git a/sysdeps/powerpc/powerpc64/power7/strnlen.S b/sysdeps/powerpc/powerpc64/power7/strnlen.S
index 35b7749e0d..3097cac1fb 100644
--- a/sysdeps/powerpc/powerpc64/power7/strnlen.S
+++ b/sysdeps/powerpc/powerpc64/power7/strnlen.S
@@ -1,5 +1,5 @@
/* Optimized strnlen implementation for PowerPC64/POWER7 using cmpb insn.
- Copyright (C) 2010-2016 Free Software Foundation, Inc.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
Contributed by Luis Machado <luisgpm@br.ibm.com>.
This file is part of the GNU C Library.
@@ -19,9 +19,13 @@
#include <sysdep.h>
+#ifndef STRNLEN
+# define STRNLEN __strnlen
+#endif
+
/* int [r3] strnlen (char *s [r3], int size [r4]) */
.machine power7
-ENTRY (__strnlen)
+ENTRY_TOCLESS (STRNLEN)
CALL_MCOUNT 2
dcbt 0,r3
clrrdi r8,r3,3
@@ -172,7 +176,7 @@ L(loop_small):
mr r3,r4
blr
-END (__strnlen)
+END (STRNLEN)
libc_hidden_def (__strnlen)
weak_alias (__strnlen, strnlen)
libc_hidden_def (strnlen)
diff --git a/sysdeps/powerpc/powerpc64/power7/strrchr.S b/sysdeps/powerpc/powerpc64/power7/strrchr.S
index 5193b07a7a..e47e3d4188 100644
--- a/sysdeps/powerpc/powerpc64/power7/strrchr.S
+++ b/sysdeps/powerpc/powerpc64/power7/strrchr.S
@@ -1,5 +1,5 @@
/* Optimized strrchr implementation for PowerPC64/POWER7 using cmpb insn.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,8 +19,13 @@
#include <sysdep.h>
/* int [r3] strrchr (char *s [r3], int c [r4]) */
+
+#ifndef STRRCHR
+# define STRRCHR strrchr
+#endif
+
.machine power7
-ENTRY (strrchr)
+ENTRY_TOCLESS (STRRCHR)
CALL_MCOUNT 2
dcbt 0,r3
clrrdi r8,r3,3 /* Align the address to doubleword boundary. */
@@ -250,6 +255,6 @@ L(done_null):
srdi r0,r0,3 /* Convert trailing zeros to bytes. */
add r3,r8,r0 /* Return address of the matching null byte. */
blr
-END (strrchr)
+END (STRRCHR)
weak_alias (strrchr, rindex)
libc_hidden_builtin_def (strrchr)
diff --git a/sysdeps/powerpc/powerpc64/power7/strstr-ppc64.c b/sysdeps/powerpc/powerpc64/power7/strstr-ppc64.c
index 164c49a2c3..7559733c7c 100644
--- a/sysdeps/powerpc/powerpc64/power7/strstr-ppc64.c
+++ b/sysdeps/powerpc/powerpc64/power7/strstr-ppc64.c
@@ -1,5 +1,5 @@
/* Optimized strstr implementation for PowerPC64/POWER7.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/power7/strstr.S b/sysdeps/powerpc/powerpc64/power7/strstr.S
index fefac1c9e5..ac92f9c517 100644
--- a/sysdeps/powerpc/powerpc64/power7/strstr.S
+++ b/sysdeps/powerpc/powerpc64/power7/strstr.S
@@ -1,5 +1,5 @@
/* Optimized strstr implementation for PowerPC64/POWER7.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -25,11 +25,16 @@
#define ITERATIONS 64
+#ifndef STRSTR
+# define STRSTR strstr
+#endif
+
#ifndef STRLEN
/* For builds with no IFUNC support, local calls should be made to internal
GLIBC symbol (created by libc_hidden_builtin_def). */
# ifdef SHARED
# define STRLEN __GI_strlen
+# define STRLEN_is_local
# else
# define STRLEN strlen
# endif
@@ -40,6 +45,7 @@
GLIBC symbol (created by libc_hidden_builtin_def). */
# ifdef SHARED
# define STRNLEN __GI_strnlen
+# define STRNLEN_is_local
# else
# define STRNLEN __strnlen
# endif
@@ -48,6 +54,7 @@
#ifndef STRCHR
# ifdef SHARED
# define STRCHR __GI_strchr
+# define STRCHR_is_local
# else
# define STRCHR strchr
# endif
@@ -55,18 +62,19 @@
#define FRAMESIZE (FRAME_MIN_SIZE+32)
.machine power7
-EALIGN (strstr, 4, 0)
+/* Can't be ENTRY_TOCLESS due to calling __strstr_ppc which uses r2. */
+ENTRY (STRSTR, 4)
CALL_MCOUNT 2
mflr r0 /* Load link register LR to r0. */
std r31, -8(r1) /* Save callers register r31. */
- cfi_offset(r31, -8)
std r30, -16(r1) /* Save callers register r30. */
- cfi_offset(r30, -16)
std r29, -24(r1) /* Save callers register r29. */
- cfi_offset(r29, -24)
std r28, -32(r1) /* Save callers register r28. */
- cfi_offset(r28, -32)
std r0, 16(r1) /* Store the link register. */
+ cfi_offset(r31, -8)
+ cfi_offset(r30, -16)
+	cfi_offset(r29, -24)
+	cfi_offset(r28, -32)
cfi_offset(lr, 16)
stdu r1, -FRAMESIZE(r1) /* Create the stack frame. */
cfi_adjust_cfa_offset(FRAMESIZE)
@@ -82,7 +90,9 @@ EALIGN (strstr, 4, 0)
mr r30, r4
mr r3, r4
bl STRLEN
+#ifndef STRLEN_is_local
nop
+#endif
cmpdi cr7, r3, 0 /* If search str is null. */
beq cr7, L(ret_r3)
@@ -91,14 +101,18 @@ EALIGN (strstr, 4, 0)
mr r4, r3
mr r3, r29
bl STRNLEN
+#ifndef STRNLEN_is_local
nop
+#endif
cmpd cr7, r3, r31 /* If len(r3) < len(r4). */
blt cr7, L(retnull)
mr r3, r29
lbz r4, 0(r30)
bl STRCHR
+#ifndef STRCHR_is_local
nop
+#endif
mr r11, r3
/* If first char of search str is not present. */
@@ -330,7 +344,9 @@ L(begin):
beq cr7, L(default)
lbz r4, 0(r30)
bl STRCHR
+#ifndef STRCHR_is_local
nop
+#endif
/* If first char of search str is not present. */
cmpdi cr7, r3, 0
ble cr7, L(end)
@@ -433,7 +449,9 @@ L(nextbyte):
beq cr7, L(default)
lbz r4, 0(r30)
bl STRCHR
+#ifndef STRCHR_is_local
nop
+#endif
/* If first char of search str is not present. */
cmpdi cr7, r3, 0
ble cr7, L(end)
@@ -513,5 +531,5 @@ L(end):
ld r31, -8(r1) /* Restore callers save register r31. */
mtlr r0 /* Branch to link register. */
blr
-END (strstr)
+END (STRSTR)
libc_hidden_builtin_def (strstr)
diff --git a/sysdeps/powerpc/powerpc64/power7/sub_n.S b/sysdeps/powerpc/powerpc64/power7/sub_n.S
index 262eee5486..283e0cac6c 100644
--- a/sysdeps/powerpc/powerpc64/power7/sub_n.S
+++ b/sysdeps/powerpc/powerpc64/power7/sub_n.S
@@ -1,6 +1,6 @@
 /* PowerPC64 mpn_add_n/mpn_sub_n -- mpn addition and
    subtraction.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/power8/Implies b/sysdeps/powerpc/powerpc64/power8/Implies
deleted file mode 100644
index 9a5e3c7277..0000000000
--- a/sysdeps/powerpc/powerpc64/power8/Implies
+++ /dev/null
@@ -1,2 +0,0 @@
-powerpc/powerpc64/power7/fpu
-powerpc/powerpc64/power7
diff --git a/sysdeps/powerpc/powerpc64/power8/Makefile b/sysdeps/powerpc/powerpc64/power8/Makefile
new file mode 100644
index 0000000000..71a59529f3
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(subdir),string)
+sysdep_routines += strcasestr-ppc64
+endif
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/Implies b/sysdeps/powerpc/powerpc64/power8/fpu/Implies
deleted file mode 100644
index 1187cdfb0a..0000000000
--- a/sysdeps/powerpc/powerpc64/power8/fpu/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power7/fpu/
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/e_expf.S b/sysdeps/powerpc/powerpc64/power8/fpu/e_expf.S
new file mode 100644
index 0000000000..32ee8326e1
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/e_expf.S
@@ -0,0 +1,303 @@
+/* Optimized expf(). PowerPC64/POWER8 version.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* Short algorithm description:
+ *
+ * Let K = 64 (table size).
+ * e^x = 2^(x/log(2)) = 2^n * T[j] * (1 + P(y))
+ * where:
+ * x = m*log(2)/K + y, y in [0.0..log(2)/K]
+ * m = n*K + j, m,n,j - signed integer, j in [0..K-1]
+ * values of 2^(j/K) are tabulated as T[j].
+ *
+ * P(y) is a minimax polynomial approximation of expf(y)-1
+ * on small interval [0.0..log(2)/K].
+ *
+ * P(y) = P3*y*y*y*y + P2*y*y*y + P1*y*y + P0*y, calculated as
+ * z = y*y; P(y) = (P3*z + P1)*z + (P2*z + P0)*y
+ *
+ * Special cases:
+ * expf(NaN) = NaN
+ * expf(+INF) = +INF
+ * expf(-INF) = 0
+ * expf(x) = 1 for subnormals
+ * for finite argument, only expf(0)=1 is exact
+ * expf(x) overflows if x>88.7228317260742190
+ * expf(x) underflows if x<-103.972076416015620
+ */
+
+#define C1 0x42ad496b /* Single precision 125*log(2). */
+#define C2 0x31800000 /* Single precision 2^(-28). */
+#define SP_INF 0x7f800000 /* Single precision Inf. */
+#define SP_EXP_BIAS 0x1fc0 /* Single precision exponent bias. */
+
+#define DATA_OFFSET r9
+
+/* Implements the function
+
+ float [fp1] expf (float [fp1] x) */
+
+ .machine power8
+ENTRY (__ieee754_expf, 4)
+ addis DATA_OFFSET,r2,.Lanchor@toc@ha
+ addi DATA_OFFSET,DATA_OFFSET,.Lanchor@toc@l
+
+ xscvdpspn v0,v1
+ mfvsrd r8,v0 /* r8 = x */
+ lfd fp2,(.KLN2-.Lanchor)(DATA_OFFSET)
+ lfd fp3,(.P2-.Lanchor)(DATA_OFFSET)
+ rldicl r3,r8,32,33 /* r3 = |x| */
+ lis r4,C1@ha /* r4 = 125*log(2) */
+ ori r4,r4,C1@l
+ cmpw r3,r4
+ lfd fp5,(.P3-.Lanchor)(DATA_OFFSET)
+ lfd fp4,(.RS-.Lanchor)(DATA_OFFSET)
+ fmadd fp2,fp1,fp2,fp4 /* fp2 = x * K/log(2) + (2^23 + 2^22) */
+ bge L(special_paths) /* |x| >= 125*log(2) ? */
+
+ lis r4,C2@ha
+ ori r4,r4,C2@l
+ cmpw r3,r4
+ blt L(small_args) /* |x| < 2^(-28) ? */
+
+ /* Main path: here if 2^(-28) <= |x| < 125*log(2) */
+ frsp fp6,fp2
+ xscvdpsp v2,v2
+ mfvsrd r8,v2
+ mr r3,r8 /* r3 = m */
+ rldicl r8,r8,32,58 /* r8 = j */
+ lfs fp4,(.SP_RS-.Lanchor)(DATA_OFFSET)
+ fsubs fp2,fp6,fp4 /* fp2 = m = x * K/log(2) */
+ srdi r3,r3,32
+ clrrwi r3,r3,6 /* r3 = n */
+ lfd fp6,(.NLN2K-.Lanchor)(DATA_OFFSET)
+ fmadd fp0,fp2,fp6,fp1 /* fp0 = y = x - m*log(2)/K */
+ fmul fp2,fp0,fp0 /* fp2 = z = y^2 */
+ lfd fp4,(.P1-.Lanchor)(DATA_OFFSET)
+ lfd fp6,(.P0-.Lanchor)(DATA_OFFSET)
+ lis r4,SP_EXP_BIAS@ha
+ ori r4,r4,SP_EXP_BIAS@l
+ add r3,r3,r4
+ rldic r3,r3,49,1 /* r3 = 2^n */
+ fmadd fp4,fp5,fp2,fp4 /* fp4 = P3 * z + P1 */
+ fmadd fp6,fp3,fp2,fp6 /* fp6 = P2 * z + P0 */
+ mtvsrd v1,r3
+ xscvspdp v1,v1
+ fmul fp4,fp4,fp2 /* fp4 = (P3 * z + P1)*z */
+ fmadd fp0,fp0,fp6,fp4 /* fp0 = P(y) */
+ sldi r8,r8,3 /* Access doublewords from T[j]. */
+ addi r6,DATA_OFFSET,(.Ttable-.Lanchor)
+ lfdx fp3,r6,r8
+ fmadd fp0,fp0,fp3,fp3 /* fp0 = T[j] * (1 + P(y)) */
+ fmul fp1,fp1,fp0 /* fp1 = 2^n * T[j] * (1 + P(y)) */
+ frsp fp1,fp1
+ blr
+
+ .align 4
+/* x is either underflow, overflow, infinite or NaN. */
+L(special_paths):
+ srdi r8,r8,32
+ rlwinm r8,r8,3,29,29 /* r8 = 0, if x positive.
+ r8 = 4, otherwise. */
+ addi r6,DATA_OFFSET,(.SPRANGE-.Lanchor)
+ lwzx r4,r6,r8 /* r4 = .SPRANGE[signbit(x)] */
+ cmpw r3,r4
+ /* |x| <= .SPRANGE[signbit(x)] */
+ ble L(near_under_or_overflow)
+
+ lis r4,SP_INF@ha
+ ori r4,r4,SP_INF@l
+ cmpw r3,r4
+ bge L(arg_inf_or_nan) /* |x| > Infinite ? */
+
+ addi r6,DATA_OFFSET,(.SPLARGE_SMALL-.Lanchor)
+ lfsx fp1,r6,r8
+ fmuls fp1,fp1,fp1
+ blr
+
+
+ .align 4
+L(small_args):
+ /* expf(x) = 1.0, where |x| < |2^(-28)| */
+ lfs fp2,(.SPone-.Lanchor)(DATA_OFFSET)
+ fadds fp1,fp1,fp2
+ blr
+
+
+ .align 4
+L(arg_inf_or_nan):
+ bne L(arg_nan)
+
+ /* expf(+INF) = +INF
+ expf(-INF) = 0 */
+ addi r6,DATA_OFFSET,(.INF_ZERO-.Lanchor)
+ lfsx fp1,r6,r8
+ blr
+
+
+ .align 4
+L(arg_nan):
+ /* expf(NaN) = NaN */
+ fadd fp1,fp1,fp1
+ frsp fp1,fp1
+ blr
+
+ .align 4
+L(near_under_or_overflow):
+ frsp fp6,fp2
+ xscvdpsp v2,v2
+ mfvsrd r8,v2
+ mr r3,r8 /* r3 = m */
+ rldicl r8,r8,32,58 /* r8 = j */
+ lfs fp4,(.SP_RS-.Lanchor)(DATA_OFFSET)
+ fsubs fp2,fp6,fp4 /* fp2 = m = x * K/log(2) */
+ srdi r3,r3,32
+ clrrwi r3,r3,6 /* r3 = n */
+ lfd fp6,(.NLN2K-.Lanchor)(DATA_OFFSET)
+ fmadd fp0,fp2,fp6,fp1 /* fp0 = y = x - m*log(2)/K */
+ fmul fp2,fp0,fp0 /* fp2 = z = y^2 */
+ lfd fp4,(.P1-.Lanchor)(DATA_OFFSET)
+ lfd fp6,(.P0-.Lanchor)(DATA_OFFSET)
+ ld r4,(.DP_EXP_BIAS-.Lanchor)(DATA_OFFSET)
+ add r3,r3,r4
+	rldic	r3,r3,46,1	/* r3 = 2^n */
+ fmadd fp4,fp5,fp2,fp4 /* fp4 = P3 * z + P1 */
+ fmadd fp6,fp3,fp2,fp6 /* fp6 = P2 * z + P0 */
+ mtvsrd v1,r3
+ fmul fp4,fp4,fp2 /* fp4 = (P3*z + P1)*z */
+ fmadd fp0,fp0,fp6,fp4 /* fp0 = P(y) */
+ sldi r8,r8,3 /* Access doublewords from T[j]. */
+ addi r6,DATA_OFFSET,(.Ttable-.Lanchor)
+ lfdx fp3,r6,r8
+	fmadd	fp0,fp0,fp3,fp3	/* fp0 = T[j] * (1 + P(y)) */
+	fmul	fp1,fp1,fp0	/* fp1 = 2^n * T[j] * (1 + P(y)) */
+ frsp fp1,fp1
+ blr
+END(__ieee754_expf)
+
+ .section .rodata, "a",@progbits
+.Lanchor:
+ .balign 8
+/* Table T[j] = 2^(j/K). Double precision. */
+.Ttable:
+ .8byte 0x3ff0000000000000
+ .8byte 0x3ff02c9a3e778061
+ .8byte 0x3ff059b0d3158574
+ .8byte 0x3ff0874518759bc8
+ .8byte 0x3ff0b5586cf9890f
+ .8byte 0x3ff0e3ec32d3d1a2
+ .8byte 0x3ff11301d0125b51
+ .8byte 0x3ff1429aaea92de0
+ .8byte 0x3ff172b83c7d517b
+ .8byte 0x3ff1a35beb6fcb75
+ .8byte 0x3ff1d4873168b9aa
+ .8byte 0x3ff2063b88628cd6
+ .8byte 0x3ff2387a6e756238
+ .8byte 0x3ff26b4565e27cdd
+ .8byte 0x3ff29e9df51fdee1
+ .8byte 0x3ff2d285a6e4030b
+ .8byte 0x3ff306fe0a31b715
+ .8byte 0x3ff33c08b26416ff
+ .8byte 0x3ff371a7373aa9cb
+ .8byte 0x3ff3a7db34e59ff7
+ .8byte 0x3ff3dea64c123422
+ .8byte 0x3ff4160a21f72e2a
+ .8byte 0x3ff44e086061892d
+ .8byte 0x3ff486a2b5c13cd0
+ .8byte 0x3ff4bfdad5362a27
+ .8byte 0x3ff4f9b2769d2ca7
+ .8byte 0x3ff5342b569d4f82
+ .8byte 0x3ff56f4736b527da
+ .8byte 0x3ff5ab07dd485429
+ .8byte 0x3ff5e76f15ad2148
+ .8byte 0x3ff6247eb03a5585
+ .8byte 0x3ff6623882552225
+ .8byte 0x3ff6a09e667f3bcd
+ .8byte 0x3ff6dfb23c651a2f
+ .8byte 0x3ff71f75e8ec5f74
+ .8byte 0x3ff75feb564267c9
+ .8byte 0x3ff7a11473eb0187
+ .8byte 0x3ff7e2f336cf4e62
+ .8byte 0x3ff82589994cce13
+ .8byte 0x3ff868d99b4492ed
+ .8byte 0x3ff8ace5422aa0db
+ .8byte 0x3ff8f1ae99157736
+ .8byte 0x3ff93737b0cdc5e5
+ .8byte 0x3ff97d829fde4e50
+ .8byte 0x3ff9c49182a3f090
+ .8byte 0x3ffa0c667b5de565
+ .8byte 0x3ffa5503b23e255d
+ .8byte 0x3ffa9e6b5579fdbf
+ .8byte 0x3ffae89f995ad3ad
+ .8byte 0x3ffb33a2b84f15fb
+ .8byte 0x3ffb7f76f2fb5e47
+ .8byte 0x3ffbcc1e904bc1d2
+ .8byte 0x3ffc199bdd85529c
+ .8byte 0x3ffc67f12e57d14b
+ .8byte 0x3ffcb720dcef9069
+ .8byte 0x3ffd072d4a07897c
+ .8byte 0x3ffd5818dcfba487
+ .8byte 0x3ffda9e603db3285
+ .8byte 0x3ffdfc97337b9b5f
+ .8byte 0x3ffe502ee78b3ff6
+ .8byte 0x3ffea4afa2a490da
+ .8byte 0x3ffefa1bee615a27
+ .8byte 0x3fff50765b6e4540
+ .8byte 0x3fffa7c1819e90d8
+
+.KLN2:
+ .8byte 0x40571547652b82fe /* Double precision K/log(2). */
+
+/* Double precision polynomial coefficients. */
+.P0:
+ .8byte 0x3fefffffffffe7c6
+.P1:
+ .8byte 0x3fe00000008d6118
+.P2:
+ .8byte 0x3fc55550da752d4f
+.P3:
+ .8byte 0x3fa56420eb78fa85
+
+.RS:
+ .8byte 0x4168000000000000 /* Double precision 2^23 + 2^22. */
+.NLN2K:
+ .8byte 0xbf862e42fefa39ef /* Double precision -log(2)/K. */
+.DP_EXP_BIAS:
+ .8byte 0x000000000000ffc0 /* Double precision exponent bias. */
+
+ .balign 4
+.SPone:
+ .4byte 0x3f800000 /* Single precision 1.0. */
+.SP_RS:
+ .4byte 0x4b400000 /* Single precision 2^23 + 2^22. */
+
+.SPRANGE: /* Single precision overflow/underflow bounds. */
+ .4byte 0x42b17217 /* if x>this bound, then result overflows. */
+ .4byte 0x42cff1b4 /* if x<this bound, then result underflows. */
+
+.SPLARGE_SMALL:
+ .4byte 0x71800000 /* 2^100. */
+ .4byte 0x0d800000 /* 2^-100. */
+
+.INF_ZERO:
+ .4byte 0x7f800000 /* Single precision Inf. */
+ .4byte 0 /* Single precision zero. */
+
+strong_alias (__ieee754_expf, __expf_finite)
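
The algorithm comment at the top of the new e_expf.S splits x as m*log(2)/K + y with K = 64 and m = n*K + j, so that e^x = 2^n * T[j] * (1 + P(y)). A rough C model of that reduction, leaning on libm for the pieces the assembly takes from .Ttable and the minimax polynomial (an illustrative sketch, not the glibc code path):

#include <math.h>
#include <stdio.h>

static double
expf_model (double x)
{
  const int K = 64;
  const double ln2 = 0.6931471805599453;
  long long m = llround (x * K / ln2);  /* the asm derives this via the 2^23+2^22 trick */
  double y = x - (double) m * ln2 / K;  /* reduced argument, |y| <= log(2)/(2K) */
  long long n = (m >= 0 ? m : m - (K - 1)) / K;  /* floor (m / K) */
  long long j = m - n * K;                       /* table index, 0 <= j < K */
  /* T[j] = 2^(j/K); the assembly loads it from .Ttable and approximates
     exp (y) with the degree-4 polynomial 1 + P(y).  */
  return ldexp (exp2 ((double) j / K) * exp (y), (int) n);
}

int
main (void)
{
  printf ("%.6f vs %.6f\n", expf_model (3.5), exp (3.5));
  return 0;
}
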
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/power8/fpu/multiarch/Implies
deleted file mode 100644
index 7fd86fdf87..0000000000
--- a/sysdeps/powerpc/powerpc64/power8/fpu/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power7/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_cosf.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_cosf.S
new file mode 100644
index 0000000000..af71382fb2
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_cosf.S
@@ -0,0 +1,509 @@
+/* Optimized cosf(). PowerPC64/POWER8 version.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#define _ERRNO_H 1
+#include <bits/errno.h>
+#include <libm-alias-float.h>
+
+#define FRAMESIZE (FRAME_MIN_SIZE+16)
+
+#define FLOAT_EXPONENT_SHIFT 23
+#define FLOAT_EXPONENT_BIAS 127
+#define INTEGER_BITS 3
+
+#define PI_4 0x3f490fdb /* PI/4 */
+#define NINEPI_4 0x40e231d6 /* 9 * PI/4 */
+#define TWO_PN5 0x3d000000 /* 2^-5 */
+#define TWO_PN27 0x32000000 /* 2^-27 */
+#define INFINITY 0x7f800000
+#define TWO_P23 0x4b000000 /* 2^23 */
+#define FX_FRACTION_1_28 0x9249250 /* 0x100000000 / 28 + 1 */
+
+ /* Implements the function
+
+ float [fp1] cosf (float [fp1] x) */
+
+ .machine power8
+ENTRY (__cosf, 4)
+ addis r9,r2,L(anchor)@toc@ha
+ addi r9,r9,L(anchor)@toc@l
+
+ lis r4,PI_4@h
+ ori r4,r4,PI_4@l
+
+ xscvdpspn v0,v1
+ mfvsrd r8,v0
+ rldicl r3,r8,32,33 /* Remove sign bit. */
+
+ cmpw r3,r4
+ bge L(greater_or_equal_pio4)
+
+ lis r4,TWO_PN5@h
+ ori r4,r4,TWO_PN5@l
+
+ cmpw r3,r4
+ blt L(less_2pn5)
+
+ /* Chebyshev polynomial of the form:
+ * 1.0+x^2*(C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4)))). */
+
+ lfd fp9,(L(C0)-L(anchor))(r9)
+ lfd fp10,(L(C1)-L(anchor))(r9)
+ lfd fp11,(L(C2)-L(anchor))(r9)
+ lfd fp12,(L(C3)-L(anchor))(r9)
+ lfd fp13,(L(C4)-L(anchor))(r9)
+
+ fmul fp2,fp1,fp1 /* x^2 */
+ lfd fp3,(L(DPone)-L(anchor))(r9)
+
+ fmadd fp4,fp2,fp13,fp12 /* C3+x^2*C4 */
+ fmadd fp4,fp2,fp4,fp11 /* C2+x^2*(C3+x^2*C4) */
+ fmadd fp4,fp2,fp4,fp10 /* C1+x^2*(C2+x^2*(C3+x^2*C4)) */
+ fmadd fp4,fp2,fp4,fp9 /* C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4))) */
+ fmadd fp1,fp2,fp4,fp3 /* 1.0+x^2*(C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4)))) */
+ frsp fp1,fp1 /* Round to single precision. */
+
+ blr
+
+ .balign 16
+L(greater_or_equal_pio4):
+ lis r4,NINEPI_4@h
+ ori r4,r4,NINEPI_4@l
+ cmpw r3,r4
+ bge L(greater_or_equal_9pio4)
+
+ /* Calculate quotient of |x|/(PI/4). */
+ lfd fp2,(L(invpio4)-L(anchor))(r9)
+ fabs fp1,fp1 /* |x| */
+ fmul fp2,fp1,fp2 /* |x|/(PI/4) */
+ fctiduz fp2,fp2
+	mfvsrd	r3,v2		/* n = |x| / (PI/4), truncated */
+
+ /* Now use that quotient to find |x| mod (PI/2). */
+ addi r7,r3,1
+ rldicr r5,r7,2,60 /* ((n+1) >> 1) << 3 */
+ addi r6,r9,(L(pio2_table)-L(anchor))
+ lfdx fp4,r5,r6
+ fsub fp1,fp1,fp4
+
+ .balign 16
+L(reduced):
+ /* Now we are in the range -PI/4 to PI/4. */
+
+ /* Work out if we are in a positive or negative primary interval. */
+ addi r7,r7,2
+ rldicl r4,r7,62,63 /* ((n+3) >> 2) & 1 */
+
+ /* Load a 1.0 or -1.0. */
+ addi r5,r9,(L(ones)-L(anchor))
+ sldi r4,r4,3
+ lfdx fp0,r4,r5
+
+ /* Are we in the primary interval of sin or cos? */
+ andi. r4,r7,0x2
+ bne L(cos)
+
+ /* Chebyshev polynomial of the form:
+ x+x^3*(S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4)))). */
+
+ lfd fp9,(L(S0)-L(anchor))(r9)
+ lfd fp10,(L(S1)-L(anchor))(r9)
+ lfd fp11,(L(S2)-L(anchor))(r9)
+ lfd fp12,(L(S3)-L(anchor))(r9)
+ lfd fp13,(L(S4)-L(anchor))(r9)
+
+ fmul fp2,fp1,fp1 /* x^2 */
+ fmul fp3,fp2,fp1 /* x^3 */
+
+ fmadd fp4,fp2,fp13,fp12 /* S3+x^2*S4 */
+ fmadd fp4,fp2,fp4,fp11 /* S2+x^2*(S3+x^2*S4) */
+ fmadd fp4,fp2,fp4,fp10 /* S1+x^2*(S2+x^2*(S3+x^2*S4)) */
+ fmadd fp4,fp2,fp4,fp9 /* S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4))) */
+ fmadd fp4,fp3,fp4,fp1 /* x+x^3*(S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4)))) */
+ fmul fp4,fp4,fp0 /* Add in the sign. */
+ frsp fp1,fp4 /* Round to single precision. */
+
+ blr
+
+ .balign 16
+L(cos):
+ /* Chebyshev polynomial of the form:
+ 1.0+x^2*(C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4)))). */
+
+ lfd fp9,(L(C0)-L(anchor))(r9)
+ lfd fp10,(L(C1)-L(anchor))(r9)
+ lfd fp11,(L(C2)-L(anchor))(r9)
+ lfd fp12,(L(C3)-L(anchor))(r9)
+ lfd fp13,(L(C4)-L(anchor))(r9)
+
+ fmul fp2,fp1,fp1 /* x^2 */
+ lfd fp3,(L(DPone)-L(anchor))(r9)
+
+ fmadd fp4,fp2,fp13,fp12 /* C3+x^2*C4 */
+ fmadd fp4,fp2,fp4,fp11 /* C2+x^2*(C3+x^2*C4) */
+ fmadd fp4,fp2,fp4,fp10 /* C1+x^2*(C2+x^2*(C3+x^2*C4)) */
+ fmadd fp4,fp2,fp4,fp9 /* C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4))) */
+ fmadd fp4,fp2,fp4,fp3 /* 1.0 + x^2*(C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4)))) */
+ fmul fp4,fp4,fp0 /* Add in the sign. */
+ frsp fp1,fp4 /* Round to single precision. */
+
+ blr
+
+ .balign 16
+L(greater_or_equal_9pio4):
+ lis r4,INFINITY@h
+ ori r4,r4,INFINITY@l
+ cmpw r3,r4
+ bge L(inf_or_nan)
+
+ lis r4,TWO_P23@h
+ ori r4,r4,TWO_P23@l
+ cmpw r3,r4
+ bge L(greater_or_equal_2p23)
+
+ fabs fp1,fp1 /* |x| */
+
+ /* Calculate quotient of |x|/(PI/4). */
+ lfd fp2,(L(invpio4)-L(anchor))(r9)
+
+ lfd fp3,(L(DPone)-L(anchor))(r9)
+ lfd fp4,(L(DPhalf)-L(anchor))(r9)
+ fmul fp2,fp1,fp2 /* |x|/(PI/4) */
+ friz fp2,fp2 /* n = floor(|x|/(PI/4)) */
+
+ /* Calculate (n + 1) / 2. */
+ fadd fp2,fp2,fp3 /* n + 1 */
+ fmul fp3,fp2,fp4 /* (n + 1) / 2 */
+ friz fp3,fp3
+
+ lfd fp4,(L(pio2hi)-L(anchor))(r9)
+ lfd fp5,(L(pio2lo)-L(anchor))(r9)
+
+ fmul fp6,fp4,fp3
+ fadd fp6,fp6,fp1
+ fmadd fp1,fp5,fp3,fp6
+
+ fctiduz fp2,fp2
+ mfvsrd r7,v2 /* n + 1 */
+
+ b L(reduced)
+
+ .balign 16
+L(inf_or_nan):
+ bne L(skip_errno_setting) /* Is a NAN? */
+
+ /* We delayed the creation of the stack frame, as well as the saving of
+	   the link register, because only at this point are we sure that
+	   doing so is actually needed.  */
+
+ stfd fp1,-8(r1)
+
+ /* Save the link register. */
+ mflr r0
+ std r0,16(r1)
+ cfi_offset(lr, 16)
+
+ /* Create the stack frame. */
+ stdu r1,-FRAMESIZE(r1)
+ cfi_adjust_cfa_offset(FRAMESIZE)
+
+ bl JUMPTARGET(__errno_location)
+ nop
+
+ /* Restore the stack frame. */
+ addi r1,r1,FRAMESIZE
+ cfi_adjust_cfa_offset(-FRAMESIZE)
+ /* Restore the link register. */
+ ld r0,16(r1)
+ mtlr r0
+
+ lfd fp1,-8(r1)
+
+ /* errno = EDOM */
+ li r4,EDOM
+ stw r4,0(r3)
+
+L(skip_errno_setting):
+ fsub fp1,fp1,fp1 /* x - x */
+ blr
+
+ .balign 16
+L(greater_or_equal_2p23):
+ fabs fp1,fp1
+
+ srwi r4,r3,FLOAT_EXPONENT_SHIFT
+ subi r4,r4,FLOAT_EXPONENT_BIAS
+
+ /* We reduce the input modulo pi/4, so we need 3 bits of integer
+ to determine where in 2*pi we are. Index into our array
+ accordingly. */
+ addi r4,r4,INTEGER_BITS
+
+ /* To avoid an expensive divide, for the range we care about (0 - 127)
+ we can transform x/28 into:
+
+ x/28 = (x * ((0x100000000 / 28) + 1)) >> 32
+
+	   mulhwu returns the top 32 bits of the 64-bit product, doing the
+	   shift for us in the same instruction.  The upper 32 bits of the
+	   destination register are undefined, so we have to clear them.  */
+
+ lis r6,FX_FRACTION_1_28@h
+ ori r6,r6,FX_FRACTION_1_28@l
+ mulhwu r5,r4,r6
+ clrldi r5,r5,32
+
+ /* Get our pointer into the invpio4_table array. */
+ sldi r4,r5,3
+ addi r6,r9,(L(invpio4_table)-L(anchor))
+ add r4,r4,r6
+
+ lfd fp2,0(r4)
+ lfd fp3,8(r4)
+ lfd fp4,16(r4)
+ lfd fp5,24(r4)
+
+ fmul fp6,fp2,fp1
+ fmul fp7,fp3,fp1
+ fmul fp8,fp4,fp1
+ fmul fp9,fp5,fp1
+
+ /* Mask off larger integer bits in highest double word that we don't
+ care about to avoid losing precision when combining with smaller
+ values. */
+ fctiduz fp10,fp6
+ mfvsrd r7,v10
+ rldicr r7,r7,0,(63-INTEGER_BITS)
+ mtvsrd v10,r7
+ fcfidu fp10,fp10 /* Integer bits. */
+
+ fsub fp6,fp6,fp10 /* highest -= integer bits */
+
+ /* Work out the integer component, rounded down. Use the top two
+ limbs for this. */
+ fadd fp10,fp6,fp7 /* highest + higher */
+
+ fctiduz fp10,fp10
+ mfvsrd r7,v10
+ andi. r0,r7,1
+ fcfidu fp10,fp10
+
+ /* Subtract integer component from highest limb. */
+ fsub fp12,fp6,fp10
+
+ beq L(even_integer)
+
+ /* Our integer component is odd, so we are in the -PI/4 to 0 primary
+ region. We need to shift our result down by PI/4, and to do this
+ in the mod (4/PI) space we simply subtract 1. */
+ lfd fp11,(L(DPone)-L(anchor))(r9)
+ fsub fp12,fp12,fp11
+
+ /* Now add up all the limbs in order. */
+ fadd fp12,fp12,fp7
+ fadd fp12,fp12,fp8
+ fadd fp12,fp12,fp9
+
+ /* And finally multiply by pi/4. */
+ lfd fp13,(L(pio4)-L(anchor))(r9)
+ fmul fp1,fp12,fp13
+
+ addi r7,r7,1
+ b L(reduced)
+
+L(even_integer):
+ lfd fp11,(L(DPone)-L(anchor))(r9)
+
+ /* Now add up all the limbs in order. */
+ fadd fp12,fp12,fp7
+	fadd	fp12,fp12,fp8
+	fadd	fp12,fp12,fp9
+
+ /* We need to check if the addition of all the limbs resulted in us
+ overflowing 1.0. */
+ fcmpu 0,fp12,fp11
+ bgt L(greater_than_one)
+
+ /* And finally multiply by pi/4. */
+ lfd fp13,(L(pio4)-L(anchor))(r9)
+ fmul fp1,fp12,fp13
+
+ addi r7,r7,1
+ b L(reduced)
+
+L(greater_than_one):
+ /* We did overflow 1.0 when adding up all the limbs. Add 1.0 to our
+ integer, and subtract 1.0 from our result. Since that makes the
+ integer component odd, we need to subtract another 1.0 as
+ explained above. */
+ addi r7,r7,1
+
+ lfd fp11,(L(DPtwo)-L(anchor))(r9)
+ fsub fp12,fp12,fp11
+
+ /* And finally multiply by pi/4. */
+ lfd fp13,(L(pio4)-L(anchor))(r9)
+ fmul fp1,fp12,fp13
+
+ addi r7,r7,1
+ b L(reduced)
+
+ .balign 16
+L(less_2pn5):
+ lis r4,TWO_PN27@h
+ ori r4,r4,TWO_PN27@l
+
+ cmpw r3,r4
+ blt L(less_2pn27)
+
+ /* A simpler Chebyshev approximation is close enough for this range:
+ 1.0+x^2*(CC0+x^3*CC1). */
+
+ lfd fp10,(L(CC0)-L(anchor))(r9)
+ lfd fp11,(L(CC1)-L(anchor))(r9)
+
+ fmul fp2,fp1,fp1 /* x^2 */
+ fmul fp3,fp2,fp1 /* x^3 */
+ lfd fp1,(L(DPone)-L(anchor))(r9)
+
+ fmadd fp4,fp3,fp11,fp10 /* CC0+x^3*CC1 */
+ fmadd fp1,fp2,fp4,fp1 /* 1.0+x^2*(CC0+x^3*CC1) */
+
+ frsp fp1,fp1 /* Round to single precision. */
+
+ blr
+
+ .balign 16
+L(less_2pn27):
+ /* Handle some special cases:
+
+ cosf(subnormal) raises inexact
+ cosf(min_normalized) raises inexact
+ cosf(normalized) raises inexact. */
+
+ lfd fp2,(L(DPone)-L(anchor))(r9)
+
+ fabs fp1,fp1 /* |x| */
+ fsub fp1,fp2,fp1 /* 1.0-|x| */
+
+ frsp fp1,fp1
+
+ blr
+
+END (__cosf)
+
+ .section .rodata, "a"
+
+ .balign 8
+
+L(anchor):
+
+ /* Chebyshev constants for sin, range -PI/4 - PI/4. */
+L(S0): .8byte 0xbfc5555555551cd9
+L(S1): .8byte 0x3f81111110c2688b
+L(S2): .8byte 0xbf2a019f8b4bd1f9
+L(S3): .8byte 0x3ec71d7264e6b5b4
+L(S4): .8byte 0xbe5a947e1674b58a
+
+ /* Chebyshev constants for cos, range 2^-27 - 2^-5. */
+L(CC0): .8byte 0xbfdfffffff5cc6fd
+L(CC1): .8byte 0x3fa55514b178dac5
+
+ /* Chebyshev constants for cos, range -PI/4 - PI/4. */
+L(C0): .8byte 0xbfdffffffffe98ae
+L(C1): .8byte 0x3fa55555545c50c7
+L(C2): .8byte 0xbf56c16b348b6874
+L(C3): .8byte 0x3efa00eb9ac43cc0
+L(C4): .8byte 0xbe923c97dd8844d7
+
+L(invpio2):
+ .8byte 0x3fe45f306dc9c883 /* 2/PI */
+
+L(invpio4):
+ .8byte 0x3ff45f306dc9c883 /* 4/PI */
+
+L(invpio4_table):
+ .8byte 0x0000000000000000
+ .8byte 0x3ff45f306c000000
+ .8byte 0x3e3c9c882a000000
+ .8byte 0x3c54fe13a8000000
+ .8byte 0x3aaf47d4d0000000
+ .8byte 0x38fbb81b6c000000
+ .8byte 0x3714acc9e0000000
+ .8byte 0x3560e4107c000000
+ .8byte 0x33bca2c756000000
+ .8byte 0x31fbd778ac000000
+ .8byte 0x300b7246e0000000
+ .8byte 0x2e5d2126e8000000
+ .8byte 0x2c97003248000000
+ .8byte 0x2ad77504e8000000
+ .8byte 0x290921cfe0000000
+ .8byte 0x274deb1cb0000000
+ .8byte 0x25829a73e0000000
+ .8byte 0x23fd1046be000000
+ .8byte 0x2224baed10000000
+ .8byte 0x20709d338e000000
+ .8byte 0x1e535a2f80000000
+ .8byte 0x1cef904e64000000
+ .8byte 0x1b0d639830000000
+ .8byte 0x1964ce7d24000000
+ .8byte 0x17b908bf16000000
+
+L(pio4):
+ .8byte 0x3fe921fb54442d18 /* PI/4 */
+
+/* PI/2 as a sum of two doubles. We only use 32 bits of the upper limb
+ to avoid losing significant bits when multiplying with up to
+ (2^22)/(pi/2). */
+L(pio2hi):
+ .8byte 0xbff921fb54400000
+
+L(pio2lo):
+ .8byte 0xbdd0b4611a626332
+
+L(pio2_table):
+ .8byte 0
+ .8byte 0x3ff921fb54442d18 /* 1 * PI/2 */
+ .8byte 0x400921fb54442d18 /* 2 * PI/2 */
+ .8byte 0x4012d97c7f3321d2 /* 3 * PI/2 */
+ .8byte 0x401921fb54442d18 /* 4 * PI/2 */
+ .8byte 0x401f6a7a2955385e /* 5 * PI/2 */
+ .8byte 0x4022d97c7f3321d2 /* 6 * PI/2 */
+ .8byte 0x4025fdbbe9bba775 /* 7 * PI/2 */
+ .8byte 0x402921fb54442d18 /* 8 * PI/2 */
+ .8byte 0x402c463abeccb2bb /* 9 * PI/2 */
+ .8byte 0x402f6a7a2955385e /* 10 * PI/2 */
+
+L(small):
+ .8byte 0x3cd0000000000000 /* 2^-50 */
+
+L(ones):
+ .8byte 0x3ff0000000000000 /* +1.0 */
+ .8byte 0xbff0000000000000 /* -1.0 */
+
+L(DPhalf):
+ .8byte 0x3fe0000000000000 /* 0.5 */
+
+L(DPone):
+ .8byte 0x3ff0000000000000 /* 1.0 */
+
+L(DPtwo):
+ .8byte 0x4000000000000000 /* 2.0 */
+
+libm_alias_float (__cos, cos)
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_finite.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_finite.S
index 3b0c88e5eb..aac54a9364 100644
--- a/sysdeps/powerpc/powerpc64/power8/fpu/s_finite.S
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_finite.S
@@ -1,5 +1,5 @@
/* isfinite(). PowerPC64/POWER8 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,7 +23,7 @@
/* int [r3] __finite ([fp1] x) */
-EALIGN (__finite, 4, 0)
+ENTRY_TOCLESS (__finite, 4)
CALL_MCOUNT 0
MFVSRD_R3_V1
lis r9,0x8010
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_isinf.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_isinf.S
index 4708239689..94746ef068 100644
--- a/sysdeps/powerpc/powerpc64/power8/fpu/s_isinf.S
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_isinf.S
@@ -1,5 +1,5 @@
/* isinf(). PowerPC64/POWER8 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,7 +23,7 @@
/* int [r3] __isinf([fp1] x) */
-EALIGN (__isinf, 4, 0)
+ENTRY_TOCLESS (__isinf, 4)
CALL_MCOUNT 0
MFVSRD_R3_V1
lis r9,0x7ff0 /* r9 = 0x7ff0 */
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_isnan.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_isnan.S
index 0e5c19333c..8aef354f68 100644
--- a/sysdeps/powerpc/powerpc64/power8/fpu/s_isnan.S
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_isnan.S
@@ -1,5 +1,5 @@
/* isnan(). PowerPC64/POWER8 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,7 +23,7 @@
/* int [r3] __isnan([f1] x) */
-EALIGN (__isnan, 4, 0)
+ENTRY_TOCLESS (__isnan, 4)
CALL_MCOUNT 0
MFVSRD_R3_V1
lis r9,0x7ff0
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_llrint.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_llrint.S
index 0af9feee5a..7f18d705a9 100644
--- a/sysdeps/powerpc/powerpc64/power8/fpu/s_llrint.S
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_llrint.S
@@ -1,5 +1,5 @@
/* Round double to long int. POWER8 PowerPC64 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,11 +18,13 @@
#include <sysdep.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-float.h>
+#include <libm-alias-double.h>
#define MFVSRD_R3_V1 .long 0x7c230066 /* mfvsrd r3,vs1 */
/* long long int[r3] __llrint (double x[fp1]) */
-ENTRY (__llrint)
+ENTRY_TOCLESS (__llrint)
CALL_MCOUNT 0
fctid fp1,fp1
MFVSRD_R3_V1
@@ -30,16 +32,12 @@ ENTRY (__llrint)
END (__llrint)
strong_alias (__llrint, __lrint)
-weak_alias (__llrint, llrint)
-weak_alias (__lrint, lrint)
-
-#ifdef NO_LONG_DOUBLE
-strong_alias (__llrint, __llrintl)
-weak_alias (__llrint, llrintl)
-strong_alias (__lrint, __lrintl)
-weak_alias (__lrint, lrintl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __llrint, llrintl, GLIBC_2_1)
-compat_symbol (libm, __lrint, lrintl, GLIBC_2_1)
-#endif
+libm_alias_double (__llrint, llrint)
+libm_alias_double (__lrint, lrint)
+/* The double version also works for single-precision as both float and
+   double parameters are passed in 64-bit FPRs and both versions are
+   expected to return a [long] long value.  */
+strong_alias (__llrint, __llrintf)
+libm_alias_float (__llrint, llrint)
+strong_alias (__lrint, __lrintf)
+libm_alias_float (__lrint, lrint)
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S
index af2409deb8..a22fc63bb3 100644
--- a/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S
@@ -1,5 +1,5 @@
/* llround function. POWER8 PowerPC64 version.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,12 +19,14 @@
#include <sysdep.h>
#include <endian.h>
#include <math_ldbl_opt.h>
+#include <libm-alias-float.h>
+#include <libm-alias-double.h>
#define MFVSRD_R3_V1 .long 0x7c230066 /* mfvsrd r3,vs1 */
/* long long [r3] llround (float x [fp1]) */
-ENTRY (__llround)
+ENTRY_TOCLESS (__llround)
CALL_MCOUNT 0
frin fp1,fp1 /* Round to nearest +-0.5. */
fctidz fp1,fp1 /* Convert To Integer DW round toward 0. */
@@ -33,16 +35,12 @@ ENTRY (__llround)
END (__llround)
strong_alias (__llround, __lround)
-weak_alias (__llround, llround)
-weak_alias (__lround, lround)
-
-#ifdef NO_LONG_DOUBLE
-weak_alias (__llround, llroundl)
-strong_alias (__llround, __llroundl)
-weak_alias (__lround, lroundl)
-strong_alias (__lround, __lroundl)
-#endif
-#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
-compat_symbol (libm, __llround, llroundl, GLIBC_2_1)
-compat_symbol (libm, __lround, lroundl, GLIBC_2_1)
-#endif
+libm_alias_double (__llround, llround)
+libm_alias_double (__lround, lround)
+/* The double version also works for single-precision as both float and
+   double parameters are passed in 64-bit FPRs and both versions are
+   expected to return a [long] long value.  */
+strong_alias (__llround, __llroundf)
+libm_alias_float (__llround, llround)
+strong_alias (__lround, __lroundf)
+libm_alias_float (__lround, lround)
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_llroundf.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_llroundf.S
new file mode 100644
index 0000000000..9ea6bd105b
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_llroundf.S
@@ -0,0 +1 @@
+/* __lroundf is in s_llround.S. */
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_sinf.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_sinf.S
new file mode 100644
index 0000000000..59e613c102
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_sinf.S
@@ -0,0 +1,520 @@
+/* Optimized sinf(). PowerPC64/POWER8 version.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#define _ERRNO_H 1
+#include <bits/errno.h>
+#include <libm-alias-float.h>
+
+#define FRAMESIZE (FRAME_MIN_SIZE+16)
+
+#define FLOAT_EXPONENT_SHIFT 23
+#define FLOAT_EXPONENT_BIAS 127
+#define INTEGER_BITS 3
+
+#define PI_4 0x3f490fdb /* PI/4 */
+#define NINEPI_4 0x40e231d6 /* 9 * PI/4 */
+#define TWO_PN5 0x3d000000 /* 2^-5 */
+#define TWO_PN27 0x32000000 /* 2^-27 */
+#define INFINITY 0x7f800000
+#define TWO_P23 0x4b000000 /* 2^23 */
+#define FX_FRACTION_1_28 0x9249250 /* 0x100000000 / 28 + 1 */
+
+ /* Implements the function
+
+ float [fp1] sinf (float [fp1] x) */
+
+ .machine power8
+ENTRY (__sinf, 4)
+ addis r9,r2,L(anchor)@toc@ha
+ addi r9,r9,L(anchor)@toc@l
+
+ lis r4,PI_4@h
+ ori r4,r4,PI_4@l
+
+ xscvdpspn v0,v1
+ mfvsrd r8,v0
+ rldicl r3,r8,32,33 /* Remove sign bit. */
+
+ cmpw r3,r4
+ bge L(greater_or_equal_pio4)
+
+ lis r4,TWO_PN5@h
+ ori r4,r4,TWO_PN5@l
+
+ cmpw r3,r4
+ blt L(less_2pn5)
+
+ /* Chebyshev polynomial of the form:
+ * x+x^3*(S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4)))). */
+
+ lfd fp9,(L(S0)-L(anchor))(r9)
+ lfd fp10,(L(S1)-L(anchor))(r9)
+ lfd fp11,(L(S2)-L(anchor))(r9)
+ lfd fp12,(L(S3)-L(anchor))(r9)
+ lfd fp13,(L(S4)-L(anchor))(r9)
+
+ fmul fp2,fp1,fp1 /* x^2 */
+ fmul fp3,fp2,fp1 /* x^3 */
+
+ fmadd fp4,fp2,fp13,fp12 /* S3+x^2*S4 */
+ fmadd fp4,fp2,fp4,fp11 /* S2+x^2*(S3+x^2*S4) */
+ fmadd fp4,fp2,fp4,fp10 /* S1+x^2*(S2+x^2*(S3+x^2*S4)) */
+ fmadd fp4,fp2,fp4,fp9 /* S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4))) */
+ fmadd fp1,fp3,fp4,fp1 /* x+x^3*(S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4)))) */
+ frsp fp1,fp1 /* Round to single precision. */
+
+ blr
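
The same Chebyshev evaluation in C, using fused multiply-adds like the fmadd
chain above (a sketch with a hypothetical helper, not the glibc source;
S[0..4] are the doubles stored at L(S0)..L(S4) in .rodata below):

#include <math.h>

/* sinf kernel for |x| < PI/4, evaluated in double precision.  */
static float
sinf_poly (double x, const double S[5])
{
  double x2 = x * x;                    /* x^2 */
  double x3 = x2 * x;                   /* x^3 */
  double p = fma (x2, S[4], S[3]);      /* S3+x^2*S4 */
  p = fma (x2, p, S[2]);                /* S2+x^2*(S3+x^2*S4) */
  p = fma (x2, p, S[1]);                /* S1+x^2*(...) */
  p = fma (x2, p, S[0]);                /* S0+x^2*(...) */
  return (float) fma (x3, p, x);        /* x+x^3*(...) */
}
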
+
+ .balign 16
+L(greater_or_equal_pio4):
+ lis r4,NINEPI_4@h
+ ori r4,r4,NINEPI_4@l
+ cmpw r3,r4
+ bge L(greater_or_equal_9pio4)
+
+ /* Calculate quotient of |x|/(PI/4). */
+ lfd fp2,(L(invpio4)-L(anchor))(r9)
+ fabs fp1,fp1 /* |x| */
+ fmul fp2,fp1,fp2 /* |x|/(PI/4) */
+ fctiduz fp2,fp2
+	mfvsrd	r3,v2		/* n = trunc(|x| / (PI/4)) */
+
+ /* Now use that quotient to find |x| mod (PI/2). */
+ addi r7,r3,1
+ rldicr r5,r7,2,60 /* ((n+1) >> 1) << 3 */
+ addi r6,r9,(L(pio2_table)-L(anchor))
+ lfdx fp4,r5,r6
+ fsub fp1,fp1,fp4
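
In C this reduction is roughly the following (hypothetical helper, not the
glibc source); pio2_table[k] holds k*PI/2 as laid out in .rodata below, and
the returned n+1 is what the rest of the code keeps in r7:

#include <stdint.h>

static const double invpio4 = 0x1.45f306dc9c883p+0;  /* 4/PI, L(invpio4) */

/* Reduce PI/4 <= |x| < 9*PI/4 into [-PI/4, PI/4].  */
static double
reduce_below_9pio4 (double ax, uint64_t *n1, const double pio2_table[11])
{
  uint64_t n = (uint64_t) (ax * invpio4);  /* trunc(|x| / (PI/4)) */
  *n1 = n + 1;
  return ax - pio2_table[(n + 1) >> 1];    /* subtract the nearest k*PI/2 */
}
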
+
+ .balign 16
+L(reduced):
+ /* Now we are in the range -PI/4 to PI/4. */
+
+ /* Work out if we are in a positive or negative primary interval. */
+ rldicl r4,r7,62,63 /* ((n+1) >> 2) & 1 */
+
+ /* We are operating on |x|, so we need to add back the original
+ sign. */
+ rldicl r8,r8,33,63 /* (x >> 31) & 1, ie the sign bit. */
+ xor r4,r4,r8 /* 0 if result should be positive,
+ 1 if negative. */
+
+ /* Load a 1.0 or -1.0. */
+ addi r5,r9,(L(ones)-L(anchor))
+ sldi r4,r4,3
+ lfdx fp0,r4,r5
+
+ /* Are we in the primary interval of sin or cos? */
+ andi. r4,r7,0x2
+ bne L(cos)
+
+ /* Chebyshev polynomial of the form:
+ x+x^3*(S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4)))). */
+
+ lfd fp9,(L(S0)-L(anchor))(r9)
+ lfd fp10,(L(S1)-L(anchor))(r9)
+ lfd fp11,(L(S2)-L(anchor))(r9)
+ lfd fp12,(L(S3)-L(anchor))(r9)
+ lfd fp13,(L(S4)-L(anchor))(r9)
+
+ fmul fp2,fp1,fp1 /* x^2 */
+ fmul fp3,fp2,fp1 /* x^3 */
+
+ fmadd fp4,fp2,fp13,fp12 /* S3+x^2*S4 */
+ fmadd fp4,fp2,fp4,fp11 /* S2+x^2*(S3+x^2*S4) */
+ fmadd fp4,fp2,fp4,fp10 /* S1+x^2*(S2+x^2*(S3+x^2*S4)) */
+ fmadd fp4,fp2,fp4,fp9 /* S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4))) */
+ fmadd fp4,fp3,fp4,fp1 /* x+x^3*(S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4)))) */
+ fmul fp4,fp4,fp0 /* Add in the sign. */
+ frsp fp1,fp4 /* Round to single precision. */
+
+ blr
+
+ .balign 16
+L(cos):
+ /* Chebyshev polynomial of the form:
+ 1.0+x^2*(C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4)))). */
+
+ lfd fp9,(L(C0)-L(anchor))(r9)
+ lfd fp10,(L(C1)-L(anchor))(r9)
+ lfd fp11,(L(C2)-L(anchor))(r9)
+ lfd fp12,(L(C3)-L(anchor))(r9)
+ lfd fp13,(L(C4)-L(anchor))(r9)
+
+ fmul fp2,fp1,fp1 /* x^2 */
+ lfd fp3,(L(DPone)-L(anchor))(r9)
+
+ fmadd fp4,fp2,fp13,fp12 /* C3+x^2*C4 */
+ fmadd fp4,fp2,fp4,fp11 /* C2+x^2*(C3+x^2*C4) */
+ fmadd fp4,fp2,fp4,fp10 /* C1+x^2*(C2+x^2*(C3+x^2*C4)) */
+ fmadd fp4,fp2,fp4,fp9 /* C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4))) */
+ fmadd fp4,fp2,fp4,fp3 /* 1.0 + x^2*(C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4)))) */
+ fmul fp4,fp4,fp0 /* Add in the sign. */
+ frsp fp1,fp4 /* Round to single precision. */
+
+ blr
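
Putting the L(reduced) logic together, a hedged C sketch of the quadrant and
sign selection (the helper name and the two kernel function pointers are made
up; sin_poly/cos_poly stand for the Chebyshev kernels shown above):

#include <stdint.h>

/* r is the reduced argument in [-PI/4, PI/4], n1 is n+1 from the reduction,
   sign_bit is the sign bit of the original x (0 or 1).  */
static float
sinf_dispatch (double r, uint64_t n1, unsigned int sign_bit,
               double (*sin_poly) (double), double (*cos_poly) (double))
{
  unsigned int negate = (((unsigned int) (n1 >> 2)) ^ sign_bit) & 1;
  double v = (n1 & 2) ? cos_poly (r) : sin_poly (r);  /* primary interval */
  return (float) (negate ? -v : v);
}
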
+
+ .balign 16
+L(greater_or_equal_9pio4):
+ lis r4,INFINITY@h
+ ori r4,r4,INFINITY@l
+ cmpw r3,r4
+ bge L(inf_or_nan)
+
+ lis r4,TWO_P23@h
+ ori r4,r4,TWO_P23@l
+ cmpw r3,r4
+ bge L(greater_or_equal_2p23)
+
+ fabs fp1,fp1 /* |x| */
+
+ /* Calculate quotient of |x|/(PI/4). */
+ lfd fp2,(L(invpio4)-L(anchor))(r9)
+
+ lfd fp3,(L(DPone)-L(anchor))(r9)
+ lfd fp4,(L(DPhalf)-L(anchor))(r9)
+ fmul fp2,fp1,fp2 /* |x|/(PI/4) */
+ friz fp2,fp2 /* n = floor(|x|/(PI/4)) */
+
+ /* Calculate (n + 1) / 2. */
+ fadd fp2,fp2,fp3 /* n + 1 */
+ fmul fp3,fp2,fp4 /* (n + 1) / 2 */
+ friz fp3,fp3
+
+ lfd fp4,(L(pio2hi)-L(anchor))(r9)
+ lfd fp5,(L(pio2lo)-L(anchor))(r9)
+
+ fmul fp6,fp4,fp3
+ fadd fp6,fp6,fp1
+ fmadd fp1,fp5,fp3,fp6
+
+ fctiduz fp2,fp2
+ mfvsrd r7,v2 /* n + 1 */
+
+ b L(reduced)
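
In C terms this medium-range reduction looks roughly like the sketch below
(hypothetical helper, not the glibc source).  The two constants are the
negated two-part PI/2 from L(pio2hi)/L(pio2lo), so adding k times both pieces
subtracts k*PI/2 without losing the low bits:

#include <math.h>

static const double npio2hi = -0x1.921fb544p+0;        /* 0xbff921fb54400000 */
static const double npio2lo = -0x1.0b4611a626332p-34;  /* 0xbdd0b4611a626332 */

/* Reduce 9*PI/4 <= |x| < 2^23; also returns n+1 for the dispatch above.  */
static double
reduce_below_2p23 (double ax, double *n1)
{
  double n = trunc (ax * 0x1.45f306dc9c883p+0);  /* floor(|x| / (PI/4)) */
  *n1 = n + 1.0;
  double k = trunc ((n + 1.0) * 0.5);            /* (n + 1) / 2 */
  return fma (npio2lo, k, ax + npio2hi * k);     /* |x| - k*PI/2 */
}
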
+
+ .balign 16
+L(inf_or_nan):
+ bne L(skip_errno_setting) /* Is a NAN? */
+
+	/* We delayed the creation of the stack frame, as well as the saving of
+	   the link register, because only at this point are we sure that
+	   doing so is actually needed.  */
+
+ stfd fp1,-8(r1)
+
+ /* Save the link register. */
+ mflr r0
+ std r0,16(r1)
+ cfi_offset(lr, 16)
+
+ /* Create the stack frame. */
+ stdu r1,-FRAMESIZE(r1)
+ cfi_adjust_cfa_offset(FRAMESIZE)
+
+ bl JUMPTARGET(__errno_location)
+ nop
+
+ /* Restore the stack frame. */
+ addi r1,r1,FRAMESIZE
+ cfi_adjust_cfa_offset(-FRAMESIZE)
+ /* Restore the link register. */
+ ld r0,16(r1)
+ mtlr r0
+
+ lfd fp1,-8(r1)
+
+ /* errno = EDOM */
+ li r4,EDOM
+ stw r4,0(r3)
+
+L(skip_errno_setting):
+ fsub fp1,fp1,fp1 /* x - x */
+ blr
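
The Inf/NaN handling corresponds roughly to this C sketch (not the glibc
source; the helper name is made up): only an infinite argument sets errno,
and both cases return a NaN via x - x:

#include <errno.h>
#include <math.h>

static float
sinf_inf_nan (float x)
{
  if (isinf (x))
    errno = EDOM;   /* A NaN input skips the errno store.  */
  return x - x;     /* Inf - Inf and NaN - NaN both yield NaN.  */
}
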
+
+ .balign 16
+L(greater_or_equal_2p23):
+ fabs fp1,fp1
+
+ srwi r4,r3,FLOAT_EXPONENT_SHIFT
+ subi r4,r4,FLOAT_EXPONENT_BIAS
+
+ /* We reduce the input modulo pi/4, so we need 3 bits of integer
+ to determine where in 2*pi we are. Index into our array
+ accordingly. */
+ addi r4,r4,INTEGER_BITS
+
+ /* To avoid an expensive divide, for the range we care about (0 - 127)
+ we can transform x/28 into:
+
+ x/28 = (x * ((0x100000000 / 28) + 1)) >> 32
+
+ mulhwu returns the top 32 bits of the 64 bit result, doing the
+ shift for us in the same instruction. The top 32 bits are undefined,
+ so we have to mask them. */
+
+ lis r6,FX_FRACTION_1_28@h
+ ori r6,r6,FX_FRACTION_1_28@l
+ mulhwu r5,r4,r6
+ clrldi r5,r5,32
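
The divide-by-28 trick can be checked with a small C sketch (hypothetical
helper); for the exponent range 0..127 used here the multiply-high result
matches x / 28 exactly:

#include <stdint.h>

static uint32_t
div_by_28 (uint32_t x)
{
  /* 0x9249250 is FX_FRACTION_1_28; the high word of the 64-bit product is
     the quotient, just like the mulhwu above.  */
  return (uint32_t) (((uint64_t) x * 0x9249250u) >> 32);
}
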
+
+ /* Get our pointer into the invpio4_table array. */
+ sldi r4,r5,3
+ addi r6,r9,(L(invpio4_table)-L(anchor))
+ add r4,r4,r6
+
+ lfd fp2,0(r4)
+ lfd fp3,8(r4)
+ lfd fp4,16(r4)
+ lfd fp5,24(r4)
+
+ fmul fp6,fp2,fp1
+ fmul fp7,fp3,fp1
+ fmul fp8,fp4,fp1
+ fmul fp9,fp5,fp1
+
+ /* Mask off larger integer bits in highest double word that we don't
+ care about to avoid losing precision when combining with smaller
+ values. */
+ fctiduz fp10,fp6
+ mfvsrd r7,v10
+ rldicr r7,r7,0,(63-INTEGER_BITS)
+ mtvsrd v10,r7
+ fcfidu fp10,fp10 /* Integer bits. */
+
+ fsub fp6,fp6,fp10 /* highest -= integer bits */
+
+ /* Work out the integer component, rounded down. Use the top two
+ limbs for this. */
+ fadd fp10,fp6,fp7 /* highest + higher */
+
+ fctiduz fp10,fp10
+ mfvsrd r7,v10
+ andi. r0,r7,1
+ fcfidu fp10,fp10
+
+ /* Subtract integer component from highest limb. */
+ fsub fp12,fp6,fp10
+
+ beq L(even_integer)
+
+ /* Our integer component is odd, so we are in the -PI/4 to 0 primary
+ region. We need to shift our result down by PI/4, and to do this
+ in the mod (4/PI) space we simply subtract 1. */
+ lfd fp11,(L(DPone)-L(anchor))(r9)
+ fsub fp12,fp12,fp11
+
+ /* Now add up all the limbs in order. */
+ fadd fp12,fp12,fp7
+ fadd fp12,fp12,fp8
+ fadd fp12,fp12,fp9
+
+ /* And finally multiply by pi/4. */
+ lfd fp13,(L(pio4)-L(anchor))(r9)
+ fmul fp1,fp12,fp13
+
+ addi r7,r7,1
+ b L(reduced)
+
+L(even_integer):
+ lfd fp11,(L(DPone)-L(anchor))(r9)
+
+ /* Now add up all the limbs in order. */
+ fadd fp12,fp12,fp7
+	fadd	fp12,fp12,fp8
+	fadd	fp12,fp12,fp9
+
+ /* We need to check if the addition of all the limbs resulted in us
+ overflowing 1.0. */
+ fcmpu 0,fp12,fp11
+ bgt L(greater_than_one)
+
+ /* And finally multiply by pi/4. */
+ lfd fp13,(L(pio4)-L(anchor))(r9)
+ fmul fp1,fp12,fp13
+
+ addi r7,r7,1
+ b L(reduced)
+
+L(greater_than_one):
+ /* We did overflow 1.0 when adding up all the limbs. Add 1.0 to our
+ integer, and subtract 1.0 from our result. Since that makes the
+ integer component odd, we need to subtract another 1.0 as
+ explained above. */
+ addi r7,r7,1
+
+ lfd fp11,(L(DPtwo)-L(anchor))(r9)
+ fsub fp12,fp12,fp11
+
+ /* And finally multiply by pi/4. */
+ lfd fp13,(L(pio4)-L(anchor))(r9)
+ fmul fp1,fp12,fp13
+
+ addi r7,r7,1
+ b L(reduced)
+
+ .balign 16
+L(less_2pn5):
+ lis r4,TWO_PN27@h
+ ori r4,r4,TWO_PN27@l
+
+ cmpw r3,r4
+ blt L(less_2pn27)
+
+ /* A simpler Chebyshev approximation is close enough for this range:
+ x+x^3*(SS0+x^2*SS1). */
+
+ lfd fp10,(L(SS0)-L(anchor))(r9)
+ lfd fp11,(L(SS1)-L(anchor))(r9)
+
+ fmul fp2,fp1,fp1 /* x^2 */
+ fmul fp3,fp2,fp1 /* x^3 */
+
+ fmadd fp4,fp2,fp11,fp10 /* SS0+x^2*SS1 */
+ fmadd fp1,fp3,fp4,fp1 /* x+x^3*(SS0+x^2*SS1) */
+
+ frsp fp1,fp1 /* Round to single precision. */
+
+ blr
+
+ .balign 16
+L(less_2pn27):
+ cmpwi r3,0
+ beq L(zero)
+
+ /* Handle some special cases:
+
+ sinf(subnormal) raises inexact/underflow
+ sinf(min_normalized) raises inexact/underflow
+ sinf(normalized) raises inexact. */
+
+ lfd fp2,(L(small)-L(anchor))(r9)
+
+ fmul fp2,fp1,fp2 /* x * small */
+ fsub fp1,fp1,fp2 /* x - x * small */
+
+ frsp fp1,fp1
+
+ blr
+
+ .balign 16
+L(zero):
+ blr
+
+END (__sinf)
+
+ .section .rodata, "a"
+
+ .balign 8
+
+L(anchor):
+
+ /* Chebyshev constants for sin, range -PI/4 - PI/4. */
+L(S0): .8byte 0xbfc5555555551cd9
+L(S1): .8byte 0x3f81111110c2688b
+L(S2): .8byte 0xbf2a019f8b4bd1f9
+L(S3): .8byte 0x3ec71d7264e6b5b4
+L(S4): .8byte 0xbe5a947e1674b58a
+
+ /* Chebyshev constants for sin, range 2^-27 - 2^-5. */
+L(SS0): .8byte 0xbfc555555543d49d
+L(SS1): .8byte 0x3f8110f475cec8c5
+
+ /* Chebyshev constants for cos, range -PI/4 - PI/4. */
+L(C0): .8byte 0xbfdffffffffe98ae
+L(C1): .8byte 0x3fa55555545c50c7
+L(C2): .8byte 0xbf56c16b348b6874
+L(C3): .8byte 0x3efa00eb9ac43cc0
+L(C4): .8byte 0xbe923c97dd8844d7
+
+L(invpio2):
+ .8byte 0x3fe45f306dc9c883 /* 2/PI */
+
+L(invpio4):
+ .8byte 0x3ff45f306dc9c883 /* 4/PI */
+
+L(invpio4_table):
+ .8byte 0x0000000000000000
+ .8byte 0x3ff45f306c000000
+ .8byte 0x3e3c9c882a000000
+ .8byte 0x3c54fe13a8000000
+ .8byte 0x3aaf47d4d0000000
+ .8byte 0x38fbb81b6c000000
+ .8byte 0x3714acc9e0000000
+ .8byte 0x3560e4107c000000
+ .8byte 0x33bca2c756000000
+ .8byte 0x31fbd778ac000000
+ .8byte 0x300b7246e0000000
+ .8byte 0x2e5d2126e8000000
+ .8byte 0x2c97003248000000
+ .8byte 0x2ad77504e8000000
+ .8byte 0x290921cfe0000000
+ .8byte 0x274deb1cb0000000
+ .8byte 0x25829a73e0000000
+ .8byte 0x23fd1046be000000
+ .8byte 0x2224baed10000000
+ .8byte 0x20709d338e000000
+ .8byte 0x1e535a2f80000000
+ .8byte 0x1cef904e64000000
+ .8byte 0x1b0d639830000000
+ .8byte 0x1964ce7d24000000
+ .8byte 0x17b908bf16000000
+
+L(pio4):
+ .8byte 0x3fe921fb54442d18 /* PI/4 */
+
+/* PI/2 as a sum of two doubles. We only use 32 bits of the upper limb
+ to avoid losing significant bits when multiplying with up to
+ (2^22)/(pi/2). */
+L(pio2hi):
+ .8byte 0xbff921fb54400000
+
+L(pio2lo):
+ .8byte 0xbdd0b4611a626332
+
+L(pio2_table):
+ .8byte 0
+ .8byte 0x3ff921fb54442d18 /* 1 * PI/2 */
+ .8byte 0x400921fb54442d18 /* 2 * PI/2 */
+ .8byte 0x4012d97c7f3321d2 /* 3 * PI/2 */
+ .8byte 0x401921fb54442d18 /* 4 * PI/2 */
+ .8byte 0x401f6a7a2955385e /* 5 * PI/2 */
+ .8byte 0x4022d97c7f3321d2 /* 6 * PI/2 */
+ .8byte 0x4025fdbbe9bba775 /* 7 * PI/2 */
+ .8byte 0x402921fb54442d18 /* 8 * PI/2 */
+ .8byte 0x402c463abeccb2bb /* 9 * PI/2 */
+ .8byte 0x402f6a7a2955385e /* 10 * PI/2 */
+
+L(small):
+ .8byte 0x3cd0000000000000 /* 2^-50 */
+
+L(ones):
+ .8byte 0x3ff0000000000000 /* +1.0 */
+ .8byte 0xbff0000000000000 /* -1.0 */
+
+L(DPhalf):
+ .8byte 0x3fe0000000000000 /* 0.5 */
+
+L(DPone):
+ .8byte 0x3ff0000000000000 /* 1.0 */
+
+L(DPtwo):
+ .8byte 0x4000000000000000 /* 2.0 */
+
+libm_alias_float (__sin, sin)
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/w_expf.c b/sysdeps/powerpc/powerpc64/power8/fpu/w_expf.c
new file mode 100644
index 0000000000..b5fe164520
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/w_expf.c
@@ -0,0 +1 @@
+#include <sysdeps/../math/w_expf.c>
diff --git a/sysdeps/powerpc/powerpc64/power8/memchr.S b/sysdeps/powerpc/powerpc64/power8/memchr.S
new file mode 100644
index 0000000000..45ba1b479a
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/memchr.S
@@ -0,0 +1,335 @@
+/* Optimized memchr implementation for POWER8.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* void *[r3] memchr (const void *s [r3], int c [r4], size_t n [r5]) */
+
+/* TODO: change these to the actual instructions when the minimum required
+ binutils allows it. */
+#define MTVRD(v, r) .long (0x7c000167 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define MFVRD(r, v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define VBPERMQ(t, a, b) .long (0x1000054c \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+
+#ifndef MEMCHR
+# define MEMCHR __memchr
+#endif
+/* TODO: change this to .machine power8 when the minimum required binutils
+ allows it. */
+ .machine power7
+ENTRY_TOCLESS (MEMCHR)
+ CALL_MCOUNT 3
+ dcbt 0, r3
+ clrrdi r8, r3, 3
+ insrdi r4, r4, 8, 48
+
+	/* Calculate the last acceptable address and check for possible
+	   addition overflow by using saturated math:
+	   r7 = r3 + r5
+	   r7 |= -(r7 < r3)  */
+ add r7, r3, r5
+ subfc r6, r3, r7
+ subfe r9, r9, r9
+ extsw r6, r9
+ or r7, r7, r6
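
In C, this saturating end-address computation is roughly the following
(a sketch, not the glibc source; the helper name is made up):

#include <stdint.h>

static uintptr_t
saturated_end (uintptr_t s, uintptr_t n)
{
  uintptr_t end = s + n;
  end |= -(uintptr_t) (end < s);  /* clamp to ~0 if s + n wrapped around */
  return end;
}
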
+
+ insrdi r4, r4, 16, 32
+ cmpldi r5, 32
+ li r9, -1
+ rlwinm r6, r3, 3, 26, 28 /* Calculate padding. */
+ insrdi r4, r4, 32, 0
+ mr r10, r7
+ addi r7, r7, -1
+#ifdef __LITTLE_ENDIAN__
+ sld r9, r9, r6
+#else
+ srd r9, r9, r6
+#endif
+ ble L(small_range)
+ andi. r11, r3, 63
+ beq cr0, L(align_qw)
+ clrldi r11, r3, 61
+ ld r12, 0(r8) /* Load doubleword from memory. */
+ cmpb r3, r12, r4 /* Check for BYTEs in DWORD1. */
+ and r3, r3, r9
+ clrldi r6, r7, 61 /* Byte count - 1 in last dword. */
+ clrrdi r7, r7, 3 /* Address of last doubleword. */
+ cmpldi cr7, r3, 0 /* Does r3 indicate we got a hit? */
+ bne cr7, L(done)
+ addi r8, r8, 8
+ addi r5, r5, -8
+ add r5, r5, r11
+
+ /* Are we now aligned to a quadword boundary? */
+ andi. r11, r8, 15
+ beq cr0, L(align_qw)
+
+ /* Handle DWORD to make it QW aligned. */
+ ld r12, 0(r8)
+ cmpb r3, r12, r4
+ cmpldi cr7, r3, 0
+ bne cr7, L(done)
+ addi r5, r5, -8
+ addi r8, r8, 8
+ /* At this point, r8 is 16B aligned. */
+L(align_qw):
+ vspltisb v0, 0
+ /* Precompute vbpermq constant. */
+ vspltisb v10, 3
+ li r0, 0
+ lvsl v11, r0, r0
+ vslb v10, v11, v10
+ MTVRD(v1, r4)
+ vspltb v1, v1, 7
+ cmpldi r5, 64
+ ble L(tail64)
+ /* Are we 64-byte aligned? If so, jump to the vectorized loop.
+ Note: aligning to 64-byte will necessarily slow down performance for
+ strings around 64 bytes in length due to the extra comparisons
+ required to check alignment for the vectorized loop. This is a
+ necessary tradeoff we are willing to take in order to speed up the
+ calculation for larger strings. */
+ andi. r11, r8, 63
+ beq cr0, L(preloop_64B)
+ /* In order to begin the 64B loop, it needs to be 64
+ bytes aligned. So read until it is 64B aligned. */
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ addi r8, r8, 16
+ addi r5, r5, -16
+
+ andi. r11, r8, 63
+ beq cr0, L(preloop_64B)
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ addi r8, r8, 16
+ addi r5, r5, -16
+
+ andi. r11, r8, 63
+ beq cr0, L(preloop_64B)
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ addi r8, r8, 16
+ addi r5, r5, -16
+ /* At this point it should be 64B aligned.
+ Prepare for the 64B loop. */
+L(preloop_64B):
+ cmpldi r5, 64 /* Check if r5 < 64. */
+ ble L(tail64)
+ sub r6, r10, r8
+ srdi r9, r6, 6 /* Number of loop iterations. */
+ mtctr r9 /* Setup the counter. */
+ li r11, 16 /* Load required offsets. */
+ li r9, 32
+ li r7, 48
+
+ /* Handle r5 > 64. Loop over the bytes in strides of 64B. */
+ .align 4
+L(loop):
+ lvx v2, 0, r8 /* Load 4 quadwords. */
+ lvx v3, r8, r11
+	lvx	v4, r8, r9
+	lvx	v5, r8, r7
+ vcmpequb v6, v1, v2
+ vcmpequb v7, v1, v3
+ vcmpequb v8, v1, v4
+ vcmpequb v9, v1, v5
+ vor v11, v6, v7
+ vor v12, v8, v9
+ vor v11, v11, v12 /* Compare and merge into one VR for speed. */
+ vcmpequb. v11, v0, v11
+ bnl cr6, L(found)
+ addi r8, r8, 64 /* Adjust address for the next iteration. */
+ bdnz L(loop)
+ clrldi r5, r6, 58
+
+ /* Handle remainder of 64B loop or r5 > 64. */
+ .align 4
+L(tail64):
+ cmpldi r5, 0
+ beq L(null)
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ addi r8, r8, 16
+ cmpldi cr6, r5, 16
+ ble cr6, L(null)
+ addi r5, r5, -16
+
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ addi r8, r8, 16
+ cmpldi cr6, r5, 16
+ ble cr6, L(null)
+ addi r5, r5, -16
+
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ addi r8, r8, 16
+ cmpldi cr6, r5, 16
+ ble cr6, L(null)
+ addi r5, r5, -16
+
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ li r3, 0
+ blr
+
+ /* Found a match in 64B loop. */
+ .align 4
+L(found):
+ /* Permute the first bit of each byte into bits 48-63. */
+ VBPERMQ(v6, v6, v10)
+ VBPERMQ(v7, v7, v10)
+ VBPERMQ(v8, v8, v10)
+ VBPERMQ(v9, v9, v10)
+ /* Shift each component into its correct position for merging. */
+#ifdef __LITTLE_ENDIAN__
+ vsldoi v7, v7, v7, 2
+ vsldoi v8, v8, v8, 4
+ vsldoi v9, v9, v9, 6
+#else
+ vsldoi v6, v6, v6, 6
+ vsldoi v7, v7, v7, 4
+ vsldoi v8, v8, v8, 2
+#endif
+ /* Merge the results and move to a GPR. */
+ vor v11, v6, v7
+ vor v4, v9, v8
+ vor v4, v11, v4
+ MFVRD(r5, v4)
+#ifdef __LITTLE_ENDIAN__
+ addi r6, r5, -1
+ andc r6, r6, r5
+ popcntd r6, r6
+#else
+ cntlzd r6, r5 /* Count leading zeros before the match. */
+#endif
+	add	r3, r8, r6	/* Compute the address of the match. */
+ blr
+
+ /* Found a match in last 16 bytes. */
+ .align 4
+L(found_16B):
+ /* Permute the first bit of each byte into bits 48-63. */
+ VBPERMQ(v6, v6, v10)
+ /* Shift each component into its correct position for merging. */
+#ifdef __LITTLE_ENDIAN__
+ MFVRD(r7, v6)
+ addi r6, r7, -1
+ andc r6, r6, r7
+ popcntd r6, r6
+#else
+ vsldoi v6, v6, v6, 6
+ MFVRD(r7, v6)
+ cntlzd r6, r7 /* Count leading zeros before the match. */
+#endif
+	add	r3, r8, r6	/* Compute the address of the match. */
+ cmpld r6, r5
+ bltlr
+ li r3, 0
+ blr
+
+ .align 4
+ /* r3 has the output of the cmpb instruction, that is, it contains
+ 0xff in the same position as BYTE in the original
+ doubleword from the string. Use that to calculate the pointer.
+ We need to make sure BYTE is *before* the end of the range. */
+L(done):
+#ifdef __LITTLE_ENDIAN__
+ addi r0, r3, -1
+ andc r0, r0, r3
+ popcntd r0, r0 /* Count trailing zeros. */
+#else
+ cntlzd r0, r3 /* Count leading zeros before the match. */
+#endif
+ cmpld r8, r7 /* Are we on the last dword? */
+ srdi r0, r0, 3 /* Convert leading/trailing zeros to bytes. */
+ add r3, r8, r0
+ cmpld cr7, r0, r6 /* If on the last dword, check byte offset. */
+ bnelr
+ blelr cr7
+ li r3, 0
+ blr
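
A C sketch of turning such a cmpb-style match mask into a byte offset
(hypothetical helper, not the glibc source; mask must be non-zero, which the
branches above guarantee).  The little-endian path above builds the
trailing-zero count out of addi/andc/popcntd:

#include <stdint.h>

/* mask has 0xff in every byte position that matched; the first match is the
   least significant byte on little-endian, the most significant on
   big-endian.  */
static unsigned int
first_match_offset (uint64_t mask)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  return (unsigned int) __builtin_ctzll (mask) >> 3;  /* trailing zeros / 8 */
#else
  return (unsigned int) __builtin_clzll (mask) >> 3;  /* leading zeros / 8 */
#endif
}
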
+
+ .align 4
+L(null):
+ li r3, 0
+ blr
+
+/* Deals with size <= 32. */
+ .align 4
+L(small_range):
+ cmpldi r5, 0
+ beq L(null)
+	ld	r12, 0(r8)	/* Load doubleword from memory. */
+ cmpb r3, r12, r4 /* Check for BYTE in DWORD1. */
+ and r3, r3, r9
+ cmpldi cr7, r3, 0
+ clrldi r6, r7, 61 /* Byte count - 1 in last dword. */
+ clrrdi r7, r7, 3 /* Address of last doubleword. */
+ cmpld r8, r7 /* Are we done already? */
+ bne cr7, L(done)
+ beqlr
+
+ ldu r12, 8(r8)
+ cmpb r3, r12, r4
+ cmpldi cr6, r3, 0
+ cmpld r8, r7
+ bne cr6, L(done) /* Found something. */
+ beqlr /* Hit end of string (length). */
+
+ ldu r12, 8(r8)
+ cmpb r3, r12, r4
+ cmpldi cr6, r3, 0
+ cmpld r8, r7
+ bne cr6, L(done)
+ beqlr
+
+ ldu r12, 8(r8)
+ cmpb r3, r12, r4
+ cmpldi cr6, r3, 0
+ cmpld r8, r7
+ bne cr6, L(done)
+ beqlr
+
+ ldu r12, 8(r8)
+ cmpb r3, r12, r4
+ cmpldi cr6, r3, 0
+ bne cr6, L(done)
+ blr
+
+END (MEMCHR)
+weak_alias (__memchr, memchr)
+libc_hidden_builtin_def (memchr)
diff --git a/sysdeps/powerpc/powerpc64/power8/memcmp.S b/sysdeps/powerpc/powerpc64/power8/memcmp.S
new file mode 100644
index 0000000000..ec4ccf3382
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/memcmp.S
@@ -0,0 +1,1447 @@
+/* Optimized memcmp implementation for POWER7/PowerPC64.
+ Copyright (C) 2010-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* int [r3] memcmp (const char *s1 [r3],
+ const char *s2 [r4],
+ size_t size [r5]) */
+
+/* TODO: change these to the actual instructions when the minimum required
+ binutils allows it. */
+#define MFVRD(r,v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#ifndef MEMCMP
+# define MEMCMP memcmp
+#endif
+ .machine power7
+ENTRY_TOCLESS (MEMCMP, 4)
+ CALL_MCOUNT 3
+
+#define rRTN r3
+#define rSTR1 r3 /* First string arg. */
+#define rSTR2 r4 /* Second string arg. */
+#define rN r5 /* Max string length. */
+#define rWORD1 r6 /* Current word in s1. */
+#define rWORD2 r7 /* Current word in s2. */
+#define rWORD3 r8 /* Next word in s1. */
+#define rWORD4 r9 /* Next word in s2. */
+#define rWORD5 r10 /* Next word in s1. */
+#define rWORD6 r11 /* Next word in s2. */
+
+#define rOFF8 r20 /* 8 bytes offset. */
+#define rOFF16 r21 /* 16 bytes offset. */
+#define rOFF24 r22 /* 24 bytes offset. */
+#define rOFF32	r23	/* 32 bytes offset.  */
+#define rWORD6_SHIFT r24 /* Left rotation temp for rWORD8. */
+#define rWORD4_SHIFT r25 /* Left rotation temp for rWORD6. */
+#define rWORD2_SHIFT r26 /* Left rotation temp for rWORD4. */
+#define rWORD8_SHIFT r27 /* Left rotation temp for rWORD2. */
+#define rSHR r28 /* Unaligned shift right count. */
+#define rSHL r29 /* Unaligned shift left count. */
+#define rWORD7 r30 /* Next word in s1. */
+#define rWORD8 r31 /* Next word in s2. */
+
+#define rWORD8SAVE (-8)
+#define rWORD7SAVE (-16)
+#define rOFF8SAVE (-24)
+#define rOFF16SAVE (-32)
+#define rOFF24SAVE (-40)
+#define rOFF32SAVE (-48)
+#define rSHRSAVE (-56)
+#define rSHLSAVE (-64)
+#define rWORD8SHIFTSAVE (-72)
+#define rWORD2SHIFTSAVE (-80)
+#define rWORD4SHIFTSAVE (-88)
+#define rWORD6SHIFTSAVE (-96)
+
+#ifdef __LITTLE_ENDIAN__
+# define LD ldbrx
+#else
+# define LD ldx
+#endif
+
+ xor r10, rSTR2, rSTR1
+ cmpldi cr6, rN, 0
+ cmpldi cr1, rN, 8
+ clrldi. r0, r10, 61
+ clrldi r12, rSTR1, 61
+ cmpldi cr5, r12, 0
+ beq- cr6, L(zeroLength)
+ dcbt 0, rSTR1
+ dcbt 0, rSTR2
+ /* If less than 8 bytes or not aligned, use the unaligned
+ byte loop. */
+ blt cr1, L(bytealigned)
+ bne L(unalignedqw)
+/* At this point we know both strings have the same alignment and the
+ compare length is at least 8 bytes. r12 contains the low order
+ 3 bits of rSTR1 and cr5 contains the result of the logical compare
+ of r12 to 0. If r12 == 0 then we are already double word
+ aligned and can perform the DW aligned loop. */
+
+ .align 4
+L(samealignment):
+ or r11, rSTR2, rSTR1
+ clrldi. r11, r11, 60
+ beq L(qw_align)
+ /* Try to align to QW else proceed to DW loop. */
+ clrldi. r10, r10, 60
+ bne L(DW)
+ /* For the difference to reach QW alignment, load as DW. */
+ clrrdi rSTR1, rSTR1, 3
+ clrrdi rSTR2, rSTR2, 3
+ subfic r10, r12, 8
+ LD rWORD1, 0, rSTR1
+ LD rWORD2, 0, rSTR2
+ sldi r9, r10, 3
+ subfic r9, r9, 64
+ sld rWORD1, rWORD1, r9
+ sld rWORD2, rWORD2, r9
+ cmpld cr6, rWORD1, rWORD2
+ addi rSTR1, rSTR1, 8
+ addi rSTR2, rSTR2, 8
+ bne cr6, L(ret_diff)
+ subf rN, r10, rN
+
+ cmpld cr6, r11, r12
+ bgt cr6, L(qw_align)
+ LD rWORD1, 0, rSTR1
+ LD rWORD2, 0, rSTR2
+ cmpld cr6, rWORD1, rWORD2
+ addi rSTR1, rSTR1, 8
+ addi rSTR2, rSTR2, 8
+ bne cr6, L(different)
+ cmpldi cr6, rN, 8
+ ble cr6, L(zeroLength)
+ addi rN, rN, -8
+ /* Now both rSTR1 and rSTR2 are aligned to QW. */
+ .align 4
+L(qw_align):
+ vspltisb v0, 0
+ srdi. r6, rN, 6
+ li r8, 16
+ li r10, 32
+ li r11, 48
+ ble cr0, L(lessthan64)
+ mtctr r6
+ vspltisb v8, 0
+ vspltisb v6, 0
+ /* Aligned vector loop. */
+ .align 4
+L(aligned_loop):
+ lvx v4, 0, rSTR1
+ lvx v5, 0, rSTR2
+ vcmpequb. v7, v6, v8
+ bnl cr6, L(different3)
+ lvx v6, rSTR1, r8
+ lvx v8, rSTR2, r8
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different2)
+ lvx v4, rSTR1, r10
+ lvx v5, rSTR2, r10
+ vcmpequb. v7, v6, v8
+ bnl cr6, L(different3)
+ lvx v6, rSTR1, r11
+ lvx v8, rSTR2, r11
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different2)
+ addi rSTR1, rSTR1, 64
+ addi rSTR2, rSTR2, 64
+ bdnz L(aligned_loop)
+ vcmpequb. v7, v6, v8
+ bnl cr6, L(different3)
+ clrldi rN, rN, 58
+ /* Handle remainder for aligned loop. */
+ .align 4
+L(lessthan64):
+ mr r9, rSTR1
+ cmpdi cr6, rN, 0
+ li rSTR1, 0
+ blelr cr6
+ lvx v4, 0, r9
+ lvx v5, 0, rSTR2
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different1)
+ addi rN, rN, -16
+
+ cmpdi cr6, rN, 0
+ blelr cr6
+ lvx v4, r9, r8
+ lvx v5, rSTR2, r8
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different1)
+ addi rN, rN, -16
+
+ cmpdi cr6, rN, 0
+ blelr cr6
+ lvx v4, r9, r10
+ lvx v5, rSTR2, r10
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different1)
+ addi rN, rN, -16
+
+ cmpdi cr6, rN, 0
+ blelr cr6
+ lvx v4, r9, r11
+ lvx v5, rSTR2, r11
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different1)
+ blr
+
+ /* Calculate and return the difference. */
+ .align 4
+L(different1):
+ cmpdi cr6, rN, 16
+ bge cr6, L(different2)
+ /* Discard unwanted bytes. */
+#ifdef __LITTLE_ENDIAN__
+ lvsr v1, 0, rN
+ vperm v4, v4, v0, v1
+ vperm v5, v5, v0, v1
+#else
+ lvsl v1, 0, rN
+ vperm v4, v0, v4, v1
+ vperm v5, v0, v5, v1
+#endif
+ vcmpequb. v7, v4, v5
+ li rRTN, 0
+ bltlr cr6
+ .align 4
+L(different2):
+#ifdef __LITTLE_ENDIAN__
+ /* Reverse bytes for direct comparison. */
+ lvsl v10, r0, r0
+ vspltisb v8, 15
+ vsububm v9, v8, v10
+ vperm v4, v4, v0, v9
+ vperm v5, v5, v0, v9
+#endif
+ MFVRD(r7, v4)
+ MFVRD(r9, v5)
+ cmpld cr6, r7, r9
+ bne cr6, L(ret_diff)
+ /* Difference in second DW. */
+ vsldoi v4, v4, v4, 8
+ vsldoi v5, v5, v5, 8
+ MFVRD(r7, v4)
+ MFVRD(r9, v5)
+ cmpld cr6, r7, r9
+L(ret_diff):
+ li rRTN, 1
+ bgtlr cr6
+ li rRTN, -1
+ blr
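
The return-value computation can be modelled in C roughly as below
(hypothetical helper, not the glibc source): the first differing 8-byte
chunk is compared in memory order as unsigned integers, which is why the
little-endian path byte-reverses the vectors first:

#include <stdint.h>

/* Called only for a chunk that is known to differ.  */
static int
dword_diff (const unsigned char *a, const unsigned char *b)
{
  uint64_t wa = 0, wb = 0;
  for (int i = 0; i < 8; i++)           /* big-endian assembly of the bytes */
    {
      wa = (wa << 8) | a[i];
      wb = (wb << 8) | b[i];
    }
  return wa > wb ? 1 : -1;
}
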
+ .align 4
+L(different3):
+#ifdef __LITTLE_ENDIAN__
+ /* Reverse bytes for direct comparison. */
+ vspltisb v9, 15
+ lvsl v10, r0, r0
+ vsububm v9, v9, v10
+ vperm v6, v6, v0, v9
+ vperm v8, v8, v0, v9
+#endif
+ MFVRD(r7, v6)
+ MFVRD(r9, v8)
+ cmpld cr6, r7, r9
+ bne cr6, L(ret_diff)
+ /* Difference in second DW. */
+ vsldoi v6, v6, v6, 8
+ vsldoi v8, v8, v8, 8
+ MFVRD(r7, v6)
+ MFVRD(r9, v8)
+ cmpld cr6, r7, r9
+ li rRTN, 1
+ bgtlr cr6
+ li rRTN, -1
+ blr
+
+ .align 4
+L(different):
+ cmpldi cr7, rN, 8
+ bgt cr7, L(end)
+ /* Skip unwanted bytes. */
+ sldi r8, rN, 3
+ subfic r8, r8, 64
+ srd rWORD1, rWORD1, r8
+ srd rWORD2, rWORD2, r8
+ cmpld cr6, rWORD1, rWORD2
+ li rRTN, 0
+ beqlr cr6
+L(end):
+ li rRTN, 1
+ bgtlr cr6
+ li rRTN, -1
+ blr
+
+ .align 4
+L(unalignedqw):
+	/* Proceed to the DW unaligned loop if there is a chance of a page cross. */
+ rldicl r9, rSTR1, 0, 52
+ add r9, r9, rN
+ cmpldi cr0, r9, 4096-16
+ bgt cr0, L(unaligned)
+ rldicl r9, rSTR2, 0, 52
+ add r9, r9, rN
+ cmpldi cr0, r9, 4096-16
+ bgt cr0, L(unaligned)
+ li r0, 0
+ li r8, 16
+ vspltisb v0, 0
+ /* Check if rSTR1 is aligned to QW. */
+ andi. r11, rSTR1, 0xF
+ beq L(s1_align)
+
+ /* Compare 16B and align S1 to QW. */
+#ifdef __LITTLE_ENDIAN__
+ lvsr v10, 0, rSTR1 /* Compute mask. */
+ lvsr v6, 0, rSTR2 /* Compute mask. */
+#else
+ lvsl v10, 0, rSTR1 /* Compute mask. */
+ lvsl v6, 0, rSTR2 /* Compute mask. */
+#endif
+ lvx v5, 0, rSTR2
+ lvx v9, rSTR2, r8
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v9, v5, v6
+#else
+ vperm v5, v5, v9, v6
+#endif
+ lvx v4, 0, rSTR1
+ lvx v9, rSTR1, r8
+#ifdef __LITTLE_ENDIAN__
+ vperm v4, v9, v4, v10
+#else
+ vperm v4, v4, v9, v10
+#endif
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different1)
+ cmpldi cr6, rN, 16
+ ble cr6, L(zeroLength)
+ subfic r11, r11, 16
+ subf rN, r11, rN
+ add rSTR1, rSTR1, r11
+ add rSTR2, rSTR2, r11
+
+ /* As s1 is QW aligned prepare for unaligned loop. */
+ .align 4
+L(s1_align):
+#ifdef __LITTLE_ENDIAN__
+ lvsr v6, 0, rSTR2
+#else
+ lvsl v6, 0, rSTR2
+#endif
+ lvx v5, 0, rSTR2
+ srdi. r6, rN, 6
+ li r10, 32
+ li r11, 48
+ ble cr0, L(lessthan64_unalign)
+ mtctr r6
+ li r9, 64
+ /* Unaligned vector loop. */
+ .align 4
+L(unalign_qwloop):
+ lvx v4, 0, rSTR1
+ lvx v10, rSTR2, r8
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v10, v5, v6
+#else
+ vperm v5, v5, v10, v6
+#endif
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different2)
+ vor v5, v10, v10
+ lvx v4, rSTR1, r8
+ lvx v10, rSTR2, r10
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v10, v5, v6
+#else
+ vperm v5, v5, v10, v6
+#endif
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different2)
+ vor v5, v10, v10
+ lvx v4, rSTR1, r10
+ lvx v10, rSTR2, r11
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v10, v5, v6
+#else
+ vperm v5, v5, v10, v6
+#endif
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different2)
+ vor v5, v10, v10
+ lvx v4, rSTR1, r11
+ lvx v10, rSTR2, r9
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v10, v5, v6
+#else
+ vperm v5, v5, v10, v6
+#endif
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different2)
+ vor v5, v10, v10
+ addi rSTR1, rSTR1, 64
+ addi rSTR2, rSTR2, 64
+ bdnz L(unalign_qwloop)
+ clrldi rN, rN, 58
+ /* Handle remainder for unaligned loop. */
+ .align 4
+L(lessthan64_unalign):
+ mr r9, rSTR1
+ cmpdi cr6, rN, 0
+ li rSTR1, 0
+ blelr cr6
+ lvx v4, 0, r9
+ lvx v10, rSTR2, r8
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v10, v5, v6
+#else
+ vperm v5, v5, v10, v6
+#endif
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different1)
+ vor v5, v10, v10
+ addi rN, rN, -16
+
+ cmpdi cr6, rN, 0
+ blelr cr6
+ lvx v4, r9, r8
+ lvx v10, rSTR2, r10
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v10, v5, v6
+#else
+ vperm v5, v5, v10, v6
+#endif
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different1)
+ vor v5, v10, v10
+ addi rN, rN, -16
+
+ cmpdi cr6, rN, 0
+ blelr cr6
+ lvx v4, r9, r10
+ lvx v10, rSTR2, r11
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v10, v5, v6
+#else
+ vperm v5, v5, v10, v6
+#endif
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different1)
+ vor v5, v10, v10
+ addi rN, rN, -16
+
+ cmpdi cr6, rN, 0
+ blelr cr6
+ lvx v4, r9, r11
+ addi r11, r11, 16
+ lvx v10, rSTR2, r11
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v10, v5, v6
+#else
+ vperm v5, v5, v10, v6
+#endif
+ vcmpequb. v7, v5, v4
+ bnl cr6, L(different1)
+ blr
+
+/* Otherwise we know the two strings have the same alignment (but not
+ yet DW). So we force the string addresses to the next lower DW
+ boundary and special case this first DW using shift left to
+ eliminate bits preceding the first byte. Since we want to join the
+ normal (DW aligned) compare loop, starting at the second double word,
+ we need to adjust the length (rN) and special case the loop
+ versioning for the first DW. This ensures that the loop count is
+ correct and the first DW (shifted) is in the expected register pair. */
+ .align 4
+L(DW):
+ std rWORD8, rWORD8SAVE(r1)
+ std rWORD7, rWORD7SAVE(r1)
+ std rOFF8, rOFF8SAVE(r1)
+ std rOFF16, rOFF16SAVE(r1)
+ std rOFF24, rOFF24SAVE(r1)
+ std rOFF32, rOFF32SAVE(r1)
+ cfi_offset(rWORD8, rWORD8SAVE)
+ cfi_offset(rWORD7, rWORD7SAVE)
+ cfi_offset(rOFF8, rOFF8SAVE)
+ cfi_offset(rOFF16, rOFF16SAVE)
+ cfi_offset(rOFF24, rOFF24SAVE)
+ cfi_offset(rOFF32, rOFF32SAVE)
+
+ li rOFF8,8
+ li rOFF16,16
+ li rOFF24,24
+ li rOFF32,32
+ clrrdi rSTR1, rSTR1, 3
+ clrrdi rSTR2, rSTR2, 3
+ beq cr5, L(DWaligned)
+ add rN, rN, r12
+ sldi rWORD6, r12, 3
+ srdi r0, rN, 5 /* Divide by 32. */
+ andi. r12, rN, 24 /* Get the DW remainder. */
+ LD rWORD1, 0, rSTR1
+ LD rWORD2, 0, rSTR2
+ cmpldi cr1, r12, 16
+ cmpldi cr7, rN, 32
+ clrldi rN, rN, 61
+ beq L(dPs4)
+ mtctr r0
+ bgt cr1, L(dPs3)
+ beq cr1, L(dPs2)
+
+/* Remainder is 8. */
+ .align 3
+L(dsP1):
+ sld rWORD5, rWORD1, rWORD6
+ sld rWORD6, rWORD2, rWORD6
+ cmpld cr5, rWORD5, rWORD6
+ blt cr7, L(dP1x)
+/* Do something useful in this cycle since we have to branch anyway. */
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
+ cmpld cr7, rWORD1, rWORD2
+ b L(dP1e)
+/* Remainder is 16. */
+ .align 4
+L(dPs2):
+ sld rWORD5, rWORD1, rWORD6
+ sld rWORD6, rWORD2, rWORD6
+ cmpld cr6, rWORD5, rWORD6
+ blt cr7, L(dP2x)
+/* Do something useful in this cycle since we have to branch anyway. */
+ LD rWORD7, rOFF8, rSTR1
+ LD rWORD8, rOFF8, rSTR2
+ cmpld cr5, rWORD7, rWORD8
+ b L(dP2e)
+/* Remainder is 24. */
+ .align 4
+L(dPs3):
+ sld rWORD3, rWORD1, rWORD6
+ sld rWORD4, rWORD2, rWORD6
+ cmpld cr1, rWORD3, rWORD4
+ b L(dP3e)
+/* Count is a multiple of 32, remainder is 0. */
+ .align 4
+L(dPs4):
+ mtctr r0
+ sld rWORD1, rWORD1, rWORD6
+ sld rWORD2, rWORD2, rWORD6
+ cmpld cr7, rWORD1, rWORD2
+ b L(dP4e)
+
+/* At this point we know both strings are double word aligned and the
+ compare length is at least 8 bytes. */
+ .align 4
+L(DWaligned):
+ andi. r12, rN, 24 /* Get the DW remainder. */
+ srdi r0, rN, 5 /* Divide by 32. */
+ cmpldi cr1, r12, 16
+ cmpldi cr7, rN, 32
+ clrldi rN, rN, 61
+ beq L(dP4)
+ bgt cr1, L(dP3)
+ beq cr1, L(dP2)
+
+/* Remainder is 8. */
+ .align 4
+L(dP1):
+ mtctr r0
+/* Normally we'd use rWORD7/rWORD8 here, but since we might exit early
+ (8-15 byte compare), we want to use only volatile registers. This
+ means we can avoid restoring non-volatile registers since we did not
+ change any on the early exit path. The key here is the non-early
+ exit path only cares about the condition code (cr5), not about which
+ register pair was used. */
+ LD rWORD5, 0, rSTR1
+ LD rWORD6, 0, rSTR2
+ cmpld cr5, rWORD5, rWORD6
+ blt cr7, L(dP1x)
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
+ cmpld cr7, rWORD1, rWORD2
+L(dP1e):
+ LD rWORD3, rOFF16, rSTR1
+ LD rWORD4, rOFF16, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+ LD rWORD5, rOFF24, rSTR1
+ LD rWORD6, rOFF24, rSTR2
+ cmpld cr6, rWORD5, rWORD6
+ bne cr5, L(dLcr5x)
+ bne cr7, L(dLcr7x)
+
+ LD rWORD7, rOFF32, rSTR1
+ LD rWORD8, rOFF32, rSTR2
+ addi rSTR1, rSTR1, 32
+ addi rSTR2, rSTR2, 32
+ bne cr1, L(dLcr1)
+ cmpld cr5, rWORD7, rWORD8
+ bdnz L(dLoop)
+ bne cr6, L(dLcr6)
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+ .align 3
+L(dP1x):
+ sldi. r12, rN, 3
+ bne cr5, L(dLcr5x)
+ subfic rN, r12, 64 /* Shift count is 64 - (rN * 8). */
+ bne L(d00)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ li rRTN, 0
+ blr
+
+/* Remainder is 16. */
+ .align 4
+L(dP2):
+ mtctr r0
+ LD rWORD5, 0, rSTR1
+ LD rWORD6, 0, rSTR2
+ cmpld cr6, rWORD5, rWORD6
+ blt cr7, L(dP2x)
+ LD rWORD7, rOFF8, rSTR1
+ LD rWORD8, rOFF8, rSTR2
+ cmpld cr5, rWORD7, rWORD8
+L(dP2e):
+ LD rWORD1, rOFF16, rSTR1
+ LD rWORD2, rOFF16, rSTR2
+ cmpld cr7, rWORD1, rWORD2
+ LD rWORD3, rOFF24, rSTR1
+ LD rWORD4, rOFF24, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+ addi rSTR1, rSTR1, 8
+ addi rSTR2, rSTR2, 8
+ bne cr6, L(dLcr6)
+ bne cr5, L(dLcr5)
+ b L(dLoop2)
+ .align 4
+L(dP2x):
+ LD rWORD3, rOFF8, rSTR1
+ LD rWORD4, rOFF8, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+ sldi. r12, rN, 3
+ bne cr6, L(dLcr6x)
+ addi rSTR1, rSTR1, 8
+ addi rSTR2, rSTR2, 8
+ bne cr1, L(dLcr1x)
+ subfic rN, r12, 64 /* Shift count is 64 - (rN * 8). */
+ bne L(d00)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ li rRTN, 0
+ blr
+
+/* Remainder is 24. */
+ .align 4
+L(dP3):
+ mtctr r0
+ LD rWORD3, 0, rSTR1
+ LD rWORD4, 0, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+L(dP3e):
+ LD rWORD5, rOFF8, rSTR1
+ LD rWORD6, rOFF8, rSTR2
+ cmpld cr6, rWORD5, rWORD6
+ blt cr7, L(dP3x)
+ LD rWORD7, rOFF16, rSTR1
+ LD rWORD8, rOFF16, rSTR2
+ cmpld cr5, rWORD7, rWORD8
+ LD rWORD1, rOFF24, rSTR1
+ LD rWORD2, rOFF24, rSTR2
+ cmpld cr7, rWORD1, rWORD2
+ addi rSTR1, rSTR1, 16
+ addi rSTR2, rSTR2, 16
+ bne cr1, L(dLcr1)
+ bne cr6, L(dLcr6)
+ b L(dLoop1)
+/* Again we are on an early exit path (24-31 byte compare), so we want
+   to use only volatile registers and avoid restoring non-volatile
+   registers. */
+ .align 4
+L(dP3x):
+ LD rWORD1, rOFF16, rSTR1
+ LD rWORD2, rOFF16, rSTR2
+ cmpld cr7, rWORD1, rWORD2
+ sldi. r12, rN, 3
+ bne cr1, L(dLcr1x)
+ addi rSTR1, rSTR1, 16
+ addi rSTR2, rSTR2, 16
+ bne cr6, L(dLcr6x)
+ subfic rN, r12, 64 /* Shift count is 64 - (rN * 8). */
+ bne cr7, L(dLcr7x)
+ bne L(d00)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ li rRTN, 0
+ blr
+
+/* Count is a multiple of 32, remainder is 0. */
+ .align 4
+L(dP4):
+ mtctr r0
+ LD rWORD1, 0, rSTR1
+ LD rWORD2, 0, rSTR2
+ cmpld cr7, rWORD1, rWORD2
+L(dP4e):
+ LD rWORD3, rOFF8, rSTR1
+ LD rWORD4, rOFF8, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+ LD rWORD5, rOFF16, rSTR1
+ LD rWORD6, rOFF16, rSTR2
+ cmpld cr6, rWORD5, rWORD6
+ LD rWORD7, rOFF24, rSTR1
+ LD rWORD8, rOFF24, rSTR2
+ addi rSTR1, rSTR1, 24
+ addi rSTR2, rSTR2, 24
+ cmpld cr5, rWORD7, rWORD8
+ bne cr7, L(dLcr7)
+ bne cr1, L(dLcr1)
+ bdz- L(d24) /* Adjust CTR as we start with +4. */
+/* This is the primary loop. */
+ .align 4
+L(dLoop):
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+ bne cr6, L(dLcr6)
+L(dLoop1):
+ LD rWORD3, rOFF16, rSTR1
+ LD rWORD4, rOFF16, rSTR2
+ cmpld cr6, rWORD5, rWORD6
+ bne cr5, L(dLcr5)
+L(dLoop2):
+ LD rWORD5, rOFF24, rSTR1
+ LD rWORD6, rOFF24, rSTR2
+ cmpld cr5, rWORD7, rWORD8
+ bne cr7, L(dLcr7)
+L(dLoop3):
+ LD rWORD7, rOFF32, rSTR1
+ LD rWORD8, rOFF32, rSTR2
+ addi rSTR1, rSTR1, 32
+ addi rSTR2, rSTR2, 32
+ bne cr1, L(dLcr1)
+ cmpld cr7, rWORD1, rWORD2
+ bdnz L(dLoop)
+
+L(dL4):
+ cmpld cr1, rWORD3, rWORD4
+ bne cr6, L(dLcr6)
+ cmpld cr6, rWORD5, rWORD6
+ bne cr5, L(dLcr5)
+ cmpld cr5, rWORD7, rWORD8
+L(d44):
+ bne cr7, L(dLcr7)
+L(d34):
+ bne cr1, L(dLcr1)
+L(d24):
+ bne cr6, L(dLcr6)
+L(d14):
+ sldi. r12, rN, 3
+ bne cr5, L(dLcr5)
+L(d04):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+ subfic rN, r12, 64 /* Shift count is 64 - (rN * 8). */
+ beq L(duzeroLength)
+/* At this point we have a remainder of 1 to 7 bytes to compare. Since
+ we are aligned it is safe to load the whole double word, and use
+ shift right double to eliminate bits beyond the compare length. */
+L(d00):
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
+ srd rWORD1, rWORD1, rN
+ srd rWORD2, rWORD2, rN
+ cmpld cr7, rWORD1, rWORD2
+ bne cr7, L(dLcr7x)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ li rRTN, 0
+ blr
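
In C, this tail comparison is roughly the sketch below (hypothetical helper,
not the glibc source).  w1/w2 hold the loaded doublewords with the first
memory byte in the most significant position, which is what the LD macro
(ldx on big-endian, ldbrx on little-endian) arranges:

#include <stdint.h>

/* rem is the number of remaining bytes, 1..7.  */
static int
tail_cmp (uint64_t w1, uint64_t w2, unsigned int rem)
{
  unsigned int shift = 64 - 8 * rem;  /* bits beyond the compare length */
  w1 >>= shift;
  w2 >>= shift;
  return w1 == w2 ? 0 : (w1 > w2 ? 1 : -1);
}
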
+
+ .align 4
+L(dLcr7):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+L(dLcr7x):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ li rRTN, 1
+ bgtlr cr7
+ li rRTN, -1
+ blr
+ .align 4
+L(dLcr1):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+L(dLcr1x):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ li rRTN, 1
+ bgtlr cr1
+ li rRTN, -1
+ blr
+ .align 4
+L(dLcr6):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+L(dLcr6x):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ li rRTN, 1
+ bgtlr cr6
+ li rRTN, -1
+ blr
+ .align 4
+L(dLcr5):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+L(dLcr5x):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ li rRTN, 1
+ bgtlr cr5
+ li rRTN, -1
+ blr
+
+ .align 4
+L(bytealigned):
+ mtctr rN
+
+/* We need to prime this loop. This loop is swing modulo scheduled
+ to avoid pipe delays. The dependent instruction latencies (load to
+   compare to conditional branch) are 2 to 3 cycles.  In this loop each
+ dispatch group ends in a branch and takes 1 cycle. Effectively
+ the first iteration of the loop only serves to load operands and
+ branches based on compares are delayed until the next loop.
+
+ So we must precondition some registers and condition codes so that
+ we don't exit the loop early on the first iteration. */
+
+ lbz rWORD1, 0(rSTR1)
+ lbz rWORD2, 0(rSTR2)
+ bdz L(b11)
+ cmpld cr7, rWORD1, rWORD2
+ lbz rWORD3, 1(rSTR1)
+ lbz rWORD4, 1(rSTR2)
+ bdz L(b12)
+ cmpld cr1, rWORD3, rWORD4
+ lbzu rWORD5, 2(rSTR1)
+ lbzu rWORD6, 2(rSTR2)
+ bdz L(b13)
+ .align 4
+L(bLoop):
+ lbzu rWORD1, 1(rSTR1)
+ lbzu rWORD2, 1(rSTR2)
+ bne cr7, L(bLcr7)
+
+ cmpld cr6, rWORD5, rWORD6
+ bdz L(b3i)
+
+ lbzu rWORD3, 1(rSTR1)
+ lbzu rWORD4, 1(rSTR2)
+ bne cr1, L(bLcr1)
+
+ cmpld cr7, rWORD1, rWORD2
+ bdz L(b2i)
+
+ lbzu rWORD5, 1(rSTR1)
+ lbzu rWORD6, 1(rSTR2)
+ bne cr6, L(bLcr6)
+
+ cmpld cr1, rWORD3, rWORD4
+ bdnz L(bLoop)
+
+/* We speculatively load bytes before we have tested the previous
+   bytes.  But we must avoid overrunning the length (in the ctr) to
+   prevent these speculative loads from causing a segfault.  In that
+   case the loop will exit early (before all pending bytes are
+   tested), so we must complete the pending operations before
+   returning.  */
+L(b1i):
+ bne cr7, L(bLcr7)
+ bne cr1, L(bLcr1)
+ b L(bx56)
+ .align 4
+L(b2i):
+ bne cr6, L(bLcr6)
+ bne cr7, L(bLcr7)
+ b L(bx34)
+ .align 4
+L(b3i):
+ bne cr1, L(bLcr1)
+ bne cr6, L(bLcr6)
+ b L(bx12)
+ .align 4
+L(bLcr7):
+ li rRTN, 1
+ bgtlr cr7
+ li rRTN, -1
+ blr
+L(bLcr1):
+ li rRTN, 1
+ bgtlr cr1
+ li rRTN, -1
+ blr
+L(bLcr6):
+ li rRTN, 1
+ bgtlr cr6
+ li rRTN, -1
+ blr
+
+L(b13):
+ bne cr7, L(bx12)
+ bne cr1, L(bx34)
+L(bx56):
+ sub rRTN, rWORD5, rWORD6
+ blr
+ nop
+L(b12):
+ bne cr7, L(bx12)
+L(bx34):
+ sub rRTN, rWORD3, rWORD4
+ blr
+L(b11):
+L(bx12):
+ sub rRTN, rWORD1, rWORD2
+ blr
+
+ .align 4
+L(zeroLength):
+ li rRTN, 0
+ blr
+
+ .align 4
+/* At this point we know the strings have different alignment and the
+   compare length is at least 8 bytes.  r12 contains the low order
+   3 bits of rSTR1 and cr5 contains the result of the logical compare
+   of r12 to 0.  If r12 == 0 then rSTR1 is double word
+   aligned and we can perform the DWunaligned loop.
+
+   Otherwise we know that rSTR1 is not yet DW aligned.
+   So we can force the string addresses to the next lower DW
+   boundary and special case this first DW using shift left to
+   eliminate bits preceding the first byte.  Since we want to join the
+   normal (DWaligned) compare loop, starting at the second double word,
+   we need to adjust the length (rN) and special case the loop
+   versioning for the first DW.  This ensures that the loop count is
+   correct and the first DW (shifted) is in the expected register pair. */
+L(unaligned):
+ std rWORD8, rWORD8SAVE(r1)
+ std rWORD7, rWORD7SAVE(r1)
+ std rOFF8, rOFF8SAVE(r1)
+ std rOFF16, rOFF16SAVE(r1)
+ std rOFF24, rOFF24SAVE(r1)
+ std rOFF32, rOFF32SAVE(r1)
+ cfi_offset(rWORD8, rWORD8SAVE)
+ cfi_offset(rWORD7, rWORD7SAVE)
+ cfi_offset(rOFF8, rOFF8SAVE)
+ cfi_offset(rOFF16, rOFF16SAVE)
+ cfi_offset(rOFF24, rOFF24SAVE)
+ cfi_offset(rOFF32, rOFF32SAVE)
+ li rOFF8,8
+ li rOFF16,16
+ li rOFF24,24
+ li rOFF32,32
+ std rSHL, rSHLSAVE(r1)
+ cfi_offset(rSHL, rSHLSAVE)
+ clrldi rSHL, rSTR2, 61
+ beq cr6, L(duzeroLength)
+ std rSHR, rSHRSAVE(r1)
+ cfi_offset(rSHR, rSHRSAVE)
+ beq cr5, L(DWunaligned)
+ std rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
+ cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
+/* Adjust the logical start of rSTR2 to compensate for the extra bits
+ in the 1st rSTR1 DW. */
+ sub rWORD8_SHIFT, rSTR2, r12
+/* But do not attempt to address the DW before that DW that contains
+ the actual start of rSTR2. */
+ clrrdi rSTR2, rSTR2, 3
+ std rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
+/* Compute the left/right shift counts for the unaligned rSTR2,
+ compensating for the logical (DW aligned) start of rSTR1. */
+ clrldi rSHL, rWORD8_SHIFT, 61
+ clrrdi rSTR1, rSTR1, 3
+ std rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
+ sldi rSHL, rSHL, 3
+ cmpld cr5, rWORD8_SHIFT, rSTR2
+ add rN, rN, r12
+ sldi rWORD6, r12, 3
+ std rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
+ cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
+ cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
+ subfic rSHR, rSHL, 64
+ srdi r0, rN, 5 /* Divide by 32. */
+ andi. r12, rN, 24 /* Get the DW remainder. */
+/* We normally need to load 2 DWs to start the unaligned rSTR2, but in
+ this special case those bits may be discarded anyway. Also we
+ must avoid loading a DW where none of the bits are part of rSTR2 as
+ this may cross a page boundary and cause a page fault. */
+ li rWORD8, 0
+ blt cr5, L(dus0)
+ LD rWORD8, 0, rSTR2
+ addi rSTR2, rSTR2, 8
+ sld rWORD8, rWORD8, rSHL
+
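
The shift/or merging used throughout the duP*/duLoop code below can be
sketched in C as follows (hypothetical helper, not the glibc source; it
assumes 0 < shl < 64, i.e. rSTR2 really is unaligned, which this path
guarantees).  prev and cur are two consecutive aligned doublewords of s2 with
the first memory byte in the most significant position, as the LD macro
arranges:

#include <stdint.h>

static uint64_t
merge_unaligned (uint64_t prev, uint64_t cur, unsigned int shl)
{
  unsigned int shr = 64 - shl;          /* rSHR = 64 - rSHL */
  return (prev << shl) | (cur >> shr);  /* logical doubleword of s2 */
}
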
+L(dus0):
+ LD rWORD1, 0, rSTR1
+ LD rWORD2, 0, rSTR2
+ cmpldi cr1, r12, 16
+ cmpldi cr7, rN, 32
+ srd r12, rWORD2, rSHR
+ clrldi rN, rN, 61
+ beq L(duPs4)
+ mtctr r0
+ or rWORD8, r12, rWORD8
+ bgt cr1, L(duPs3)
+ beq cr1, L(duPs2)
+
+/* Remainder is 8. */
+ .align 4
+L(dusP1):
+ sld rWORD8_SHIFT, rWORD2, rSHL
+ sld rWORD7, rWORD1, rWORD6
+ sld rWORD8, rWORD8, rWORD6
+ bge cr7, L(duP1e)
+/* At this point we exit early with the first double word compare
+ complete and remainder of 0 to 7 bytes. See L(du14) for details on
+ how we handle the remaining bytes. */
+ cmpld cr5, rWORD7, rWORD8
+ sldi. rN, rN, 3
+ bne cr5, L(duLcr5)
+ cmpld cr7, rN, rSHR
+ beq L(duZeroReturn)
+ li r0, 0
+ ble cr7, L(dutrim)
+ LD rWORD2, rOFF8, rSTR2
+ srd r0, rWORD2, rSHR
+ b L(dutrim)
+/* Remainder is 16. */
+ .align 4
+L(duPs2):
+ sld rWORD6_SHIFT, rWORD2, rSHL
+ sld rWORD5, rWORD1, rWORD6
+ sld rWORD6, rWORD8, rWORD6
+ b L(duP2e)
+/* Remainder is 24. */
+ .align 4
+L(duPs3):
+ sld rWORD4_SHIFT, rWORD2, rSHL
+ sld rWORD3, rWORD1, rWORD6
+ sld rWORD4, rWORD8, rWORD6
+ b L(duP3e)
+/* Count is a multiple of 32, remainder is 0. */
+ .align 4
+L(duPs4):
+ mtctr r0
+ or rWORD8, r12, rWORD8
+ sld rWORD2_SHIFT, rWORD2, rSHL
+ sld rWORD1, rWORD1, rWORD6
+ sld rWORD2, rWORD8, rWORD6
+ b L(duP4e)
+
+/* At this point we know rSTR1 is double word aligned and the
+ compare length is at least 8 bytes. */
+ .align 4
+L(DWunaligned):
+ std rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
+ clrrdi rSTR2, rSTR2, 3
+ std rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
+ srdi r0, rN, 5 /* Divide by 32. */
+ std rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
+ andi. r12, rN, 24 /* Get the DW remainder. */
+ std rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
+ cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
+ cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
+ cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
+ sldi rSHL, rSHL, 3
+ LD rWORD6, 0, rSTR2
+ LD rWORD8, rOFF8, rSTR2
+ addi rSTR2, rSTR2, 8
+ cmpldi cr1, r12, 16
+ cmpldi cr7, rN, 32
+ clrldi rN, rN, 61
+ subfic rSHR, rSHL, 64
+ sld rWORD6_SHIFT, rWORD6, rSHL
+ beq L(duP4)
+ mtctr r0
+ bgt cr1, L(duP3)
+ beq cr1, L(duP2)
+
+/* Remainder is 8. */
+ .align 4
+L(duP1):
+ srd r12, rWORD8, rSHR
+ LD rWORD7, 0, rSTR1
+ sld rWORD8_SHIFT, rWORD8, rSHL
+ or rWORD8, r12, rWORD6_SHIFT
+ blt cr7, L(duP1x)
+L(duP1e):
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
+ cmpld cr5, rWORD7, rWORD8
+ srd r0, rWORD2, rSHR
+ sld rWORD2_SHIFT, rWORD2, rSHL
+ or rWORD2, r0, rWORD8_SHIFT
+ LD rWORD3, rOFF16, rSTR1
+ LD rWORD4, rOFF16, rSTR2
+ cmpld cr7, rWORD1, rWORD2
+ srd r12, rWORD4, rSHR
+ sld rWORD4_SHIFT, rWORD4, rSHL
+ bne cr5, L(duLcr5)
+ or rWORD4, r12, rWORD2_SHIFT
+ LD rWORD5, rOFF24, rSTR1
+ LD rWORD6, rOFF24, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+ srd r0, rWORD6, rSHR
+ sld rWORD6_SHIFT, rWORD6, rSHL
+ bne cr7, L(duLcr7)
+ or rWORD6, r0, rWORD4_SHIFT
+ cmpld cr6, rWORD5, rWORD6
+ b L(duLoop3)
+ .align 4
+/* At this point we exit early with the first double word compare
+ complete and remainder of 0 to 7 bytes. See L(du14) for details on
+ how we handle the remaining bytes. */
+L(duP1x):
+ cmpld cr5, rWORD7, rWORD8
+ sldi. rN, rN, 3
+ bne cr5, L(duLcr5)
+ cmpld cr7, rN, rSHR
+ beq L(duZeroReturn)
+ li r0, 0
+ ble cr7, L(dutrim)
+ LD rWORD2, rOFF8, rSTR2
+ srd r0, rWORD2, rSHR
+ b L(dutrim)
+/* Remainder is 16. */
+ .align 4
+L(duP2):
+ srd r0, rWORD8, rSHR
+ LD rWORD5, 0, rSTR1
+ or rWORD6, r0, rWORD6_SHIFT
+ sld rWORD6_SHIFT, rWORD8, rSHL
+L(duP2e):
+ LD rWORD7, rOFF8, rSTR1
+ LD rWORD8, rOFF8, rSTR2
+ cmpld cr6, rWORD5, rWORD6
+ srd r12, rWORD8, rSHR
+ sld rWORD8_SHIFT, rWORD8, rSHL
+ or rWORD8, r12, rWORD6_SHIFT
+ blt cr7, L(duP2x)
+ LD rWORD1, rOFF16, rSTR1
+ LD rWORD2, rOFF16, rSTR2
+ cmpld cr5, rWORD7, rWORD8
+ bne cr6, L(duLcr6)
+ srd r0, rWORD2, rSHR
+ sld rWORD2_SHIFT, rWORD2, rSHL
+ or rWORD2, r0, rWORD8_SHIFT
+ LD rWORD3, rOFF24, rSTR1
+ LD rWORD4, rOFF24, rSTR2
+ cmpld cr7, rWORD1, rWORD2
+ bne cr5, L(duLcr5)
+ srd r12, rWORD4, rSHR
+ sld rWORD4_SHIFT, rWORD4, rSHL
+ or rWORD4, r12, rWORD2_SHIFT
+ addi rSTR1, rSTR1, 8
+ addi rSTR2, rSTR2, 8
+ cmpld cr1, rWORD3, rWORD4
+ b L(duLoop2)
+ .align 4
+L(duP2x):
+ cmpld cr5, rWORD7, rWORD8
+ addi rSTR1, rSTR1, 8
+ addi rSTR2, rSTR2, 8
+ bne cr6, L(duLcr6)
+ sldi. rN, rN, 3
+ bne cr5, L(duLcr5)
+ cmpld cr7, rN, rSHR
+ beq L(duZeroReturn)
+ li r0, 0
+ ble cr7, L(dutrim)
+ LD rWORD2, rOFF8, rSTR2
+ srd r0, rWORD2, rSHR
+ b L(dutrim)
+
+/* Remainder is 24. */
+ .align 4
+L(duP3):
+ srd r12, rWORD8, rSHR
+ LD rWORD3, 0, rSTR1
+ sld rWORD4_SHIFT, rWORD8, rSHL
+ or rWORD4, r12, rWORD6_SHIFT
+L(duP3e):
+ LD rWORD5, rOFF8, rSTR1
+ LD rWORD6, rOFF8, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+ srd r0, rWORD6, rSHR
+ sld rWORD6_SHIFT, rWORD6, rSHL
+ or rWORD6, r0, rWORD4_SHIFT
+ LD rWORD7, rOFF16, rSTR1
+ LD rWORD8, rOFF16, rSTR2
+ cmpld cr6, rWORD5, rWORD6
+ bne cr1, L(duLcr1)
+ srd r12, rWORD8, rSHR
+ sld rWORD8_SHIFT, rWORD8, rSHL
+ or rWORD8, r12, rWORD6_SHIFT
+ blt cr7, L(duP3x)
+ LD rWORD1, rOFF24, rSTR1
+ LD rWORD2, rOFF24, rSTR2
+ cmpld cr5, rWORD7, rWORD8
+ bne cr6, L(duLcr6)
+ srd r0, rWORD2, rSHR
+ sld rWORD2_SHIFT, rWORD2, rSHL
+ or rWORD2, r0, rWORD8_SHIFT
+ addi rSTR1, rSTR1, 16
+ addi rSTR2, rSTR2, 16
+ cmpld cr7, rWORD1, rWORD2
+ b L(duLoop1)
+ .align 4
+L(duP3x):
+ addi rSTR1, rSTR1, 16
+ addi rSTR2, rSTR2, 16
+ cmpld cr5, rWORD7, rWORD8
+ bne cr6, L(duLcr6)
+ sldi. rN, rN, 3
+ bne cr5, L(duLcr5)
+ cmpld cr7, rN, rSHR
+ beq L(duZeroReturn)
+ li r0, 0
+ ble cr7, L(dutrim)
+ LD rWORD2, rOFF8, rSTR2
+ srd r0, rWORD2, rSHR
+ b L(dutrim)
+
+/* Count is a multiple of 32, remainder is 0. */
+ .align 4
+L(duP4):
+ mtctr r0
+ srd r0, rWORD8, rSHR
+ LD rWORD1, 0, rSTR1
+ sld rWORD2_SHIFT, rWORD8, rSHL
+ or rWORD2, r0, rWORD6_SHIFT
+L(duP4e):
+ LD rWORD3, rOFF8, rSTR1
+ LD rWORD4, rOFF8, rSTR2
+ cmpld cr7, rWORD1, rWORD2
+ srd r12, rWORD4, rSHR
+ sld rWORD4_SHIFT, rWORD4, rSHL
+ or rWORD4, r12, rWORD2_SHIFT
+ LD rWORD5, rOFF16, rSTR1
+ LD rWORD6, rOFF16, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+ bne cr7, L(duLcr7)
+ srd r0, rWORD6, rSHR
+ sld rWORD6_SHIFT, rWORD6, rSHL
+ or rWORD6, r0, rWORD4_SHIFT
+ LD rWORD7, rOFF24, rSTR1
+ LD rWORD8, rOFF24, rSTR2
+ addi rSTR1, rSTR1, 24
+ addi rSTR2, rSTR2, 24
+ cmpld cr6, rWORD5, rWORD6
+ bne cr1, L(duLcr1)
+ srd r12, rWORD8, rSHR
+ sld rWORD8_SHIFT, rWORD8, rSHL
+ or rWORD8, r12, rWORD6_SHIFT
+ cmpld cr5, rWORD7, rWORD8
+ bdz L(du24) /* Adjust CTR as we start with +4. */
+/* This is the primary loop. */
+ .align 4
+L(duLoop):
+ LD rWORD1, rOFF8, rSTR1
+ LD rWORD2, rOFF8, rSTR2
+ cmpld cr1, rWORD3, rWORD4
+ bne cr6, L(duLcr6)
+ srd r0, rWORD2, rSHR
+ sld rWORD2_SHIFT, rWORD2, rSHL
+ or rWORD2, r0, rWORD8_SHIFT
+L(duLoop1):
+ LD rWORD3, rOFF16, rSTR1
+ LD rWORD4, rOFF16, rSTR2
+ cmpld cr6, rWORD5, rWORD6
+ bne cr5, L(duLcr5)
+ srd r12, rWORD4, rSHR
+ sld rWORD4_SHIFT, rWORD4, rSHL
+ or rWORD4, r12, rWORD2_SHIFT
+L(duLoop2):
+ LD rWORD5, rOFF24, rSTR1
+ LD rWORD6, rOFF24, rSTR2
+ cmpld cr5, rWORD7, rWORD8
+ bne cr7, L(duLcr7)
+ srd r0, rWORD6, rSHR
+ sld rWORD6_SHIFT, rWORD6, rSHL
+ or rWORD6, r0, rWORD4_SHIFT
+L(duLoop3):
+ LD rWORD7, rOFF32, rSTR1
+ LD rWORD8, rOFF32, rSTR2
+ addi rSTR1, rSTR1, 32
+ addi rSTR2, rSTR2, 32
+ cmpld cr7, rWORD1, rWORD2
+ bne cr1, L(duLcr1)
+ srd r12, rWORD8, rSHR
+ sld rWORD8_SHIFT, rWORD8, rSHL
+ or rWORD8, r12, rWORD6_SHIFT
+ bdnz L(duLoop)
+
+L(duL4):
+ cmpld cr1, rWORD3, rWORD4
+ bne cr6, L(duLcr6)
+ cmpld cr6, rWORD5, rWORD6
+ bne cr5, L(duLcr5)
+ cmpld cr5, rWORD7, rWORD8
+L(du44):
+ bne cr7, L(duLcr7)
+L(du34):
+ bne cr1, L(duLcr1)
+L(du24):
+ bne cr6, L(duLcr6)
+L(du14):
+ sldi. rN, rN, 3
+ bne cr5, L(duLcr5)
+/* At this point we have a remainder of 1 to 7 bytes to compare. We use
+ shift right double to eliminate bits beyond the compare length.
+
+ However it may not be safe to load rWORD2 which may be beyond the
+ string length. So we compare the bit length of the remainder to
+ the right shift count (rSHR). If the bit count is less than or equal
+ we do not need to load rWORD2 (all significant bits are already in
+ rWORD8_SHIFT). */
+ cmpld cr7, rN, rSHR
+ beq L(duZeroReturn)
+ li r0, 0
+ ble cr7, L(dutrim)
+ LD rWORD2, rOFF8, rSTR2
+ srd r0, rWORD2, rSHR
+ .align 4
+L(dutrim):
+ LD rWORD1, rOFF8, rSTR1
+ ld rWORD8, -8(r1)
+ subfic rN, rN, 64 /* Shift count is 64 - (rN * 8). */
+ or rWORD2, r0, rWORD8_SHIFT
+ ld rWORD7, rWORD7SAVE(r1)
+ ld rSHL, rSHLSAVE(r1)
+ srd rWORD1, rWORD1, rN
+ srd rWORD2, rWORD2, rN
+ ld rSHR, rSHRSAVE(r1)
+ ld rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
+ li rRTN, 0
+ cmpld cr7, rWORD1, rWORD2
+ ld rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
+ ld rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
+ beq cr7, L(dureturn24)
+ li rRTN, 1
+ ld rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ bgtlr cr7
+ li rRTN, -1
+ blr
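The L(du14)/L(dutrim) tail handling above can be summarized in C: after the loop only the low rN*8 bits of the final doublewords matter, and rWORD2 is loaded only when the remaining bit count exceeds the right-shift count rSHR. A rough scalar sketch of the trim compare follows; the names are illustrative, not part of the patch.

#include <stdint.h>

/* Compare the first 'rem' (1..7) significant bytes of two doublewords,
   discarding the rest by shifting, as L(dutrim) does with srd after
   computing 64 - rN*8.  */
static int
trim_compare (uint64_t w1, uint64_t w2, unsigned int rem)
{
  unsigned int shift = 64 - rem * 8;
  w1 >>= shift;
  w2 >>= shift;
  if (w1 == w2)
    return 0;
  return w1 > w2 ? 1 : -1;
}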
+ .align 4
+L(duLcr7):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+ li rRTN, 1
+ bgt cr7, L(dureturn29)
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
+ li rRTN, -1
+ b L(dureturn27)
+ .align 4
+L(duLcr1):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+ li rRTN, 1
+ bgt cr1, L(dureturn29)
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
+ li rRTN, -1
+ b L(dureturn27)
+ .align 4
+L(duLcr6):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+ li rRTN, 1
+ bgt cr6, L(dureturn29)
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
+ li rRTN, -1
+ b L(dureturn27)
+ .align 4
+L(duLcr5):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+ li rRTN, 1
+ bgt cr5, L(dureturn29)
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
+ li rRTN, -1
+ b L(dureturn27)
+
+ .align 3
+L(duZeroReturn):
+ li rRTN, 0
+ .align 4
+L(dureturn):
+ ld rWORD8, rWORD8SAVE(r1)
+ ld rWORD7, rWORD7SAVE(r1)
+L(dureturn29):
+ ld rSHL, rSHLSAVE(r1)
+ ld rSHR, rSHRSAVE(r1)
+L(dureturn27):
+ ld rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
+ ld rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
+ ld rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
+L(dureturn24):
+ ld rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ blr
+
+L(duzeroLength):
+ ld rOFF8, rOFF8SAVE(r1)
+ ld rOFF16, rOFF16SAVE(r1)
+ ld rOFF24, rOFF24SAVE(r1)
+ ld rOFF32, rOFF32SAVE(r1)
+ li rRTN, 0
+ blr
+
+END (MEMCMP)
+libc_hidden_builtin_def (memcmp)
+weak_alias (memcmp, bcmp)
diff --git a/sysdeps/powerpc/powerpc64/power8/memrchr.S b/sysdeps/powerpc/powerpc64/power8/memrchr.S
new file mode 100644
index 0000000000..54de6566bd
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/memrchr.S
@@ -0,0 +1,345 @@
+/* Optimized memrchr implementation for PowerPC64/POWER8.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ Contributed by Luis Machado <luisgpm@br.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* int [r3] memrchr (char *s [r3], int byte [r4], int size [r5]) */
+
+/* TODO: change these to the actual instructions when the minimum required
+ binutils allows it. */
+#define MTVRD(v, r) .long (0x7c000167 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define MFVRD(r, v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define VBPERMQ(t, a, b) .long (0x1000054c \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+#ifndef MEMRCHR
+# define MEMRCHR __memrchr
+#endif
+ .machine power7
+ENTRY_TOCLESS (MEMRCHR)
+ CALL_MCOUNT 3
+ add r7, r3, r5 /* Calculate the last acceptable address. */
+ neg r0, r7
+ addi r7, r7, -1
+ mr r10, r3
+ clrrdi r6, r7, 7
+ li r9, 3<<5
+ dcbt r9, r6, 8 /* Stream hint, decreasing addresses. */
+
+ /* Replicate BYTE to doubleword. */
+ insrdi r4, r4, 8, 48
+ insrdi r4, r4, 16, 32
+ insrdi r4, r4, 32, 0
+ li r6, -8
+ li r9, -1
+ rlwinm r0, r0, 3, 26, 28 /* Calculate padding. */
+ clrrdi r8, r7, 3
+ srd r9, r9, r0
+ cmpldi r5, 32
+ clrrdi r0, r10, 3
+ ble L(small_range)
+
+#ifdef __LITTLE_ENDIAN__
+ ldx r12, 0, r8
+#else
+ ldbrx r12, 0, r8 /* Load reversed doubleword from memory. */
+#endif
+ cmpb r3, r12, r4 /* Check for BYTE in DWORD1. */
+ and r3, r3, r9
+ cmpldi cr7, r3, 0 /* If r3 == 0, no BYTEs have been found. */
+ bne cr7, L(done)
+
+ /* Are we now aligned to a quadword boundary? If so, skip to
+ the main loop. Otherwise, go through the alignment code. */
+ andi. r12, r8, 15
+ beq cr0, L(align_qw)
+
+ /* Handle DWORD2 of pair. */
+#ifdef __LITTLE_ENDIAN__
+ ldx r12, r8, r6
+#else
+ ldbrx r12, r8, r6
+#endif
+ addi r8, r8, -8
+ cmpb r3, r12, r4
+ cmpldi cr7, r3, 0
+ bne cr7, L(done)
+
+ .align 4
+ /* At this point, r8 is 16B aligned. */
+L(align_qw):
+ sub r5, r8, r0
+ vspltisb v0, 0
+ /* Precompute vbpermq constant. */
+ vspltisb v10, 3
+ li r0, 0
+ lvsl v11, r0, r0
+ vslb v10, v11, v10
+ MTVRD(v1, r4)
+ vspltb v1, v1, 7
+ cmpldi r5, 64
+ ble L(tail64)
+ /* Are we 64-byte aligned? If so, jump to the vectorized loop.
+	   Note: aligning to a 64-byte boundary will necessarily slow down performance for
+ strings around 64 bytes in length due to the extra comparisons
+ required to check alignment for the vectorized loop. This is a
+ necessary tradeoff we are willing to take in order to speed up the
+ calculation for larger strings. */
+ andi. r11, r8, 63
+ beq cr0, L(preloop_64B)
+ /* In order to begin the 64B loop, it needs to be 64
+ bytes aligned. So read until it is 64B aligned. */
+ addi r8, r8, -16
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ addi r5, r5, -16
+
+ andi. r11, r8, 63
+ beq cr0, L(preloop_64B)
+ addi r8, r8, -16
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ addi r5, r5, -16
+
+ andi. r11, r8, 63
+ beq cr0, L(preloop_64B)
+ addi r8, r8, -16
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ addi r5, r5, -16
+ /* At this point it should be 64B aligned.
+ Prepare for the 64B loop. */
+L(preloop_64B):
+ cmpldi r5, 64 /* Check if r5 < 64. */
+ ble L(tail64)
+ srdi r9, r5, 6 /* Number of loop iterations. */
+ mtctr r9 /* Setup the counter. */
+ li r11, 16 /* Load required offsets. */
+ li r9, 32
+ li r7, 48
+
+ /* Handle r5 > 64. Loop over the bytes in strides of 64B. */
+ .align 4
+L(loop):
+ addi r8, r8, -64 /* Adjust address for the next iteration. */
+ lvx v2, 0, r8 /* Load 4 quadwords. */
+ lvx v3, r8, r11
+	lvx	v4, r8, r9
+	lvx	v5, r8, r7
+ vcmpequb v6, v1, v2
+ vcmpequb v7, v1, v3
+ vcmpequb v8, v1, v4
+ vcmpequb v9, v1, v5
+ vor v11, v6, v7
+ vor v12, v8, v9
+ vor v11, v11, v12 /* Compare and merge into one VR for speed. */
+ vcmpequb. v11, v0, v11
+ bnl cr6, L(found)
+ bdnz L(loop)
+ clrldi r5, r5, 58
+
+ /* Handle remainder of 64B loop or r5 > 64. */
+ .align 4
+L(tail64):
+ cmpldi r5, 0
+ beq L(null)
+ addi r8, r8, -16
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ cmpldi cr6, r5, 16
+ ble cr6, L(null)
+ addi r5, r5, -16
+
+ addi r8, r8, -16
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ cmpldi cr6, r5, 16
+ ble cr6, L(null)
+ addi r5, r5, -16
+
+ addi r8, r8, -16
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ cmpldi cr6, r5, 16
+ ble cr6, L(null)
+ addi r5, r5, -16
+
+ addi r8, r8, -16
+ lvx v4, 0, r8
+ vcmpequb v6, v1, v4
+ vcmpequb. v11, v0, v6
+ bnl cr6, L(found_16B)
+ li r3, 0
+ blr
+
+ /* Found a match in 64B loop. */
+ .align 4
+L(found):
+ /* Permute the first bit of each byte into bits 48-63. */
+ VBPERMQ(v6, v6, v10)
+ VBPERMQ(v7, v7, v10)
+ VBPERMQ(v8, v8, v10)
+ VBPERMQ(v9, v9, v10)
+ /* Shift each component into its correct position for merging. */
+#ifdef __LITTLE_ENDIAN__
+ vsldoi v7, v7, v7, 2
+ vsldoi v8, v8, v8, 4
+ vsldoi v9, v9, v9, 6
+#else
+ vsldoi v6, v6, v6, 6
+ vsldoi v7, v7, v7, 4
+ vsldoi v8, v8, v8, 2
+#endif
+ /* Merge the results and move to a GPR. */
+ vor v11, v6, v7
+ vor v4, v9, v8
+ vor v4, v11, v4
+ MFVRD(r5, v4)
+#ifdef __LITTLE_ENDIAN__
+ cntlzd r6, r5 /* Count leading zeros before the match. */
+#else
+ addi r6, r5, -1
+ andc r6, r6, r5
+ popcntd r6, r6
+#endif
+ addi r8, r8, 63
+ sub r3, r8, r6 /* Compute final address. */
+ cmpld cr7, r3, r10
+ bgelr cr7
+ li r3, 0
+ blr
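The L(found) sequence above (VBPERMQ to gather one bit per byte, MFVRD, then a leading- or trailing-zero count) boils down to locating the last matching byte in the just-scanned 64-byte block and checking that it does not fall before the start of the buffer. A scalar C sketch of that result, with an illustrative helper name:

#include <stddef.h>

/* Return a pointer to the last byte equal to 'byte' in the 64-byte block at
   'block', or NULL if that position is before 'start' (the original s),
   mirroring the final cmpld against r10 in L(found).  */
static const unsigned char *
last_match_in_block (const unsigned char *block, const unsigned char *start,
                     unsigned char byte)
{
  for (int i = 63; i >= 0; i--)
    if (block[i] == byte)
      return block + i >= start ? block + i : NULL;
  return NULL;  /* L(found) is only reached when a match exists.  */
}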
+
+ /* Found a match in last 16 bytes. */
+ .align 4
+L(found_16B):
+ cmpld r8, r10 /* Are we on the last QW? */
+ bge L(last)
+ /* Now discard bytes before starting address. */
+ sub r9, r10, r8
+ MTVRD(v9, r9)
+ vspltisb v8, 3
+ /* Mask unwanted bytes. */
+#ifdef __LITTLE_ENDIAN__
+ lvsr v7, 0, r10
+ vperm v6, v0, v6, v7
+ vsldoi v9, v0, v9, 8
+ vsl v9, v9, v8
+ vslo v6, v6, v9
+#else
+ lvsl v7, 0, r10
+ vperm v6, v6, v0, v7
+ vsldoi v9, v0, v9, 8
+ vsl v9, v9, v8
+ vsro v6, v6, v9
+#endif
+L(last):
+ /* Permute the first bit of each byte into bits 48-63. */
+ VBPERMQ(v6, v6, v10)
+ /* Shift each component into its correct position for merging. */
+#ifdef __LITTLE_ENDIAN__
+ vsldoi v6, v6, v6, 6
+ MFVRD(r7, v6)
+ cntlzd r6, r7 /* Count leading zeros before the match. */
+#else
+ MFVRD(r7, v6)
+ addi r6, r7, -1
+ andc r6, r6, r7
+ popcntd r6, r6
+#endif
+ addi r8, r8, 15
+ sub r3, r8, r6 /* Compute final address. */
+ cmpld r6, r5
+ bltlr
+ li r3, 0
+ blr
+
+ /* r3 has the output of the cmpb instruction, that is, it contains
+ 0xff in the same position as BYTE in the original
+ word from the string. Use that to calculate the pointer.
+ We need to make sure BYTE is *before* the end of the
+ range. */
+L(done):
+ cntlzd r9, r3 /* Count leading zeros before the match. */
+ cmpld r8, r0 /* Are we on the last word? */
+ srdi r6, r9, 3 /* Convert leading zeros to bytes. */
+ addi r0, r6, -7
+ sub r3, r8, r0
+ cmpld cr7, r3, r10
+ bnelr
+ bgelr cr7
+ li r3, 0
+ blr
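The scalar paths above depend on two tricks: replicating BYTE into every lane of a doubleword (the insrdi sequence near the top of the function) and cmpb, which writes 0xff into each byte lane where its two operands agree; L(done) then counts the leading zeros of that mask (taken from the byte-reversed load) to get the offset of the last match. A hedged C emulation of those two building blocks, with illustrative names:

#include <stdint.h>

/* Replicate one byte into all eight lanes of a doubleword, like the three
   insrdi instructions do for r4.  */
static uint64_t
replicate_byte (unsigned char b)
{
  return (uint64_t) b * 0x0101010101010101ULL;
}

/* Emulate cmpb: 0xff in every byte lane where a and b hold the same value,
   0x00 elsewhere.  */
static uint64_t
cmpb_emulated (uint64_t a, uint64_t b)
{
  uint64_t mask = 0;
  for (int i = 0; i < 8; i++)
    if ((unsigned char) (a >> (i * 8)) == (unsigned char) (b >> (i * 8)))
      mask |= 0xffULL << (i * 8);
  return mask;
}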
+
+ .align 4
+L(null):
+ li r3, 0
+ blr
+
+/* Deals with size <= 32. */
+ .align 4
+L(small_range):
+ cmpldi r5, 0
+ beq L(null)
+
+#ifdef __LITTLE_ENDIAN__
+ ldx r12, 0, r8
+#else
+ ldbrx r12, 0, r8 /* Load reversed doubleword from memory. */
+#endif
+ cmpb r3, r12, r4 /* Check for BYTE in DWORD1. */
+ and r3, r3, r9
+ cmpldi cr7, r3, 0
+ bne cr7, L(done)
+
+ /* Are we done already? */
+ cmpld r8, r0
+ addi r8, r8, -8
+ beqlr
+
+ .align 5
+L(loop_small):
+#ifdef __LITTLE_ENDIAN__
+ ldx r12, 0, r8
+#else
+ ldbrx r12, 0, r8
+#endif
+ cmpb r3, r12, r4
+ cmpld r8, r0
+ cmpldi cr7, r3, 0
+ bne cr7, L(done)
+ addi r8, r8, -8
+ bne L(loop_small)
+ blr
+
+END (MEMRCHR)
+weak_alias (__memrchr, memrchr)
+libc_hidden_builtin_def (memrchr)
diff --git a/sysdeps/powerpc/powerpc64/power8/memset.S b/sysdeps/powerpc/powerpc64/power8/memset.S
index 11433d89ad..a42232b42a 100644
--- a/sysdeps/powerpc/powerpc64/power8/memset.S
+++ b/sysdeps/powerpc/powerpc64/power8/memset.S
@@ -1,5 +1,5 @@
/* Optimized memset implementation for PowerPC64/POWER8.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
+ Copyright (C) 2014-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,14 +20,18 @@
#define MTVSRD_V1_R4 .long 0x7c240166 /* mtvsrd v1,r4 */
-/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
+/* void * [r3] memset (void *s [r3], int c [r4], size_t n [r5]);
Returns 's'. */
+#ifndef MEMSET
+# define MEMSET memset
+#endif
+
/* No need to use .machine power8 since mtvsrd is already
    handled by the define.  It avoids breakage on binutils
that does not support this machine specifier. */
.machine power7
-EALIGN (memset, 5, 0)
+ENTRY_TOCLESS (MEMSET, 5)
CALL_MCOUNT 3
L(_memset):
@@ -373,7 +377,10 @@ L(write_LT_32):
subf r5,r0,r5
2: bf 30,1f
- sth r4,0(r10)
+ /* Use stb instead of sth because it doesn't generate
+ alignment interrupts on cache-inhibited storage. */
+ stb r4,0(r10)
+ stb r4,1(r10)
addi r10,r10,2
1: bf 31,L(end_4bytes_alignment)
@@ -433,17 +440,80 @@ L(tail5):
/* Handles copies of 0~8 bytes. */
.align 4
L(write_LE_8):
- bne cr6,L(tail4)
+ bne cr6,L(LE7_tail4)
+ /* If input is word aligned, use stw, else use stb. */
+ andi. r0,r10,3
+ bne L(8_unalign)
stw r4,0(r10)
stw r4,4(r10)
blr
-END_GEN_TB (memset,TB_TOCLESS)
+
+ /* Unaligned input and size is 8. */
+ .align 4
+L(8_unalign):
+ andi. r0,r10,1
+ beq L(8_hwalign)
+ stb r4,0(r10)
+ sth r4,1(r10)
+ sth r4,3(r10)
+ sth r4,5(r10)
+ stb r4,7(r10)
+ blr
+
+ /* Halfword aligned input and size is 8. */
+ .align 4
+L(8_hwalign):
+ sth r4,0(r10)
+ sth r4,2(r10)
+ sth r4,4(r10)
+ sth r4,6(r10)
+ blr
+
+ .align 4
+ /* Copies 4~7 bytes. */
+L(LE7_tail4):
+ /* Use stb instead of sth because it doesn't generate
+ alignment interrupts on cache-inhibited storage. */
+ bf 29,L(LE7_tail2)
+ stb r4,0(r10)
+ stb r4,1(r10)
+ stb r4,2(r10)
+ stb r4,3(r10)
+ bf 30,L(LE7_tail5)
+ stb r4,4(r10)
+ stb r4,5(r10)
+ bflr 31
+ stb r4,6(r10)
+ blr
+
+ .align 4
+ /* Copies 2~3 bytes. */
+L(LE7_tail2):
+ bf 30,1f
+ stb r4,0(r10)
+ stb r4,1(r10)
+ bflr 31
+ stb r4,2(r10)
+ blr
+
+ .align 4
+L(LE7_tail5):
+ bflr 31
+ stb r4,4(r10)
+ blr
+
+ .align 4
+1: bflr 31
+ stb r4,0(r10)
+ blr
+
+END_GEN_TB (MEMSET,TB_TOCLESS)
libc_hidden_builtin_def (memset)
/* Copied from bzero.S to prevent the linker from inserting a stub
between bzero and memset. */
-ENTRY (__bzero)
+ENTRY_TOCLESS (__bzero)
CALL_MCOUNT 3
mr r5,r4
li r4,0
diff --git a/sysdeps/powerpc/powerpc64/power8/multiarch/Implies b/sysdeps/powerpc/powerpc64/power8/multiarch/Implies
deleted file mode 100644
index 1fc7b7cd39..0000000000
--- a/sysdeps/powerpc/powerpc64/power8/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power7/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power8/stpcpy.S b/sysdeps/powerpc/powerpc64/power8/stpcpy.S
index fd8f7fa63a..ebdfaab97c 100644
--- a/sysdeps/powerpc/powerpc64/power8/stpcpy.S
+++ b/sysdeps/powerpc/powerpc64/power8/stpcpy.S
@@ -1,5 +1,5 @@
/* Optimized stpcpy implementation for PowerPC64/POWER8.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/power8/stpncpy.S b/sysdeps/powerpc/powerpc64/power8/stpncpy.S
index 067910b373..95c86e9677 100644
--- a/sysdeps/powerpc/powerpc64/power8/stpncpy.S
+++ b/sysdeps/powerpc/powerpc64/power8/stpncpy.S
@@ -1,5 +1,5 @@
/* Optimized stpncpy implementation for PowerPC64/POWER8.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,3 +18,7 @@
#define USE_AS_STPNCPY
#include <sysdeps/powerpc/powerpc64/power8/strncpy.S>
+
+weak_alias (__stpncpy, stpncpy)
+libc_hidden_def (__stpncpy)
+libc_hidden_builtin_def (stpncpy)
diff --git a/sysdeps/powerpc/powerpc64/power8/strcasecmp.S b/sysdeps/powerpc/powerpc64/power8/strcasecmp.S
new file mode 100644
index 0000000000..3a2efe2a64
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strcasecmp.S
@@ -0,0 +1,457 @@
+/* Optimized strcasecmp implementation for PowerPC64.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <locale-defines.h>
+
+/* int [r3] strcasecmp (const char *s1 [r3], const char *s2 [r4] ) */
+
+#ifndef USE_AS_STRNCASECMP
+# define __STRCASECMP __strcasecmp
+# define STRCASECMP strcasecmp
+#else
+# define __STRCASECMP __strncasecmp
+# define STRCASECMP strncasecmp
+#endif
+/* Convert 16 bytes to lowercase and compare */
+#define TOLOWER() \
+ vaddubm v8, v4, v1; \
+ vaddubm v7, v4, v3; \
+ vcmpgtub v8, v8, v2; \
+ vsel v4, v7, v4, v8; \
+ vaddubm v8, v5, v1; \
+ vaddubm v7, v5, v3; \
+ vcmpgtub v8, v8, v2; \
+ vsel v5, v7, v5, v8; \
+ vcmpequb. v7, v5, v4;
+
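The TOLOWER macro is the classic branchless ASCII lower-casing applied per byte lane: v1 holds 0xbf (that is, -'A' modulo 256), v2 holds 0x19 ('Z' - 'A'), and v3 holds 0x20, so vsel adds 0x20 only where the byte is an upper-case letter. A scalar sketch of what each lane computes:

/* Per-lane equivalent of TOLOWER: if (c - 'A') is at most 25 when treated
   as unsigned, c is an upper-case ASCII letter, so add 0x20; otherwise
   leave it alone.  */
static unsigned char
tolower_ascii_lane (unsigned char c)
{
  return (unsigned char) (c - 'A') <= 0x19 ? (unsigned char) (c + 0x20) : c;
}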
+/*
+ * Get 16 bytes for unaligned case.
+ * reg1: Vector to hold next 16 bytes.
+ * reg2: Address to read from.
+ * reg3: Permute control vector.
+ * v8: Tmp vector used to mask unwanted bytes.
+ * v9: Tmp vector, 0 when null is found in the first 16 bytes.
+ */
+#ifdef __LITTLE_ENDIAN__
+#define GET16BYTES(reg1, reg2, reg3) \
+ lvx reg1, 0, reg2; \
+ vspltisb v8, -1; \
+ vperm v8, v8, reg1, reg3; \
+ vcmpequb. v8, v0, v8; \
+ beq cr6, 1f; \
+ vspltisb v9, 0; \
+ b 2f; \
+ .align 4; \
+1: \
+ addi r6, reg2, 16; \
+ lvx v9, 0, r6; \
+2: \
+ vperm reg1, v9, reg1, reg3;
+#else
+#define GET16BYTES(reg1, reg2, reg3) \
+ lvx reg1, 0, reg2; \
+ vspltisb v8, -1; \
+ vperm v8, reg1, v8, reg3; \
+ vcmpequb. v8, v0, v8; \
+ beq cr6, 1f; \
+ vspltisb v9, 0; \
+ b 2f; \
+ .align 4; \
+1: \
+ addi r6, reg2, 16; \
+ lvx v9, 0, r6; \
+2: \
+ vperm reg1, reg1, v9, reg3;
+#endif
+
+/* Check null in v4, v5 and convert to lower. */
+#define CHECKNULLANDCONVERT() \
+ vcmpequb. v7, v0, v5; \
+ beq cr6, 3f; \
+ vcmpequb. v7, v0, v4; \
+ beq cr6, 3f; \
+ b L(null_found); \
+ .align 4; \
+3: \
+ TOLOWER()
+
+#ifdef _ARCH_PWR8
+# define VCLZD_V8_v7 vclzd v8, v7;
+# define MFVRD_R3_V1 mfvrd r3, v1;
+# define VSUBUDM_V9_V8 vsubudm v9, v9, v8;
+# define VPOPCNTD_V8_V8 vpopcntd v8, v8;
+# define VADDUQM_V7_V8 vadduqm v9, v7, v8;
+#else
+# define VCLZD_V8_v7 .long 0x11003fc2
+# define MFVRD_R3_V1 .long 0x7c230067
+# define VSUBUDM_V9_V8 .long 0x112944c0
+# define VPOPCNTD_V8_V8 .long 0x110047c3
+# define VADDUQM_V7_V8 .long 0x11274100
+#endif
+
+ .machine power7
+
+ENTRY (__STRCASECMP)
+#ifdef USE_AS_STRNCASECMP
+ CALL_MCOUNT 3
+#else
+ CALL_MCOUNT 2
+#endif
+#define rRTN r3 /* Return value */
+#define rSTR1 r10 /* 1st string */
+#define rSTR2 r4 /* 2nd string */
+#define rCHAR1 r6 /* Byte read from 1st string */
+#define rCHAR2 r7 /* Byte read from 2nd string */
+#define rADDR1 r8 /* Address of tolower(rCHAR1) */
+#define rADDR2 r12 /* Address of tolower(rCHAR2) */
+#define rLWR1 r8 /* Word tolower(rCHAR1) */
+#define rLWR2 r12 /* Word tolower(rCHAR2) */
+#define rTMP r9
+#define rLOC r11 /* Default locale address */
+
+ cmpd cr7, rRTN, rSTR2
+
+ /* Get locale address. */
+ ld rTMP, __libc_tsd_LOCALE@got@tprel(r2)
+ add rLOC, rTMP, __libc_tsd_LOCALE@tls
+ ld rLOC, 0(rLOC)
+
+ mr rSTR1, rRTN
+ li rRTN, 0
+ beqlr cr7
+#ifdef USE_AS_STRNCASECMP
+ cmpdi cr7, r5, 0
+ beq cr7, L(retnull)
+ cmpdi cr7, r5, 16
+ blt cr7, L(bytebybyte)
+#endif
+ vspltisb v0, 0
+ vspltisb v8, -1
+ /* Check for null in initial characters.
+	   Check at most 16 chars, depending on the alignment.
+ If null is present, proceed byte by byte. */
+ lvx v4, 0, rSTR1
+#ifdef __LITTLE_ENDIAN__
+ lvsr v10, 0, rSTR1 /* Compute mask. */
+ vperm v9, v8, v4, v10 /* Mask bits that are not part of string. */
+#else
+ lvsl v10, 0, rSTR1
+ vperm v9, v4, v8, v10
+#endif
+ vcmpequb. v9, v0, v9 /* Check for null bytes. */
+ bne cr6, L(bytebybyte)
+ lvx v5, 0, rSTR2
+ /* Calculate alignment. */
+#ifdef __LITTLE_ENDIAN__
+ lvsr v6, 0, rSTR2
+ vperm v9, v8, v5, v6 /* Mask bits that are not part of string. */
+#else
+ lvsl v6, 0, rSTR2
+ vperm v9, v5, v8, v6
+#endif
+ vcmpequb. v9, v0, v9 /* Check for null bytes. */
+ bne cr6, L(bytebybyte)
+	/* Check if the locale has non-ASCII characters.  */
+ ld rTMP, 0(rLOC)
+ addi r6, rTMP,LOCALE_DATA_VALUES+_NL_CTYPE_NONASCII_CASE*SIZEOF_VALUES
+ lwz rTMP, 0(r6)
+ cmpdi cr7, rTMP, 1
+ beq cr7, L(bytebybyte)
+
+ /* Load vector registers with values used for TOLOWER. */
+	/* Load v1 = 0xbf, v2 = 0x19, v3 = 0x20 in each byte.  */
+ vspltisb v3, 2
+ vspltisb v9, 4
+ vsl v3, v3, v9
+ vaddubm v1, v3, v3
+ vnor v1, v1, v1
+ vspltisb v2, 7
+ vsububm v2, v3, v2
+
+ andi. rADDR1, rSTR1, 0xF
+ beq cr0, L(align)
+ addi r6, rSTR1, 16
+ lvx v9, 0, r6
+ /* Compute 16 bytes from previous two loads. */
+#ifdef __LITTLE_ENDIAN__
+ vperm v4, v9, v4, v10
+#else
+ vperm v4, v4, v9, v10
+#endif
+L(align):
+ andi. rADDR2, rSTR2, 0xF
+ beq cr0, L(align1)
+ addi r6, rSTR2, 16
+ lvx v9, 0, r6
+ /* Compute 16 bytes from previous two loads. */
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v9, v5, v6
+#else
+ vperm v5, v5, v9, v6
+#endif
+L(align1):
+ CHECKNULLANDCONVERT()
+ blt cr6, L(match)
+ b L(different)
+ .align 4
+L(match):
+ clrldi r6, rSTR1, 60
+ subfic r7, r6, 16
+#ifdef USE_AS_STRNCASECMP
+ sub r5, r5, r7
+#endif
+ add rSTR1, rSTR1, r7
+ add rSTR2, rSTR2, r7
+ andi. rADDR2, rSTR2, 0xF
+ addi rSTR1, rSTR1, -16
+ addi rSTR2, rSTR2, -16
+ beq cr0, L(aligned)
+#ifdef __LITTLE_ENDIAN__
+ lvsr v6, 0, rSTR2
+#else
+ lvsl v6, 0, rSTR2
+#endif
+	/* There are 2 loops depending on the input alignment.
+	   Each loop gets 16 bytes from s1 and s2, checks for null,
+	   converts to lowercase and compares.  Loop until a difference
+	   or null occurs.  */
+L(s1_align):
+ addi rSTR1, rSTR1, 16
+ addi rSTR2, rSTR2, 16
+#ifdef USE_AS_STRNCASECMP
+ cmpdi cr7, r5, 16
+ blt cr7, L(bytebybyte)
+ addi r5, r5, -16
+#endif
+ lvx v4, 0, rSTR1
+ GET16BYTES(v5, rSTR2, v6)
+ CHECKNULLANDCONVERT()
+ blt cr6, L(s1_align)
+ b L(different)
+ .align 4
+L(aligned):
+ addi rSTR1, rSTR1, 16
+ addi rSTR2, rSTR2, 16
+#ifdef USE_AS_STRNCASECMP
+ cmpdi cr7, r5, 16
+ blt cr7, L(bytebybyte)
+ addi r5, r5, -16
+#endif
+ lvx v4, 0, rSTR1
+ lvx v5, 0, rSTR2
+ CHECKNULLANDCONVERT()
+ blt cr6, L(aligned)
+
+ /* Calculate and return the difference. */
+L(different):
+ vaddubm v1, v3, v3
+ vcmpequb v7, v0, v7
+#ifdef __LITTLE_ENDIAN__
+ /* Count trailing zero. */
+ vspltisb v8, -1
+ VADDUQM_V7_V8
+ vandc v8, v9, v7
+ VPOPCNTD_V8_V8
+ vspltb v6, v8, 15
+ vcmpequb. v6, v6, v1
+ blt cr6, L(shift8)
+#else
+ /* Count leading zero. */
+ VCLZD_V8_v7
+ vspltb v6, v8, 7
+ vcmpequb. v6, v6, v1
+ blt cr6, L(shift8)
+ vsro v8, v8, v1
+#endif
+ b L(skipsum)
+ .align 4
+L(shift8):
+ vsumsws v8, v8, v0
+L(skipsum):
+#ifdef __LITTLE_ENDIAN__
+ /* Shift registers based on leading zero count. */
+ vsro v6, v5, v8
+ vsro v7, v4, v8
+ /* Merge and move to GPR. */
+ vmrglb v6, v6, v7
+ vslo v1, v6, v1
+ MFVRD_R3_V1
+ /* Place the characters that are different in first position. */
+ sldi rSTR2, rRTN, 56
+ srdi rSTR2, rSTR2, 56
+ sldi rSTR1, rRTN, 48
+ srdi rSTR1, rSTR1, 56
+#else
+ vslo v6, v5, v8
+ vslo v7, v4, v8
+ vmrghb v1, v6, v7
+ MFVRD_R3_V1
+ srdi rSTR2, rRTN, 48
+ sldi rSTR2, rSTR2, 56
+ srdi rSTR2, rSTR2, 56
+ srdi rSTR1, rRTN, 56
+#endif
+ subf rRTN, rSTR1, rSTR2
+ extsw rRTN, rRTN
+ blr
+
+ .align 4
+ /* OK. We've hit the end of the string. We need to be careful that
+ we don't compare two strings as different because of junk beyond
+ the end of the strings... */
+L(null_found):
+ vaddubm v10, v3, v3
+#ifdef __LITTLE_ENDIAN__
+ /* Count trailing zero. */
+ vspltisb v8, -1
+ VADDUQM_V7_V8
+ vandc v8, v9, v7
+ VPOPCNTD_V8_V8
+ vspltb v6, v8, 15
+ vcmpequb. v6, v6, v10
+ blt cr6, L(shift_8)
+#else
+ /* Count leading zero. */
+ VCLZD_V8_v7
+ vspltb v6, v8, 7
+ vcmpequb. v6, v6, v10
+ blt cr6, L(shift_8)
+ vsro v8, v8, v10
+#endif
+ b L(skipsum1)
+ .align 4
+L(shift_8):
+ vsumsws v8, v8, v0
+L(skipsum1):
+ /* Calculate shift count based on count of zero. */
+ vspltisb v10, 7
+ vslb v10, v10, v10
+ vsldoi v9, v0, v10, 1
+ VSUBUDM_V9_V8
+ vspltisb v8, 8
+ vsldoi v8, v0, v8, 1
+ VSUBUDM_V9_V8
+ /* Shift and remove junk after null character. */
+#ifdef __LITTLE_ENDIAN__
+ vslo v5, v5, v9
+ vslo v4, v4, v9
+#else
+ vsro v5, v5, v9
+ vsro v4, v4, v9
+#endif
+ /* Convert and compare 16 bytes. */
+ TOLOWER()
+ blt cr6, L(retnull)
+ b L(different)
+ .align 4
+L(retnull):
+ li rRTN, 0
+ blr
+ .align 4
+L(bytebybyte):
+	/* Unrolled loop for POWER: loads are done with 'lbz' plus an
+	   offset, and the string pointers are only updated at the end
+	   of the unrolled block.  */
+ ld rLOC, LOCALE_CTYPE_TOLOWER(rLOC)
+ lbz rCHAR1, 0(rSTR1) /* Load char from s1 */
+ lbz rCHAR2, 0(rSTR2) /* Load char from s2 */
+#ifdef USE_AS_STRNCASECMP
+ rldicl rTMP, r5, 62, 2
+ cmpdi cr7, rTMP, 0
+ beq cr7, L(lessthan4)
+ mtctr rTMP
+#endif
+L(loop):
+ cmpdi rCHAR1, 0 /* *s1 == '\0' ? */
+ sldi rADDR1, rCHAR1, 2 /* Calculate address for tolower(*s1) */
+ sldi rADDR2, rCHAR2, 2 /* Calculate address for tolower(*s2) */
+ lwzx rLWR1, rLOC, rADDR1 /* Load tolower(*s1) */
+ lwzx rLWR2, rLOC, rADDR2 /* Load tolower(*s2) */
+ cmpw cr1, rLWR1, rLWR2 /* r = tolower(*s1) == tolower(*s2) ? */
+ crorc 4*cr1+eq,eq,4*cr1+eq /* (*s1 != '\0') || (r == 1) */
+ beq cr1, L(done)
+ lbz rCHAR1, 1(rSTR1)
+ lbz rCHAR2, 1(rSTR2)
+ cmpdi rCHAR1, 0
+ sldi rADDR1, rCHAR1, 2
+ sldi rADDR2, rCHAR2, 2
+ lwzx rLWR1, rLOC, rADDR1
+ lwzx rLWR2, rLOC, rADDR2
+ cmpw cr1, rLWR1, rLWR2
+ crorc 4*cr1+eq,eq,4*cr1+eq
+ beq cr1, L(done)
+ lbz rCHAR1, 2(rSTR1)
+ lbz rCHAR2, 2(rSTR2)
+ cmpdi rCHAR1, 0
+ sldi rADDR1, rCHAR1, 2
+ sldi rADDR2, rCHAR2, 2
+ lwzx rLWR1, rLOC, rADDR1
+ lwzx rLWR2, rLOC, rADDR2
+ cmpw cr1, rLWR1, rLWR2
+ crorc 4*cr1+eq,eq,4*cr1+eq
+ beq cr1, L(done)
+ lbz rCHAR1, 3(rSTR1)
+ lbz rCHAR2, 3(rSTR2)
+ cmpdi rCHAR1, 0
+ /* Increment both string descriptors */
+ addi rSTR1, rSTR1, 4
+ addi rSTR2, rSTR2, 4
+ sldi rADDR1, rCHAR1, 2
+ sldi rADDR2, rCHAR2, 2
+ lwzx rLWR1, rLOC, rADDR1
+ lwzx rLWR2, rLOC, rADDR2
+ cmpw cr1, rLWR1, rLWR2
+ crorc 4*cr1+eq,eq,4*cr1+eq
+ beq cr1, L(done)
+ lbz rCHAR1, 0(rSTR1) /* Load char from s1 */
+ lbz rCHAR2, 0(rSTR2) /* Load char from s2 */
+#ifdef USE_AS_STRNCASECMP
+ bdnz L(loop)
+#else
+ b L(loop)
+#endif
+#ifdef USE_AS_STRNCASECMP
+L(lessthan4):
+ clrldi r5, r5, 62
+ cmpdi cr7, r5, 0
+ beq cr7, L(retnull)
+ mtctr r5
+L(loop1):
+ cmpdi rCHAR1, 0
+ sldi rADDR1, rCHAR1, 2
+ sldi rADDR2, rCHAR2, 2
+ lwzx rLWR1, rLOC, rADDR1
+ lwzx rLWR2, rLOC, rADDR2
+ cmpw cr1, rLWR1, rLWR2
+ crorc 4*cr1+eq,eq,4*cr1+eq
+ beq cr1, L(done)
+ addi rSTR1, rSTR1, 1
+ addi rSTR2, rSTR2, 1
+ lbz rCHAR1, 0(rSTR1)
+ lbz rCHAR2, 0(rSTR2)
+ bdnz L(loop1)
+#endif
+L(done):
+ subf r0, rLWR2, rLWR1
+ extsw rRTN, r0
+ blr
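Ignoring the four-way unrolling and the strncasecmp length bookkeeping, the byte-by-byte fallback above is a table-driven compare through the locale's tolower array (reached via LOCALE_CTYPE_TOLOWER). A hedged C rendering, where tolower_tab stands in for that table rather than any real glibc interface:

#include <stdint.h>

static int
casecmp_bytewise (const unsigned char *s1, const unsigned char *s2,
                  const int32_t *tolower_tab)
{
  int32_t c1, c2;
  unsigned char raw;

  do
    {
      raw = *s1;                 /* The asm tests the raw byte for '\0'.  */
      c1 = tolower_tab[*s1++];
      c2 = tolower_tab[*s2++];
    }
  while (raw != '\0' && c1 == c2);

  return (int) (c1 - c2);        /* subf + extsw in L(done).  */
}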
+END (__STRCASECMP)
+
+weak_alias (__STRCASECMP, STRCASECMP)
+libc_hidden_builtin_def (__STRCASECMP)
diff --git a/sysdeps/powerpc/powerpc64/power8/strcasestr-ppc64.c b/sysdeps/powerpc/powerpc64/power8/strcasestr-ppc64.c
new file mode 100644
index 0000000000..221d4733f4
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strcasestr-ppc64.c
@@ -0,0 +1,29 @@
+/* Optimized strcasestr implementation for PowerPC64/POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+
+#define STRCASESTR __strcasestr_ppc
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(__name)
+
+#undef weak_alias
+#define weak_alias(a,b)
+extern __typeof (strcasestr) __strcasestr_ppc attribute_hidden;
+
+#include <string/strcasestr.c>
diff --git a/sysdeps/powerpc/powerpc64/power8/strcasestr.S b/sysdeps/powerpc/powerpc64/power8/strcasestr.S
new file mode 100644
index 0000000000..9fc24c29f9
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strcasestr.S
@@ -0,0 +1,538 @@
+/* Optimized strcasestr implementation for PowerPC64/POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <locale-defines.h>
+
+/* Char * [r3] strcasestr (char *s [r3], char * pat[r4]) */
+
+/* The performance gain is obtained by comparing 16 bytes. */
+
+/* When the first char of r4 is hit ITERATIONS times in r3,
+   fall back to the default implementation.  */
+#define ITERATIONS 64
+
+#ifndef STRCASESTR
+# define STRCASESTR __strcasestr
+#endif
+
+#ifndef STRLEN
+/* For builds without IFUNC support, local calls should be made to internal
+ GLIBC symbol (created by libc_hidden_builtin_def). */
+# ifdef SHARED
+# define STRLEN __GI_strlen
+# else
+# define STRLEN strlen
+# endif
+#endif
+
+#ifndef STRNLEN
+/* For builds without IFUNC support, local calls should be made to internal
+ GLIBC symbol (created by libc_hidden_builtin_def). */
+# ifdef SHARED
+# define STRNLEN __GI_strnlen
+# else
+# define STRNLEN __strnlen
+# endif
+#endif
+
+#ifndef STRCHR
+# ifdef SHARED
+# define STRCHR __GI_strchr
+# else
+# define STRCHR strchr
+# endif
+#endif
+
+/* Convert 16 bytes of v4 and reg to lowercase and compare. */
+#define TOLOWER(reg) \
+ vcmpgtub v6, v4, v1; \
+ vcmpgtub v7, v2, v4; \
+ vand v8, v7, v6; \
+ vand v8, v8, v3; \
+ vor v4, v8, v4; \
+ vcmpgtub v6, reg, v1; \
+ vcmpgtub v7, v2, reg; \
+ vand v8, v7, v6; \
+ vand v8, v8, v3; \
+ vor reg, v8, reg; \
+ vcmpequb. v6, reg, v4;
+
+/* TODO: change these to the actual instructions when the minimum required
+ binutils allows it. */
+#ifdef _ARCH_PWR8
+#define VCLZD_V8_v7 vclzd v8, v7;
+#else
+#define VCLZD_V8_v7 .long 0x11003fc2
+#endif
+
+#define FRAMESIZE (FRAME_MIN_SIZE+48)
+/* TODO: change this to .machine power8 when the minimum required binutils
+ allows it. */
+ .machine power7
+ENTRY (STRCASESTR, 4)
+ CALL_MCOUNT 2
+ mflr r0 /* Load link register LR to r0. */
+ std r31, -8(r1) /* Save callers register r31. */
+ std r30, -16(r1) /* Save callers register r30. */
+ std r29, -24(r1) /* Save callers register r29. */
+ std r28, -32(r1) /* Save callers register r28. */
+ std r27, -40(r1) /* Save callers register r27. */
+ std r0, 16(r1) /* Store the link register. */
+ cfi_offset(r31, -8)
+ cfi_offset(r30, -16)
+ cfi_offset(r29, -24)
+ cfi_offset(r28, -32)
+ cfi_offset(r27, -40)
+ cfi_offset(lr, 16)
+ stdu r1, -FRAMESIZE(r1) /* Create the stack frame. */
+ cfi_adjust_cfa_offset(FRAMESIZE)
+
+ dcbt 0, r3
+ dcbt 0, r4
+ cmpdi cr7, r3, 0 /* Input validation. */
+ beq cr7, L(retnull)
+ cmpdi cr7, r4, 0
+ beq cr7, L(retnull)
+
+ mr r29, r3
+ mr r30, r4
+	/* Load the first byte from r4 and check if it is null.  */
+ lbz r6, 0(r4)
+ cmpdi cr7, r6, 0
+ beq cr7, L(ret_r3)
+
+ ld r10, __libc_tsd_LOCALE@got@tprel(r2)
+ add r9, r10, __libc_tsd_LOCALE@tls
+ ld r9, 0(r9)
+ ld r9, LOCALE_CTYPE_TOUPPER(r9)
+ sldi r10, r6, 2 /* Convert to upper case. */
+ lwzx r28, r9, r10
+
+ ld r10, __libc_tsd_LOCALE@got@tprel(r2)
+ add r11, r10, __libc_tsd_LOCALE@tls
+ ld r11, 0(r11)
+ ld r11, LOCALE_CTYPE_TOLOWER(r11)
+ sldi r10, r6, 2 /* Convert to lower case. */
+ lwzx r27, r11, r10
+
+ /* Check if the first char is present. */
+ mr r4, r27
+ bl STRCHR
+ nop
+ mr r5, r3
+ mr r3, r29
+ mr r29, r5
+ mr r4, r28
+ bl STRCHR
+ nop
+ cmpdi cr7, r29, 0
+ beq cr7, L(firstpos)
+ cmpdi cr7, r3, 0
+ beq cr7, L(skipcheck)
+ cmpw cr7, r3, r29
+ ble cr7, L(firstpos)
+	/* Move r3 to the first occurrence.  */
+L(skipcheck):
+ mr r3, r29
+L(firstpos):
+ mr r29, r3
+
+ sldi r9, r27, 8
+ or r28, r9, r28
+ /* Reg r27 is used to count the number of iterations. */
+ li r27, 0
+ /* If first char of search str is not present. */
+ cmpdi cr7, r3, 0
+ ble cr7, L(end)
+
+ /* Find the length of pattern. */
+ mr r3, r30
+ bl STRLEN
+ nop
+
+ cmpdi cr7, r3, 0 /* If search str is null. */
+ beq cr7, L(ret_r3)
+
+ mr r31, r3
+ mr r4, r3
+ mr r3, r29
+ bl STRNLEN
+ nop
+
+ cmpd cr7, r3, r31 /* If len(r3) < len(r4). */
+ blt cr7, L(retnull)
+
+ mr r3, r29
+
+ /* Locales not matching ASCII for single bytes. */
+ ld r10, __libc_tsd_LOCALE@got@tprel(r2)
+ add r9, r10, __libc_tsd_LOCALE@tls
+ ld r9, 0(r9)
+ ld r7, 0(r9)
+ addi r7, r7, LOCALE_DATA_VALUES+_NL_CTYPE_NONASCII_CASE*SIZEOF_VALUES
+ lwz r8, 0(r7)
+ cmpdi cr7, r8, 1
+ beq cr7, L(bytebybyte)
+
+ /* If len(r4) < 16 handle byte by byte. */
+ /* For shorter strings we will not use vector registers. */
+ cmpdi cr7, r31, 16
+ blt cr7, L(bytebybyte)
+
+ /* Comparison values used for TOLOWER. */
+ /* Load v1 = 64('A' - 1), v2 = 91('Z' + 1), v3 = 32 in each byte. */
+ vspltish v0, 0
+ vspltisb v5, 2
+ vspltisb v4, 4
+ vsl v3, v5, v4
+ vaddubm v1, v3, v3
+ vspltisb v5, 15
+ vaddubm v2, v5, v5
+ vaddubm v2, v1, v2
+ vspltisb v4, -3
+ vaddubm v2, v2, v4
+
+	/*
+	1. Load 16 bytes from r3 and r4.
+	2. Check for a null byte; if found, take the byte-by-byte path.
+	3. Else, convert both to lowercase and compare.
+	4. If they are the same, proceed to 1.
+	5. If they don't match, check whether the first char of r4 is present in the
+	loaded 16 bytes of r3.
+	6. If yes, move the position, load the next 16 bytes of r3 and proceed to 2.
+	*/
+
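A loose scalar outline of steps 1-6, including the ITERATIONS cut-off that sends the search to the generic fallback (the L(default) path calling __strcasestr_ppc). This is a sketch of the control flow only; the real code compares 16-byte vectors and drops to the byte-by-byte path when it sees a NUL or a non-ASCII locale:

#define _GNU_SOURCE
#include <ctype.h>
#include <stddef.h>
#include <string.h>

#define SKETCH_ITERATIONS 64   /* Mirrors the ITERATIONS define above.  */

static const char *
sketch_strcasestr (const char *s, const char *pat)
{
  size_t patlen = strlen (pat);
  int hits = 0;

  if (patlen == 0)
    return s;                   /* Empty pattern: return s (L(ret_r3)).  */

  for (; *s != '\0'; s++)
    {
      /* Steps 5/6: only positions holding the first pattern char (in either
         case) are candidates.  */
      if (tolower ((unsigned char) *s) != tolower ((unsigned char) *pat))
        continue;

      size_t i = 0;
      while (i < patlen
             && tolower ((unsigned char) s[i])
                == tolower ((unsigned char) pat[i]))
        i++;                      /* Steps 1-4: compare chunk by chunk.  */
      if (i == patlen)
        return s;

      if (++hits == SKETCH_ITERATIONS)
        return strcasestr (s, pat);   /* L(default): generic fallback.  */
    }
  return NULL;
}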
+ mr r8, r3 /* Save r3 for future use. */
+ mr r4, r30 /* Restore r4. */
+ clrldi r10, r4, 60
+ lvx v5, 0, r4 /* Load 16 bytes from r4. */
+ cmpdi cr7, r10, 0
+ beq cr7, L(begin2)
+ /* If r4 is unaligned, load another 16 bytes. */
+#ifdef __LITTLE_ENDIAN__
+ lvsr v7, 0, r4
+#else
+ lvsl v7, 0, r4
+#endif
+ addi r5, r4, 16
+ lvx v9, 0, r5
+#ifdef __LITTLE_ENDIAN__
+ vperm v5, v9, v5, v7
+#else
+ vperm v5, v5, v9, v7
+#endif
+L(begin2):
+ lvx v4, 0, r3
+ vcmpequb. v7, v0, v4 /* Check for null. */
+ beq cr6, L(nullchk6)
+ b L(trailcheck)
+
+ .align 4
+L(nullchk6):
+ clrldi r10, r3, 60
+ cmpdi cr7, r10, 0
+ beq cr7, L(next16)
+#ifdef __LITTLE_ENDIAN__
+ lvsr v7, 0, r3
+#else
+ lvsl v7, 0, r3
+#endif
+ addi r5, r3, 16
+ /* If r3 is unaligned, load another 16 bytes. */
+ lvx v10, 0, r5
+#ifdef __LITTLE_ENDIAN__
+ vperm v4, v10, v4, v7
+#else
+ vperm v4, v4, v10, v7
+#endif
+L(next16):
+ vcmpequb. v6, v0, v5 /* Check for null. */
+ beq cr6, L(nullchk)
+ b L(trailcheck)
+
+ .align 4
+L(nullchk):
+ vcmpequb. v6, v0, v4
+ beq cr6, L(nullchk1)
+ b L(retnull)
+
+ .align 4
+L(nullchk1):
+ /* Convert both v3 and v4 to lower. */
+ TOLOWER(v5)
+ /* If both are same, branch to match. */
+ blt cr6, L(match)
+ /* Find if the first char is present in next 15 bytes. */
+#ifdef __LITTLE_ENDIAN__
+ vspltb v6, v5, 15
+ vsldoi v7, v0, v4, 15
+#else
+ vspltb v6, v5, 0
+ vspltisb v7, 8
+ vslo v7, v4, v7
+#endif
+ vcmpequb v7, v6, v7
+ vcmpequb. v6, v0, v7
+ /* Shift r3 by 16 bytes and proceed. */
+ blt cr6, L(shift16)
+ VCLZD_V8_v7
+#ifdef __LITTLE_ENDIAN__
+ vspltb v6, v8, 15
+#else
+ vspltb v6, v8, 7
+#endif
+ vcmpequb. v6, v6, v1
+ /* Shift r3 by 8 bytes and proceed. */
+ blt cr6, L(shift8)
+ b L(begin)
+
+ .align 4
+L(match):
+ /* There is a match of 16 bytes, check next bytes. */
+ cmpdi cr7, r31, 16
+ mr r29, r3
+ beq cr7, L(ret_r3)
+
+L(secondmatch):
+ addi r3, r3, 16
+ addi r4, r4, 16
+ /* Load next 16 bytes of r3 and r4 and compare. */
+ clrldi r10, r4, 60
+ cmpdi cr7, r10, 0
+ beq cr7, L(nextload)
+ /* Handle unaligned case. */
+ vor v6, v9, v9
+ vcmpequb. v7, v0, v6
+ beq cr6, L(nullchk2)
+ b L(trailcheck)
+
+ .align 4
+L(nullchk2):
+#ifdef __LITTLE_ENDIAN__
+ lvsr v7, 0, r4
+#else
+ lvsl v7, 0, r4
+#endif
+ addi r5, r4, 16
+ /* If r4 is unaligned, load another 16 bytes. */
+ lvx v9, 0, r5
+#ifdef __LITTLE_ENDIAN__
+ vperm v11, v9, v6, v7
+#else
+ vperm v11, v6, v9, v7
+#endif
+ b L(compare)
+
+ .align 4
+L(nextload):
+ lvx v11, 0, r4
+L(compare):
+ vcmpequb. v7, v0, v11
+ beq cr6, L(nullchk3)
+ b L(trailcheck)
+
+ .align 4
+L(nullchk3):
+ clrldi r10, r3, 60
+ cmpdi cr7, r10, 0
+ beq cr7, L(nextload1)
+ /* Handle unaligned case. */
+ vor v4, v10, v10
+ vcmpequb. v7, v0, v4
+ beq cr6, L(nullchk4)
+ b L(retnull)
+
+ .align 4
+L(nullchk4):
+#ifdef __LITTLE_ENDIAN__
+ lvsr v7, 0, r3
+#else
+ lvsl v7, 0, r3
+#endif
+ addi r5, r3, 16
+ /* If r3 is unaligned, load another 16 bytes. */
+ lvx v10, 0, r5
+#ifdef __LITTLE_ENDIAN__
+ vperm v4, v10, v4, v7
+#else
+ vperm v4, v4, v10, v7
+#endif
+ b L(compare1)
+
+ .align 4
+L(nextload1):
+ lvx v4, 0, r3
+L(compare1):
+ vcmpequb. v7, v0, v4
+ beq cr6, L(nullchk5)
+ b L(retnull)
+
+ .align 4
+L(nullchk5):
+ /* Convert both v3 and v4 to lower. */
+ TOLOWER(v11)
+ /* If both are same, branch to secondmatch. */
+ blt cr6, L(secondmatch)
+ /* Continue the search. */
+ b L(begin)
+
+ .align 4
+L(trailcheck):
+ ld r10, __libc_tsd_LOCALE@got@tprel(r2)
+ add r11, r10, __libc_tsd_LOCALE@tls
+ ld r11, 0(r11)
+ ld r11, LOCALE_CTYPE_TOLOWER(r11)
+L(loop2):
+ lbz r5, 0(r3) /* Load byte from r3. */
+ lbz r6, 0(r4) /* Load next byte from r4. */
+ cmpdi cr7, r6, 0 /* Is it null? */
+ beq cr7, L(updater3)
+ cmpdi cr7, r5, 0 /* Is it null? */
+ beq cr7, L(retnull) /* If yes, return. */
+ addi r3, r3, 1
+ addi r4, r4, 1 /* Increment r4. */
+ sldi r10, r5, 2 /* Convert to lower case. */
+ lwzx r10, r11, r10
+ sldi r7, r6, 2 /* Convert to lower case. */
+ lwzx r7, r11, r7
+ cmpw cr7, r7, r10 /* Compare with byte from r4. */
+ bne cr7, L(begin)
+ b L(loop2)
+
+ .align 4
+L(shift8):
+ addi r8, r8, 7
+ b L(begin)
+ .align 4
+L(shift16):
+ addi r8, r8, 15
+ .align 4
+L(begin):
+ addi r8, r8, 1
+ mr r3, r8
+	/* When our iterations exceed ITERATIONS, fall back to the default.  */
+ addi r27, r27, 1
+ cmpdi cr7, r27, ITERATIONS
+ beq cr7, L(default)
+ mr r4, r30 /* Restore r4. */
+ b L(begin2)
+
+ /* Handling byte by byte. */
+ .align 4
+L(loop1):
+ mr r3, r8
+ addi r27, r27, 1
+ cmpdi cr7, r27, ITERATIONS
+ beq cr7, L(default)
+ mr r29, r8
+ srdi r4, r28, 8
+ /* Check if the first char is present. */
+ bl STRCHR
+ nop
+ mr r5, r3
+ mr r3, r29
+ mr r29, r5
+ sldi r4, r28, 56
+ srdi r4, r4, 56
+ bl STRCHR
+ nop
+ cmpdi cr7, r29, 0
+ beq cr7, L(nextpos)
+ cmpdi cr7, r3, 0
+ beq cr7, L(skipcheck1)
+ cmpw cr7, r3, r29
+ ble cr7, L(nextpos)
+	/* Move r3 to the first occurrence.  */
+L(skipcheck1):
+ mr r3, r29
+L(nextpos):
+ mr r29, r3
+ cmpdi cr7, r3, 0
+ ble cr7, L(retnull)
+L(bytebybyte):
+ ld r10, __libc_tsd_LOCALE@got@tprel(r2)
+ add r11, r10, __libc_tsd_LOCALE@tls
+ ld r11, 0(r11)
+ ld r11, LOCALE_CTYPE_TOLOWER(r11)
+ mr r4, r30 /* Restore r4. */
+ mr r8, r3 /* Save r3. */
+ addi r8, r8, 1
+
+L(loop):
+ addi r3, r3, 1
+ lbz r5, 0(r3) /* Load byte from r3. */
+ addi r4, r4, 1 /* Increment r4. */
+ lbz r6, 0(r4) /* Load next byte from r4. */
+ cmpdi cr7, r6, 0 /* Is it null? */
+ beq cr7, L(updater3)
+ cmpdi cr7, r5, 0 /* Is it null? */
+ beq cr7, L(retnull) /* If yes, return. */
+ sldi r10, r5, 2 /* Convert to lower case. */
+ lwzx r10, r11, r10
+ sldi r7, r6, 2 /* Convert to lower case. */
+ lwzx r7, r11, r7
+ cmpw cr7, r7, r10 /* Compare with byte from r4. */
+ bne cr7, L(loop1)
+ b L(loop)
+
+ /* Handling return values. */
+ .align 4
+L(updater3):
+	subf	r3, r31, r3	/* Subtract r31 (len of r4) from r3.  */
+ b L(end)
+
+ .align 4
+L(ret_r3):
+ mr r3, r29 /* Return point of match. */
+ b L(end)
+
+ .align 4
+L(retnull):
+ li r3, 0 /* Substring was not found. */
+ b L(end)
+
+ .align 4
+L(default):
+ mr r4, r30
+ bl __strcasestr_ppc
+ nop
+
+ .align 4
+L(end):
+ addi r1, r1, FRAMESIZE /* Restore stack pointer. */
+ cfi_adjust_cfa_offset(-FRAMESIZE)
+ ld r0, 16(r1) /* Restore the saved link register. */
+ ld r27, -40(r1)
+ ld r28, -32(r1)
+ ld r29, -24(r1) /* Restore callers save register r29. */
+ ld r30, -16(r1) /* Restore callers save register r30. */
+ ld r31, -8(r1) /* Restore callers save register r31. */
+ cfi_restore(lr)
+ cfi_restore(r27)
+ cfi_restore(r28)
+ cfi_restore(r29)
+ cfi_restore(r30)
+ cfi_restore(r31)
+	mtlr	r0		/* Restore the link register.  */
+ blr
+END (STRCASESTR)
+
+weak_alias (__strcasestr, strcasestr)
+libc_hidden_def (__strcasestr)
+libc_hidden_builtin_def (strcasestr)
diff --git a/sysdeps/powerpc/powerpc64/power8/strchr.S b/sysdeps/powerpc/powerpc64/power8/strchr.S
new file mode 100644
index 0000000000..c5e28d9c9e
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strchr.S
@@ -0,0 +1,377 @@
+/* Optimized strchr implementation for PowerPC64/POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+#ifdef USE_AS_STRCHRNUL
+# ifndef STRCHRNUL
+# define FUNC_NAME __strchrnul
+# else
+# define FUNC_NAME STRCHRNUL
+# endif
+#else
+# ifndef STRCHR
+# define FUNC_NAME strchr
+# else
+# define FUNC_NAME STRCHR
+# endif
+#endif /* !USE_AS_STRCHRNUL */
+
+/* int [r3] strchr (char *s [r3], int c [r4]) */
+/* TODO: change these to the actual instructions when the minimum required
+ binutils allows it. */
+#define MTVRD(v,r) .long (0x7c000167 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define MFVRD(r,v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define VBPERMQ(t,a,b) .long (0x1000054c \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+/* TODO: change this to .machine power8 when the minimum required binutils
+ allows it. */
+ .machine power7
+ENTRY_TOCLESS (FUNC_NAME)
+ CALL_MCOUNT 2
+ dcbt 0,r3
+ clrrdi r8,r3,3 /* Align the address to doubleword boundary. */
+ cmpdi cr7,r4,0
+ ld r12,0(r8) /* Load doubleword from memory. */
+ li r0,0 /* Doubleword with null chars to use
+ with cmpb. */
+
+ rlwinm r6,r3,3,26,28 /* Calculate padding. */
+
+ beq cr7,L(null_match)
+
+ /* Replicate byte to doubleword. */
+ insrdi r4,r4,8,48
+ insrdi r4,r4,16,32
+ insrdi r4,r4,32,0
+
+ /* Now r4 has a doubleword of c bytes and r0 has
+ a doubleword of null bytes. */
+
+ cmpb r10,r12,r4 /* Compare each byte against c byte. */
+ cmpb r11,r12,r0 /* Compare each byte against null byte. */
+
+ /* Move the doublewords left and right to discard the bits that are
+ not part of the string and bring them back as zeros. */
+#ifdef __LITTLE_ENDIAN__
+ srd r10,r10,r6
+ srd r11,r11,r6
+ sld r10,r10,r6
+ sld r11,r11,r6
+#else
+ sld r10,r10,r6
+ sld r11,r11,r6
+ srd r10,r10,r6
+ srd r11,r11,r6
+#endif
+ or r5,r10,r11 /* OR the results to speed things up. */
+ cmpdi cr7,r5,0 /* If r5 == 0, no c or null bytes
+ have been found. */
+ bne cr7,L(done)
+
+ mtcrf 0x01,r8
+
+ /* Are we now aligned to a doubleword boundary? If so, skip to
+ the main loop. Otherwise, go through the alignment code. */
+
+ bt 28,L(loop)
+
+ /* Handle WORD2 of pair. */
+ ldu r12,8(r8)
+ cmpb r10,r12,r4
+ cmpb r11,r12,r0
+ or r5,r10,r11
+ cmpdi cr7,r5,0
+ bne cr7,L(done)
+ b L(loop) /* We branch here (rather than falling through)
+ to skip the nops due to heavy alignment
+ of the loop below. */
+
+ .p2align 5
+L(loop):
+ /* Load two doublewords, compare and merge in a
+ single register for speed. This is an attempt
+ to speed up the null-checking process for bigger strings. */
+ ld r12,8(r8)
+ ldu r9,16(r8)
+ cmpb r10,r12,r4
+ cmpb r11,r12,r0
+ cmpb r6,r9,r4
+ cmpb r7,r9,r0
+ or r5,r10,r11
+ or r9,r6,r7
+ or r12,r5,r9
+ cmpdi cr7,r12,0
+ beq cr7,L(vector)
+ /* OK, one (or both) of the doublewords contains a c/null byte. Check
+ the first doubleword and decrement the address in case the first
+ doubleword really contains a c/null byte. */
+
+ cmpdi cr6,r5,0
+ addi r8,r8,-8
+ bne cr6,L(done)
+
+ /* The c/null byte must be in the second doubleword. Adjust the
+ address again and move the result of cmpb to r10 so we can calculate
+ the pointer. */
+
+ mr r10,r6
+ mr r11,r7
+ addi r8,r8,8
+#ifdef USE_AS_STRCHRNUL
+ mr r5, r9
+#endif
+ /* r10/r11 have the output of the cmpb instructions, that is,
+ 0xff in the same position as the c/null byte in the original
+ doubleword from the string. Use that to calculate the pointer. */
+L(done):
+#ifdef USE_AS_STRCHRNUL
+ mr r10, r5
+#endif
+#ifdef __LITTLE_ENDIAN__
+ addi r3,r10,-1
+ andc r3,r3,r10
+ popcntd r0,r3
+# ifndef USE_AS_STRCHRNUL
+ addi r4,r11,-1
+ andc r4,r4,r11
+ cmpld cr7,r3,r4
+ bgt cr7,L(no_match)
+# endif
+#else
+ cntlzd r0,r10 /* Count leading zeros before c matches. */
+# ifndef USE_AS_STRCHRNUL
+ cmpld cr7,r11,r10
+ bgt cr7,L(no_match)
+# endif
+#endif
+ srdi r0,r0,3 /* Convert leading zeros to bytes. */
+ add r3,r8,r0 /* Return address of the matching c byte
+ or null in case c was not found. */
+ blr
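Each doubleword iteration above asks one question: within these 8 bytes, does c appear, and if so does it appear no later than the terminating NUL? The assembly answers it with two cmpb masks, an OR, and a leading- or trailing-zero count; a plain C statement of the same decision, using an illustrative helper rather than the glibc code:

#include <stddef.h>

/* What one 8-byte step decides: set *stop when either c or the terminating
   NUL is inside these bytes, and return the address of c or NULL.  */
static const char *
scan_8_bytes (const char *p, unsigned char c, int *stop)
{
  for (int i = 0; i < 8; i++)
    {
      if ((unsigned char) p[i] == c)
        {
          *stop = 1;
          return p + i;        /* c found at or before any NUL.  */
        }
      if (p[i] == '\0')
        {
          *stop = 1;
          return NULL;         /* NUL comes first: strchr returns NULL.  */
        }
    }
  *stop = 0;
  return NULL;                 /* Neither found: caller scans the next 8B.  */
}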
+
+	/* Check the first 32B in GPRs and move to the vectorized loop.  */
+ .p2align 5
+L(vector):
+ addi r3, r8, 8
+ andi. r10, r3, 31
+ bne cr0, L(loop)
+ vspltisb v0, 0
+ /* Precompute vbpermq constant. */
+ vspltisb v10, 3
+ lvsl v11, r0, r0
+ vslb v10, v11, v10
+ MTVRD(v1,r4)
+ li r5, 16
+ vspltb v1, v1, 7
+ /* Compare 32 bytes in each loop. */
+L(continue):
+ lvx v4, 0, r3
+ lvx v5, r3, r5
+ vcmpequb v2, v0, v4
+ vcmpequb v3, v0, v5
+ vcmpequb v6, v1, v4
+ vcmpequb v7, v1, v5
+ vor v8, v2, v3
+ vor v9, v6, v7
+ vor v11, v8, v9
+ vcmpequb. v11, v0, v11
+ addi r3, r3, 32
+ blt cr6, L(continue)
+ /* One (or both) of the quadwords contains a c/null byte. */
+ addi r3, r3, -32
+#ifndef USE_AS_STRCHRNUL
+ vcmpequb. v11, v0, v9
+ blt cr6, L(no_match)
+#endif
+ /* Permute the first bit of each byte into bits 48-63. */
+ VBPERMQ(v2, v2, v10)
+ VBPERMQ(v3, v3, v10)
+ VBPERMQ(v6, v6, v10)
+ VBPERMQ(v7, v7, v10)
+ /* Shift each component into its correct position for merging. */
+#ifdef __LITTLE_ENDIAN__
+ vsldoi v3, v3, v3, 2
+ vsldoi v7, v7, v7, 2
+#else
+ vsldoi v2, v2, v2, 6
+ vsldoi v3, v3, v3, 4
+ vsldoi v6, v6, v6, 6
+ vsldoi v7, v7, v7, 4
+#endif
+
+ /* Merge the results and move to a GPR. */
+ vor v1, v3, v2
+ vor v2, v6, v7
+ vor v4, v1, v2
+ MFVRD(r5, v4)
+#ifdef __LITTLE_ENDIAN__
+ addi r6, r5, -1
+ andc r6, r6, r5
+ popcntd r6, r6
+#else
+ cntlzd r6, r5 /* Count leading zeros before the match. */
+#endif
+	add	r3, r3, r6	/* Compute the final address.  */
+ /* Return NULL if null found before c. */
+#ifndef USE_AS_STRCHRNUL
+ lbz r4, 0(r3)
+ cmpdi cr7, r4, 0
+ beq cr7, L(no_match)
+#endif
+ blr
+
+#ifndef USE_AS_STRCHRNUL
+ .align 4
+L(no_match):
+ li r3,0
+ blr
+#endif
+
+/* We are here because strchr was called with a null byte. */
+ .align 4
+L(null_match):
+ /* r0 has a doubleword of null bytes. */
+
+ cmpb r5,r12,r0 /* Compare each byte against null bytes. */
+
+ /* Move the doublewords left and right to discard the bits that are
+ not part of the string and bring them back as zeros. */
+#ifdef __LITTLE_ENDIAN__
+ srd r5,r5,r6
+ sld r5,r5,r6
+#else
+ sld r5,r5,r6
+ srd r5,r5,r6
+#endif
+	cmpdi	cr7,r5,0	/* If r5 == 0, no c or null bytes
+ have been found. */
+ bne cr7,L(done_null)
+
+ mtcrf 0x01,r8
+
+ /* Are we now aligned to a quadword boundary? If so, skip to
+ the main loop. Otherwise, go through the alignment code. */
+
+ bt 28,L(loop_null)
+
+ /* Handle WORD2 of pair. */
+ ldu r12,8(r8)
+ cmpb r5,r12,r0
+ cmpdi cr7,r5,0
+ bne cr7,L(done_null)
+ b L(loop_null) /* We branch here (rather than falling through)
+ to skip the nops due to heavy alignment
+ of the loop below. */
+
+ /* Main loop to look for the end of the string. Since it's a
+ small loop (< 8 instructions), align it to 32-bytes. */
+ .p2align 5
+L(loop_null):
+ /* Load two doublewords, compare and merge in a
+ single register for speed. This is an attempt
+ to speed up the null-checking process for bigger strings. */
+ ld r12,8(r8)
+ ldu r11,16(r8)
+ cmpb r5,r12,r0
+ cmpb r10,r11,r0
+ or r6,r5,r10
+ cmpdi cr7,r6,0
+ beq cr7,L(vector1)
+
+ /* OK, one (or both) of the doublewords contains a null byte. Check
+ the first doubleword and decrement the address in case the first
+ doubleword really contains a null byte. */
+
+ cmpdi cr6,r5,0
+ addi r8,r8,-8
+ bne cr6,L(done_null)
+
+ /* The null byte must be in the second doubleword. Adjust the address
+ again and move the result of cmpb to r10 so we can calculate the
+ pointer. */
+
+ mr r5,r10
+ addi r8,r8,8
+
+ /* r5 has the output of the cmpb instruction, that is, it contains
+ 0xff in the same position as the null byte in the original
+ doubleword from the string. Use that to calculate the pointer. */
+L(done_null):
+#ifdef __LITTLE_ENDIAN__
+ addi r0,r5,-1
+ andc r0,r0,r5
+ popcntd r0,r0
+#else
+ cntlzd r0,r5 /* Count leading zeros before the match. */
+#endif
+ srdi r0,r0,3 /* Convert leading zeros to bytes. */
+ add r3,r8,r0 /* Return address of the matching null byte. */
+ blr
+ .p2align 5
+L(vector1):
+ addi r3, r8, 8
+ andi. r10, r3, 31
+ bne cr0, L(loop_null)
+ vspltisb v8, -1
+ vspltisb v0, 0
+ vspltisb v10, 3
+ lvsl v11, r0, r0
+ vslb v10, v11, v10
+ li r5, 16
+L(continue1):
+ lvx v4, 0, r3
+ lvx v5, r3, r5
+ vcmpequb v2, v0, v4
+ vcmpequb v3, v0, v5
+ vor v8, v2, v3
+ vcmpequb. v11, v0, v8
+ addi r3, r3, 32
+ blt cr6, L(continue1)
+ addi r3, r3, -32
+L(end1):
+ VBPERMQ(v2, v2, v10)
+ VBPERMQ(v3, v3, v10)
+ /* Shift each component into its correct position for merging. */
+#ifdef __LITTLE_ENDIAN__
+ vsldoi v3, v3, v3, 2
+#else
+ vsldoi v2, v2, v2, 6
+ vsldoi v3, v3, v3, 4
+#endif
+
+ /* Merge the results and move to a GPR. */
+ vor v4, v3, v2
+ MFVRD(r5, v4)
+#ifdef __LITTLE_ENDIAN__
+ addi r6, r5, -1
+ andc r6, r6, r5
+ popcntd r6, r6
+#else
+ cntlzd r6, r5 /* Count leading zeros before the match. */
+#endif
+	add	r3, r3, r6	/* Compute the final address.  */
+ blr
+END (FUNC_NAME)
+
+#ifndef USE_AS_STRCHRNUL
+weak_alias (strchr, index)
+libc_hidden_builtin_def (strchr)
+#endif
diff --git a/sysdeps/powerpc/powerpc64/power8/strchrnul.S b/sysdeps/powerpc/powerpc64/power8/strchrnul.S
new file mode 100644
index 0000000000..022ad67a6b
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strchrnul.S
@@ -0,0 +1,23 @@
+/* Optimized strchrnul implementation for PowerPC64/POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define USE_AS_STRCHRNUL 1
+#include <sysdeps/powerpc/powerpc64/power8/strchr.S>
+
+weak_alias (__strchrnul,strchrnul)
+libc_hidden_builtin_def (__strchrnul)
diff --git a/sysdeps/powerpc/powerpc64/power8/strcmp.S b/sysdeps/powerpc/powerpc64/power8/strcmp.S
index 4d6c477194..15e7351d1b 100644
--- a/sysdeps/powerpc/powerpc64/power8/strcmp.S
+++ b/sysdeps/powerpc/powerpc64/power8/strcmp.S
@@ -1,5 +1,5 @@
/* Optimized strcmp implementation for PowerPC64/POWER8.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,10 @@
#include <sysdep.h>
+#ifndef STRCMP
+# define STRCMP strcmp
+#endif
+
/* Implements the function
size_t [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
@@ -27,24 +31,24 @@
64K as default, the page cross handling assumes minimum page size of
4k. */
-EALIGN (strcmp, 4, 0)
+ENTRY_TOCLESS (STRCMP, 4)
li r0,0
- /* Check if [s1]+32 or [s2]+32 will cross a 4K page boundary using
+ /* Check if [s1]+16 or [s2]+16 will cross a 4K page boundary using
the code:
(((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
- with PAGE_SIZE being 4096 and ITER_SIZE begin 32. */
+     with PAGE_SIZE being 4096 and ITER_SIZE being 16.  */
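
The comparison this hunk tunes from 32 to 16 bytes is the usual page-cross guard: an unaligned ITER_SIZE-byte load is only safe when it cannot spill into the next 4K page. A minimal C sketch of the same test (names are illustrative, not glibc code):

    #include <stddef.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096
    #define ITER_SIZE 16   /* Bytes read per unaligned step after this change.  */

    /* Sketch of the rldicl/cmpldi pair: true when an unaligned
       ITER_SIZE-byte load starting at s could cross into the next page.  */
    static bool
    may_cross_page (const char *s)
    {
      return ((size_t) s % PAGE_SIZE) > (PAGE_SIZE - ITER_SIZE);
    }
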
rldicl r7,r3,0,52
rldicl r9,r4,0,52
- cmpldi cr7,r7,4096-32
+ cmpldi cr7,r7,4096-16
bgt cr7,L(pagecross_check)
- cmpldi cr5,r9,4096-32
+ cmpldi cr5,r9,4096-16
bgt cr5,L(pagecross_check)
- /* For short string up to 32 bytes, load both s1 and s2 using
+   /* For short strings up to 16 bytes, load both s1 and s2 using
unaligned dwords and compare. */
ld r8,0(r3)
ld r10,0(r4)
@@ -60,25 +64,11 @@ EALIGN (strcmp, 4, 0)
orc. r9,r12,r11
bne cr0,L(different_nocmpb)
- ld r8,16(r3)
- ld r10,16(r4)
- cmpb r12,r8,r0
- cmpb r11,r8,r10
- orc. r9,r12,r11
- bne cr0,L(different_nocmpb)
-
- ld r8,24(r3)
- ld r10,24(r4)
- cmpb r12,r8,r0
- cmpb r11,r8,r10
- orc. r9,r12,r11
- bne cr0,L(different_nocmpb)
-
- addi r7,r3,32
- addi r4,r4,32
+ addi r7,r3,16
+ addi r4,r4,16
L(align_8b):
- /* Now it has checked for first 32 bytes, align source1 to doubleword
+   /* Now that the first 16 bytes have been checked, align source1 to doubleword
and adjust source2 address. */
rldicl r9,r7,0,61 /* source1 alignment to doubleword */
subf r4,r9,r4 /* Adjust source2 address based on source1
@@ -253,5 +243,5 @@ L(pagecross_retdiff):
L(pagecross_nullfound):
li r3,0
b L(pagecross_retdiff)
-END (strcmp)
+END (STRCMP)
libc_hidden_builtin_def (strcmp)
diff --git a/sysdeps/powerpc/powerpc64/power8/strcpy.S b/sysdeps/powerpc/powerpc64/power8/strcpy.S
index 5130831c6a..956faf714f 100644
--- a/sysdeps/powerpc/powerpc64/power8/strcpy.S
+++ b/sysdeps/powerpc/powerpc64/power8/strcpy.S
@@ -1,5 +1,5 @@
/* Optimized strcpy/stpcpy implementation for PowerPC64/POWER8.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,10 +19,18 @@
#include <sysdep.h>
#ifdef USE_AS_STPCPY
-# define FUNC_NAME __stpcpy
+# ifndef STPCPY
+# define FUNC_NAME __stpcpy
+# else
+# define FUNC_NAME STPCPY
+# endif
#else
-# define FUNC_NAME strcpy
-#endif
+# ifndef STRCPY
+# define FUNC_NAME strcpy
+# else
+# define FUNC_NAME STRCPY
+# endif
+#endif /* !USE_AS_STPCPY */
/* Implements the function
@@ -39,8 +47,8 @@
64K as default, the page cross handling assumes minimum page size of
4k. */
- .machine power7
-EALIGN (FUNC_NAME, 4, 0)
+ .machine power8
+ENTRY_TOCLESS (FUNC_NAME, 4)
li r0,0 /* Doubleword with null chars to use
with cmpb. */
@@ -112,7 +120,7 @@ L(pagecross):
ldu r8, 8(r7)
L(loop_before):
- /* Save the two doublewords readed from source and align the source
+ /* Save the two doublewords read from source and align the source
to 16 bytes for the loop. */
mr r11,r3
std r12,0(r11)
@@ -121,7 +129,150 @@ L(loop_before):
rldicl r9,r4,0,60
subf r7,r9,r7
subf r11,r9,r11
- b L(loop_start)
+ /* Source is adjusted to 16B alignment and destination r11 is
+ also moved based on that adjustment. Now check if r11 is
+      also 16B aligned to move to the vectorized loop.  */
+ andi. r6, r11, 0xF
+ bne L(loop_start)
+
+ /* Prepare for the loop. */
+ subf r4, r9, r4 /* Adjust r4 based on alignment. */
+ li r7, 16 /* Load required offsets. */
+ li r8, 32
+ li r9, 48
+ vspltisb v0, 0
+ addi r4, r4, 16
+ /* Are we 64-byte aligned? If so, jump to the vectorized loop.
+ Else copy 16B till r4 is 64B aligned. */
+ andi. r6, r4, 63
+ beq L(qw_loop)
+
+ lvx v6, 0, r4 /* Load 16 bytes from memory. */
+ vcmpequb. v5, v0, v6 /* Check for null. */
+ bne cr6, L(qw_done)
+ stvx v6, 0, r11 /* Store 16 bytes. */
+ addi r4, r4, 16 /* Increment the address. */
+ addi r11, r11, 16
+ andi. r6, r4, 63
+ beq L(qw_loop)
+
+ lvx v6, 0, r4
+ vcmpequb. v5, v0, v6
+ bne cr6, L(qw_done)
+ stvx v6, 0, r11
+ addi r4, r4, 16
+ addi r11, r11, 16
+ andi. r6, r4, 63
+ beq L(qw_loop)
+
+ lvx v6, 0, r4
+ vcmpequb. v5, v0, v6
+ bne cr6, L(qw_done)
+ stvx v6, 0, r11
+ addi r4, r4, 16
+ addi r11, r11, 16
+
+ .align 4
+L(qw_loop):
+ lvx v1, r4, r0 /* Load 4 quadwords. */
+ lvx v2, r4, r7
+ lvx v3, r4, r8
+ lvx v4, r4, r9
+ vminub v5, v1, v2 /* Compare and merge into one VR for speed. */
+ vminub v8, v3, v4
+ vminub v7, v5, v8
+ vcmpequb. v7, v7, v0 /* Check for NULLs. */
+ bne cr6, L(qw_loop_done)
+ stvx v1, r11, r0 /* Store 4 quadwords. */
+ stvx v2, r11, r7
+ stvx v3, r11, r8
+ stvx v4, r11, r9
+ addi r4, r4, 64 /* Adjust address for the next iteration. */
+ addi r11, r11, 64 /* Adjust address for the next iteration. */
+
+ lvx v1, r4, r0 /* Load 4 quadwords. */
+ lvx v2, r4, r7
+ lvx v3, r4, r8
+ lvx v4, r4, r9
+ vminub v5, v1, v2 /* Compare and merge into one VR for speed. */
+ vminub v8, v3, v4
+ vminub v7, v5, v8
+ vcmpequb. v7, v7, v0 /* Check for NULLs. */
+ bne cr6, L(qw_loop_done)
+ stvx v1, r11, r0 /* Store 4 quadwords. */
+ stvx v2, r11, r7
+ stvx v3, r11, r8
+ stvx v4, r11, r9
+ addi r4, r4, 64 /* Adjust address for the next iteration. */
+ addi r11, r11, 64 /* Adjust address for the next iteration. */
+
+ lvx v1, r4, r0 /* Load 4 quadwords. */
+ lvx v2, r4, r7
+ lvx v3, r4, r8
+ lvx v4, r4, r9
+ vminub v5, v1, v2 /* Compare and merge into one VR for speed. */
+ vminub v8, v3, v4
+ vminub v7, v5, v8
+ vcmpequb. v7, v7, v0 /* Check for NULLs. */
+ bne cr6, L(qw_loop_done)
+ stvx v1, r11, r0 /* Store 4 quadwords. */
+ stvx v2, r11, r7
+ stvx v3, r11, r8
+ stvx v4, r11, r9
+ addi r4, r4, 64 /* Adjust address for the next iteration. */
+ addi r11, r11, 64 /* Adjust address for the next iteration. */
+ b L(qw_loop)
+
+ .align 4
+L(qw_loop_done):
+ /* Null found in one of the 4 loads. */
+ vcmpequb. v7, v1, v0
+ vor v6, v1, v1
+ bne cr6, L(qw_done)
+	/* Not on the first 16B, so store it.  */
+ stvx v1, r11, r0
+ addi r4, r4, 16
+ addi r11, r11, 16
+ vcmpequb. v7, v2, v0
+ vor v6, v2, v2
+ bne cr6, L(qw_done)
+	/* Not on the second 16B, so store it.  */
+ stvx v2, r11, r0
+ addi r4, r4, 16
+ addi r11, r11, 16
+ vcmpequb. v7, v3, v0
+ vor v6, v3, v3
+ bne cr6, L(qw_done)
+	/* Not on the third 16B, so store it.  */
+ stvx v6, r11, r0
+ addi r4, r4, 16
+ addi r11, r11, 16
+ vor v6, v4, v4
+
+ .align 4
+L(qw_done):
+ mr r7, r4
+ /* Move the result to GPR. */
+#ifdef __LITTLE_ENDIAN__
+ vsldoi v4, v6, v0, 8
+ mfvrd r12, v4
+#else
+ mfvrd r12, v6
+#endif
+ /* Check for null in the first 8 bytes. */
+ cmpb r10, r12, r0
+ cmpdi cr6, r10, 0
+ bne cr6, L(done2)
+ /* Null found in second doubleword. */
+#ifdef __LITTLE_ENDIAN__
+ mfvrd r6, v6
+#else
+ vsldoi v6, v6, v0, 8
+ mfvrd r6, v6
+#endif
+ cmpb r10, r6, r0
+ addi r7, r7, 8
+ b L(done2)
.align 5
L(loop):
diff --git a/sysdeps/powerpc/powerpc64/power8/strcspn.S b/sysdeps/powerpc/powerpc64/power8/strcspn.S
new file mode 100644
index 0000000000..c2d130e7db
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strcspn.S
@@ -0,0 +1,20 @@
+/* Optimized strcspn implementation for PowerPC64/POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define USE_AS_STRCSPN 1
+#include <sysdeps/powerpc/powerpc64/power8/strspn.S>
diff --git a/sysdeps/powerpc/powerpc64/power8/strlen.S b/sysdeps/powerpc/powerpc64/power8/strlen.S
new file mode 100644
index 0000000000..719b5c604c
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strlen.S
@@ -0,0 +1,290 @@
+/* Optimized strlen implementation for PowerPC64/POWER8 using a vectorized
+ loop.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* TODO: change these to the actual instructions when the minimum required
+ binutils allows it. */
+#define MFVRD(r,v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define VBPERMQ(t,a,b) .long (0x1000054c \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+
+/* int [r3] strlen (char *s [r3]) */
+
+#ifndef STRLEN
+# define STRLEN strlen
+#endif
+
+/* TODO: change this to .machine power8 when the minimum required binutils
+ allows it. */
+ .machine power7
+ENTRY_TOCLESS (STRLEN, 4)
+ CALL_MCOUNT 1
+ dcbt 0,r3
+ clrrdi r4,r3,3 /* Align the address to doubleword boundary. */
+ rlwinm r6,r3,3,26,28 /* Calculate padding. */
+ li r0,0 /* Doubleword with null chars to use
+ with cmpb. */
+ li r5,-1 /* MASK = 0xffffffffffffffff. */
+ ld r12,0(r4) /* Load doubleword from memory. */
+#ifdef __LITTLE_ENDIAN__
+ sld r5,r5,r6
+#else
+ srd r5,r5,r6 /* MASK = MASK >> padding. */
+#endif
+ orc r9,r12,r5 /* Mask bits that are not part of the string. */
+ cmpb r10,r9,r0 /* Check for null bytes in DWORD1. */
+ cmpdi cr7,r10,0 /* If r10 == 0, no null's have been found. */
+ bne cr7,L(done)
+
+ /* For shorter strings (< 64 bytes), we will not use vector registers,
+ as the overhead isn't worth it. So, let's use GPRs instead. This
+ will be done the same way as we do in the POWER7 implementation.
+ Let's see if we are aligned to a quadword boundary. If so, we can
+ jump to the first (non-vectorized) loop. Otherwise, we have to
+ handle the next DWORD first. */
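
The non-vector phase described above scans one aligned doubleword per step, using cmpb against a zero doubleword to detect a null byte. As a hedged portable analogue (cmpb has no direct C equivalent; the classic "has-zero-byte" bit trick below only detects presence, which is all this phase needs before the exact position is computed):

    #include <stdint.h>
    #include <string.h>

    /* Sketch: advance 8 bytes at a time until the current doubleword
       contains a null byte.  'p' is assumed to point into a
       null-terminated string and to be 8-byte aligned.  */
    static const char *
    first_doubleword_with_zero (const char *p)
    {
      for (;;)
        {
          uint64_t v;
          memcpy (&v, p, 8);                          /* ld  */
          uint64_t z = (v - 0x0101010101010101ULL) & ~v
                       & 0x8080808080808080ULL;       /* stands in for cmpb  */
          if (z != 0)                                 /* cmpdi/bne in the asm  */
            return p;
          p += 8;
        }
    }
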
+ mtcrf 0x01,r4
+ mr r9,r4
+ addi r9,r9,8
+ bt 28,L(align64)
+
+ /* Handle the next 8 bytes so we are aligned to a quadword
+ boundary. */
+ ldu r5,8(r4)
+ cmpb r10,r5,r0
+ cmpdi cr7,r10,0
+ addi r9,r9,8
+ bne cr7,L(done)
+
+L(align64):
+ /* Proceed to the old (POWER7) implementation, checking two doublewords
+      per iteration.  For the first 56 bytes, we will just check for null
+ characters. After that, we will also check if we are 64-byte aligned
+ so we can jump to the vectorized implementation. We will unroll
+ these loops to avoid excessive branching. */
+ ld r6,8(r4)
+ ldu r5,16(r4)
+ cmpb r10,r6,r0
+ cmpb r11,r5,r0
+ or r5,r10,r11
+ cmpdi cr7,r5,0
+ addi r9,r9,16
+ bne cr7,L(dword_zero)
+
+ ld r6,8(r4)
+ ldu r5,16(r4)
+ cmpb r10,r6,r0
+ cmpb r11,r5,r0
+ or r5,r10,r11
+ cmpdi cr7,r5,0
+ addi r9,r9,16
+ bne cr7,L(dword_zero)
+
+ ld r6,8(r4)
+ ldu r5,16(r4)
+ cmpb r10,r6,r0
+ cmpb r11,r5,r0
+ or r5,r10,r11
+ cmpdi cr7,r5,0
+ addi r9,r9,16
+ bne cr7,L(dword_zero)
+
+ /* Are we 64-byte aligned? If so, jump to the vectorized loop.
+ Note: aligning to 64-byte will necessarily slow down performance for
+ strings around 64 bytes in length due to the extra comparisons
+ required to check alignment for the vectorized loop. This is a
+ necessary tradeoff we are willing to take in order to speed up the
+ calculation for larger strings. */
+ andi. r10,r9,63
+ beq cr0,L(preloop)
+ ld r6,8(r4)
+ ldu r5,16(r4)
+ cmpb r10,r6,r0
+ cmpb r11,r5,r0
+ or r5,r10,r11
+ cmpdi cr7,r5,0
+ addi r9,r9,16
+ bne cr7,L(dword_zero)
+
+ andi. r10,r9,63
+ beq cr0,L(preloop)
+ ld r6,8(r4)
+ ldu r5,16(r4)
+ cmpb r10,r6,r0
+ cmpb r11,r5,r0
+ or r5,r10,r11
+ cmpdi cr7,r5,0
+ addi r9,r9,16
+ bne cr7,L(dword_zero)
+
+ andi. r10,r9,63
+ beq cr0,L(preloop)
+ ld r6,8(r4)
+ ldu r5,16(r4)
+ cmpb r10,r6,r0
+ cmpb r11,r5,r0
+ or r5,r10,r11
+ cmpdi cr7,r5,0
+ addi r9,r9,16
+
+ /* At this point, we are necessarily 64-byte aligned. If no zeroes were
+ found, jump to the vectorized loop. */
+ beq cr7,L(preloop)
+
+L(dword_zero):
+ /* OK, one (or both) of the doublewords contains a null byte. Check
+ the first doubleword and decrement the address in case the first
+ doubleword really contains a null byte. */
+
+ cmpdi cr6,r10,0
+ addi r4,r4,-8
+ bne cr6,L(done)
+
+ /* The null byte must be in the second doubleword. Adjust the address
+ again and move the result of cmpb to r10 so we can calculate the
+ length. */
+
+ mr r10,r11
+ addi r4,r4,8
+
+ /* If the null byte was found in the non-vectorized code, compute the
+ final length. r10 has the output of the cmpb instruction, that is,
+ it contains 0xff in the same position as the null byte in the
+ original doubleword from the string. Use that to calculate the
+ length. */
+L(done):
+#ifdef __LITTLE_ENDIAN__
+ addi r9, r10,-1 /* Form a mask from trailing zeros. */
+ andc r9, r9,r10
+ popcntd r0, r9 /* Count the bits in the mask. */
+#else
+ cntlzd r0,r10 /* Count leading zeros before the match. */
+#endif
+ subf r5,r3,r4
+ srdi r0,r0,3 /* Convert leading/trailing zeros to bytes. */
+ add r3,r5,r0 /* Compute final length. */
+ blr
+
+ /* Vectorized implementation starts here. */
+ .p2align 4
+L(preloop):
+ /* Set up for the loop. */
+ mr r4,r9
+ li r7, 16 /* Load required offsets. */
+ li r8, 32
+ li r9, 48
+ li r12, 8
+ vxor v0,v0,v0 /* VR with null chars to use with
+ vcmpequb. */
+
+ /* Main loop to look for the end of the string. We will read in
+ 64-byte chunks. Align it to 32 bytes and unroll it 3 times to
+ leverage the icache performance. */
+ .p2align 5
+L(loop):
+ lvx v1,r4,r0 /* Load 4 quadwords. */
+ lvx v2,r4,r7
+ lvx v3,r4,r8
+ lvx v4,r4,r9
+ vminub v5,v1,v2 /* Compare and merge into one VR for speed. */
+ vminub v6,v3,v4
+ vminub v7,v5,v6
+ vcmpequb. v7,v7,v0 /* Check for NULLs. */
+ addi r4,r4,64 /* Adjust address for the next iteration. */
+ bne cr6,L(vmx_zero)
+
+ lvx v1,r4,r0 /* Load 4 quadwords. */
+ lvx v2,r4,r7
+ lvx v3,r4,r8
+ lvx v4,r4,r9
+ vminub v5,v1,v2 /* Compare and merge into one VR for speed. */
+ vminub v6,v3,v4
+ vminub v7,v5,v6
+ vcmpequb. v7,v7,v0 /* Check for NULLs. */
+ addi r4,r4,64 /* Adjust address for the next iteration. */
+ bne cr6,L(vmx_zero)
+
+ lvx v1,r4,r0 /* Load 4 quadwords. */
+ lvx v2,r4,r7
+ lvx v3,r4,r8
+ lvx v4,r4,r9
+ vminub v5,v1,v2 /* Compare and merge into one VR for speed. */
+ vminub v6,v3,v4
+ vminub v7,v5,v6
+ vcmpequb. v7,v7,v0 /* Check for NULLs. */
+ addi r4,r4,64 /* Adjust address for the next iteration. */
+ beq cr6,L(loop)
+
+L(vmx_zero):
+ /* OK, we found a null byte. Let's look for it in the current 64-byte
+ block and mark it in its corresponding VR. */
+ vcmpequb v1,v1,v0
+ vcmpequb v2,v2,v0
+ vcmpequb v3,v3,v0
+ vcmpequb v4,v4,v0
+
+ /* We will now 'compress' the result into a single doubleword, so it
+ can be moved to a GPR for the final calculation. First, we
+ generate an appropriate mask for vbpermq, so we can permute bits into
+ the first halfword. */
+ vspltisb v10,3
+ lvsl v11,r0,r0
+ vslb v10,v11,v10
+
+ /* Permute the first bit of each byte into bits 48-63. */
+ VBPERMQ(v1,v1,v10)
+ VBPERMQ(v2,v2,v10)
+ VBPERMQ(v3,v3,v10)
+ VBPERMQ(v4,v4,v10)
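
The vbpermq step above, with the constant built from lvsl/vslb, gathers the most-significant bit of each byte of a vcmpequb result into a 16-bit mask, one bit per byte (a "movemask"). A rough scalar C sketch of that gathering (the bit ordering shown is illustrative; the real ordering depends on the permute constant and endianness):

    #include <stdint.h>

    /* Sketch: collect the top bit of each of 16 compare-result bytes
       into one 16-bit mask, one bit per byte.  */
    static uint16_t
    movemask16 (const uint8_t cmp[16])
    {
      uint16_t mask = 0;
      for (int i = 0; i < 16; i++)
        mask = (uint16_t) ((mask << 1) | (cmp[i] >> 7));
      return mask;
    }
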
+
+ /* Shift each component into its correct position for merging. */
+#ifdef __LITTLE_ENDIAN__
+ vsldoi v2,v2,v2,2
+ vsldoi v3,v3,v3,4
+ vsldoi v4,v4,v4,6
+#else
+ vsldoi v1,v1,v1,6
+ vsldoi v2,v2,v2,4
+ vsldoi v3,v3,v3,2
+#endif
+
+ /* Merge the results and move to a GPR. */
+ vor v1,v2,v1
+ vor v2,v3,v4
+ vor v4,v1,v2
+ MFVRD(r10,v4)
+
+	/* Adjust address to the beginning of the current 64-byte block.  */
+ addi r4,r4,-64
+
+#ifdef __LITTLE_ENDIAN__
+ addi r9, r10,-1 /* Form a mask from trailing zeros. */
+ andc r9, r9,r10
+ popcntd r0, r9 /* Count the bits in the mask. */
+#else
+ cntlzd r0,r10 /* Count leading zeros before the match. */
+#endif
+ subf r5,r3,r4
+ add r3,r5,r0 /* Compute final length. */
+ blr
+
+END (STRLEN)
+libc_hidden_builtin_def (strlen)
diff --git a/sysdeps/powerpc/powerpc64/power8/strncase.S b/sysdeps/powerpc/powerpc64/power8/strncase.S
new file mode 100644
index 0000000000..050b63ab91
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strncase.S
@@ -0,0 +1,20 @@
+/* Optimized strncasecmp implementation for POWER8.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define USE_AS_STRNCASECMP 1
+#include <sysdeps/powerpc/powerpc64/power8/strcasecmp.S>
diff --git a/sysdeps/powerpc/powerpc64/power8/strncmp.S b/sysdeps/powerpc/powerpc64/power8/strncmp.S
index 1ce9e3fc65..2eefa4a2ba 100644
--- a/sysdeps/powerpc/powerpc64/power8/strncmp.S
+++ b/sysdeps/powerpc/powerpc64/power8/strncmp.S
@@ -1,5 +1,5 @@
/* Optimized strncmp implementation for PowerPC64/POWER8.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,10 @@
#include <sysdep.h>
+#ifndef STRNCMP
+# define STRNCMP strncmp
+#endif
+
/* Implements the function
int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t [r5] n)
@@ -28,7 +32,7 @@
4k. */
.machine power7
-EALIGN (strncmp, 4, 0)
+ENTRY_TOCLESS (STRNCMP, 4)
/* Check if size is 0. */
mr. r10,r5
beq cr0,L(ret0)
@@ -319,5 +323,5 @@ L(byte_ne_4):
extsw r10,r9
mr r9,r8
b L(size_reached_1)
-END(strncmp)
+END(STRNCMP)
libc_hidden_builtin_def(strncmp)
diff --git a/sysdeps/powerpc/powerpc64/power8/strncpy.S b/sysdeps/powerpc/powerpc64/power8/strncpy.S
index 17c3afb5fe..e8c5c71f87 100644
--- a/sysdeps/powerpc/powerpc64/power8/strncpy.S
+++ b/sysdeps/powerpc/powerpc64/power8/strncpy.S
@@ -1,5 +1,5 @@
/* Optimized strncpy/stpncpy implementation for PowerPC64/POWER8.
- Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Copyright (C) 2015-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,11 +19,32 @@
#include <sysdep.h>
#ifdef USE_AS_STPNCPY
-# define FUNC_NAME __stpncpy
+# ifndef STPNCPY
+# define FUNC_NAME __stpncpy
+# else
+# define FUNC_NAME STPNCPY
+# endif
#else
-# define FUNC_NAME strncpy
+# ifndef STRNCPY
+# define FUNC_NAME strncpy
+# else
+# define FUNC_NAME STRNCPY
+# endif
+#endif /* !USE_AS_STPNCPY */
+
+#ifndef MEMSET
+/* For builds without IFUNC support, local calls should be made to internal
+ GLIBC symbol (created by libc_hidden_builtin_def). */
+# ifdef SHARED
+# define MEMSET_is_local
+# define MEMSET __GI_memset
+# else
+# define MEMSET memset
+# endif
#endif
+#define FRAMESIZE (FRAME_MIN_SIZE+48)
+
/* Implements the function
char * [r3] strncpy (char *dest [r3], const char *src [r4], size_t n [r5])
@@ -40,7 +61,12 @@
4k. */
.machine power7
-EALIGN (FUNC_NAME, 4, 0)
+#ifdef MEMSET_is_local
+ENTRY_TOCLESS (FUNC_NAME, 4)
+#else
+ENTRY (FUNC_NAME, 4)
+#endif
+ CALL_MCOUNT 3
/* Check if the [src]+15 will cross a 4K page by checking if the bit
indicating the page size changes. Basically:
@@ -54,8 +80,7 @@ EALIGN (FUNC_NAME, 4, 0)
addi r10,r4,16
rlwinm r9,r4,0,19,19
- /* Since it is a leaf function, save some non-volatile registers on the
- protected/red zone. */
+ /* Save some non-volatile registers on the stack. */
std r26,-48(r1)
std r27,-40(r1)
@@ -69,6 +94,14 @@ EALIGN (FUNC_NAME, 4, 0)
std r30,-16(r1)
std r31,-8(r1)
+ /* Update CFI. */
+ cfi_offset(r26, -48)
+ cfi_offset(r27, -40)
+ cfi_offset(r28, -32)
+ cfi_offset(r29, -24)
+ cfi_offset(r30, -16)
+ cfi_offset(r31, -8)
+
beq cr7,L(unaligned_lt_16)
rldicl r9,r4,0,61
subfic r8,r9,8
@@ -180,79 +213,66 @@ L(short_path_loop_end):
ld r31,-8(r1)
blr
- /* This code pads the remainder dest with NULL bytes. The algorithm
- calculate the remanining size and issues a doubleword unrolled
- loops followed by a byte a byte set. */
+ /* This code pads the remainder of dest with NULL bytes. The algorithm
+ calculates the remaining size and calls memset. */
.align 4
L(zero_pad_start):
mr r5,r10
mr r9,r6
L(zero_pad_start_1):
- srdi. r8,r5,r3
- mr r10,r9
-#ifdef USE_AS_STPNCPY
- mr r3,r9
+ /* At this point:
+ - r5 holds the number of bytes that still have to be written to
+ dest.
+ - r9 points to the position, in dest, where the first null byte
+ will be written.
+ The above statements are true both when control reaches this label
+ from a branch or when falling through the previous lines. */
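
In C terms, the state described above is simply the zero-padding step of strncpy/stpncpy, which this hunk now delegates to memset instead of an unrolled store loop. A hedged sketch of the semantics (the parameter names mirror r9 and r5 above; this is not the actual glibc helper):

    #include <string.h>

    /* Sketch: pad the tail of the destination with zeros and pick the
       right return value.  'pad' is where the first null goes (r9) and
       'remaining' is how many bytes are left to write (r5).  */
    static char *
    zero_pad (char *dest, char *pad, size_t remaining, int is_stpncpy)
    {
      memset (pad, 0, remaining);       /* bl MEMSET  */
      /* strncpy returns dest; stpncpy returns a pointer to the first
         padding byte, which is also what memset returns.  */
      return is_stpncpy ? pad : dest;
    }
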
+#ifndef USE_AS_STPNCPY
+ mr r30,r3 /* Save the return value of strncpy. */
+#endif
+ /* Prepare the call to memset. */
+ mr r3,r9 /* Pointer to the area to be zero-filled. */
+ li r4,0 /* Byte to be written (zero). */
+
+ /* We delayed the creation of the stack frame, as well as the saving of
+ the link register, because only at this point, we are sure that
+ doing so is actually needed. */
+
+ /* Save the link register. */
+ mflr r0
+ std r0,16(r1)
+
+ /* Create the stack frame. */
+ stdu r1,-FRAMESIZE(r1)
+ cfi_adjust_cfa_offset(FRAMESIZE)
+ cfi_offset(lr, 16)
+
+ bl MEMSET
+#ifndef MEMSET_is_local
+ nop
#endif
- beq- cr0,L(zero_pad_loop_b_start)
- cmpldi cr7,r8,1
- li cr7,0
- std r7,0(r9)
- beq cr7,L(zero_pad_loop_b_prepare)
- addic. r8,r8,-2
- addi r10,r9,r16
- std r7,8(r9)
- beq cr0,L(zero_pad_loop_dw_2)
- std r7,16(r9)
- li r9,0
- b L(zero_pad_loop_dw_1)
-
- .align 4
-L(zero_pad_loop_dw):
- addi r10,r10,16
- std r9,-8(r10)
- beq cr0,L(zero_pad_loop_dw_2)
- std r9,0(r10)
-L(zero_pad_loop_dw_1):
- cmpldi cr7,r8,1
- std r9,0(r10)
- addic. r8,r8,-2
- bne cr7,L(zero_pad_loop_dw)
- addi r10,r10,8
-L(zero_pad_loop_dw_2):
- rldicl r5,r5,0,61
-L(zero_pad_loop_b_start):
- cmpdi cr7,r5,0
- addi r5,r5,-1
- addi r9,r10,-1
- add r10,r10,5
- subf r10,r9,r10
- li r8,0
- beq- cr7,L(short_path_loop_end)
-
- /* Write remaining 1-8 bytes. */
- .align 4
- addi r9,r9,1
- mtocrf 0x1,r10
- bf 29,4f
- stw r8,0(r9)
- addi r9,r9,4
- .align 4
-4: bf 30,2f
- sth r8,0(r9)
- addi r9,r9,2
+ ld r0,FRAMESIZE+16(r1)
- .align 4
-2: bf 31,1f
- stb r8,0(r9)
+#ifndef USE_AS_STPNCPY
+ mr r3,r30 /* Restore the return value of strncpy, i.e.:
+ dest. For stpncpy, the return value is the
+ same as return value of memset. */
+#endif
- /* Restore non-volatile registers. */
-1: ld r26,-48(r1)
- ld r27,-40(r1)
- ld r28,-32(r1)
- ld r29,-24(r1)
- ld r30,-16(r1)
- ld r31,-8(r1)
+ /* Restore non-volatile registers and return. */
+ ld r26,FRAMESIZE-48(r1)
+ ld r27,FRAMESIZE-40(r1)
+ ld r28,FRAMESIZE-32(r1)
+ ld r29,FRAMESIZE-24(r1)
+ ld r30,FRAMESIZE-16(r1)
+ ld r31,FRAMESIZE-8(r1)
+ /* Restore the stack frame. */
+ addi r1,r1,FRAMESIZE
+ cfi_adjust_cfa_offset(-FRAMESIZE)
+ /* Restore the link register. */
+ mtlr r0
+ cfi_restore(lr)
blr
/* The common case where [src]+16 will not cross a 4K page boundary.
@@ -301,7 +321,7 @@ L(pagecross):
#endif
orc r9,r7,r9 /* Mask bits that are not part of the
string. */
- li cr7,0
+ li r7,0
cmpb r9,r9,r7 /* Check for null bytes in DWORD1. */
cmpdi cr7,r9,0
bne cr7,L(short_path_prepare_2)
@@ -312,14 +332,14 @@ L(pagecross):
/* For next checks we have aligned address, so we check for more
three doublewords to make sure we can read 16 unaligned bytes
to start the bulk copy with 16 aligned addresses. */
- ld cr7,8(r11)
+ ld r7,8(r11)
cmpb r9,r7,r9
cmpdi cr7,r9,0
bne cr7,L(short_path_prepare_2)
- addi cr7,r8,-8
+ addi r7,r8,-8
cmpldi cr7,r7,8
ble cr7,L(short_path_prepare_2)
- ld cr7,16(r11)
+ ld r7,16(r11)
cmpb r9,r7,r9
cmpdi cr7,r9,0
bne cr7,L(short_path_prepare_2)
@@ -443,18 +463,12 @@ L(short_path_prepare_2_3):
mr r4,r28
mr r9,r29
b L(short_path_2)
-L(zero_pad_loop_b_prepare):
- addi r10,r9,8
- rldicl r5,r5,0,61
- b L(zero_pad_loop_b_start)
L(zero_pad_start_prepare_1):
mr r5,r6
mr r9,r8
b L(zero_pad_start_1)
END (FUNC_NAME)
-#ifdef USE_AS_STPNCPY
-libc_hidden_def (__stpncpy)
-#else
+#ifndef USE_AS_STPNCPY
libc_hidden_builtin_def (strncpy)
#endif
diff --git a/sysdeps/powerpc/powerpc64/power8/strnlen.S b/sysdeps/powerpc/powerpc64/power8/strnlen.S
new file mode 100644
index 0000000000..a98dfba4bd
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strnlen.S
@@ -0,0 +1,425 @@
+/* Optimized strnlen implementation for POWER8 using a vmx loop.
+
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* The following heuristic is implemented:
+   1. Case maxlen <= 32: align the pointer to 8 bytes to loop through
+      reading doublewords.  Uses the POWER7 algorithm.
+   2. Case maxlen > 32: check for null bytes in the first 16 bytes using
+      unaligned accesses.  Return the length if found.  Otherwise:
+      2.1 Case maxlen < 64: deduct the bytes previously read, align
+          the pointer to 16 bytes and loop through reading quadwords
+          until null bytes are found or maxlen is reached.
+      2.2 Case maxlen >= 64: deduct the bytes previously read, align
+          the pointer to 64 bytes and set up a counter to loop through
+          reading in strides of 64 bytes.  If the loop finishes without
+          finding null bytes, process the remainder bytes by switching
+          to the loop in 2.1.  */
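
As a hedged control-flow sketch of that heuristic (each phase below is stood in for by a plain memchr-based byte scan so the sketch compiles; the real code uses cmpb doubleword scans, a quadword loop and a 64-byte VMX loop):

    #include <stddef.h>
    #include <string.h>

    static size_t
    scan_bytes (const char *s, size_t n)        /* stand-in for each phase  */
    {
      const char *p = memchr (s, '\0', n);
      return p ? (size_t) (p - s) : n;
    }

    size_t
    strnlen_sketch (const char *s, size_t maxlen)
    {
      if (maxlen <= 32)
        return scan_bytes (s, maxlen);                  /* case 1  */
      size_t head = scan_bytes (s, 16);                 /* case 2: first 16B  */
      if (head < 16)
        return head;
      if (maxlen < 64)                                  /* case 2.1  */
        return 16 + scan_bytes (s + 16, maxlen - 16);
      /* case 2.2: 64B strides, then the 2.1 loop for the remainder.  */
      return 16 + scan_bytes (s + 16, maxlen - 16);
    }
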
+
+#include <sysdep.h>
+
+/* Define default page size to 4KB. */
+#define PAGE_SIZE 4096
+
+/* The following macros implement Power ISA v2.07 opcodes
+   that could not be used directly in this code, in order to keep
+   compatibility with older binutils versions.  */
+
+/* Move from vector register doubleword. */
+#define MFVRD(r,v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))
+
+/* Move to vector register doubleword. */
+#define MTVRD(v,r) .long (0x7c000167 | ((v)<<(32-11)) | ((r)<<(32-16)))
+
+/* Vector Bit Permute Quadword. */
+#define VBPERMQ(t,a,b) .long (0x1000054c \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+
+/* Vector Population Count Halfword. */
+#define VPOPCNTH(t,b) .long (0x10000743 | ((t)<<(32-11)) | ((b)<<(32-21)))
+
+/* Vector Count Leading Zeros Halfword. */
+#define VCLZH(t,b) .long (0x10000742 | ((t)<<(32-11)) | ((b)<<(32-21)))
+
+
+/* int [r3] strnlen (char *s [r3], size_t maxlen [r4]) */
+/* TODO: change to power8 when minimum required binutils allows it. */
+ .machine power7
+ENTRY_TOCLESS (__strnlen)
+ CALL_MCOUNT 2
+ dcbt 0,r3
+
+ cmpldi r4,32 /* Check if maxlen <= 32. */
+ ble L(small_range) /* If maxlen <= 32. */
+
+	/* The upcoming 16-byte unaligned accesses must not cross the page
+	   boundary; otherwise the processor throws a memory access error.
+	   Use the following check to verify there is room for such accesses:
+	     (((size_t) s) % PAGE_SIZE > (PAGE_SIZE - 16))
+	   If there is not enough room, switch to the code that handles
+	   the string when maxlen <= 32.  */
+ clrldi r10,r3,52
+ cmpldi cr7,r10,PAGE_SIZE-16
+ bgt cr7,L(small_range) /* If less than 16B of page end. */
+
+ /* Compute our permute constant r8. */
+ li r7,0
+	/* Compute a bpermd constant to move bit 0 of each byte into
+	   a halfword value, and count trailing zeros.  */
+#ifdef __LITTLE_ENDIAN__
+ li r8,0x2820
+ oris r8,r8,0x3830
+ sldi r8,r8,32
+ ori r8,r8,0x0800
+ oris r8,r8,0x1810
+#else
+ li r8,0x1018
+ oris r8,r8,0x0008
+ sldi r8,r8,32
+ ori r8,r8,0x3038
+ oris r8,r8,0x2028
+#endif
+
+ /* maxlen > 32. Optimistically check for null bytes in the first
+ 16 bytes of the string using unaligned accesses. */
+ ld r5,0(r3)
+ ld r6,8(r3)
+ cmpb r10,r7,r5 /* Check for null bytes in DWORD1. */
+ cmpb r11,r7,r6 /* Check for null bytes in DWORD2. */
+ or. r7,r10,r11
+ bne cr0, L(early_find) /* If found null bytes. */
+
+ /* At this point maxlen > 32 and null bytes were not found at first
+ 16 bytes. Prepare for loop using VMX. */
+
+ /* r3 == s, r4 == maxlen. All other volatile regs are unused now. */
+
+ addi r5,r3,16 /* Align up, or just add the 16B we
+ already checked. */
+ li r0,15
+ and r7,r5,r0 /* Find offset into 16B alignment. */
+ andc r5,r5,r0 /* Quadword align up s to the next quadword. */
+ li r0,16
+ subf r0,r7,r0
+ subf r4,r0,r4 /* Deduct unaligned bytes from maxlen. */
+
+
+ /* Compute offsets for vmx loads, and precompute the vbpermq
+ constants for both the 64B and 16B loops. */
+ li r6,0
+ vspltisb v0,0
+ vspltisb v10,3
+ lvsl v11,r6,r6
+ vslb v10,v11,v10
+
+ cmpldi r4,64 /* Check maxlen < 64. */
+ blt L(smaller) /* If maxlen < 64 */
+
+	/* In order to begin the 64B loop, the pointer needs to be 64-byte
+	   aligned.  So read quadwords until it is aligned or null bytes are
+	   found.  In the worst case it will be aligned after the fourth
+	   iteration, so unroll the loop to avoid checking a counter.  */
+ andi. r7,r5,63 /* Check if is 64 bytes aligned. */
+ beq cr0,L(preloop_64B) /* If it is already 64B aligned. */
+ lvx v1,r5,r6
+ vcmpequb. v1,v1,v0
+ addi r5,r5,16
+	addi	r4,r4,-16	/* Decrement maxlen by 16 bytes.  */
+ bne cr6,L(found_aligning64B) /* If found null bytes. */
+
+ /* Unroll 2x above code block until aligned or find null bytes. */
+ andi. r7,r5,63
+ beq cr0,L(preloop_64B)
+ lvx v1,r5,r6
+ vcmpequb. v1,v1,v0
+ addi r5,r5,16
+ addi r4,r4,-16
+ bne cr6,L(found_aligning64B)
+
+ andi. r7,r5,63
+ beq cr0,L(preloop_64B)
+ lvx v1,r5,r6
+ vcmpequb. v1,v1,v0
+ addi r5,r5,16
+ addi r4,r4,-16
+ bne cr6,L(found_aligning64B)
+
+	/* At this point it should be 64 bytes aligned.
+	   Prepare for the 64B loop.  */
+ .p2align 4
+L(preloop_64B):
+	/* Check if maxlen became less than 64, which disallows the
+	   64B loop.  If so, switch to the 16B loop code.  */
+ cmpldi r4,64 /* Check if maxlen < 64. */
+ blt L(smaller) /* If maxlen < 64. */
+ /* Set some constant values. */
+ li r7,16
+ li r10,32
+ li r9,48
+
+ /* Compute the number of 64 bytes iterations needed. */
+ srdi r11,r4,6 /* Compute loop count (maxlen / 64). */
+ andi. r4,r4,63 /* Set maxlen the remainder (maxlen % 64). */
+ mtctr r11 /* Move loop count to counter register. */
+
+ /* Handle maxlen > 64. Loop over the bytes in strides of 64B. */
+ .p2align 4
+L(loop_64B):
+ lvx v1,r5,r6 /* r5 is the pointer to s. */
+ lvx v2,r5,r7
+ lvx v3,r5,r10
+ lvx v4,r5,r9
+	/* Merge the four 16B vectors by taking the per-byte minimum, so any
+	   null byte propagates into v7, then check v7 for null bytes.  */
+ vminub v5,v1,v2
+ vminub v6,v3,v4
+ vminub v7,v5,v6
+ vcmpequb. v7,v7,v0 /* Check for null bytes. */
+	addi	  r5,r5,64	/* Advance pointer to the next iteration.  */
+ bne cr6,L(found_64B) /* If found null bytes. */
+ bdnz L(loop_64B) /* Continue the loop if count > 0. */
+
+/* Hit loop end without null match. So branch to handle the remainder. */
+
+	/* Prepare a 16B loop to handle two cases:
+	   1. If 32 < maxlen < 64.
+	   2. If maxlen >= 64 and the 64B loop ended with null bytes
+	      not found.  Handle the remainder bytes here.  */
+ .p2align 4
+L(smaller):
+ cmpldi r4,0 /* Check maxlen is zero. */
+ beq L(done) /* If maxlen is zero. */
+
+ /* Place rounded up number of qw's to check into a vmx
+ register, and use some vector tricks to minimize
+ branching. */
+ MTVRD(v7,r4) /* Copy maxlen from GPR to vector register. */
+ vspltisb v5,1
+ vspltisb v6,15
+ vspltb v2,v7,7
+ vaddubs v3,v5,v6
+
+#ifdef __LITTLE_ENDIAN__
+ vspltish v5,1 /* Compute 16 in each byte. */
+#endif
+
+	/* Loop in 16B aligned increments now.  */
+ .p2align 4
+L(loop_16B):
+ lvx v1,r5,r6 /* Load quadword into vector register. */
+ addi r5,r5,16 /* Increment address to next 16B block. */
+ vor v7,v2,v2 /* Save loop count (v2) into v7. */
+ vsububs v2,v2,v3 /* Subtract 16B from count, saturate at 0. */
+ vminub v4,v1,v2
+ vcmpequb. v4,v4,v0 /* Checking for null bytes. */
+ beq cr6,L(loop_16B) /* If null bytes not found. */
+
+ vcmpequb v1,v1,v0
+ VBPERMQ(v1,v1,v10)
+#ifdef __LITTLE_ENDIAN__
+ vsubuhm v2,v1,v5 /* Form a mask of trailing zeros. */
+ vandc v2,v2,v1
+ VPOPCNTH(v1,v2) /* Count of trailing zeros, 16 if none. */
+#else
+ VCLZH(v1,v1) /* Count the leading zeros, 16 if none. */
+#endif
+ /* Truncate to maximum allowable offset. */
+ vcmpgtub v2,v1,v7 /* Compare and truncate for matches beyond
+ maxlen. */
+ vsel v1,v1,v7,v2 /* 0-16 is now in byte 7. */
+
+ MFVRD(r0,v1)
+ addi r5,r5,-16 /* Undo speculative bump. */
+ extsb r0,r0 /* Clear whatever gunk is in the high 56b. */
+ add r5,r5,r0 /* Add the offset of whatever was found. */
+L(done):
+ subf r3,r3,r5 /* Length is equal to the offset of null byte
+ matched minus the pointer to s. */
+ blr /* Done. */
+
+ /* Handle case of maxlen > 64 and found null bytes in last block
+ of 64 bytes read. */
+ .p2align 4
+L(found_64B):
+ /* A zero was found. Reduce the result. */
+ vcmpequb v1,v1,v0
+ vcmpequb v2,v2,v0
+ vcmpequb v3,v3,v0
+ vcmpequb v4,v4,v0
+
+ /* Permute the first bit of each byte into bits 48-63. */
+ VBPERMQ(v1,v1,v10)
+ VBPERMQ(v2,v2,v10)
+ VBPERMQ(v3,v3,v10)
+ VBPERMQ(v4,v4,v10)
+
+ /* Shift each component into its correct position for merging. */
+#ifdef __LITTLE_ENDIAN__
+ vsldoi v2,v2,v2,2
+ vsldoi v3,v3,v3,4
+ vsldoi v4,v4,v4,6
+#else
+ vsldoi v1,v1,v1,6
+ vsldoi v2,v2,v2,4
+ vsldoi v3,v3,v3,2
+#endif
+
+ /* Merge the results and move to a GPR. */
+ vor v1,v2,v1
+ vor v2,v3,v4
+ vor v4,v1,v2
+
+ /* Adjust address to the start of the current 64B block. */
+ addi r5,r5,-64
+
+ MFVRD(r10,v4)
+#ifdef __LITTLE_ENDIAN__
+ addi r9,r10,-1 /* Form a mask from trailing zeros. */
+ andc r9,r9,r10
+ popcntd r0,r9 /* Count the bits in the mask. */
+#else
+ cntlzd r0,r10 /* Count leading zeros before the match. */
+#endif
+ subf r5,r3,r5
+ add r3,r5,r0 /* Compute final length. */
+ blr /* Done. */
+
+ /* Handle case where null bytes were found while aligning
+ as a preparation for the 64B loop. */
+ .p2align 4
+L(found_aligning64B):
+ VBPERMQ(v1,v1,v10)
+#ifdef __LITTLE_ENDIAN__
+ MFVRD(r10,v1)
+ addi r9,r10,-1 /* Form a mask from trailing zeros. */
+ andc r9,r9,r10
+ popcntd r0,r9 /* Count the bits in the mask. */
+#else
+ vsldoi v1,v1,v1,6
+ MFVRD(r10,v1)
+ cntlzd r0,r10 /* Count leading zeros before the match. */
+#endif
+ addi r5,r5,-16 /* Adjust address to offset of last 16 bytes
+ read. */
+	/* The length is the address of the last 16 bytes read minus the
+	   pointer to s, plus the number of bytes before the match.  */
+ subf r5,r3,r5
+ add r3,r5,r0
+ blr /* Done. */
+
+	/* Handle the case of maxlen > 32 where null bytes were found within
+	   the first 16 bytes of s.  */
+ .p2align 4
+L(early_find):
+ bpermd r5,r8,r10 /* r8 contains the bit permute constants. */
+ bpermd r6,r8,r11
+ sldi r5,r5,8
+ or r5,r5,r6 /* r5 should hold a 16B mask of
+ a potential 0. */
+ cntlzd r5,r5 /* Count leading zeros. */
+ addi r3,r5,-48 /* Deduct the 48 leading zeros always
+ present. */
+ blr /* Done. */
+
+ /* Handle case of maxlen <= 32. Use the POWER7 algorithm. */
+ .p2align 4
+L(small_range):
+ clrrdi r8,r3,3 /* Align the pointer to 8B. */
+ li r0,0
+ /* Register's content at this point:
+	/* Registers' contents at this point:
+ r7 == last acceptable address. */
+ cmpldi r4,0 /* Check if maxlen is zero. */
+ beq L(end_max) /* If maxlen is zero. */
+
+ /* Calculate the last acceptable address and check for possible
+	   addition overflow by using saturated math:
+ r7 = r3 + r4
+ r7 |= -(r7 < x) */
+ add r7,r3,r4
+ subfc r6,r3,r7
+ subfe r9,r9,r9
+ extsw r6,r9
+ or r7,r7,r6
+ addi r7,r7,-1
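
The subfc/subfe/or sequence above is a branchless saturating add: it computes s + maxlen and forces the result to all-ones if the addition wrapped, exactly as the r7 |= -(r7 < x) formula in the comment describes. A minimal C sketch (illustrative only):

    #include <stdint.h>

    /* Sketch: end = s + maxlen, saturated to UINT64_MAX on overflow, so
       the "last acceptable address" comparison can never wrap around.  */
    static uint64_t
    saturated_add (uint64_t s, uint64_t maxlen)
    {
      uint64_t end = s + maxlen;
      end |= -(uint64_t) (end < s);   /* all-ones mask if it wrapped  */
      return end;
    }
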
+
+ clrrdi r7,r7,3 /* Align to 8B address of last
+ acceptable address. */
+
+ rlwinm r6,r3,3,26,28 /* Calculate padding. */
+ ld r12,0(r8) /* Load aligned doubleword. */
+ cmpb r10,r12,r0 /* Check for null bytes. */
+#ifdef __LITTLE_ENDIAN__
+ srd r10,r10,r6
+ sld r10,r10,r6
+#else
+ sld r10,r10,r6
+ srd r10,r10,r6
+#endif /* __LITTLE_ENDIAN__ */
+ cmpldi cr7,r10,0
+ bne cr7,L(done_small) /* If found null byte. */
+
+ cmpld r8,r7 /* Check if reached maxlen. */
+ beq L(end_max) /* If reached maxlen. */
+
+	/* Still handling the case of maxlen <= 32.  Read aligned doublewords
+	   until null bytes are found or maxlen is reached.  */
+ .p2align 4
+L(loop_small):
+ ldu r12,8(r8) /* Load next doubleword and update r8. */
+ cmpb r10,r12,r0 /* Check for null bytes. */
+ cmpldi cr6,r10,0
+ bne cr6,L(done_small) /* If found null bytes. */
+ cmpld r8,r7 /* Check if reached maxlen. */
+ bne L(loop_small) /* If it has more bytes to read. */
+ mr r3,r4 /* Reached maxlen with null bytes not found.
+ Length is equal to maxlen. */
+ blr /* Done. */
+
+ /* Still handling case of maxlen <= 32. Found null bytes.
+ Registers: r10 == match bits within doubleword, r8 == address of
+ last doubleword read, r3 == pointer to s, r4 == maxlen. */
+ .p2align 4
+L(done_small):
+#ifdef __LITTLE_ENDIAN__
+ /* Count trailing zeros. */
+ addi r0,r10,-1
+ andc r0,r0,r10
+ popcntd r0,r0
+#else
+ cntlzd r0,r10 /* Count leading zeros before the match. */
+#endif
+ sub r3,r8,r3 /* Calculate total of bytes before the match. */
+ srdi r0,r0,3 /* Convert leading/trailing zeros to bytes. */
+ add r3,r3,r0 /* Length until the match. */
+ cmpld r3,r4 /* Check length is greater than maxlen. */
+ blelr
+ mr r3,r4 /* If length is greater than maxlen, return
+ maxlen. */
+ blr
+
+ /* Handle case of reached maxlen with null bytes not found. */
+ .p2align 4
+L(end_max):
+ mr r3,r4 /* Length is equal to maxlen. */
+ blr /* Done. */
+
+
+END (__strnlen)
+libc_hidden_def (__strnlen)
+weak_alias (__strnlen, strnlen)
+libc_hidden_def (strnlen)
diff --git a/sysdeps/powerpc/powerpc64/power8/strrchr.S b/sysdeps/powerpc/powerpc64/power8/strrchr.S
new file mode 100644
index 0000000000..6ff8a528b6
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strrchr.S
@@ -0,0 +1,468 @@
+/* Optimized strrchr implementation for PowerPC64/POWER8 using cmpb insn.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* char *[r3] strrchr (char *s [r3], int c [r4]) */
+/* TODO: change these to the actual instructions when the minimum required
+ binutils allows it. */
+#define MTVRD(v,r) .long (0x7c000167 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define MFVRD(r,v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define VBPERMQ(t,a,b) .long (0x1000054c \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+#define VCLZD(r,v) .long (0x100007c2 | ((r)<<(32-11)) | ((v)<<(32-21)))
+#define VPOPCNTD(r,v) .long (0x100007c3 | ((r)<<(32-11)) | ((v)<<(32-21)))
+#define VADDUQM(t,a,b) .long (0x10000100 \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+#ifdef __LITTLE_ENDIAN__
+/* Find the match position from v6 and place result in r6. */
+# define CALCULATE_MATCH() \
+ VBPERMQ(v6, v6, v10); \
+ vsldoi v6, v6, v6, 6; \
+ MFVRD(r7, v6); \
+ cntlzd r6, r7; \
+ subfic r6, r6, 15;
+/*
+ * Find the first null position to mask bytes after null.
+ * (reg): vcmpequb result: v2 for 1st qw v3 for 2nd qw.
+ * Result placed at v2.
+ */
+# define FIND_NULL_POS(reg) \
+ vspltisb v11, -1; \
+ VADDUQM(v11, reg, v11); \
+ vandc v11, v11, reg; \
+ VPOPCNTD(v2, v11); \
+ vspltb v11, v2, 15; \
+ vcmpequb. v11, v11, v9; \
+ blt cr6, 1f; \
+ vsldoi v9, v0, v9, 1; \
+ vslo v2, v2, v9; \
+1: \
+ vsumsws v2, v2, v0;
+#else
+# define CALCULATE_MATCH() \
+ VBPERMQ(v6, v6, v10); \
+ MFVRD(r7, v6); \
+ addi r6, r7, -1; \
+ andc r6, r6, r7; \
+ popcntd r6, r6; \
+ subfic r6, r6, 15;
+# define FIND_NULL_POS(reg) \
+ VCLZD(v2, reg); \
+ vspltb v11, v2, 7; \
+ vcmpequb. v11, v11, v9; \
+ blt cr6, 1f; \
+ vsldoi v9, v0, v9, 1; \
+ vsro v2, v2, v9; \
+1: \
+ vsumsws v2, v2, v0;
+#endif /* !__LITTLE_ENDIAN__ */
+
+#ifndef STRRCHR
+# define STRRCHR strrchr
+#endif
+ .machine power7
+ENTRY_TOCLESS (STRRCHR)
+ CALL_MCOUNT 2
+ dcbt 0,r3
+ clrrdi r8,r3,3 /* Align the address to doubleword boundary. */
+ cmpdi cr7,r4,0
+ ld r12,0(r8) /* Load doubleword from memory. */
+	li	r9,0	     /* Used to store last occurrence.  */
+ li r0,0 /* Doubleword with null chars to use
+ with cmpb. */
+
+ rlwinm r6,r3,3,26,28 /* Calculate padding. */
+
+ beq cr7,L(null_match)
+
+ /* Replicate byte to doubleword. */
+ insrdi r4,r4,8,48
+ insrdi r4,r4,16,32
+ insrdi r4,r4,32,0
+
+	/* r4 is changed now.  If the caller passed in more than a char,
+	   the replicated value may have become zero, so check for null again.  */
+ cmpdi cr7,r4,0
+ beq cr7,L(null_match)
+ /* Now r4 has a doubleword of c bytes and r0 has
+ a doubleword of null bytes. */
+
+ cmpb r10,r12,r4 /* Compare each byte against c byte. */
+ cmpb r11,r12,r0 /* Compare each byte against null byte. */
+
+ /* Move the doublewords left and right to discard the bits that are
+ not part of the string and bring them back as zeros. */
+#ifdef __LITTLE_ENDIAN__
+ srd r10,r10,r6
+ srd r11,r11,r6
+ sld r10,r10,r6
+ sld r11,r11,r6
+#else
+ sld r10,r10,r6
+ sld r11,r11,r6
+ srd r10,r10,r6
+ srd r11,r11,r6
+#endif
+ or r5,r10,r11 /* OR the results to speed things up. */
+ cmpdi cr7,r5,0 /* If r5 == 0, no c or null bytes
+ have been found. */
+ bne cr7,L(done)
+
+L(align):
+ andi. r12, r8, 15
+
+	/* Are we now aligned to a quadword boundary?  If so, skip to
+ the main loop. Otherwise, go through the alignment code. */
+
+ bne cr0, L(loop)
+
+ /* Handle WORD2 of pair. */
+ ldu r12,8(r8)
+ cmpb r10,r12,r4
+ cmpb r11,r12,r0
+ or r5,r10,r11
+ cmpdi cr7,r5,0
+ bne cr7,L(done)
+ b L(loop) /* We branch here (rather than falling through)
+ to skip the nops due to heavy alignment
+ of the loop below. */
+ .p2align 5
+L(loop):
+ /* Load two doublewords, compare and merge in a
+ single register for speed. This is an attempt
+ to speed up the null-checking process for bigger strings. */
+ ld r12,8(r8)
+ ldu r7,16(r8)
+ cmpb r10,r12,r4
+ cmpb r11,r12,r0
+ cmpb r6,r7,r4
+ cmpb r7,r7,r0
+ or r12,r10,r11
+ or r5,r6,r7
+ or r5,r12,r5
+ cmpdi cr7,r5,0
+ beq cr7,L(vector)
+
+ /* OK, one (or both) of the doublewords contains a c/null byte. Check
+ the first doubleword and decrement the address in case the first
+ doubleword really contains a c/null byte. */
+ cmpdi cr6,r12,0
+ addi r8,r8,-8
+ bne cr6,L(done)
+
+ /* The c/null byte must be in the second doubleword. Adjust the
+ address again and move the result of cmpb to r10 so we can calculate
+ the pointer. */
+
+ mr r10,r6
+ mr r11,r7
+ addi r8,r8,8
+
+ /* r10/r11 have the output of the cmpb instructions, that is,
+ 0xff in the same position as the c/null byte in the original
+ doubleword from the string. Use that to calculate the pointer. */
+
+L(done):
+	/* If there is more than one 0xff in r11, find the position of the
+	   first 0xff in r11 and fill r10 with 0 from that position onward.  */
+ cmpdi cr7,r11,0
+ beq cr7,L(no_null)
+#ifdef __LITTLE_ENDIAN__
+ addi r3,r11,-1
+ andc r3,r3,r11
+ popcntd r0,r3
+#else
+ cntlzd r0,r11
+#endif
+ subfic r0,r0,63
+ li r6,-1
+#ifdef __LITTLE_ENDIAN__
+ srd r0,r6,r0
+#else
+ sld r0,r6,r0
+#endif
+ and r10,r0,r10
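
The masking above keeps only the c matches that occur at or before the first null byte, so matches read past the end of the string are discarded before the last-match position is taken. A hedged little-endian C sketch of the same idea (the big-endian path mirrors it with leading-zero counts; the function name is illustrative):

    #include <stdint.h>

    /* Sketch: c_match = cmpb(s, c) (r10), null_match = cmpb(s, 0) (r11).
       Clear the bits of the c-match word that lie beyond the first null
       byte.  LE layout: the first string byte sits in the least
       significant bits.  */
    static uint64_t
    drop_matches_after_null_le (uint64_t c_match, uint64_t null_match)
    {
      if (null_match == 0)
        return c_match;                              /* L(no_null)  */
      uint64_t first = null_match & -null_match;     /* lowest set bit  */
      uint64_t keep = (first << 1) - 1;              /* bits up to that bit  */
      return c_match & keep;                         /* and r10,r0,r10  */
    }
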
+L(no_null):
+#ifdef __LITTLE_ENDIAN__
+ cntlzd r0,r10 /* Count leading zeros before c matches. */
+ addi r3,r10,-1
+ andc r3,r3,r10
+ addi r10,r11,-1
+ andc r10,r10,r11
+ cmpld cr7,r3,r10
+ bgt cr7,L(no_match)
+#else
+ addi r3,r10,-1 /* Count trailing zeros before c matches. */
+ andc r3,r3,r10
+ popcntd r0,r3
+ cmpld cr7,r11,r10
+ bgt cr7,L(no_match)
+#endif
+ srdi r0,r0,3 /* Convert trailing zeros to bytes. */
+ subfic r0,r0,7
+ add r9,r8,r0 /* Return address of the matching c byte
+ or null in case c was not found. */
+ li r0,0
+ cmpdi cr7,r11,0 /* If r11 == 0, no null's have been found. */
+ beq cr7,L(align)
+
+ .align 4
+L(no_match):
+ mr r3,r9
+ blr
+
+/* Check the first 32B in GPRs and move to the vectorized loop.  */
+ .p2align 5
+L(vector):
+ addi r3, r8, 8
+ /* Make sure 32B aligned. */
+ andi. r10, r3, 31
+ bne cr0, L(loop)
+ vspltisb v0, 0
+ /* Precompute vbpermq constant. */
+ vspltisb v10, 3
+ lvsl v11, r0, r0
+ vslb v10, v11, v10
+ MTVRD(v1, r4)
+ li r5, 16
+ vspltb v1, v1, 7
+ /* Compare 32 bytes in each loop. */
+L(continue):
+ lvx v4, 0, r3
+ lvx v5, r3, r5
+ vcmpequb v2, v0, v4
+ vcmpequb v3, v0, v5
+ vcmpequb v6, v1, v4
+ vcmpequb v7, v1, v5
+ vor v8, v2, v3
+ vor v9, v6, v7
+ vor v11, v8, v9
+ vcmpequb. v11, v0, v11
+ addi r3, r3, 32
+ blt cr6, L(continue)
+ vcmpequb. v8, v0, v8
+ blt cr6, L(match)
+
+ /* One (or both) of the quadwords contains c/null. */
+ vspltisb v8, 2
+ vspltisb v9, 5
+ /* Precompute values used for comparison. */
+ vsl v9, v8, v9 /* v9 = 0x4040404040404040. */
+ vaddubm v8, v9, v9
+ vsldoi v8, v0, v8, 1 /* v8 = 0x80. */
+
+ /* Check if null is in second qw. */
+ vcmpequb. v11, v0, v2
+ blt cr6, L(secondqw)
+
+ /* Null found in first qw. */
+ addi r8, r3, -32
+ /* Calculate the null position. */
+ FIND_NULL_POS(v2)
+ /* Check if null is in the first byte. */
+ vcmpequb. v11, v0, v2
+ blt cr6, L(no_match)
+ vsububm v2, v8, v2
+ /* Mask unwanted bytes after null. */
+#ifdef __LITTLE_ENDIAN__
+ vslo v6, v6, v2
+ vsro v6, v6, v2
+#else
+ vsro v6, v6, v2
+ vslo v6, v6, v2
+#endif
+ vcmpequb. v11, v0, v6
+ blt cr6, L(no_match)
+ /* Found a match before null. */
+ CALCULATE_MATCH()
+ add r3, r8, r6
+ blr
+
+L(secondqw):
+ addi r8, r3, -16
+ FIND_NULL_POS(v3)
+ vcmpequb. v11, v0, v2
+ blt cr6, L(no_match1)
+ vsububm v2, v8, v2
+ /* Mask unwanted bytes after null. */
+#ifdef __LITTLE_ENDIAN__
+ vslo v7, v7, v2
+ vsro v7, v7, v2
+#else
+ vsro v7, v7, v2
+ vslo v7, v7, v2
+#endif
+ vcmpequb. v11, v0, v7
+ blt cr6, L(no_match1)
+ addi r8, r8, 16
+ vor v6, v0, v7
+L(no_match1):
+ addi r8, r8, -16
+ vcmpequb. v11, v0, v6
+ blt cr6, L(no_match)
+ /* Found a match before null. */
+ CALCULATE_MATCH()
+ add r3, r8, r6
+ blr
+
+L(match):
+ /* One (or both) of the quadwords contains a match. */
+ mr r8, r3
+ vcmpequb. v8, v0, v7
+ blt cr6, L(firstqw)
+ /* Match found in second qw. */
+ addi r8, r8, 16
+ vor v6, v0, v7
+L(firstqw):
+ addi r8, r8, -32
+ CALCULATE_MATCH()
+ add r9, r8, r6 /* Compute final length. */
+ b L(continue)
+/* We are here because strrchr was called with a null byte. */
+ .align 4
+L(null_match):
+ /* r0 has a doubleword of null bytes. */
+
+ cmpb r5,r12,r0 /* Compare each byte against null bytes. */
+
+ /* Move the doublewords left and right to discard the bits that are
+ not part of the string and bring them back as zeros. */
+#ifdef __LITTLE_ENDIAN__
+ srd r5,r5,r6
+ sld r5,r5,r6
+#else
+ sld r5,r5,r6
+ srd r5,r5,r6
+#endif
+ cmpdi cr7,r5,0 /* If r5 == 0, no c or null bytes
+ have been found. */
+ bne cr7,L(done_null)
+
+ andi. r12, r8, 15
+
+ /* Are we now aligned to a quadword boundary? If so, skip to
+ the main loop. Otherwise, go through the alignment code. */
+
+ bne cr0, L(loop_null)
+
+ /* Handle WORD2 of pair. */
+ ldu r12,8(r8)
+ cmpb r5,r12,r0
+ cmpdi cr7,r5,0
+ bne cr7,L(done_null)
+ b L(loop_null) /* We branch here (rather than falling through)
+ to skip the nops due to heavy alignment
+ of the loop below. */
+
+ /* Main loop to look for the end of the string. Since it's a
+ small loop (< 8 instructions), align it to 32-bytes. */
+ .p2align 5
+L(loop_null):
+ /* Load two doublewords, compare and merge in a
+ single register for speed. This is an attempt
+ to speed up the null-checking process for bigger strings. */
+ ld r12,8(r8)
+ ldu r11,16(r8)
+ cmpb r5,r12,r0
+ cmpb r10,r11,r0
+ or r6,r5,r10
+ cmpdi cr7,r6,0
+ beq cr7,L(vector1)
+
+ /* OK, one (or both) of the doublewords contains a null byte. Check
+ the first doubleword and decrement the address in case the first
+ doubleword really contains a null byte. */
+
+ cmpdi cr6,r5,0
+ addi r8,r8,-8
+ bne cr6,L(done_null)
+
+ /* The null byte must be in the second doubleword. Adjust the address
+ again and move the result of cmpb to r10 so we can calculate the
+ pointer. */
+
+ mr r5,r10
+ addi r8,r8,8
+
+ /* r5 has the output of the cmpb instruction, that is, it contains
+ 0xff in the same position as the null byte in the original
+ doubleword from the string. Use that to calculate the pointer. */
+L(done_null):
+#ifdef __LITTLE_ENDIAN__
+ addi r0,r5,-1
+ andc r0,r0,r5
+ popcntd r0,r0
+#else
+ cntlzd r0,r5 /* Count leading zeros before the match. */
+#endif
+	srdi	r0,r0,3	      /* Convert leading/trailing zeros to bytes.  */
+ add r3,r8,r0 /* Return address of the matching null byte. */
+ blr
+/* Check the first 32B in GPRs and move to the vectorized loop.  */
+ .p2align 5
+L(vector1):
+ addi r3, r8, 8
+ /* Make sure 32B aligned. */
+ andi. r10, r3, 31
+ bne cr0, L(loop_null)
+ vspltisb v0, 0
+ /* Precompute vbpermq constant. */
+ vspltisb v10, 3
+ lvsl v11, r0, r0
+ vslb v10, v11, v10
+ li r5, 16
+ /* Compare 32 bytes in each loop. */
+L(continue1):
+ lvx v4, 0, r3
+ lvx v5, r3, r5
+ vcmpequb v2, v0, v4
+ vcmpequb v3, v0, v5
+ vor v8, v2, v3
+ vcmpequb. v11, v0, v8
+ addi r3, r3, 32
+ blt cr6, L(continue1)
+ addi r3, r3, -32
+ VBPERMQ(v2, v2, v10)
+ VBPERMQ(v3, v3, v10)
+ /* Shift each component into its correct position for merging. */
+#ifdef __LITTLE_ENDIAN__
+ vsldoi v3, v3, v3, 2
+#else
+ vsldoi v2, v2, v2, 6
+ vsldoi v3, v3, v3, 4
+#endif
+ /* Merge the results and move to a GPR. */
+ vor v4, v3, v2
+ MFVRD(r5, v4)
+#ifdef __LITTLE_ENDIAN__
+ addi r6, r5, -1
+ andc r6, r6, r5
+ popcntd r6, r6
+#else
+ cntlzd r6, r5 /* Count leading zeros before the match. */
+#endif
+ add r3, r3, r6 /* Compute final length. */
+ blr
+END_GEN_TB (STRRCHR, TB_TOCLESS)
+weak_alias (strrchr, rindex)
+libc_hidden_builtin_def (strrchr)
diff --git a/sysdeps/powerpc/powerpc64/power8/strspn.S b/sysdeps/powerpc/powerpc64/power8/strspn.S
new file mode 100644
index 0000000000..095f6d6f41
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power8/strspn.S
@@ -0,0 +1,202 @@
+/* Optimized strspn implementation for Power8.
+
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* size_t [r3] strspn (const char *string [r3],
+ const char *needleAccept [r4]) */
+
+/* This takes a novel approach by computing a 256 bit mask whereby
+ each set bit implies the byte is "accepted". P8 vector hardware
+ has extremely efficient hardware for selecting bits from a mask.
+
+ One might ask "why not use bpermd for short strings"? It is
+   so slow that its performance roughly matches the generic PPC64
+ variant without any fancy masking, with the added expense of
+ making the mask. That was the first variant of this. */
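
A hedged portable sketch of that data structure: the accept set is expanded once into a 256-bit bitmap (four 64-bit words, as r5-r8 hold below), after which classifying a byte is a single bit test. The scalar loop here is only meant to show the bitmap; the assembly tests 16 bytes per step with vbpermq:

    #include <stddef.h>
    #include <stdint.h>

    static size_t
    strspn_bitmap_sketch (const char *s, const char *accept)
    {
      uint64_t mask[4] = { 0, 0, 0, 0 };   /* 256 bits: one per byte value  */
      for (const unsigned char *a = (const unsigned char *) accept; *a; a++)
        mask[*a >> 6] |= (uint64_t) 1 << (*a & 63);

      /* The null byte's bit is never set, so the scan stops at the
         terminator.  For strcspn the mask is inverted and the null bit
         cleared, which is what INITIAL_MASK/UPDATE_MASK arrange below.  */
      size_t i = 0;
      while (mask[(unsigned char) s[i] >> 6]
             & ((uint64_t) 1 << ((unsigned char) s[i] & 63)))
        i++;
      return i;
    }
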
+
+
+
+#include "sysdep.h"
+
+#ifndef USE_AS_STRCSPN
+# define USE_AS_STRCSPN 0
+# ifndef STRSPN
+# define STRSPN strspn
+# endif
+# define INITIAL_MASK 0
+# define UPDATE_MASK(RA, RS, RB) or RA, RS, RB
+#else
+# ifndef STRSPN
+# define STRSPN strcspn
+# endif
+# define INITIAL_MASK -1
+# define UPDATE_MASK(RA, RS, RB) andc RA, RS, RB
+#endif
+
+/* Simple macro to use VSX instructions in overlapping VR's. */
+#define XXVR(insn, vrt, vra, vrb) \
+ insn 32+vrt, 32+vra, 32+vrb
+
+/* ISA 2.07B instructions are not all defined for older binutils.
+ Macros are defined below for these newer instructions in order
+ to maintain compatibility. */
+
+/* Note, TX/SX is always set as VMX regs are the high 32 VSX regs. */
+#define MTVRD(v,r) .long (0x7c000167 | ((v)<<(32-11)) | ((r)<<(32-16)))
+#define MFVRD(r,v) .long (0x7c000067 | ((v)<<(32-11)) | ((r)<<(32-16)))
+
+#define VBPERMQ(t,a,b) .long (0x1000054c \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+
+ /* This can be updated to power8 once the minimum version of
+ binutils supports power8 and the above instructions. */
+ .machine power7
+ENTRY_TOCLESS (STRSPN, 4)
+ CALL_MCOUNT 2
+
+ /* Generate useful constants for later on. */
+ vspltisb v1, 7
+ vspltisb v2, -1
+ vslb v1, v1, v1 /* 0x80 to swap high bit for vbpermq. */
+ vspltisb v10, 0
+ vsldoi v4, v10, v2, 2 /* 0xFFFF into vr4. */
+ XXVR(xxmrgld, v4, v4, v10) /* Mask for checking matches. */
+
+ /* Prepare to compute 256b mask. */
+ addi r4, r4, -1
+ li r5, INITIAL_MASK
+ li r6, INITIAL_MASK
+ li r7, INITIAL_MASK
+ li r8, INITIAL_MASK
+
+#if USE_AS_STRCSPN
+	/* Ensure the null character never matches by clearing ISA bit 0
+	   in r5, which is the bit that will check for it in the later usage
+	   of vbpermq.  */
+ srdi r5, r5, 1
+#endif
+
+ li r11, 1
+ sldi r11, r11, 63
+
+ /* Start interleaved Mask computation.
+	   This will eventually OR 1's into the ignored bits from vbpermq.  */
+ lvsr v11, 0, r3
+ vspltb v11, v11, 0 /* Splat shift constant. */
+
+ /* Build a 256b mask in r5-r8. */
+ .align 4
+L(next_needle):
+ lbzu r9, 1(r4)
+
+ cmpldi cr0, r9, 0
+ cmpldi cr1, r9, 128
+
+ /* This is a little tricky. srd only uses the first 7 bits,
+ and if bit 7 is set, value is always 0. So, we can
+ effectively shift 128b in this case. */
+ xori r12, r9, 0x40 /* Invert bit 6. */
+ srd r10, r11, r9 /* Mask for bits 0-63. */
+ srd r12, r11, r12 /* Mask for bits 64-127. */
+
+ beq cr0, L(start_cmp)
+
+ /* Now, or the value into the correct GPR. */
+ bge cr1,L(needle_gt128)
+ UPDATE_MASK (r5, r5, r10) /* 0 - 63. */
+ UPDATE_MASK (r6, r6, r12) /* 64 - 127. */
+ b L(next_needle)
+
+ .align 4
+L(needle_gt128):
+ UPDATE_MASK (r7, r7, r10) /* 128 - 191. */
+ UPDATE_MASK (r8, r8, r12) /* 192 - 255. */
+ b L(next_needle)
+
+
+ .align 4
+L(start_cmp):
+ /* Move and merge bitmap into 2 VRs. bpermd is slower on P8. */
+ mr r0, r3 /* Save r3 for final length computation. */
+ MTVRD (v5, r5)
+ MTVRD (v6, r6)
+ MTVRD (v7, r7)
+ MTVRD (v8, r8)
+
+ /* Continue interleaved mask generation. */
+#ifdef __LITTLE_ENDIAN__
+ vsrw v11, v2, v11 /* Note, shift ignores higher order bits. */
+ vsplth v11, v11, 0 /* Only care about the high 16 bits of v10. */
+#else
+ vslw v11, v2, v11 /* Note, shift ignores higher order bits. */
+ vsplth v11, v11, 1 /* Only care about the low 16 bits of v10. */
+#endif
+ lvx v0, 0, r3 /* Note, unaligned load ignores lower bits. */
+
+ /* Do the merging of the bitmask. */
+ XXVR(xxmrghd, v5, v5, v6)
+ XXVR(xxmrghd, v6, v7, v8)
+
+ /* Finish mask generation. */
+	vand	v11, v11, v4	/* Throw away bits not in the mask.  */
+
+ /* Compare the first 1-16B, while masking unwanted bytes. */
+ clrrdi r3, r3, 4 /* Note, counts from qw boundaries. */
+ vxor v9, v0, v1 /* Swap high bit. */
+ VBPERMQ (v8, v5, v0)
+ VBPERMQ (v7, v6, v9)
+ vor v7, v7, v8
+ vor v7, v7, v11 /* Ignore non-participating bytes. */
+ vcmpequh. v8, v7, v4
+ bnl cr6, L(done)
+
+ addi r3, r3, 16
+
+ .align 4
+L(vec):
+ lvx v0, 0, r3
+ addi r3, r3, 16
+ vxor v9, v0, v1 /* Swap high bit. */
+ VBPERMQ (v8, v5, v0)
+ VBPERMQ (v7, v6, v9)
+ vor v7, v7, v8
+ vcmpequh. v8, v7, v4
+ blt cr6, L(vec)
+
+ addi r3, r3, -16
+L(done):
+ subf r3, r0, r3
+ MFVRD (r10, v7)
+
+#ifdef __LITTLE_ENDIAN__
+ addi r0, r10, 1 /* Count the trailing 1's. */
+ andc r10, r10, r0
+ popcntd r10, r10
+#else
+ xori r10, r10, 0xffff /* Count leading 1's by inverting. */
+ addi r3, r3, -48 /* Account for the extra leading zeros. */
+ cntlzd r10, r10
+#endif
+
+ add r3, r3, r10
+ blr
+
+END(STRSPN)
+libc_hidden_builtin_def (STRSPN)
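
For reference, a minimal C sketch of the 256-bit bitmap technique the POWER8 strspn/strcspn above implements: build a four-doubleword mask from the accept set, then test each input byte against it. This deliberately omits the vbpermq vectorisation and the swapped-bit layout; the function name and the scalar scan are illustrative only, not glibc code (strcspn would use the complementary test, as the UPDATE_MASK macro does with andc).

#include <stddef.h>
#include <stdint.h>

static size_t
strspn_bitmap (const char *s, const char *accept)
{
  /* One bit per possible byte value, 4 x 64 = 256 bits, corresponding to
     the mask the assembly builds in r5-r8.  */
  uint64_t mask[4] = { 0, 0, 0, 0 };

  for (const unsigned char *a = (const unsigned char *) accept; *a != 0; a++)
    mask[*a >> 6] |= (uint64_t) 1 << (*a & 63);

  /* The null terminator's bit is never set, so the scan always stops.  */
  size_t count = 0;
  for (const unsigned char *p = (const unsigned char *) s;
       (mask[*p >> 6] >> (*p & 63)) & 1; p++)
    count++;
  return count;
}
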
diff --git a/sysdeps/powerpc/powerpc64/power9/Implies b/sysdeps/powerpc/powerpc64/power9/Implies
deleted file mode 100644
index fad2505ab9..0000000000
--- a/sysdeps/powerpc/powerpc64/power9/Implies
+++ /dev/null
@@ -1,2 +0,0 @@
-powerpc/powerpc64/power8/fpu
-powerpc/powerpc64/power8
diff --git a/sysdeps/powerpc/powerpc64/power9/fpu/Implies b/sysdeps/powerpc/powerpc64/power9/fpu/Implies
deleted file mode 100644
index fad2505ab9..0000000000
--- a/sysdeps/powerpc/powerpc64/power9/fpu/Implies
+++ /dev/null
@@ -1,2 +0,0 @@
-powerpc/powerpc64/power8/fpu
-powerpc/powerpc64/power8
diff --git a/sysdeps/powerpc/powerpc64/power9/fpu/multiarch/Implies b/sysdeps/powerpc/powerpc64/power9/fpu/multiarch/Implies
deleted file mode 100644
index f11e1bdba2..0000000000
--- a/sysdeps/powerpc/powerpc64/power9/fpu/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power8/fpu/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power9/multiarch/Implies b/sysdeps/powerpc/powerpc64/power9/multiarch/Implies
deleted file mode 100644
index dd6bca4b36..0000000000
--- a/sysdeps/powerpc/powerpc64/power9/multiarch/Implies
+++ /dev/null
@@ -1 +0,0 @@
-powerpc/powerpc64/power8/multiarch
diff --git a/sysdeps/powerpc/powerpc64/power9/strcmp.S b/sysdeps/powerpc/powerpc64/power9/strcmp.S
new file mode 100644
index 0000000000..98243a9d51
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power9/strcmp.S
@@ -0,0 +1,268 @@
+/* Optimized strcmp implementation for PowerPC64/POWER9.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+#ifdef __LITTLE_ENDIAN__
+#include <sysdep.h>
+
+#ifndef STRCMP
+# define STRCMP strcmp
+#endif
+
+/* Implements the function
+
+ int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
+
+ The implementation uses unaligned doubleword accesses for the first
+ 16 bytes, as in the POWER8 version, and vectorised loops after that. */
+
+/* TODO: Change this to actual instructions when minimum binutils is upgraded
+ to 2.27. Macros are defined below for these newer instructions in order
+ to maintain compatibility. */
+# define VCTZLSBB(r,v) .long (0x10010602 | ((r)<<(32-11)) | ((v)<<(32-21)))
+
+# define VEXTUBRX(t,a,b) .long (0x1000070d \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+
+# define VCMPNEZB(t,a,b) .long (0x10000507 \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+
+/* Get 16 bytes for unaligned case.
+ reg1: Vector to hold next 16 bytes.
+ reg2: Address to read from.
+ reg3: Permute control vector. */
+# define GET16BYTES(reg1, reg2, reg3) \
+ lvx reg1, 0, reg2; \
+ vperm v8, v2, reg1, reg3; \
+ vcmpequb. v8, v0, v8; \
+ beq cr6, 1f; \
+ vspltisb v9, 0; \
+ b 2f; \
+ .align 4; \
+1: \
+ addi r6, reg2, 16; \
+ lvx v9, 0, r6; \
+2: \
+ vperm reg1, v9, reg1, reg3;
+
+/* TODO: change this to .machine power9 when the minimum required binutils
+ allows it. */
+
+ .machine power7
+ENTRY_TOCLESS (STRCMP, 4)
+ li r0, 0
+
+ /* Check if [s1]+16 or [s2]+16 will cross a 4K page boundary using
+ the code:
+
+ (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
+
+ with PAGE_SIZE being 4096 and ITER_SIZE being 16. */
+
+ rldicl r7, r3, 0, 52
+ rldicl r9, r4, 0, 52
+ cmpldi cr7, r7, 4096-16
+ bgt cr7, L(pagecross_check)
+ cmpldi cr5, r9, 4096-16
+ bgt cr5, L(pagecross_check)
+
+ /* For short strings up to 16 bytes, load both s1 and s2 using
+ unaligned dwords and compare. */
+ ld r8, 0(r3)
+ ld r10, 0(r4)
+ cmpb r12, r8, r0
+ cmpb r11, r8, r10
+ orc. r9, r12, r11
+ bne cr0, L(different_nocmpb)
+
+ ld r8, 8(r3)
+ ld r10, 8(r4)
+ cmpb r12, r8, r0
+ cmpb r11, r8, r10
+ orc. r9, r12, r11
+ bne cr0, L(different_nocmpb)
+
+ addi r7, r3, 16
+ addi r4, r4, 16
+
+L(align):
+ /* The first 16 bytes have now been checked. */
+ vspltisb v0, 0
+ vspltisb v2, -1
+ lvsr v6, 0, r4 /* Compute mask. */
+ or r5, r4, r7
+ andi. r5, r5, 0xF
+ beq cr0, L(aligned)
+ andi. r5, r7, 0xF
+ beq cr0, L(s1_align)
+ lvsr v10, 0, r7 /* Compute mask. */
+
+ /* Both s1 and s2 are unaligned. */
+ GET16BYTES(v4, r7, v10)
+ GET16BYTES(v5, r4, v6)
+ VCMPNEZB(v7, v5, v4)
+ beq cr6, L(match)
+ b L(different)
+
+ /* Align s1 to qw and adjust s2 address. */
+ .align 4
+L(match):
+ clrldi r6, r7, 60
+ subfic r5, r6, 16
+ add r7, r7, r5
+ add r4, r4, r5
+ andi. r5, r4, 0xF
+ beq cr0, L(aligned)
+ lvsr v6, 0, r4
+ /* There are 2 loops depending on the input alignment.
+ Each loop gets 16 bytes from s1 and s2 and compares.
+ Loop until a mismatch or null occurs. */
+L(s1_align):
+ lvx v4, r7, r0
+ GET16BYTES(v5, r4, v6)
+ VCMPNEZB(v7, v5, v4)
+ addi r7, r7, 16
+ addi r4, r4, 16
+ bne cr6, L(different)
+
+ lvx v4, r7, r0
+ GET16BYTES(v5, r4, v6)
+ VCMPNEZB(v7, v5, v4)
+ addi r7, r7, 16
+ addi r4, r4, 16
+ bne cr6, L(different)
+
+ lvx v4, r7, r0
+ GET16BYTES(v5, r4, v6)
+ VCMPNEZB(v7, v5, v4)
+ addi r7, r7, 16
+ addi r4, r4, 16
+ bne cr6, L(different)
+
+ lvx v4, r7, r0
+ GET16BYTES(v5, r4, v6)
+ VCMPNEZB(v7, v5, v4)
+ addi r7, r7, 16
+ addi r4, r4, 16
+ beq cr6, L(s1_align)
+ b L(different)
+
+ .align 4
+L(aligned):
+ lvx v4, 0, r7
+ lvx v5, 0, r4
+ VCMPNEZB(v7, v5, v4)
+ addi r7, r7, 16
+ addi r4, r4, 16
+ bne cr6, L(different)
+
+ lvx v4, 0, r7
+ lvx v5, 0, r4
+ VCMPNEZB(v7, v5, v4)
+ addi r7, r7, 16
+ addi r4, r4, 16
+ bne cr6, L(different)
+
+ lvx v4, 0, r7
+ lvx v5, 0, r4
+ VCMPNEZB(v7, v5, v4)
+ addi r7, r7, 16
+ addi r4, r4, 16
+ bne cr6, L(different)
+
+ lvx v4, 0, r7
+ lvx v5, 0, r4
+ VCMPNEZB(v7, v5, v4)
+ addi r7, r7, 16
+ addi r4, r4, 16
+ beq cr6, L(aligned)
+
+ /* Calculate and return the difference. */
+L(different):
+ VCTZLSBB(r6, v7)
+ VEXTUBRX(r5, r6, v4)
+ VEXTUBRX(r4, r6, v5)
+ subf r3, r4, r5
+ extsw r3, r3
+ blr
+
+ .align 4
+L(different_nocmpb):
+ neg r3, r9
+ and r9, r9, r3
+ cntlzd r9, r9
+ subfic r9, r9, 63
+ srd r3, r8, r9
+ srd r10, r10, r9
+ rldicl r10, r10, 0, 56
+ rldicl r3, r3, 0, 56
+ subf r3, r10, r3
+ extsw r3, r3
+ blr
+
+ .align 4
+L(pagecross_check):
+ subfic r9, r9, 4096
+ subfic r7, r7, 4096
+ cmpld cr7, r7, r9
+ bge cr7, L(pagecross)
+ mr r7, r9
+
+ /* If an unaligned 16-byte read would cross a 4K page boundary, use
+ a simple byte-by-byte comparison until the page alignment for s1
+ is reached. */
+L(pagecross):
+ add r7, r3, r7
+ subf r9, r3, r7
+ mtctr r9
+
+ .align 4
+L(pagecross_loop):
+ /* Load a byte from s1 and s2, check whether *s1 equals *s2
+ and whether *s1 is '\0'. */
+ lbz r9, 0(r3)
+ lbz r10, 0(r4)
+ addi r3, r3, 1
+ addi r4, r4, 1
+ cmplw cr7, r9, r10
+ cmpdi cr5, r9, r0
+ bne cr7, L(pagecross_ne)
+ beq cr5, L(pagecross_nullfound)
+ bdnz L(pagecross_loop)
+ b L(align)
+
+ .align 4
+L(pagecross_ne):
+ extsw r3, r9
+ mr r9, r10
+L(pagecross_retdiff):
+ subf r9, r9, r3
+ extsw r3, r9
+ blr
+
+ .align 4
+L(pagecross_nullfound):
+ li r3, 0
+ b L(pagecross_retdiff)
+END (STRCMP)
+libc_hidden_builtin_def (strcmp)
+#else
+#include <sysdeps/powerpc/powerpc64/power8/strcmp.S>
+#endif
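
For reference, a minimal C sketch (names illustrative, not glibc code) of two pieces the comments above describe: the 4K page-cross guard applied before the unaligned loads, and the cmpb/orc trick used on the first doublewords, where a nonzero mask means a null byte or a mismatch was found.

#include <stdint.h>

#define PAGE_SIZE 4096
#define ITER_SIZE 16

/* Nonzero if an unaligned ITER_SIZE-byte load from either string might
   cross into the next 4K page.  */
static int
page_cross_possible (const char *s1, const char *s2)
{
  return ((uintptr_t) s1 % PAGE_SIZE > PAGE_SIZE - ITER_SIZE)
         || ((uintptr_t) s2 % PAGE_SIZE > PAGE_SIZE - ITER_SIZE);
}

/* Emulate the cmpb instruction: each result byte is 0xff where the
   corresponding bytes of A and B are equal, 0x00 where they differ.  */
static uint64_t
cmpb_emul (uint64_t a, uint64_t b)
{
  uint64_t r = 0;
  for (int i = 0; i < 64; i += 8)
    if (((a >> i) & 0xff) == ((b >> i) & 0xff))
      r |= (uint64_t) 0xff << i;
  return r;
}

/* Nonzero if the doubleword W1 contains a null byte or differs from W2,
   mirroring the cmpb/cmpb/orc. sequence in the assembly.  */
static uint64_t
null_or_diff (uint64_t w1, uint64_t w2)
{
  return cmpb_emul (w1, 0) | ~cmpb_emul (w1, w2);
}
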
diff --git a/sysdeps/powerpc/powerpc64/power9/strncmp.S b/sysdeps/powerpc/powerpc64/power9/strncmp.S
new file mode 100644
index 0000000000..40be98ff45
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/power9/strncmp.S
@@ -0,0 +1,379 @@
+/* Optimized strncmp implementation for PowerPC64/POWER9.
+ Copyright (C) 2016-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+#ifdef __LITTLE_ENDIAN__
+#include <sysdep.h>
+
+/* Implements the function
+
+ int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t [r5] n)
+
+ The implementation uses unaligned doubleword accesses to avoid specialized
+ code paths depending on data alignment for the first 32 bytes and uses
+ vectorised loops after that. */
+
+#ifndef STRNCMP
+# define STRNCMP strncmp
+#endif
+
+/* TODO: Change this to actual instructions when minimum binutils is upgraded
+ to 2.27. Macros are defined below for these newer instructions in order
+ to maintain compatibility. */
+# define VCTZLSBB(r,v) .long (0x10010602 | ((r)<<(32-11)) | ((v)<<(32-21)))
+
+# define VEXTUBRX(t,a,b) .long (0x1000070d \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+
+# define VCMPNEZB(t,a,b) .long (0x10000507 \
+ | ((t)<<(32-11)) \
+ | ((a)<<(32-16)) \
+ | ((b)<<(32-21)) )
+
+/* Get 16 bytes for unaligned case.
+ reg1: Vector to hold next 16 bytes.
+ reg2: Address to read from.
+ reg3: Permute control vector. */
+# define GET16BYTES(reg1, reg2, reg3) \
+ lvx reg1, 0, reg2; \
+ vperm v8, v2, reg1, reg3; \
+ vcmpequb. v8, v0, v8; \
+ beq cr6, 1f; \
+ vspltisb v9, 0; \
+ b 2f; \
+ .align 4; \
+1: \
+ cmplw cr6, r5, r11; \
+ ble cr6, 2f; \
+ addi r6, reg2, 16; \
+ lvx v9, 0, r6; \
+2: \
+ vperm reg1, v9, reg1, reg3;
+
+/* TODO: change this to .machine power9 when minimum binutils
+ is upgraded to 2.27. */
+ .machine power7
+ENTRY_TOCLESS (STRNCMP, 4)
+ /* Check if size is 0. */
+ cmpdi cr0, r5, 0
+ beq cr0, L(ret0)
+ li r0, 0
+
+ /* Check if [s1]+32 or [s2]+32 will cross a 4K page boundary using
+ the code:
+
+ (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
+
+ with PAGE_SIZE being 4096 and ITER_SIZE being 32. */
+ rldicl r8, r3, 0, 52
+ cmpldi cr7, r8, 4096-32
+ bgt cr7, L(pagecross)
+ rldicl r9, r4, 0, 52
+ cmpldi cr7, r9, 4096-32
+ bgt cr7, L(pagecross)
+
+ /* For short strings up to 32 bytes, load both s1 and s2 using
+ unaligned dwords and compare. */
+
+ ld r7, 0(r3)
+ ld r9, 0(r4)
+ li r8, 0
+ cmpb r8, r7, r8
+ cmpb r6, r7, r9
+ orc. r8, r8, r6
+ bne cr0, L(different1)
+
+ /* If the strings compared are equal so far, but size is less than
+ or equal to 8, return 0. */
+ cmpldi cr7, r5, 8
+ li r9, 0
+ ble cr7, L(ret1)
+ addi r5, r5, -8
+
+ ld r7, 8(r3)
+ ld r9, 8(r4)
+ cmpb r8, r7, r8
+ cmpb r6, r7, r9
+ orc. r8, r8, r6
+ bne cr0, L(different1)
+ cmpldi cr7, r5, 8
+ mr r9, r8
+ ble cr7, L(ret1)
+ /* Update pointers and size. */
+ addi r5, r5, -8
+ addi r3, r3, 16
+ addi r4, r4, 16
+
+ ld r7, 0(r3)
+ ld r9, 0(r4)
+ li r8, 0
+ cmpb r8, r7, r8
+ cmpb r6, r7, r9
+ orc. r8, r8, r6
+ bne cr0, L(different1)
+ cmpldi cr7, r5, 8
+ li r9, 0
+ ble cr7, L(ret1)
+ addi r5, r5, -8
+
+ ld r7, 8(r3)
+ ld r9, 8(r4)
+ cmpb r8, r7, r8
+ cmpb r6, r7, r9
+ orc. r8, r8, r6
+ bne cr0, L(different1)
+ cmpldi cr7, r5, 8
+ mr r9, r8
+ ble cr7, L(ret1)
+
+ /* Update pointers and size. */
+ addi r5, r5, -8
+ addi r3, r3, 16
+ addi r4, r4, 16
+L(align):
+ /* The first 32 bytes have now been checked; align source1 to doubleword
+ and adjust the source2 address. */
+ vspltisb v0, 0
+ vspltisb v2, -1
+ or r6, r4, r3
+ andi. r6, r6, 0xF
+ beq cr0, L(aligned)
+ lvsr v6, 0, r4 /* Compute mask. */
+ clrldi r6, r4, 60
+ subfic r11, r6, 16
+ andi. r6, r3, 0xF
+ beq cr0, L(s1_align)
+ /* Both s1 and s2 are unaligned. */
+ GET16BYTES(v5, r4, v6)
+ lvsr v10, 0, r3 /* Compute mask. */
+ clrldi r6, r3, 60
+ subfic r11, r6, 16
+ GET16BYTES(v4, r3, v10)
+ VCMPNEZB(v7, v5, v4)
+ beq cr6, L(match)
+ b L(different)
+
+ /* Align s1 to qw and adjust s2 address. */
+ .align 4
+L(match):
+ cmpldi cr7, r5, 16
+ ble cr7, L(ret0)
+ subf r5, r11, r5
+ add r3, r3, r11
+ add r4, r4, r11
+ andi. r11, r4, 0xF
+ beq cr0, L(aligned)
+ lvsr v6, 0, r4
+ clrldi r6, r4, 60
+ subfic r11, r6, 16
+ /* There are 2 loops depending on the input alignment.
+ Each loop gets 16 bytes from s1 and s2, checks for null
+ and compares them. Loops until a mismatch or null occurs. */
+L(s1_align):
+ lvx v4, 0, r3
+ GET16BYTES(v5, r4, v6)
+ VCMPNEZB(v7, v5, v4)
+ bne cr6, L(different)
+ cmpldi cr7, r5, 16
+ ble cr7, L(ret0)
+ addi r5, r5, -16
+ addi r3, r3, 16
+ addi r4, r4, 16
+
+ lvx v4, 0, r3
+ GET16BYTES(v5, r4, v6)
+ VCMPNEZB(v7, v5, v4)
+ bne cr6, L(different)
+ cmpldi cr7, r5, 16
+ ble cr7, L(ret0)
+ addi r5, r5, -16
+ addi r3, r3, 16
+ addi r4, r4, 16
+
+ lvx v4, 0, r3
+ GET16BYTES(v5, r4, v6)
+ VCMPNEZB(v7, v5, v4)
+ bne cr6, L(different)
+ cmpldi cr7, r5, 16
+ ble cr7, L(ret0)
+ addi r5, r5, -16
+ addi r3, r3, 16
+ addi r4, r4, 16
+
+ lvx v4, 0, r3
+ GET16BYTES(v5, r4, v6)
+ VCMPNEZB(v7, v5, v4)
+ bne cr6, L(different)
+ cmpldi cr7, r5, 16
+ ble cr7, L(ret0)
+ addi r5, r5, -16
+ addi r3, r3, 16
+ addi r4, r4, 16
+ b L(s1_align)
+ .align 4
+L(aligned):
+ lvx v4, 0, r3
+ lvx v5, 0, r4
+ VCMPNEZB(v7, v5, v4)
+ bne cr6, L(different)
+ cmpldi cr7, r5, 16
+ ble cr7, L(ret0)
+ addi r5, r5, -16
+ addi r3, r3, 16
+ addi r4, r4, 16
+
+ lvx v4, 0, r3
+ lvx v5, 0, r4
+ VCMPNEZB(v7, v5, v4)
+ bne cr6, L(different)
+ cmpldi cr7, r5, 16
+ ble cr7, L(ret0)
+ addi r5, r5, -16
+ addi r3, r3, 16
+ addi r4, r4, 16
+
+ lvx v4, 0, r3
+ lvx v5, 0, r4
+ VCMPNEZB(v7, v5, v4)
+ bne cr6, L(different)
+ cmpldi cr7, r5, 16
+ ble cr7, L(ret0)
+ addi r5, r5, -16
+ addi r3, r3, 16
+ addi r4, r4, 16
+
+ lvx v4, 0, r3
+ lvx v5, 0, r4
+ VCMPNEZB(v7, v5, v4)
+ bne cr6, L(different)
+ cmpldi cr7, r5, 16
+ ble cr7, L(ret0)
+ addi r5, r5, -16
+ addi r3, r3, 16
+ addi r4, r4, 16
+ b L(aligned)
+ /* Calculate and return the difference. */
+L(different):
+ VCTZLSBB(r6, v7)
+ cmplw cr7, r5, r6
+ ble cr7, L(ret0)
+ VEXTUBRX(r5, r6, v4)
+ VEXTUBRX(r4, r6, v5)
+ subf r3, r4, r5
+ extsw r3, r3
+ blr
+
+ .align 4
+L(ret0):
+ li r9, 0
+L(ret1):
+ mr r3, r9
+ blr
+
+ /* The cmpb result in r8 marks the null or differing bytes.  Find the
+ first such byte, cap its bit offset at (n-1)*8 using r5, then shift
+ both doublewords down to it and return the byte difference:
+
+ leadzero = (__builtin_ffsl (z1) - 1);
+ leadzero = leadzero > (n-1)*8 ? (n-1)*8 : leadzero;
+ r1 = (r1 >> leadzero) & 0xFFUL;
+ r2 = (r2 >> leadzero) & 0xFFUL;
+ return r1 - r2; */
+
+ .align 4
+L(different1):
+ neg r11, r8
+ sldi r5, r5, 3
+ and r8, r11, r8
+ addi r5, r5, -8
+ cntlzd r8, r8
+ subfic r8, r8, 63
+ extsw r8, r8
+ cmpld cr7, r8, r5
+ ble cr7, L(different2)
+ mr r8, r5
+L(different2):
+ extsw r8, r8
+ srd r7, r7, r8
+ srd r9, r9, r8
+ rldicl r3, r7, 0, 56
+ rldicl r9, r9, 0, 56
+ subf r9, r9, r3
+ extsw r9, r9
+ mr r3, r9
+ blr
+
+ /* If an unaligned 16-byte read would cross a 4K page boundary, use
+ a simple byte-by-byte comparison until the page alignment for s1
+ is reached. */
+ .align 4
+L(pagecross):
+ lbz r7, 0(r3)
+ lbz r9, 0(r4)
+ subfic r8, r8,4095
+ cmplw cr7, r9, r7
+ bne cr7, L(byte_ne_3)
+ cmpdi cr7, r9, 0
+ beq cr7, L(byte_ne_0)
+ addi r5, r5, -1
+ subf r7, r8, r5
+ subf r9, r7, r5
+ addi r9, r9, 1
+ mtctr r9
+ b L(pagecross_loop1)
+
+ .align 4
+L(pagecross_loop0):
+ beq cr7, L(ret0)
+ lbz r9, 0(r3)
+ lbz r8, 0(r4)
+ addi r5, r5, -1
+ cmplw cr7, r9, r8
+ cmpdi cr5, r9, 0
+ bne cr7, L(byte_ne_2)
+ beq cr5, L(byte_ne_0)
+L(pagecross_loop1):
+ cmpdi cr7, r5, 0
+ addi r3, r3, 1
+ addi r4, r4, 1
+ bdnz L(pagecross_loop0)
+ cmpdi cr7, r7, 0
+ li r9, 0
+ bne+ cr7, L(align)
+ b L(ret1)
+
+ .align 4
+L(byte_ne_0):
+ li r7, 0
+L(byte_ne_1):
+ subf r9, r9, r7
+ extsw r9, r9
+ b L(ret1)
+
+ .align 4
+L(byte_ne_2):
+ extsw r7, r9
+ mr r9, r8
+ b L(byte_ne_1)
+L(byte_ne_3):
+ extsw r7, r7
+ b L(byte_ne_1)
+END(STRNCMP)
+libc_hidden_builtin_def(strncmp)
+#else
+#include <sysdeps/powerpc/powerpc64/power8/strncmp.S>
+#endif
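
The L(different1) comment in the strncmp above gives the final byte-difference computation as pseudo-code; below is the same computation as a self-contained, hedged C sketch for the little-endian path. w1 and w2 are the two doublewords that were compared, z1 is the (assumed nonzero) cmpb-style mask with 0xff in every byte that is null in w1 or differs between w1 and w2, and n is the remaining length (assumed at least 1). The function and parameter names are illustrative only.

#include <stdint.h>

static int
first_diff_byte (uint64_t w1, uint64_t w2, uint64_t z1, uint64_t n)
{
  /* Bit offset of the lowest marked byte, i.e. the first byte in
     little-endian order (z1 must be nonzero).  */
  unsigned int leadzero = __builtin_ffsll (z1) - 1;
  /* Cap it so bytes beyond the length limit are ignored.  */
  if (leadzero > (n - 1) * 8)
    leadzero = (n - 1) * 8;
  unsigned int b1 = (w1 >> leadzero) & 0xFFu;
  unsigned int b2 = (w2 >> leadzero) & 0xFFu;
  return (int) b1 - (int) b2;
}
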
diff --git a/sysdeps/powerpc/powerpc64/ppc-mcount.S b/sysdeps/powerpc/powerpc64/ppc-mcount.S
index 51a4db3048..2d3f14d4cd 100644
--- a/sysdeps/powerpc/powerpc64/ppc-mcount.S
+++ b/sysdeps/powerpc/powerpc64/ppc-mcount.S
@@ -1,5 +1,5 @@
/* PowerPC64-specific implementation of profiling support.
- Copyright (C) 1997-2016 Free Software Foundation, Inc.
+ Copyright (C) 1997-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,13 +24,15 @@
ENTRY(_mcount)
mflr r4
ld r11, 0(r1)
+ std r4, FRAME_LR_SAVE(r1)
stdu r1,-FRAME_MIN_SIZE(r1)
cfi_adjust_cfa_offset (FRAME_MIN_SIZE)
- std r4, FRAME_MIN_SIZE+FRAME_LR_SAVE(r1)
cfi_offset (lr, FRAME_LR_SAVE)
ld r3, FRAME_LR_SAVE(r11)
bl JUMPTARGET(__mcount_internal)
+#ifndef SHARED
nop
+#endif
ld r0, FRAME_MIN_SIZE+FRAME_LR_SAVE(r1)
mtlr r0
addi r1,r1,FRAME_MIN_SIZE
diff --git a/sysdeps/powerpc/powerpc64/register-dump.h b/sysdeps/powerpc/powerpc64/register-dump.h
index db499d4a04..57a9a02b6d 100644
--- a/sysdeps/powerpc/powerpc64/register-dump.h
+++ b/sysdeps/powerpc/powerpc64/register-dump.h
@@ -1,5 +1,5 @@
/* Dump registers.
- Copyright (C) 1998-2016 Free Software Foundation, Inc.
+ Copyright (C) 1998-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/setjmp-bug21895.c b/sysdeps/powerpc/powerpc64/setjmp-bug21895.c
new file mode 100644
index 0000000000..945a251d95
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/setjmp-bug21895.c
@@ -0,0 +1,51 @@
+/* Shared object part of test for setjmp interoperability with static
+ dlopen BZ #21895.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <string.h>
+#include <setjmp.h>
+
+/* Copy the r1 (stack pointer) value to a local variable. */
+#define GET_STACK_POINTER(sp) \
+ ({ \
+ asm volatile ("mr %0, 1\n\t" \
+ : "=r" (sp)); \
+ })
+
+jmp_buf jb;
+void (*bar)(jmp_buf, unsigned long);
+
+void
+lbar (unsigned long sp)
+{
+ bar(jb, sp);
+ for(;;);
+}
+
+void
+foo (void)
+{
+ unsigned long sp;
+ /* Copy r1 (stack pointer) to sp. It will be used later to get
+ the TOC area. */
+ GET_STACK_POINTER(sp);
+ setjmp(jb);
+ lbar(sp);
+
+ for(;;);
+}
diff --git a/sysdeps/powerpc/powerpc64/setjmp-common.S b/sysdeps/powerpc/powerpc64/setjmp-common.S
index 83361f537c..ae92bb9b92 100644
--- a/sysdeps/powerpc/powerpc64/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc64/setjmp-common.S
@@ -1,5 +1,5 @@
/* setjmp for PowerPC64.
- Copyright (C) 1995-2016 Free Software Foundation, Inc.
+ Copyright (C) 1995-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -232,3 +232,14 @@ L(no_vmx):
blr
#endif
END (__sigsetjmp_symbol)
+
+#if defined SHARED && !IS_IN (rtld) && !defined __NO_VMX__
+/* When called from within libc we need a special version of __sigsetjmp
+ that saves r2 since the call won't go via a plt call stub. See
+ bugz #269. */
+ENTRY (__GI___sigsetjmp)
+ std r2,FRAME_TOC_SAVE(r1) /* Save the caller's TOC in the save area. */
+ CALL_MCOUNT 1
+ b JUMPTARGET (GLUE(__sigsetjmp_symbol,_ent))
+END (__GI___sigsetjmp)
+#endif
diff --git a/sysdeps/powerpc/powerpc64/setjmp.S b/sysdeps/powerpc/powerpc64/setjmp.S
index dd0fbf5725..a0f0e768f2 100644
--- a/sysdeps/powerpc/powerpc64/setjmp.S
+++ b/sysdeps/powerpc/powerpc64/setjmp.S
@@ -1,5 +1,5 @@
/* AltiVec (new) version of setjmp for PowerPC.
- Copyright (C) 1995-2016 Free Software Foundation, Inc.
+ Copyright (C) 1995-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/start.S b/sysdeps/powerpc/powerpc64/start.S
index d35b5ec1b4..bd7189310c 100644
--- a/sysdeps/powerpc/powerpc64/start.S
+++ b/sysdeps/powerpc/powerpc64/start.S
@@ -1,5 +1,5 @@
/* Startup code for programs linked with GNU libc. PowerPC64 version.
- Copyright (C) 1998-2016 Free Software Foundation, Inc.
+ Copyright (C) 1998-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -62,7 +62,7 @@ L(start_addresses):
.L01:
.tc L(start_addresses)[TC],L(start_addresses)
.section ".text"
-ENTRY(_start)
+ENTRY (_start)
/* Save the stack pointer, in case we're statically linked under Linux. */
mr r9,r1
/* Set up an initial stack frame, and clear the LR. */
@@ -78,7 +78,7 @@ ENTRY(_start)
/* and continue in libc-start, in glibc. */
b JUMPTARGET(__libc_start_main)
-/* The linker needs this nop to recognize that it's OK to call via a
+/* Older versions of ld need this nop to recognize that it's OK to call via a
TOC adjusting stub. */
nop
diff --git a/sysdeps/powerpc/powerpc64/strchr.S b/sysdeps/powerpc/powerpc64/strchr.S
index 19c695d7e7..f4fbbea103 100644
--- a/sysdeps/powerpc/powerpc64/strchr.S
+++ b/sysdeps/powerpc/powerpc64/strchr.S
@@ -1,5 +1,5 @@
/* Optimized strchr implementation for PowerPC64.
- Copyright (C) 1997-2016 Free Software Foundation, Inc.
+ Copyright (C) 1997-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -22,7 +22,11 @@
/* char * [r3] strchr (const char *s [r3] , int c [r4] ) */
-ENTRY (strchr)
+#ifndef STRCHR
+# define STRCHR strchr
+#endif
+
+ENTRY_TOCLESS (STRCHR)
CALL_MCOUNT 2
#define rTMP1 r0
@@ -145,7 +149,7 @@ L(foundit):
#endif
add rRTN, rSTR, rCLZB
blr
-END (strchr)
+END (STRCHR)
weak_alias (strchr, index)
libc_hidden_builtin_def (strchr)
diff --git a/sysdeps/powerpc/powerpc64/strcmp.S b/sysdeps/powerpc/powerpc64/strcmp.S
index d4fb5b364a..1862a2e5ce 100644
--- a/sysdeps/powerpc/powerpc64/strcmp.S
+++ b/sysdeps/powerpc/powerpc64/strcmp.S
@@ -1,5 +1,5 @@
/* Optimized strcmp implementation for PowerPC64.
- Copyright (C) 1997-2016 Free Software Foundation, Inc.
+ Copyright (C) 1997-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -22,7 +22,11 @@
/* int [r3] strcmp (const char *s1 [r3], const char *s2 [r4]) */
-EALIGN (strcmp, 4, 0)
+#ifndef STRCMP
+# define STRCMP strcmp
+#endif
+
+ENTRY_TOCLESS (STRCMP, 4)
CALL_MCOUNT 2
#define rTMP2 r0
@@ -172,5 +176,5 @@ L(u3): sub rRTN, rWORD1, rWORD2
L(u4): lbz rWORD1, -1(rSTR1)
sub rRTN, rWORD1, rWORD2
blr
-END (strcmp)
+END (STRCMP)
libc_hidden_builtin_def (strcmp)
diff --git a/sysdeps/powerpc/powerpc64/strcspn.S b/sysdeps/powerpc/powerpc64/strcspn.S
deleted file mode 100644
index 31e619dde0..0000000000
--- a/sysdeps/powerpc/powerpc64/strcspn.S
+++ /dev/null
@@ -1,127 +0,0 @@
-/* Optimized strcspn implementation for PowerPC64.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-
-/* size_t [r3] strcspn (const char [r4] *s, const char [r5] *reject) */
-
-EALIGN (strcspn, 4, 0)
- CALL_MCOUNT 3
-
- /* The idea to speed up the algorithm is to create a lookup table
- for fast check if input character should be considered. For ASCII
- or ISO-8859-X character sets it has 256 positions. */
-
- /* PPC64 ELF ABI stack is aligned to 16 bytes. */
- addi r9,r1,-256
- /* Clear the table with 0 values */
- li r6, 0
- li r8, 4
- mtctr r8
- mr r10, r9
- .align 4
-L(zerohash):
- std r6, 0(r10)
- std r6, 8(r10)
- std r6, 16(r10)
- std r6, 24(r10)
- std r6, 32(r10)
- std r6, 40(r10)
- std r6, 48(r10)
- std r6, 56(r10)
- addi r10, r10, 64
- bdnz L(zerohash)
-
- lbz r10,0(r4)
- cmpdi cr7,r10,0 /* reject[0] == '\0' ? */
- li r8,1
- beq cr7,L(finish_table) /* If reject[0] == '\0' skip */
-
- /* Initialize the table as:
- for (i=0; reject[i]; i++
- table[reject[i]]] = 1 */
- .align 4
-L(init_table):
- stbx r8,r9,r10
- lbzu r10,1(r4)
- cmpdi cr7,r10,0 /* If reject[0] == '\0' finish */
- bne cr7,L(init_table)
-L(finish_table):
- /* set table[0] = 1 */
- li r10,1
- stb r10,0(r9)
- li r10,0
- b L(mainloop)
-
- /* Unrool the loop 4 times and check using the table as:
- i = 0;
- while (1)
- {
- if (table[input[i++]] == 1)
- return i - 1;
- if (table[input[i++]] == 1)
- return i - 1;
- if (table[input[i++]] == 1)
- return i - 1;
- if (table[input[i++]] == 1)
- return i - 1;
- } */
- .align 4
-L(unroll):
- lbz r8,1(r3)
- addi r10,r10,4
- lbzx r8,r9,r8
- cmpwi r7,r8,1
- beq cr7,L(end)
- lbz r8,2(r3)
- addi r3,r3,4
- lbzx r8,r9,r8
- cmpwi cr7,r8,1
- beq cr7,L(end2)
- lbz r8,3(r7)
- lbzx r8,r9,r8
- cmpwi cr7,r8,1
- beq cr7,L(end3)
-L(mainloop):
- lbz r8,0(r3)
- mr r7,r3
- addi r6,r10,1
- addi r4,r10,2
- addi r5,r10,3
- lbzx r8,r9,8
- cmpwi cr7,r8,1
- bne cr7,L(unroll)
- mr r3,r10
- blr
-
- .align 4
-L(end):
- mr r3,r6
- blr
-
- .align 4
-L(end2):
- mr r3,r4
- blr
-
- .align 4
-L(end3):
- mr r3,r5
- blr
-END (strcspn)
-libc_hidden_builtin_def (strcspn)
diff --git a/sysdeps/powerpc/powerpc64/strlen.S b/sysdeps/powerpc/powerpc64/strlen.S
index 3175090d60..4604f9c90f 100644
--- a/sysdeps/powerpc/powerpc64/strlen.S
+++ b/sysdeps/powerpc/powerpc64/strlen.S
@@ -1,5 +1,5 @@
/* Optimized strlen implementation for PowerPC64.
- Copyright (C) 1997-2016 Free Software Foundation, Inc.
+ Copyright (C) 1997-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -80,7 +80,11 @@
/* int [r3] strlen (char *s [r3]) */
-ENTRY (strlen)
+#ifndef STRLEN
+# define STRLEN strlen
+#endif
+
+ENTRY_TOCLESS (STRLEN)
CALL_MCOUNT 1
#define rTMP4 r0
@@ -195,5 +199,5 @@ L(done1):
blr
#endif
-END (strlen)
+END (STRLEN)
libc_hidden_builtin_def (strlen)
diff --git a/sysdeps/powerpc/powerpc64/strncmp.S b/sysdeps/powerpc/powerpc64/strncmp.S
index 11b9e4c657..bf535838df 100644
--- a/sysdeps/powerpc/powerpc64/strncmp.S
+++ b/sysdeps/powerpc/powerpc64/strncmp.S
@@ -1,5 +1,5 @@
/* Optimized strcmp implementation for PowerPC64.
- Copyright (C) 2003-2016 Free Software Foundation, Inc.
+ Copyright (C) 2003-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -22,7 +22,11 @@
/* int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5]) */
-EALIGN (strncmp, 4, 0)
+#ifndef STRNCMP
+# define STRNCMP strncmp
+#endif
+
+ENTRY_TOCLESS (STRNCMP, 4)
CALL_MCOUNT 3
#define rTMP2 r0
@@ -202,5 +206,5 @@ L(u1):
L(u2): lbzu rWORD1, -1(rSTR1)
L(u3): sub rRTN, rWORD1, rWORD2
blr
-END (strncmp)
+END (STRNCMP)
libc_hidden_builtin_def (strncmp)
diff --git a/sysdeps/powerpc/powerpc64/strpbrk.S b/sysdeps/powerpc/powerpc64/strpbrk.S
deleted file mode 100644
index 5e9d1a66aa..0000000000
--- a/sysdeps/powerpc/powerpc64/strpbrk.S
+++ /dev/null
@@ -1,135 +0,0 @@
-/* Optimized strpbrk implementation for PowerPC64.
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-
-/* char [r3] *strpbrk(const char [r4] *s, const char [r5] *accept) */
-
-EALIGN (strpbrk, 4, 0)
- CALL_MCOUNT 3
-
- lbz r10,0(r4)
- cmpdi cr7,r10,0 /* accept[0] == '\0' ? */
- beq cr7,L(nullfound)
-
- /* The idea to speed up the algorithm is to create a lookup table
- for fast check if input character should be considered. For ASCII
- or ISO-8859-X character sets it has 256 positions. */
-
- /* PPC64 ELF ABI stack is aligned to 16 bytes. */
- addi r9,r1,-256
- /* Clear the table with 0 values */
- li r6, 0
- li r7, 4
- mtctr r7
- mr r8, r9
- .align 4
-L(zerohash):
- std r6, 0(r8)
- std r6, 8(r8)
- std r6, 16(r8)
- std r6, 24(r8)
- std r6, 32(r8)
- std r6, 40(r8)
- std r6, 48(r8)
- std r6, 56(r8)
- addi r8, r8, 64
- bdnz L(zerohash)
-
- /* Initialize the table as:
- for (i=0; accept[i]; i++
- table[accept[i]]] = 1 */
- li r0,1
- .align 4
-L(init_table):
- stbx r0,r9,r10
- lbzu r10,1(r4)
- cmpdi r0,r10,0
- bne cr0,L(init_table)
-L(finish_table):
- /* set table[0] = 1 */
- li r4,1
- stb r4,0(r9)
- b L(mainloop)
-
- /* Unrool the loop 4 times and check using the table as:
- i = 0;
- while (1)
- {
- if (table[input[i++]] == 1)
- return (s[i -1] ? s + i - 1: NULL);
- if (table[input[i++]] == 1)
- return (s[i -1] ? s + i - 1: NULL);
- if (table[input[i++]] == 1)
- return (s[i -1] ? s + i - 1: NULL);
- if (table[input[i++]] == 1)
- return (s[i -1] ? s + i - 1: NULL);
- } */
- .align 4
-L(unroll):
- lbz r0,1(r3)
- lbzx r8,r9,r0
- cmpwi cr6,r8,1
- beq cr6,L(checkend2)
- lbz r10,2(r3)
- lbzx r4,r9,r10
- cmpwi cr7,r4,1
- beq cr7,L(checkend3)
- lbz r12,3(r3)
- addi r3,r3,4
- lbzx r11,r9,r12
- cmpwi cr0,r11,1
- beq cr0,L(checkend)
-L(mainloop):
- lbz r12,0(r3)
- addi r11,r3,1
- addi r5,r3,2
- addi r7,r3,3
- lbzx r6,r9,r12
- cmpwi cr1,r6,1
- bne cr1,L(unroll)
- cmpdi cr0,r12,0
- beq cr0,L(nullfound)
-L(end):
- blr
-
- .align 4
-L(checkend):
- cmpdi cr1,r12,0
- mr r3,r7
- bne cr1,L(end)
-L(nullfound):
- /* return NULL */
- li 3,0
- blr
-
- .align 4
-L(checkend2):
- cmpdi cr7,r0,0
- mr r3,r11
- beq cr7,L(nullfound)
- blr
-
- .align 4
-L(checkend3):
- cmpdi cr6,r10,0
- mr r3,r5
- beq cr6,L(nullfound)
- blr
-END (strpbrk)
-libc_hidden_builtin_def (strpbrk)
diff --git a/sysdeps/powerpc/powerpc64/strspn.S b/sysdeps/powerpc/powerpc64/strspn.S
deleted file mode 100644
index cf10da1997..0000000000
--- a/sysdeps/powerpc/powerpc64/strspn.S
+++ /dev/null
@@ -1,144 +0,0 @@
-/* Optimized strspn implementation for PowerPC64.
-
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-/* size_t [r3] strspn (const char *string [r3],
- const char *needleAccept [r4] */
-
-/* Performance gains are grabbed through following techniques:
-
- > hashing of needle.
- > hashing avoids scanning of duplicate entries in needle
- across the string.
- > unrolling when scanning for character in string
- across hash table. */
-
-/* Algorithm is as below:
- 1. A empty hash table/dictionary is created comprising of
- 256 ascii character set
- 2. When hash entry is found in needle , the hash index
- is initialized to 1
- 3. The string is scanned until end and for every character,
- its corresponding hash index is compared.
- 4. initial length of string (count) until first hit of
- accept needle to be found is set to 0
- 4. If hash index is set to 1 for the index of string,
- count is returned.
- 5. Otherwise count is incremented and scanning continues
- until end of string. */
-
-#include <sysdep.h>
-
-EALIGN(strspn, 4, 0)
- CALL_MCOUNT 3
-
- /* PPC64 ELF ABI stack is aligned to 16 bytes. */
- addi r9,r1,-256
- /* Clear the table with 0 values */
- li r6, 0
- li r8, 4
- mtctr r8
- mr r10, r9
- .align 4
-L(zerohash):
- std r6, 0(r10)
- std r6, 8(r10)
- std r6, 16(r10)
- std r6, 24(r10)
- std r6, 32(r10)
- std r6, 40(r10)
- std r6, 48(r10)
- std r6, 56(r10)
- addi r10, r10, 64
- bdnz L(zerohash)
-
- lbz r10,0(r4)
- li r8, 1 /* r8=1, marker into hash if found in
- needle */
- cmpdi cr7, r10, 0 /* accept needle is NULL */
- beq cr7, L(skipHashing) /* if needle is NULL, skip hashing */
-
- .align 4 /* align section to 16 byte boundary */
-L(hashing):
- stbx r8, r9, r10 /* update hash with marker for the pivot of
- the needle */
- lbzu r10, 1(r4) /* load needle into r10 and update to next */
- cmpdi cr7, r10, 0 /* if needle is has reached NULL, continue */
- bne cr7, L(hashing) /* loop to hash the needle */
-
-L(skipHashing):
- li r10, 0 /* load counter = 0 */
- b L(beginScan)
-
- .align 4 /* align section to 16 byte boundary */
-L(scanUnroll):
- lbzx r8, r9, r8 /* load r8 with hash value at index */
- cmpwi cr7, r8, 0 /* if we hit marker in hash, we have found
- accept needle */
- beq cr7, L(ret1stIndex) /* we have hit accept needle, return the
- count */
-
- lbz r8, 1(r3) /* load string[1] into r8 */
- addi r10, r10, 4 /* increment counter */
- lbzx r8, r9, r8 /* load r8 with hash value at index */
- cmpwi cr7, r8, 0 /* if we hit marker in hash, we have found
- accept needle */
- beq cr7, L(ret2ndIndex) /* we have hit accept needle, return the
- count */
-
- lbz r8, 2(r3) /* load string[2] into r8 */
- lbzx r8, r9, r8 /* load r8 with hash value at index */
- cmpwi cr7, r8, 0 /* if we hit marker in hash, we have found
- accept needle */
- beq cr7, L(ret3rdIndex) /* we have hit accept needle, return the
- count */
-
- lbz r8, 3(r3) /* load string[3] into r8 */
- lbzx r8, r9, r8 /* load r8 with hash value at index */
- addi r3, r3, 4 /* unroll factor , increment string by 4 */
- cmpwi cr7, r8, 0 /* if we hit marker in hash, we have found
- accept needle */
- beq cr7,L(ret4thIndex) /* we have hit accept needle, return the
- count */
-
-L(beginScan):
- lbz r8, 0(r3) /* load string[0] into r8 */
- addi r6, r10, 1 /* place holder for counter + 1 */
- addi r5, r10, 2 /* place holder for counter + 2 */
- addi r4, r10, 3 /* place holder for counter + 3 */
- cmpdi cr7, r8, 0 /* if we hit marker in hash, we have found
- accept needle */
- bne cr7, L(scanUnroll) /* continue scanning */
-
-L(ret1stIndex):
- mr r3, r10 /* update r3 for return */
- blr /* return */
-
-L(ret2ndIndex):
- mr r3, r6 /* update r3 for return */
- blr /* return */
-
-L(ret3rdIndex):
- mr r3, r5 /* update r3 for return */
- blr /* return */
-
-L(ret4thIndex):
- mr r3, r4 /* update r3 for return */
- blr /* done */
-END(strspn)
-libc_hidden_builtin_def (strspn)
diff --git a/sysdeps/powerpc/powerpc64/strtok.S b/sysdeps/powerpc/powerpc64/strtok.S
deleted file mode 100644
index 51ce9b291e..0000000000
--- a/sysdeps/powerpc/powerpc64/strtok.S
+++ /dev/null
@@ -1,226 +0,0 @@
-/* Optimized strtok implementation for PowerPC64.
-
- Copyright (C) 2014-2016 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-/* Performance gains are grabbed through following techniques:
-
- > hashing of needle.
- > hashing avoids scanning of duplicate entries in needle
- across the string.
- > unrolling when scanning for character in string
- across hash table. */
-
-/* Algorithm is as below:
- 1. A empty hash table/dictionary is created comprising of
- 256 ascii character set
- 2. When hash entry is found in needle , the hash index
- is initialized to 1
- 3. The string is scanned until end and for every character,
- its corresponding hash index is compared.
- 4. initial length of string (count) until first hit of
- accept needle is calculated and moved.(strspn)
- 5. The string is again scanned until end and for every character,
- its corresponding hash index is compared.(strpbrk)
- 6. If hash index is set to 1 for the index of string,
- set it to null and set the saveptr to point to the next char.
- 7. Otherwise count is incremented and scanning continues
- until end of string. */
-
-#include <sysdep.h>
-#ifdef USE_AS_STRTOK_R
-# define FUNC_NAME __strtok_r
-#else
-# define FUNC_NAME strtok
-#endif
-
-EALIGN(FUNC_NAME, 4, 0)
-#ifdef USE_AS_STRTOK_R
- CALL_MCOUNT 3
- cmpdi cr7, r3, 0 /* Is input null? */
- bne cr7, L(inputnotNull)
- ld r3, 0(r5) /* Load from r5 */
-#else
- CALL_MCOUNT 2
- addis r5, r2, .LANCHOR0@toc@ha
- cmpdi cr7, r3, 0 /* Is r3 NULL? */
- bne cr7, L(inputnotNull)
- ld r3, .LANCHOR0@toc@l(r5) /* Load from saveptr */
-#endif
-L(inputnotNull):
- mr r7, r3
- cmpdi cr7, r3, 0
- beq cr7, L(returnNULL)
- lbz r8, 0(r3)
- cmpdi cr7, r8, 0
- beq cr7, L(returnNULL)
-
- addi r9, r1, -256 /* r9 is a hash of 256 bytes */
-
- /*Iniatliaze hash table with Zeroes */
- li r6, 0
- li r8, 4
- mtctr r8
- mr r10, r9
- .align 4
-L(zerohash):
- std r6, 0(r10)
- std r6, 8(r10)
- std r6, 16(r10)
- std r6, 24(r10)
- std r6, 32(r10)
- std r6, 40(r10)
- std r6, 48(r10)
- std r6, 56(r10)
- addi r10, r10, 64
- bdnz L(zerohash)
-
-
- lbz r10, 0(r4) /* load r10 with needle (r4) */
- li r8, 1 /* r8=1, marker into hash if found in
- needle */
-
- cmpdi cr7, r10, 0 /* accept needle is NULL */
- beq cr7, L(skipHashing) /* if needle is NULL, skip hashing */
-
- .align 4 /* align section to 16 byte boundary */
-L(hashing):
- stbx r8, r9, r10 /* update hash with marker for the pivot of
- the needle */
- lbzu r10, 1(r4) /* load needle into r10 and update to next */
- cmpdi cr7, r10, 0 /* if needle is has reached NULL, continue */
- bne cr7, L(hashing) /* loop to hash the needle */
-
-L(skipHashing):
- b L(beginScan)
-
- .align 4 /* align section to 16 byte boundary */
-L(scanUnroll):
- lbzx r8, r9, r8 /* load r8 with hash value at index */
- cmpwi cr7, r8, 0 /* check the hash value */
- beq cr7, L(ret1stIndex) /* we have hit accept needle */
-
- lbz r8, 1(r7) /* load string[1] into r8 */
- lbzx r8, r9, r8 /* load r8 with hash value at index */
- cmpwi cr7, r8, 0 /* check the hash value */
- beq cr7, L(ret2ndIndex) /* we have hit accept needle */
-
- lbz r8, 2(r7) /* load string[1] into r8 */
- lbzx r8, r9, r8 /* load r8 with hash value at index */
- cmpwi cr7, r8, 0 /* check the hash value */
- beq cr7, L(ret3rdIndex) /* we have hit accept needle */
-
- lbz r8, 3(r7) /* load string[1] into r8 */
- addi r7, r7, 4
- lbzx r8, r9, r8 /* load r8 with hash value at index */
- cmpwi cr7, r8, 0 /* check the hash value */
- beq cr7,L(ret4thIndex) /* we have hit accept needle */
-
-L(beginScan):
- lbz r8, 0(r7) /* load string[0] into r8 */
- addi r6, r7, 1
- addi r11, r7, 2
- addi r4, r7, 3
- cmpdi cr7, r8, 0 /* check if its null */
- bne cr7, L(scanUnroll) /* continue scanning */
-
-L(ret1stIndex):
- mr r3, r7
- b L(next)
-L(ret2ndIndex):
- mr r3, r6
- b L(next)
-L(ret3rdIndex):
- mr r3, r11
- b L(next)
-L(ret4thIndex):
- mr r3, r4
-L(next):
- mr r7, r3
- lbz r8, 0(r7)
- cmpdi cr7, r8, 0
- beq cr7, L(returnNULL)
- li r8, 1
- li r10, 0 /* load counter = 0 */
- stbx r8, r9, r10 /* update hash for NULL */
- b L(mainloop)
-
-L(unroll):
- lbz r8, 1(r7) /* load string[1] into r8 */
- lbzx r8, r9, r8 /* load r8 with hash value at index */
- cmpwi r7, r8, 1 /* check the hash */
- beq cr7, L(foundat1st) /* we have hit accept needle */
- lbz r8, 2(r7)
- lbzx r8, r9, r8
- cmpwi cr7, r8, 1
- beq cr7, L(foundat2nd)
- lbz r8, 3(r7)
- addi r7, r7, 4
- lbzx r8, r9, r8
- cmpwi cr7, r8, 1
- beq cr7, L(foundat3rd)
-L(mainloop):
- lbz r8, 0(r7)
- addi r6, r7, 1
- addi r11, r7, 2
- addi r4, r7, 3
- lbzx r8, r9, r8
- cmpwi cr7, r8, 1
- bne cr7, L(unroll) /* continue scanning */
-
- b L(found)
-L(foundat1st):
- mr r7, r6
- b L(found)
-L(foundat2nd):
- mr r7, r11
- b L(found)
-L(foundat3rd):
- mr r7, r4
-L(found):
- lbz r8, 0(r7)
- cmpdi cr7, r8, 0
- beq cr7, L(end)
- li r10, 0
- stb r10, 0(r7) /* Terminate string */
- addi r7, r7, 1 /* Store the pointer to the next char */
-L(end):
-#ifdef USE_AS_STRTOK_R
- std r7, 0(r5) /* Update saveptr */
-#else
- std r7, .LANCHOR0@toc@l(r5)
-#endif
- blr /* done */
-L(returnNULL):
-#ifndef USE_AS_STRTOK_R
- li r7, 0
-#endif
- li r3, 0 /* return NULL */
- b L(end)
-END(FUNC_NAME)
-#ifdef USE_AS_STRTOK_R
-libc_hidden_builtin_def (strtok_r)
-#else
- .section ".bss"
- .align 3
- .set .LANCHOR0,. + 0
- .type olds, @object
- .size olds, 8
-olds:
- .zero 8
-libc_hidden_builtin_def (strtok)
-#endif
diff --git a/sysdeps/powerpc/powerpc64/submul_1.S b/sysdeps/powerpc/powerpc64/submul_1.S
index 0f289377df..7d2dea0281 100644
--- a/sysdeps/powerpc/powerpc64/submul_1.S
+++ b/sysdeps/powerpc/powerpc64/submul_1.S
@@ -1,6 +1,6 @@
/* PowerPC64 __mpn_addmul_1 -- Multiply a limb vector with a limb and subtract
the result to a second limb vector.
- Copyright (C) 2013-2016 Free Software Foundation, Inc.
+ Copyright (C) 2013-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/sysdeps/powerpc/powerpc64/sysdep.h b/sysdeps/powerpc/powerpc64/sysdep.h
index 7393944ba6..2df1d9b6e6 100644
--- a/sysdeps/powerpc/powerpc64/sysdep.h
+++ b/sysdeps/powerpc/powerpc64/sysdep.h
@@ -1,5 +1,5 @@
/* Assembly macros for 64-bit PowerPC.
- Copyright (C) 2002-2016 Free Software Foundation, Inc.
+ Copyright (C) 2002-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,66 +21,40 @@
#ifdef __ASSEMBLER__
/* Stack frame offsets. */
-#if _CALL_ELF != 2
-#define FRAME_MIN_SIZE 112
-#define FRAME_MIN_SIZE_PARM 112
#define FRAME_BACKCHAIN 0
#define FRAME_CR_SAVE 8
#define FRAME_LR_SAVE 16
+#if _CALL_ELF != 2
+#define FRAME_MIN_SIZE 112
+#define FRAME_MIN_SIZE_PARM 112
#define FRAME_TOC_SAVE 40
#define FRAME_PARM_SAVE 48
-#define FRAME_PARM1_SAVE 48
-#define FRAME_PARM2_SAVE 56
-#define FRAME_PARM3_SAVE 64
-#define FRAME_PARM4_SAVE 72
-#define FRAME_PARM5_SAVE 80
-#define FRAME_PARM6_SAVE 88
-#define FRAME_PARM7_SAVE 96
-#define FRAME_PARM8_SAVE 104
-#define FRAME_PARM9_SAVE 112
#else
#define FRAME_MIN_SIZE 32
#define FRAME_MIN_SIZE_PARM 96
-#define FRAME_BACKCHAIN 0
-#define FRAME_CR_SAVE 8
-#define FRAME_LR_SAVE 16
#define FRAME_TOC_SAVE 24
#define FRAME_PARM_SAVE 32
-#define FRAME_PARM1_SAVE 32
-#define FRAME_PARM2_SAVE 40
-#define FRAME_PARM3_SAVE 48
-#define FRAME_PARM4_SAVE 56
-#define FRAME_PARM5_SAVE 64
-#define FRAME_PARM6_SAVE 72
-#define FRAME_PARM7_SAVE 80
-#define FRAME_PARM8_SAVE 88
-#define FRAME_PARM9_SAVE 96
#endif
/* Support macros for CALL_MCOUNT. */
-#if _CALL_ELF == 2
-#define call_mcount_parm_offset (-64)
-#else
-#define call_mcount_parm_offset FRAME_PARM_SAVE
-#endif
.macro SAVE_ARG NARG
.if \NARG
SAVE_ARG \NARG-1
- std 2+\NARG,call_mcount_parm_offset-8+8*(\NARG)(1)
+ std 2+\NARG,-FRAME_MIN_SIZE_PARM+FRAME_PARM_SAVE-8+8*(\NARG)(1)
.endif
.endm
.macro REST_ARG NARG
.if \NARG
REST_ARG \NARG-1
- ld 2+\NARG,FRAME_MIN_SIZE_PARM+call_mcount_parm_offset-8+8*(\NARG)(1)
+ ld 2+\NARG,FRAME_PARM_SAVE-8+8*(\NARG)(1)
.endif
.endm
.macro CFI_SAVE_ARG NARG
.if \NARG
CFI_SAVE_ARG \NARG-1
- cfi_offset(2+\NARG,call_mcount_parm_offset-8+8*(\NARG))
+ cfi_offset(2+\NARG,-FRAME_MIN_SIZE_PARM+FRAME_PARM_SAVE-8+8*(\NARG))
.endif
.endm
@@ -132,25 +106,25 @@
# define OPD_ENT(name) .quad BODY_LABEL (name), .TOC.@tocbase, 0
#endif
-#define ENTRY_1(name) \
+#define ENTRY_1(name) \
.type BODY_LABEL(name),@function; \
.globl name; \
.section ".opd","aw"; \
- .align 3; \
-name##: OPD_ENT (name); \
- .previous;
+ .p2align 3;FUNC_LABEL(name): \
+ OPD_ENT (name); \
+ .previous
-#define DOT_LABEL(X) X
+#define FUNC_LABEL(X) X
#define BODY_LABEL(X) .LY##X
-#define ENTRY_2(name) \
+#define ENTRY_2(name) \
.type name,@function; \
ENTRY_1(name)
-#define END_2(name) \
+#define END_2(name) \
.size name,.-BODY_LABEL(name); \
- .size BODY_LABEL(name),.-BODY_LABEL(name);
+ .size BODY_LABEL(name),.-BODY_LABEL(name)
#define LOCALENTRY(name)
-#else /* _CALL_ELF */
+#else /* _CALL_ELF == 2 */
/* Macro to prepare for calling via a function pointer. */
.macro PPC64_LOAD_FUNCPTR PTR
@@ -158,48 +132,65 @@ name##: OPD_ENT (name); \
mtctr r12
.endm
-#define DOT_LABEL(X) X
+#define FUNC_LABEL(X) X
#define BODY_LABEL(X) X
-#define ENTRY_2(name) \
+#define ENTRY_2(name) \
.globl name; \
- .type name,@function;
-#define END_2(name) \
- .size name,.-name;
-#define LOCALENTRY(name) \
-1: addis r2,r12,.TOC.-1b@ha; \
- addi r2,r2,.TOC.-1b@l; \
- .localentry name,.-name;
+ .type name,@function
+#define END_2(name) \
+ .size name,.-name
+#define LOCALENTRY(name) \
+1: addis r2,r12,.TOC.-1b@ha; \
+ addi r2,r2,.TOC.-1b@l; \
+ .localentry name,.-name
#endif /* _CALL_ELF */
-#define ENTRY(name) \
- .section ".text"; \
- ENTRY_2(name) \
- .align ALIGNARG(2); \
-BODY_LABEL(name): \
- cfi_startproc; \
- LOCALENTRY(name)
+ .macro NOPS NARG
+ .if \NARG
+ NOPS \NARG-1
+ nop
+ .endif
+ .endm
+
+ .macro ENTRY_3 name, alignp2=2, nopwords=0
+ .text
+ ENTRY_2(\name)
+ .p2align \alignp2
+ NOPS \nopwords
+BODY_LABEL(\name):
+ .endm
-#define EALIGN_W_0 /* No words to insert. */
-#define EALIGN_W_1 nop
-#define EALIGN_W_2 nop;nop
-#define EALIGN_W_3 nop;nop;nop
-#define EALIGN_W_4 EALIGN_W_3;nop
-#define EALIGN_W_5 EALIGN_W_4;nop
-#define EALIGN_W_6 EALIGN_W_5;nop
-#define EALIGN_W_7 EALIGN_W_6;nop
-
-/* EALIGN is like ENTRY, but does alignment to 'words'*4 bytes
- past a 2^alignt boundary. */
-#define EALIGN(name, alignt, words) \
- .section ".text"; \
- ENTRY_2(name) \
- .align ALIGNARG(alignt); \
- EALIGN_W_##words; \
-BODY_LABEL(name): \
+/* Use ENTRY_TOCLESS for functions that make no use of r2 and
+ guarantee r2 is unchanged on exit. Any function that has @toc or
+ @got relocs uses r2. Functions that call other functions via the
+ PLT use r2. Use ENTRY for functions that may use or change r2.
+ The first argument is the function name.
+ The optional second argument specifies alignment of the function's
+ code, as the logarithm base two of the byte alignment. For
+ example, a value of four aligns to a sixteen byte boundary.
+ The optional third argument specifies the number of NOPs to emit
+ before the start of the function's code. */
+#ifndef PROF
+#define ENTRY_TOCLESS(name, ...) \
+ ENTRY_3 name, ## __VA_ARGS__; \
+ cfi_startproc
+
+#define ENTRY(name, ...) \
+ ENTRY_TOCLESS(name, ## __VA_ARGS__); \
+ LOCALENTRY(name)
+#else
+/* The call to _mcount is potentially via the plt, so profiling code
+ is never free of an r2 use. */
+#define ENTRY_TOCLESS(name, ...) \
+ ENTRY_3 name, ## __VA_ARGS__; \
cfi_startproc; \
LOCALENTRY(name)
+#define ENTRY(name, ...) \
+ ENTRY_TOCLESS(name, ## __VA_ARGS__)
+#endif
+
/* Local labels stripped out by the linker. */
#undef L
#define L(x) .L##x
@@ -246,7 +237,7 @@ LT_LABEL(name): ; \
LT_LABELSUFFIX(name,_name_start): ;\
.ascii stringify(name) ; \
LT_LABELSUFFIX(name,_name_end): ; \
- .align 2 ;
+ .p2align 2
#define TRACEBACK_MASK(name,mask) \
LT_LABEL(name): ; \
@@ -257,23 +248,23 @@ LT_LABEL(name): ; \
LT_LABELSUFFIX(name,_name_start): ;\
.ascii stringify(name) ; \
LT_LABELSUFFIX(name,_name_end): ; \
- .align 2 ;
+ .p2align 2
/* END generates Traceback tables */
#undef END
#define END(name) \
cfi_endproc; \
- TRACEBACK(name) \
+ TRACEBACK(name); \
END_2(name)
/* This form supports more informative traceback tables */
#define END_GEN_TB(name,mask) \
cfi_endproc; \
- TRACEBACK_MASK(name,mask) \
+ TRACEBACK_MASK(name,mask); \
END_2(name)
-#if !IS_IN(rtld) && defined (ENABLE_LOCK_ELISION)
-# define ABORT_TRANSACTION \
+#if !IS_IN(rtld)
+# define ABORT_TRANSACTION_IMPL \
cmpdi 13,0; \
beq 1f; \
lwz 0,TM_CAPABLE(13); \
@@ -281,11 +272,12 @@ LT_LABELSUFFIX(name,_name_end): ; \
beq 1f; \
li 11,_ABORT_SYSCALL; \
tabort. 11; \
- .align 4; \
+ .p2align 4; \
1:
#else
-# define ABORT_TRANSACTION
+# define ABORT_TRANSACTION_IMPL
#endif
+#define ABORT_TRANSACTION ABORT_TRANSACTION_IMPL
#define DO_CALL(syscall) \
ABORT_TRANSACTION \
@@ -294,12 +286,12 @@ LT_LABELSUFFIX(name,_name_end): ; \
/* ppc64 is always PIC */
#undef JUMPTARGET
-#define JUMPTARGET(name) DOT_LABEL(name)
+#define JUMPTARGET(name) FUNC_LABEL(name)
#define PSEUDO(name, syscall_name, args) \
- .section ".text"; \
- ENTRY (name) \
- DO_CALL (SYS_ify (syscall_name));
+ .section ".text"; \
+ ENTRY (name); \
+ DO_CALL (SYS_ify (syscall_name))
#ifdef SHARED
#define TAIL_CALL_SYSCALL_ERROR \
@@ -340,9 +332,9 @@ LT_LABELSUFFIX(name,_name_end): ; \
END (name)
#define PSEUDO_NOERRNO(name, syscall_name, args) \
- .section ".text"; \
- ENTRY (name) \
- DO_CALL (SYS_ify (syscall_name));
+ .section ".text"; \
+ ENTRY (name); \
+ DO_CALL (SYS_ify (syscall_name))
#define PSEUDO_RET_NOERRNO \
blr
@@ -354,9 +346,9 @@ LT_LABELSUFFIX(name,_name_end): ; \
END (name)
#define PSEUDO_ERRVAL(name, syscall_name, args) \
- .section ".text"; \
- ENTRY (name) \
- DO_CALL (SYS_ify (syscall_name));
+ .section ".text"; \
+ ENTRY (name); \
+ DO_CALL (SYS_ify (syscall_name))
#define PSEUDO_RET_ERRVAL \
blr
@@ -372,53 +364,53 @@ LT_LABELSUFFIX(name,_name_end): ; \
#if _CALL_ELF != 2
#define PPC64_LOAD_FUNCPTR(ptr) \
- "ld 12,0(" #ptr ");\n" \
- "ld 2,8(" #ptr ");\n" \
- "mtctr 12;\n" \
- "ld 11,16(" #ptr ");"
+ "ld 12,0(" #ptr ")\n" \
+ "ld 2,8(" #ptr ")\n" \
+ "mtctr 12\n" \
+ "ld 11,16(" #ptr ")"
#ifdef USE_PPC64_OVERLAPPING_OPD
-# define OPD_ENT(name) ".quad " BODY_PREFIX #name ", .TOC.@tocbase;"
+# define OPD_ENT(name) ".quad " BODY_PREFIX #name ", .TOC.@tocbase"
#else
-# define OPD_ENT(name) ".quad " BODY_PREFIX #name ", .TOC.@tocbase, 0;"
+# define OPD_ENT(name) ".quad " BODY_PREFIX #name ", .TOC.@tocbase, 0"
#endif
#define ENTRY_1(name) \
- ".type " BODY_PREFIX #name ",@function;\n" \
- ".globl " #name ";\n" \
- ".pushsection \".opd\",\"aw\";\n" \
- ".align 3;\n" \
+ ".type " BODY_PREFIX #name ",@function\n" \
+ ".globl " #name "\n" \
+ ".pushsection \".opd\",\"aw\"\n" \
+ ".p2align 3\n" \
#name ":\n" \
OPD_ENT (name) "\n" \
- ".popsection;"
+ ".popsection"
#define DOT_PREFIX ""
#define BODY_PREFIX ".LY"
#define ENTRY_2(name) \
- ".type " #name ",@function;\n" \
+ ".type " #name ",@function\n" \
ENTRY_1(name)
#define END_2(name) \
- ".size " #name ",.-" BODY_PREFIX #name ";\n" \
- ".size " BODY_PREFIX #name ",.-" BODY_PREFIX #name ";"
+ ".size " #name ",.-" BODY_PREFIX #name "\n" \
+ ".size " BODY_PREFIX #name ",.-" BODY_PREFIX #name
#define LOCALENTRY(name)
#else /* _CALL_ELF */
#define PPC64_LOAD_FUNCPTR(ptr) \
- "mr 12," #ptr ";\n" \
- "mtctr 12;"
+ "mr 12," #ptr "\n" \
+ "mtctr 12"
#define DOT_PREFIX ""
#define BODY_PREFIX ""
#define ENTRY_2(name) \
- ".type " #name ",@function;\n" \
- ".globl " #name ";"
+ ".type " #name ",@function\n" \
+ ".globl " #name
#define END_2(name) \
- ".size " #name ",.-" #name ";"
+ ".size " #name ",.-" #name
#define LOCALENTRY(name) \
- "1: addis 2,12,.TOC.-1b@ha;\n" \
- "addi 2,2,.TOC.-1b@l;\n" \
- ".localentry " #name ",.-" #name ";"
+ "1: addis 2,12,.TOC.-1b@ha\n" \
+ "addi 2,2,.TOC.-1b@l\n" \
+ ".localentry " #name ",.-" #name
#endif /* _CALL_ELF */
diff --git a/sysdeps/powerpc/powerpc64/tls-macros.h b/sysdeps/powerpc/powerpc64/tls-macros.h
index 42a95ec5c1..79a0b2579c 100644
--- a/sysdeps/powerpc/powerpc64/tls-macros.h
+++ b/sysdeps/powerpc/powerpc64/tls-macros.h
@@ -18,13 +18,11 @@
__result; \
})
-#define __TLS_GET_ADDR "__tls_get_addr"
-
/* PowerPC64 Local Dynamic TLS access. */
#define TLS_LD(x) \
({ int * __result; \
asm ("addi 3,2," #x "@got@tlsld\n\t" \
- "bl " __TLS_GET_ADDR "\n\t" \
+ "bl __tls_get_addr\n\t" \
"nop \n\t" \
"addis %0,3," #x "@dtprel@ha\n\t" \
"addi %0,%0," #x "@dtprel@l" \
@@ -36,7 +34,7 @@
#define TLS_GD(x) \
({ register int *__result __asm__ ("r3"); \
asm ("addi 3,2," #x "@got@tlsgd\n\t" \
- "bl " __TLS_GET_ADDR "\n\t" \
+ "bl __tls_get_addr\n\t" \
"nop " \
: "=r" (__result) : \
: __TLS_CALL_CLOBBERS); \
diff --git a/sysdeps/powerpc/powerpc64/tst-audit.h b/sysdeps/powerpc/powerpc64/tst-audit.h
index f856682e71..1d8ea6fd76 100644
--- a/sysdeps/powerpc/powerpc64/tst-audit.h
+++ b/sysdeps/powerpc/powerpc64/tst-audit.h
@@ -1,6 +1,6 @@
/* Definitions for testing PLT entry/exit auditing. PowerPC64 version.
- Copyright (C) 2012-2016 Free Software Foundation, Inc.
+ Copyright (C) 2012-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
diff --git a/sysdeps/powerpc/powerpc64/tst-setjmp-bug21895-static.c b/sysdeps/powerpc/powerpc64/tst-setjmp-bug21895-static.c
new file mode 100644
index 0000000000..31ee88cd62
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/tst-setjmp-bug21895-static.c
@@ -0,0 +1,75 @@
+/* Test setjmp interoperability with static dlopen BZ #21895.
+ Copyright (C) 2017-2018 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <setjmp.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+
+/* Set the TOC area pointed to by sp to zero. */
+#define SET_TOC_TO_ZERO(sp) \
+ ({ \
+ unsigned int zero = 0; \
+ asm volatile ("std %0, 24(%1)\n\t" :: "r" (zero), "r" (sp)); \
+ })
+
+static void
+bar (jmp_buf jb, unsigned long sp)
+{
+ static int i;
+ if (i++==1)
+ exit(0); /* Success. */
+
+ /* This will set the TOC area on the caller frame (foo) to zero. __longjmp
+ must restore r2, otherwise a segmentation fault will happen after
+ it jumps back to foo. */
+ SET_TOC_TO_ZERO(sp);
+ longjmp(jb, i);
+}
+
+static int
+do_test (void)
+{
+ void *h = dlopen("setjmp-bug21895.so", RTLD_NOW);
+ if (!h)
+ {
+ puts(dlerror());
+ return 1;
+ }
+
+ void (*pfoo)(void) = dlsym(h, "foo");
+ if (!pfoo)
+ {
+ puts(dlerror());
+ return 1;
+ }
+
+ void (**ppbar)(jmp_buf, unsigned long) = dlsym(h, "bar");
+ if (!ppbar)
+ {
+ puts(dlerror());
+ return 1;
+ }
+
+ *ppbar = bar;
+ pfoo();
+
+ for(;;);
+}
+
+#include <support/test-driver.c>