diff options
42 files changed, 925 insertions, 652 deletions
@@ -1,3 +1,115 @@ +2009-10-28 Roland McGrath <roland@redhat.com> + + * Makefile (dist-prepare): New target. + (tag-for-dist): Target removed. + (glibc-%.tar): Pattern rule removed. + (%.tar): New pattern rule, does simple use of git archive. + (dist-version): Remove variable definition. + (dist): Just depend on that. Add .tar.xz dependency. + If dist-version variable not set on command line, depend on + dist-prepare, re-invoke with dist-version set via git describe. + + * Makerules (%.xz): New pattern rule. + +2009-11-03 Andreas Schwab <schwab@redhat.com> + + * sysdeps/unix/sysv/linux/powerpc/powerpc32/syscalls.list: Add + readahead. + +2009-11-04 Jakub Jelinek <jakub@redhat.com> + + * misc/sys/uio.h (preadv, pwritev): Fix type of last argument + when -D_FILE_OFFSET_BITS=64. + + * sysdeps/unix/sysv/linux/ia64/bits/fcntl.h (fallocate): Fix types + of last two arguments when -D_FILE_OFFSET_BITS=64. + * sysdeps/unix/sysv/linux/i386/bits/fcntl.h (fallocate): Likewise. + * sysdeps/unix/sysv/linux/s390/bits/fcntl.h (fallocate): Likewise. + * sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h (fallocate): Likewise. + * sysdeps/unix/sysv/linux/x86_64/bits/fcntl.h (fallocate): Likewise. + * sysdeps/unix/sysv/linux/sparc/bits/fcntl.h (fallocate): Likewise. + * sysdeps/unix/sysv/linux/sh/bits/fcntl.h (fallocate): Likewise. + +2009-09-05 H.J. Lu <hongjiu.lu@intel.com> + + * configure.in: Support binutils 2.100 and 3.0. + +2009-09-07 Ulrich Drepper <drepper@redhat.com> + + * locale/programs/ld-collate.c (struct locale_collate_t): Add + unnamed_section_defined field. + (collate_read): Test and set unnamed_section_defined. + + * posix/getconf.c (vars): Handle POSIX2_LINE_MAX in addition to + _POSIX2_LINE_MAX. + +2009-09-04 H.J. Lu <hongjiu.lu@intel.com> + + * configure.in: Support binutils 2.20. + +2009-09-01 Andreas Schwab <schwab@redhat.com> + + * hesiod/nss_hesiod/hesiod-grp.c (internal_gid_from_group): Fix + parsing of group entry. 
+ +2009-09-02 Andreas Schwab <schwab@redhat.com> + + * libio/wfileops.c (_IO_wfile_seekoff): Account for readahead in + external buffer. Always discard readahead in internal buffer. + * libio/Makefile (tests): Add bug-wsetpos. + * libio/bug-wsetpos.c: New file. + +2009-08-31 Andreas Schwab <schwab@redhat.com> + + * libio/wfileops.c (_IO_wfile_seekoff): Remove dead code and + reformulate in-buffer optimisation check to match code in + _IO_new_file_seekoff. + +2009-08-31 Joshua W. Boyer <jwboyer@linux.vnet.ibm.com> + + * sysdeps/powerpc/powerpc32/power6/memcpy.S: Change srdi instruction + to srwi in 32-bit memcpy for power6. + +2009-08-31 Ulrich Drepper <drepper@redhat.com> + + [BZ #10560] + * malloc/malloc.c: Add local assert definition to avoid problems with + memory allocation in the real one. + +2009-08-25 Joseph Myers <joseph@codesourcery.com> + + * math/s_fdiml.c (__fdiml): Use fpclassify instead of fpclassifyl. + +2009-08-24 Ulrich Drepper <drepper@redhat.com> + + * math/s_fdim.c: In case of overflows set errno. + * math/s_fdimf.c: Likewise. + * math/s_fdiml.c: Likewise. + +2009-08-23 Ulrich Drepper <drepper@redhat.com> + + * posix/regcomp.c (parse_dup_op): Verify the expression is correctly + terminated. + * posix/Makefile (tests): Add bug-regex29. + * posix/bug-regex29.c: New file. + + * posix/unistd.h: Define _POSIX_VERSION and _POSIX2_* correctly if + older POSIX versions are selected. + +2009-08-10 SUGIOKA Toshinobu <sugioka@itonet.co.jp> + + * stdlib/longlong.h [__sh__] (udiv_qrnnd, sub_ddmmss): Add "t" to + clobber list. + +2009-07-31 Jakub Jelinek <jakub@redhat.com> + + * malloc/Makefile (CFLAGS-obstack.c): Add $(uses-callbacks). + +2009-07-30 Ulrich Drepper <drepper@redhat.com> + + * sysdeps/ia64/backtrace.c (backtrace_helper): Stop backtrace when + we make no more progress. + 2009-07-25 Ulrich Drepper <drepper@redhat.com> [BZ #10448] @@ -347,51 +347,38 @@ TAGS: | $(ETAGS) -o $@ - # Make the distribution tarfile. 
-.PHONY: dist tag-for-dist +.PHONY: dist dist-prepare generated := $(generated) stubs.h -files-for-dist := README FAQ INSTALL NOTES configure - -tag-of-stem = glibc-$(subst .,_,$*) -dist-selector = -r $(tag-of-stem) - -# Add-ons in the main repository but distributed in their own tar files. -dist-separate = libidn - -glibc-%.tar $(dist-separate:%=glibc-%-%.tar): $(files-for-dist) \ - $(foreach D,$(dist-separate),\ - $D/configure) - @rm -fr glibc-$* - $(MAKE) -q `find sysdeps $(addsuffix /sysdeps,$(sysdeps-add-ons)) \ - -name configure` - cvs $(CVSOPTS) -Q export -d glibc-$* $(dist-selector) libc -# Touch all the configure scripts going into the tarball since cvs export -# might have delivered configure.in newer than configure. - find glibc-$* -name configure -print | xargs touch - $(dist-do-separate-dirs) - tar cf glibc-$*.tar glibc-$* - rm -fr glibc-$* -define dist-do-separate-dirs -$(foreach dir,$(dist-separate), - @rm -fr glibc-$(dir)-$* - mv glibc-$*/$(dir) glibc-$(dir)-$* - tar cf glibc-$(dir)-$*.tar glibc-$(dir)-$* - rm -fr glibc-$(dir)-$* -) -endef +files-for-dist := README FAQ INSTALL NOTES configure ChangeLog NEWS + +# Regenerate stuff, then error if these things are not committed yet. +dist-prepare: $(files-for-dist) + conf=`find sysdeps $(addsuffix /sysdeps,$(sysdeps-add-ons)) \ + -name configure`; \ + $(MAKE) $$conf && \ + git diff --stat HEAD -- $^ $$conf \ + | $(AWK) '{ print; rc=1 } END { exit rc }' + +%.tar: FORCE + git archive --prefix=$*/ $* > $@.new + mv -f $@.new $@ # Do `make dist dist-version=X.Y.Z' to make tar files of an older version. 
-dist-version = $(version) -dist: $(foreach Z,.bz2 .gz,glibc-$(dist-version).tar$Z \ - $(foreach D,$(dist-separate),\ - glibc-$D-$(dist-version).tar$Z)) +ifneq (,$(strip $(dist-version))) +dist: $(foreach Z,.bz2 .gz .xz,$(dist-version).tar$Z) md5sum $^ - -tag-for-dist: tag-$(dist-version) -tag-%: $(files-for-dist) - cvs $(CVSOPTS) -Q tag -c $(tag-of-stem) +else +dist: dist-prepare + @if v=`git describe`; then \ + echo Distribution version $$v; \ + $(MAKE) dist dist-version=$$v; \ + else \ + false; \ + fi +endif define format-me @rm -f $@ @@ -1,4 +1,4 @@ -# Copyright (C) 1991-2006, 2007, 2008 Free Software Foundation, Inc. +# Copyright (C) 1991-2006,2007,2008,2009 Free Software Foundation, Inc. # This file is part of the GNU C Library. # The GNU C Library is free software; you can redistribute it and/or @@ -1335,6 +1335,7 @@ echo-headers: %.bz2: %; bzip2 -9vk $< %.gz: %; gzip -9vnc $< > $@.new && mv -f $@.new $@ +%.xz: %; xz -9evk $< # Common cleaning targets. @@ -4839,7 +4839,7 @@ $as_echo_n "checking version of $AS... " >&6; } ac_prog_version=`$AS --version 2>&1 | sed -n 's/^.*GNU assembler.* \([0-9]*\.[0-9.]*\).*$/\1/p'` case $ac_prog_version in '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; - 2.1[3-9]*) + 2.1[3-9]*|2.1[0-9][0-9]*|2.[2-9]*|[3-9].*) ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; @@ -4902,7 +4902,7 @@ $as_echo_n "checking version of $LD... " >&6; } ac_prog_version=`$LD --version 2>&1 | sed -n 's/^.*GNU ld.* \([0-9][0-9]*\.[0-9.]*\).*$/\1/p'` case $ac_prog_version in '') ac_prog_version="v. 
?.??, bad"; ac_verc_fail=yes;; - 2.1[3-9]*) + 2.1[3-9]*|2.1[0-9][0-9]*|2.[2-9]*|[3-9].*) ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; diff --git a/configure.in b/configure.in index 6a92bd876a..4bd85015a2 100644 --- a/configure.in +++ b/configure.in @@ -897,10 +897,10 @@ AC_SUBST(MIG)dnl Needed by sysdeps/mach/configure.in # Accept binutils 2.13 or newer. AC_CHECK_PROG_VER(AS, $AS, --version, [GNU assembler.* \([0-9]*\.[0-9.]*\)], - [2.1[3-9]*], AS=: critic_missing="$critic_missing as") + [2.1[3-9]*|2.1[0-9][0-9]*|2.[2-9]*|[3-9].*], AS=: critic_missing="$critic_missing as") AC_CHECK_PROG_VER(LD, $LD, --version, [GNU ld.* \([0-9][0-9]*\.[0-9.]*\)], - [2.1[3-9]*], LD=: critic_missing="$critic_missing ld") + [2.1[3-9]*|2.1[0-9][0-9]*|2.[2-9]*|[3-9].*], LD=: critic_missing="$critic_missing ld") # We need the physical current working directory. We cannot use the # "pwd -P" shell builtin since that's not portable. Instead we try to diff --git a/hesiod/nss_hesiod/hesiod-grp.c b/hesiod/nss_hesiod/hesiod-grp.c index 50c53f7893..f0c8c31e06 100644 --- a/hesiod/nss_hesiod/hesiod-grp.c +++ b/hesiod/nss_hesiod/hesiod-grp.c @@ -139,21 +139,19 @@ internal_gid_from_group (void *context, const char *groupname, gid_t *group) { char *p = *grp_res; + /* Skip to third field. 
*/ while (*p != '\0' && *p != ':') ++p; - while (*p != '\0' && *p == ':') + if (*p != '\0') ++p; while (*p != '\0' && *p != ':') ++p; - while (*p != '\0' && *p == ':') - ++p; - if (*p == ':') + if (*p != '\0') { char *endp; char *q = ++p; long int val; - q = p; while (*q != '\0' && *q != ':') ++q; diff --git a/libio/Makefile b/libio/Makefile index 501e80c2ee..83b9458dc2 100644 --- a/libio/Makefile +++ b/libio/Makefile @@ -58,7 +58,7 @@ tests = tst_swprintf tst_wprintf tst_swscanf tst_wscanf tst_getwc tst_putwc \ tst-memstream1 tst-memstream2 \ tst-wmemstream1 tst-wmemstream2 \ bug-memstream1 bug-wmemstream1 \ - tst-setvbuf1 tst-popen1 tst-fgetwc + tst-setvbuf1 tst-popen1 tst-fgetwc bug-wsetpos test-srcs = test-freopen all: # Make this the default target; it will be defined in Rules. diff --git a/libio/bug-wsetpos.c b/libio/bug-wsetpos.c new file mode 100644 index 0000000000..ccb22a4b62 --- /dev/null +++ b/libio/bug-wsetpos.c @@ -0,0 +1,75 @@ +/* Test program for fsetpos on a wide character stream. 
*/ + +#include <assert.h> +#include <stdio.h> +#include <wchar.h> + +static void do_prepare (void); +#define PREPARE(argc, argv) do_prepare () +static int do_test (void); +#define TEST_FUNCTION do_test () +#include <test-skeleton.c> + +static const char pattern[] = "12345"; +static char *temp_file; + +static void +do_prepare (void) +{ + int fd = create_temp_file ("bug-wsetpos.", &temp_file); + if (fd == -1) + { + printf ("cannot create temporary file: %m\n"); + exit (1); + } + write (fd, pattern, sizeof (pattern)); + close (fd); +} + +static int +do_test (void) +{ + FILE *fp = fopen (temp_file, "r"); + fpos_t pos; + wchar_t c; + + if (fp == NULL) + { + printf ("fdopen: %m\n"); + return 1; + } + + c = fgetwc (fp); assert (c == L'1'); + c = fgetwc (fp); assert (c == L'2'); + + if (fgetpos (fp, &pos) == EOF) + { + printf ("fgetpos: %m\n"); + return 1; + } + + rewind (fp); + if (ferror (fp)) + { + printf ("rewind: %m\n"); + return 1; + } + + c = fgetwc (fp); assert (c == L'1'); + + if (fsetpos (fp, &pos) == EOF) + { + printf ("fsetpos: %m\n"); + return 1; + } + + c = fgetwc (fp); + if (c != L'3') + { + puts ("fsetpos failed"); + return 1; + } + + puts ("Test succeeded."); + return 0; +} diff --git a/libio/wfileops.c b/libio/wfileops.c index 57ed786a8f..5bc08bedfb 100644 --- a/libio/wfileops.c +++ b/libio/wfileops.c @@ -631,8 +631,12 @@ _IO_wfile_seekoff (fp, offset, dir, mode) clen = (*cv->__codecvt_do_encoding) (cv); if (clen > 0) - offset -= (fp->_wide_data->_IO_read_end - - fp->_wide_data->_IO_read_ptr) * clen; + { + offset -= (fp->_wide_data->_IO_read_end + - fp->_wide_data->_IO_read_ptr) * clen; + /* Adjust by readahead in external buffer. */ + offset -= fp->_IO_read_end - fp->_IO_read_ptr; + } else { int nread; @@ -678,88 +682,28 @@ _IO_wfile_seekoff (fp, offset, dir, mode) if (fp->_offset != _IO_pos_BAD && fp->_IO_read_base != NULL && !_IO_in_backup (fp)) { - /* Offset relative to start of main get area. 
*/ - _IO_off64_t rel_offset = (offset - fp->_offset - + (fp->_IO_read_end - fp->_IO_read_base)); - if (rel_offset >= 0) + _IO_off64_t start_offset = (fp->_offset + - (fp->_IO_read_end - fp->_IO_buf_base)); + if (offset >= start_offset && offset < fp->_offset) { -#if 0 - if (_IO_in_backup (fp)) - _IO_switch_to_main_get_area (fp); -#endif - if (rel_offset <= fp->_IO_read_end - fp->_IO_read_base) - { - enum __codecvt_result status; - struct _IO_codecvt *cd = fp->_codecvt; - const char *read_ptr_copy; - - fp->_IO_read_ptr = fp->_IO_read_base + rel_offset; - _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base); - - /* Now set the pointer for the internal buffer. This - might be an iterative process. Though the read - pointer is somewhere in the current external buffer - this does not mean we can convert this whole buffer - at once fitting in the internal buffer. */ - fp->_wide_data->_IO_state = fp->_wide_data->_IO_last_state; - read_ptr_copy = fp->_IO_read_base; - fp->_wide_data->_IO_read_ptr = fp->_wide_data->_IO_read_base; - do - { - wchar_t buffer[1024]; - wchar_t *ignore; - status = (*cd->__codecvt_do_in) (cd, - &fp->_wide_data->_IO_state, - read_ptr_copy, - fp->_IO_read_ptr, - &read_ptr_copy, - buffer, - buffer - + (sizeof (buffer) - / sizeof (buffer[0])), - &ignore); - if (status != __codecvt_ok && status != __codecvt_partial) - { - fp->_flags |= _IO_ERR_SEEN; - goto dumb; - } - } - while (read_ptr_copy != fp->_IO_read_ptr); - - fp->_wide_data->_IO_read_ptr = fp->_wide_data->_IO_read_base; - - _IO_mask_flags (fp, 0, _IO_EOF_SEEN); - goto resync; - } -#ifdef TODO - /* If we have streammarkers, seek forward by reading ahead. 
*/ - if (_IO_have_markers (fp)) - { - int to_skip = rel_offset - - (fp->_IO_read_ptr - fp->_IO_read_base); - if (ignore (to_skip) != to_skip) - goto dumb; - _IO_mask_flags (fp, 0, _IO_EOF_SEEN); - goto resync; - } -#endif - } -#ifdef TODO - if (rel_offset < 0 && rel_offset >= Bbase () - Bptr ()) - { - if (!_IO_in_backup (fp)) - _IO_switch_to_backup_area (fp); - gbump (fp->_IO_read_end + rel_offset - fp->_IO_read_ptr); + enum __codecvt_result status; + struct _IO_codecvt *cd = fp->_codecvt; + const char *read_ptr_copy; + + _IO_setg (fp, fp->_IO_buf_base, + fp->_IO_buf_base + (offset - start_offset), + fp->_IO_read_end); + _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base); + _IO_wsetg (fp, fp->_wide_data->_IO_buf_base, + fp->_wide_data->_IO_buf_base, + fp->_wide_data->_IO_buf_base); + _IO_wsetp (fp, fp->_wide_data->_IO_buf_base, + fp->_wide_data->_IO_buf_base); _IO_mask_flags (fp, 0, _IO_EOF_SEEN); goto resync; } -#endif } -#ifdef TODO - INTUSE(_IO_unsave_markers) (fp); -#endif - if (fp->_flags & _IO_NO_READS) goto dumb; @@ -792,6 +736,9 @@ _IO_wfile_seekoff (fp, offset, dir, mode) _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + delta, fp->_IO_buf_base + count); _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base); + _IO_wsetg (fp, fp->_wide_data->_IO_buf_base, + fp->_wide_data->_IO_buf_base, fp->_wide_data->_IO_buf_base); + _IO_wsetp (fp, fp->_wide_data->_IO_buf_base, fp->_wide_data->_IO_buf_base); fp->_offset = result + count; _IO_mask_flags (fp, 0, _IO_EOF_SEEN); return offset; diff --git a/locale/programs/ld-collate.c b/locale/programs/ld-collate.c index bf50e77aab..11bd7eacad 100644 --- a/locale/programs/ld-collate.c +++ b/locale/programs/ld-collate.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1995-2003, 2005-2007, 2008 Free Software Foundation, Inc. +/* Copyright (C) 1995-2003, 2005-2008, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@gnu.org>, 1995. 
@@ -203,6 +203,8 @@ struct locale_collate_t struct section_list *current_section; /* There always can be an unnamed section. */ struct section_list unnamed_section; + /* Flag whether the unnamed section has been defined. */ + bool unnamed_section_defined; /* To make handling of errors easier we have another section. */ struct section_list error_section; /* Sometimes we are defining the values for collating symbols before @@ -634,7 +636,7 @@ find_element (struct linereader *ldfile, struct locale_collate_t *collate, if (find_entry (&collate->seq_table, str, len, &result) != 0) { /* Nope, not define yet. So we see whether it is a - collation symbol. */ + collation symbol. */ void *ptr; if (find_entry (&collate->sym_table, str, len, &ptr) == 0) @@ -788,7 +790,7 @@ insert_weights (struct linereader *ldfile, struct element_t *elem, if (*cp == '<') { /* Ahh, it's a bsymbol or an UCS4 value. If it's - the latter we have to unify the name. */ + the latter we have to unify the name. */ const char *startp = ++cp; size_t len; @@ -1302,8 +1304,8 @@ order for `%.*s' already defined at %s:%Zu"), else { /* Determine the range. To do so we have to determine the - common prefix of the both names and then the numeric - values of both ends. */ + common prefix of the both names and then the numeric + values of both ends. */ size_t lenfrom = strlen (startp->name); size_t lento = strlen (endp->name); char buf[lento + 1]; @@ -2222,14 +2224,14 @@ collate_output (struct localedef_t *locale, const struct charmap_t *charmap, else { /* The entries in the list are sorted by length and then - alphabetically. This is the order in which we will add the - elements to the collation table. This allows simply walking + alphabetically. This is the order in which we will add the + elements to the collation table. This allows simply walking the table in sequence and stopping at the first matching - entry. 
Since the longer sequences are coming first in the - list they have the possibility to match first, just as it - has to be. In the worst case we are walking to the end of - the list where we put, if no singlebyte sequence is defined - in the locale definition, the weights for UNDEFINED. + entry. Since the longer sequences are coming first in the + list they have the possibility to match first, just as it + has to be. In the worst case we are walking to the end of + the list where we put, if no singlebyte sequence is defined + in the locale definition, the weights for UNDEFINED. To reduce the length of the search list we compress them a bit. This happens by collecting sequences of consecutive byte @@ -2297,7 +2299,7 @@ collate_output (struct localedef_t *locale, const struct charmap_t *charmap, obstack_1grow_fast (&extrapool, curp->mbs[i]); /* Now find the end of the consecutive sequence and - add all the indeces in the indirect pool. */ + add all the indeces in the indirect pool. */ do { weightidx = output_weight (&weightpool, collate, curp); @@ -2312,7 +2314,7 @@ collate_output (struct localedef_t *locale, const struct charmap_t *charmap, obstack_int32_grow (&indirectpool, weightidx); /* And add the end byte sequence. Without length this - time. */ + time. */ for (i = 1; i < curp->nmbs; ++i) obstack_1grow_fast (&extrapool, curp->mbs[i]); } @@ -2356,7 +2358,7 @@ collate_output (struct localedef_t *locale, const struct charmap_t *charmap, & (__alignof__ (int32_t) - 1)) == 0); /* If the final entry in the list is not a single character we - add an UNDEFINED entry here. */ + add an UNDEFINED entry here. */ if (lastp->nmbs != 1) { int added = ((sizeof (int32_t) + 1 + 1 + __alignof__ (int32_t) - 1) @@ -3293,7 +3295,7 @@ error while adding equivalent collating symbol")); else { /* One should not be allowed to open the same - section twice. */ + section twice. 
*/ if (sp->first != NULL) lr_error (ldfile, _("\ %s: multiple order definitions for section `%s'"), @@ -3349,7 +3351,7 @@ error while adding equivalent collating symbol")); section. */ collate->current_section = &collate->unnamed_section; - if (collate->unnamed_section.first != NULL) + if (collate->unnamed_section_defined) lr_error (ldfile, _("\ %s: multiple order definitions for unnamed section"), "LC_COLLATE"); @@ -3359,6 +3361,7 @@ error while adding equivalent collating symbol")); the collate->sections list. */ collate->unnamed_section.next = collate->sections; collate->sections = &collate->unnamed_section; + collate->unnamed_section_defined = true; } } @@ -3579,9 +3582,9 @@ error while adding equivalent collating symbol")); else { /* This is bad. The section after which we have to - reorder does not exist. Therefore we cannot - process the whole rest of this reorder - specification. */ + reorder does not exist. Therefore we cannot + process the whole rest of this reorder + specification. */ lr_error (ldfile, _("%s: section `%.*s' not known"), "LC_COLLATE", (int) arg->val.str.lenmb, arg->val.str.startmb); @@ -3657,9 +3660,9 @@ error while adding equivalent collating symbol")); if (state == 0) { /* We are outside an `order_start' region. This means - we must only accept definitions of values for - collation symbols since these are purely abstract - values and don't need directions associated. */ + we must only accept definitions of values for + collation symbols since these are purely abstract + values and don't need directions associated. */ void *ptr; if (find_entry (&collate->seq_table, symstr, symlen, &ptr) == 0) @@ -3741,7 +3744,7 @@ error while adding equivalent collating symbol")); seqp->next->last = seqp->last; /* We also have to check whether this entry is the - first or last of a section. */ + first or last of a section. 
*/ if (seqp->section->first == seqp) { if (seqp->section->first == seqp->section->last) @@ -3798,7 +3801,7 @@ error while adding equivalent collating symbol")); } /* Process the rest of the line which might change - the collation rules. */ + the collation rules. */ arg = lr_token (ldfile, charmap, result, repertoire, verbose); if (arg->tok != tok_eof && arg->tok != tok_eol) @@ -3810,8 +3813,8 @@ error while adding equivalent collating symbol")); else if (was_ellipsis != tok_none) { /* Using the information in the `ellipsis_weight' - element and this and the last value we have to handle - the ellipsis now. */ + element and this and the last value we have to handle + the ellipsis now. */ assert (state == 1); handle_ellipsis (ldfile, symstr, symlen, was_ellipsis, charmap, @@ -3871,7 +3874,7 @@ error while adding equivalent collating symbol")); case tok_ellipsis3: /* absolute ellipsis */ case tok_ellipsis4: /* symbolic decimal ellipsis */ /* This is the symbolic (decimal or hexadecimal) or absolute - ellipsis. */ + ellipsis. */ if (was_ellipsis != tok_none) goto err_label; diff --git a/malloc/Makefile b/malloc/Makefile index 1099335fff..e7ec1abf93 100644 --- a/malloc/Makefile +++ b/malloc/Makefile @@ -104,6 +104,7 @@ $(objpfx)memusagestat: $(memusagestat-modules:%=$(objpfx)%.o) include ../Rules CFLAGS-mcheck-init.c = $(PIC-ccflag) +CFLAGS-obstack.c = $(uses-callbacks) $(objpfx)libmcheck.a: $(objpfx)mcheck-init.o -rm -f $@ diff --git a/malloc/malloc.c b/malloc/malloc.c index 4b623e2200..5954376e51 100644 --- a/malloc/malloc.c +++ b/malloc/malloc.c @@ -107,7 +107,7 @@ and status information. Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) - 8-byte ptrs: 24/32 bytes (including, 4/8 overhead) + 8-byte ptrs: 24/32 bytes (including, 4/8 overhead) When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte ptrs but 4 byte size) or 24 (for 8/8) additional bytes are @@ -126,7 +126,7 @@ minimal mmap unit); typically 4096 or 8192 bytes. 
Maximum allocated size: 4-byte size_t: 2^32 minus about two pages - 8-byte size_t: 2^64 minus about two pages + 8-byte size_t: 2^64 minus about two pages It is assumed that (possibly signed) size_t values suffice to represent chunk sizes. `Possibly signed' is due to the fact @@ -329,7 +329,29 @@ extern "C" { or other mallocs available that do this. */ -#include <assert.h> +#ifdef NDEBUG +# define assert(expr) ((void) 0) +#else +# define assert(expr) \ + ((expr) \ + ? ((void) 0) \ + : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__)) + +extern const char *__progname; + +static void +__malloc_assert (const char *assertion, const char *file, unsigned int line, + const char *function) +{ + (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n", + __progname, __progname[0] ? ": " : "", + file, line, + function ? function : "", function ? ": " : "", + assertion); + fflush (stderr); + abort (); +} +#endif /* @@ -1000,17 +1022,17 @@ int public_mALLOPt(); arena: current total non-mmapped bytes allocated from system ordblks: the number of free chunks smblks: the number of fastbin blocks (i.e., small chunks that - have been freed but not use resused or consolidated) + have been freed but not use resused or consolidated) hblks: current number of mmapped regions hblkhd: total bytes held in mmapped regions usmblks: the maximum total allocated space. This will be greater - than current total if trimming has occurred. + than current total if trimming has occurred. fsmblks: total bytes held in fastbin blocks uordblks: current total allocated space (normal or mmapped) fordblks: total free space keepcost: the maximum number of bytes that could ideally be released - back to system via malloc_trim. ("ideally" means that - it ignores page restrictions etc.) + back to system via malloc_trim. ("ideally" means that + it ignores page restrictions etc.) 
Because these fields are ints, but internal bookkeeping may be kept as longs, the reported values may wrap around zero and @@ -1817,17 +1839,17 @@ struct malloc_chunk { chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Size of previous chunk, if allocated | | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Size of chunk, in bytes |M|P| + | Size of previous chunk, if allocated | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of chunk, in bytes |M|P| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | User data starts here... . - . . - . (malloc_usable_size() bytes) . - . | + | User data starts here... . + . . + . (malloc_usable_size() bytes) . + . | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Size of chunk | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Where "chunk" is the front of the chunk for the purpose of most of @@ -1841,20 +1863,20 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Free chunks are stored in circular doubly-linked lists, and look like this: chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Size of previous chunk | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `head:' | Size of chunk, in bytes |P| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Forward pointer to next chunk in list | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Back pointer to previous chunk in list | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Unused space (may be 0 bytes long) . - . . - . 
| + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `foot:' | Size of chunk, in bytes | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ The P (PREV_INUSE) bit, stored in the unused low-order bit of the chunk size (which is always a multiple of two words), is an in-use @@ -1875,14 +1897,14 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ The two exceptions to all this are 1. The special chunk `top' doesn't bother using the - trailing size field since there is no next contiguous chunk - that would have to index off it. After initialization, `top' - is forced to always exist. If it would become less than - MINSIZE bytes long, it is replenished. + trailing size field since there is no next contiguous chunk + that would have to index off it. After initialization, `top' + is forced to always exist. If it would become less than + MINSIZE bytes long, it is replenished. 2. Chunks allocated via mmap, which have the second-lowest-order - bit M (IS_MMAPPED) set in their size fields. Because they are - allocated one-by-one, each must contain its own trailing size field. + bit M (IS_MMAPPED) set in their size fields. Because they are + allocated one-by-one, each must contain its own trailing size field. */ @@ -2155,7 +2177,7 @@ typedef struct malloc_chunk* mbinptr; ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \ ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \ ((((unsigned long)(sz)) >> 18) <= 2)? 
124 + (((unsigned long)(sz)) >> 18): \ - 126) + 126) // XXX It remains to be seen whether it is good to keep the widths of // XXX the buckets the same or whether it should be scaled by a factor @@ -2166,7 +2188,7 @@ typedef struct malloc_chunk* mbinptr; ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \ ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \ ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \ - 126) + 126) #define largebin_index(sz) \ (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz)) @@ -2592,8 +2614,8 @@ static void do_check_chunk(av, p) mstate av; mchunkptr p; /* Has legal address ... */ if (p != av->top) { if (contiguous(av)) { - assert(((char*)p) >= min_address); - assert(((char*)p + sz) <= ((char*)(av->top))); + assert(((char*)p) >= min_address); + assert(((char*)p + sz) <= ((char*)(av->top))); } } else { @@ -2850,9 +2872,9 @@ static void do_check_malloc_state(mstate av) unsigned int binbit = get_binmap(av,i); int empty = last(b) == b; if (!binbit) - assert(empty); + assert(empty); else if (!empty) - assert(binbit); + assert(binbit); } for (p = last(b); p != b; p = p->bk) { @@ -2861,12 +2883,12 @@ static void do_check_malloc_state(mstate av) size = chunksize(p); total += size; if (i >= 2) { - /* chunk belongs in bin */ - idx = bin_index(size); - assert(idx == i); - /* lists are sorted */ - assert(p->bk == b || - (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p)); + /* chunk belongs in bin */ + idx = bin_index(size); + assert(idx == i); + /* lists are sorted */ + assert(p->bk == b || + (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p)); if (!in_smallbin_range(size)) { @@ -2894,10 +2916,10 @@ static void do_check_malloc_state(mstate av) assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL); /* chunk is followed by a legal chain of inuse chunks */ for (q = next_chunk(p); - (q != av->top && inuse(q) && - (unsigned 
long)(chunksize(q)) >= MINSIZE); - q = next_chunk(q)) - do_check_inuse_chunk(av, q); + (q != av->top && inuse(q) && + (unsigned long)(chunksize(q)) >= MINSIZE); + q = next_chunk(q)) + do_check_inuse_chunk(av, q); } } @@ -2913,14 +2935,14 @@ static void do_check_malloc_state(mstate av) assert(mp_.n_mmaps <= mp_.max_n_mmaps); assert((unsigned long)(av->system_mem) <= - (unsigned long)(av->max_system_mem)); + (unsigned long)(av->max_system_mem)); assert((unsigned long)(mp_.mmapped_mem) <= - (unsigned long)(mp_.max_mmapped_mem)); + (unsigned long)(mp_.max_mmapped_mem)); #ifdef NO_THREADS assert((unsigned long)(mp_.max_total_mem) >= - (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem)); + (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem)); #endif } #endif @@ -3005,51 +3027,51 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; if (mm != MAP_FAILED) { - /* - The offset to the start of the mmapped region is stored - in the prev_size field of the chunk. This allows us to adjust - returned start address to meet alignment requirements here - and in memalign(), and still be able to compute proper - address argument for later munmap in free() and realloc(). - */ + /* + The offset to the start of the mmapped region is stored + in the prev_size field of the chunk. This allows us to adjust + returned start address to meet alignment requirements here + and in memalign(), and still be able to compute proper + address argument for later munmap in free() and realloc(). + */ #if 1 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. 
*/ - assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0); + assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0); #else - front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK; - if (front_misalign > 0) { - correction = MALLOC_ALIGNMENT - front_misalign; - p = (mchunkptr)(mm + correction); - p->prev_size = correction; - set_head(p, (size - correction) |IS_MMAPPED); - } - else + front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) { + correction = MALLOC_ALIGNMENT - front_misalign; + p = (mchunkptr)(mm + correction); + p->prev_size = correction; + set_head(p, (size - correction) |IS_MMAPPED); + } + else #endif { p = (mchunkptr)mm; set_head(p, size|IS_MMAPPED); } - /* update statistics */ + /* update statistics */ - if (++mp_.n_mmaps > mp_.max_n_mmaps) - mp_.max_n_mmaps = mp_.n_mmaps; + if (++mp_.n_mmaps > mp_.max_n_mmaps) + mp_.max_n_mmaps = mp_.n_mmaps; - sum = mp_.mmapped_mem += size; - if (sum > (unsigned long)(mp_.max_mmapped_mem)) - mp_.max_mmapped_mem = sum; + sum = mp_.mmapped_mem += size; + if (sum > (unsigned long)(mp_.max_mmapped_mem)) + mp_.max_mmapped_mem = sum; #ifdef NO_THREADS - sum += av->system_mem; - if (sum > (unsigned long)(mp_.max_total_mem)) - mp_.max_total_mem = sum; + sum += av->system_mem; + if (sum > (unsigned long)(mp_.max_total_mem)) + mp_.max_total_mem = sum; #endif - check_chunk(av, p); + check_chunk(av, p); - return chunk2mem(p); + return chunk2mem(p); } } } @@ -3069,8 +3091,8 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; */ assert((old_top == initial_top(av) && old_size == 0) || - ((unsigned long) (old_size) >= MINSIZE && - prev_inuse(old_top) && + ((unsigned long) (old_size) >= MINSIZE && + prev_inuse(old_top) && ((unsigned long)old_end & pagemask) == 0)); /* Precondition: not enough current space to satisfy nb request */ @@ -3096,7 +3118,7 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; arena_mem += 
old_heap->size - old_heap_size; #if 0 if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem) - max_total_mem = mmapped_mem + arena_mem + sbrked_mem; + max_total_mem = mmapped_mem + arena_mem + sbrked_mem; #endif set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top) | PREV_INUSE); @@ -3205,17 +3227,17 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; if (mbrk != MAP_FAILED) { - /* We do not need, and cannot use, another sbrk call to find end */ - brk = mbrk; - snd_brk = brk + size; - - /* - Record that we no longer have a contiguous sbrk region. - After the first time mmap is used as backup, we do not - ever rely on contiguous space since this could incorrectly - bridge regions. - */ - set_noncontiguous(av); + /* We do not need, and cannot use, another sbrk call to find end */ + brk = mbrk; + snd_brk = brk + size; + + /* + Record that we no longer have a contiguous sbrk region. + After the first time mmap is used as backup, we do not + ever rely on contiguous space since this could incorrectly + bridge regions. + */ + set_noncontiguous(av); } } #endif @@ -3242,19 +3264,19 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; Otherwise, make adjustments: * If the first time through or noncontiguous, we need to call sbrk - just to find out where the end of memory lies. + just to find out where the end of memory lies. * We need to ensure that all returned chunks from malloc will meet - MALLOC_ALIGNMENT + MALLOC_ALIGNMENT * If there was an intervening foreign sbrk, we need to adjust sbrk - request size to account for fact that we will not be able to - combine new space with existing space in old_top. + request size to account for fact that we will not be able to + combine new space with existing space in old_top. * Almost all systems internally allocate whole pages at a time, in - which case we might as well use the whole last page of request. 
- So we allocate enough more memory to hit a page boundary now, - which in turn causes future contiguous calls to page-align. + which case we might as well use the whole last page of request. + So we allocate enough more memory to hit a page boundary now, + which in turn causes future contiguous calls to page-align. */ else { @@ -3270,51 +3292,51 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; if (old_size) av->system_mem += brk - old_end; - /* Guarantee alignment of first new chunk made from this space */ + /* Guarantee alignment of first new chunk made from this space */ - front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK; - if (front_misalign > 0) { + front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) { - /* - Skip over some bytes to arrive at an aligned position. - We don't need to specially mark these wasted front bytes. - They will never be accessed anyway because - prev_inuse of av->top (and any chunk created from its start) - is always true after initialization. - */ + /* + Skip over some bytes to arrive at an aligned position. + We don't need to specially mark these wasted front bytes. + They will never be accessed anyway because + prev_inuse of av->top (and any chunk created from its start) + is always true after initialization. + */ - correction = MALLOC_ALIGNMENT - front_misalign; - aligned_brk += correction; - } + correction = MALLOC_ALIGNMENT - front_misalign; + aligned_brk += correction; + } - /* - If this isn't adjacent to existing space, then we will not - be able to merge with old_top space, so must add to 2nd request. - */ + /* + If this isn't adjacent to existing space, then we will not + be able to merge with old_top space, so must add to 2nd request. 
+ */ - correction += old_size; + correction += old_size; - /* Extend the end address to hit a page boundary */ - end_misalign = (INTERNAL_SIZE_T)(brk + size + correction); - correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign; + /* Extend the end address to hit a page boundary */ + end_misalign = (INTERNAL_SIZE_T)(brk + size + correction); + correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign; - assert(correction >= 0); - snd_brk = (char*)(MORECORE(correction)); + assert(correction >= 0); + snd_brk = (char*)(MORECORE(correction)); - /* - If can't allocate correction, try to at least find out current - brk. It might be enough to proceed without failing. + /* + If can't allocate correction, try to at least find out current + brk. It might be enough to proceed without failing. - Note that if second sbrk did NOT fail, we assume that space - is contiguous with first sbrk. This is a safe assumption unless - program is multithreaded but doesn't use locks and a foreign sbrk - occurred between our first and second calls. - */ + Note that if second sbrk did NOT fail, we assume that space + is contiguous with first sbrk. This is a safe assumption unless + program is multithreaded but doesn't use locks and a foreign sbrk + occurred between our first and second calls. + */ - if (snd_brk == (char*)(MORECORE_FAILURE)) { - correction = 0; - snd_brk = (char*)(MORECORE(0)); - } else { + if (snd_brk == (char*)(MORECORE_FAILURE)) { + correction = 0; + snd_brk = (char*)(MORECORE(0)); + } else { /* Call the `morecore' hook if necessary. 
*/ void (*hook) (void) = force_reg (__after_morecore_hook); if (__builtin_expect (hook != NULL, 0)) @@ -3324,61 +3346,61 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; /* handle non-contiguous cases */ else { - /* MORECORE/mmap must correctly align */ - assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0); + /* MORECORE/mmap must correctly align */ + assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0); - /* Find out current end of memory */ - if (snd_brk == (char*)(MORECORE_FAILURE)) { - snd_brk = (char*)(MORECORE(0)); - } + /* Find out current end of memory */ + if (snd_brk == (char*)(MORECORE_FAILURE)) { + snd_brk = (char*)(MORECORE(0)); + } } /* Adjust top based on results of second sbrk */ if (snd_brk != (char*)(MORECORE_FAILURE)) { - av->top = (mchunkptr)aligned_brk; - set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); - av->system_mem += correction; - - /* - If not the first time through, we either have a - gap due to foreign sbrk or a non-contiguous region. Insert a - double fencepost at old_top to prevent consolidation with space - we don't own. These fenceposts are artificial chunks that are - marked as inuse and are in any case too small to use. We need - two to make sizes and alignments work out. - */ - - if (old_size != 0) { - /* - Shrink old_top to insert fenceposts, keeping size a - multiple of MALLOC_ALIGNMENT. We know there is at least - enough space in old_top to do this. - */ - old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK; - set_head(old_top, old_size | PREV_INUSE); - - /* - Note that the following assignments completely overwrite - old_top when old_size was previously MINSIZE. This is - intentional. We need the fencepost, even if old_top otherwise gets - lost. - */ - chunk_at_offset(old_top, old_size )->size = - (2*SIZE_SZ)|PREV_INUSE; - - chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size = - (2*SIZE_SZ)|PREV_INUSE; - - /* If possible, release the rest. 
*/ - if (old_size >= MINSIZE) { + av->top = (mchunkptr)aligned_brk; + set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); + av->system_mem += correction; + + /* + If not the first time through, we either have a + gap due to foreign sbrk or a non-contiguous region. Insert a + double fencepost at old_top to prevent consolidation with space + we don't own. These fenceposts are artificial chunks that are + marked as inuse and are in any case too small to use. We need + two to make sizes and alignments work out. + */ + + if (old_size != 0) { + /* + Shrink old_top to insert fenceposts, keeping size a + multiple of MALLOC_ALIGNMENT. We know there is at least + enough space in old_top to do this. + */ + old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK; + set_head(old_top, old_size | PREV_INUSE); + + /* + Note that the following assignments completely overwrite + old_top when old_size was previously MINSIZE. This is + intentional. We need the fencepost, even if old_top otherwise gets + lost. + */ + chunk_at_offset(old_top, old_size )->size = + (2*SIZE_SZ)|PREV_INUSE; + + chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size = + (2*SIZE_SZ)|PREV_INUSE; + + /* If possible, release the rest. */ + if (old_size >= MINSIZE) { #ifdef ATOMIC_FASTBINS - _int_free(av, old_top, 1); + _int_free(av, old_top, 1); #else - _int_free(av, old_top); + _int_free(av, old_top); #endif - } + } - } + } } } @@ -3456,13 +3478,13 @@ static int sYSTRIm(pad, av) size_t pad; mstate av; if (current_brk == (char*)(av->top) + top_size) { /* - Attempt to release memory. We ignore MORECORE return value, - and instead call again to find out where new end of memory is. - This avoids problems if first call releases less than we asked, - of if failure somehow altered brk value. (We could still - encounter problems if it altered brk in some very bad way, - but the only thing we can do is adjust anyway, which will cause - some downstream failure.) + Attempt to release memory. 
We ignore MORECORE return value, + and instead call again to find out where new end of memory is. + This avoids problems if first call releases less than we asked, + of if failure somehow altered brk value. (We could still + encounter problems if it altered brk in some very bad way, + but the only thing we can do is adjust anyway, which will cause + some downstream failure.) */ MORECORE(-extra); @@ -3473,15 +3495,15 @@ static int sYSTRIm(pad, av) size_t pad; mstate av; new_brk = (char*)(MORECORE(0)); if (new_brk != (char*)MORECORE_FAILURE) { - released = (long)(current_brk - new_brk); - - if (released != 0) { - /* Success. Adjust top. */ - av->system_mem -= released; - set_head(av->top, (top_size - released) | PREV_INUSE); - check_malloc_state(av); - return 1; - } + released = (long)(current_brk - new_brk); + + if (released != 0) { + /* Success. Adjust top. */ + av->system_mem -= released; + set_head(av->top, (top_size - released) | PREV_INUSE); + check_malloc_state(av); + return 1; + } } } } @@ -3559,7 +3581,7 @@ mremap_chunk(p, new_size) mchunkptr p; size_t new_size; return p; cp = (char *)mremap((char *)p - offset, size + offset, new_size, - MREMAP_MAYMOVE); + MREMAP_MAYMOVE); if (cp == MAP_FAILED) return 0; @@ -3650,8 +3672,8 @@ public_mALLOc(size_t bytes) ar_ptr = arena_get2(ar_ptr->next ? 
ar_ptr : 0, bytes); (void)mutex_unlock(&main_arena.mutex); if(ar_ptr) { - victim = _int_malloc(ar_ptr, bytes); - (void)mutex_unlock(&ar_ptr->mutex); + victim = _int_malloc(ar_ptr, bytes); + (void)mutex_unlock(&ar_ptr->mutex); } #endif } @@ -3689,7 +3711,7 @@ public_fREe(Void_t* mem) /* see if the dynamic brk/mmap threshold needs adjusting */ if (!mp_.no_dyn_threshold && p->size > mp_.mmap_threshold - && p->size <= DEFAULT_MMAP_THRESHOLD_MAX) + && p->size <= DEFAULT_MMAP_THRESHOLD_MAX) { mp_.mmap_threshold = chunksize (p); mp_.trim_threshold = 2 * mp_.mmap_threshold; @@ -3871,8 +3893,8 @@ public_mEMALIGn(size_t alignment, size_t bytes) (void)mutex_unlock(&ar_ptr->mutex); ar_ptr = arena_get2(prev, bytes); if(ar_ptr) { - p = _int_memalign(ar_ptr, alignment, bytes); - (void)mutex_unlock(&ar_ptr->mutex); + p = _int_memalign(ar_ptr, alignment, bytes); + (void)mutex_unlock(&ar_ptr->mutex); } #endif } @@ -3919,8 +3941,8 @@ public_vALLOc(size_t bytes) /* ... or sbrk() has failed and there is still a chance to mmap() */ ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes); if(ar_ptr) { - p = _int_memalign(ar_ptr, pagesz, bytes); - (void)mutex_unlock(&ar_ptr->mutex); + p = _int_memalign(ar_ptr, pagesz, bytes); + (void)mutex_unlock(&ar_ptr->mutex); } #endif } @@ -3965,8 +3987,8 @@ public_pVALLOc(size_t bytes) ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes + 2*pagesz + MINSIZE); if(ar_ptr) { - p = _int_memalign(ar_ptr, pagesz, rounded_bytes); - (void)mutex_unlock(&ar_ptr->mutex); + p = _int_memalign(ar_ptr, pagesz, rounded_bytes); + (void)mutex_unlock(&ar_ptr->mutex); } #endif } @@ -4059,8 +4081,8 @@ public_cALLOc(size_t n, size_t elem_size) av = arena_get2(av->next ? 
av : 0, sz); (void)mutex_unlock(&main_arena.mutex); if(av) { - mem = _int_malloc(av, sz); - (void)mutex_unlock(&av->mutex); + mem = _int_malloc(av, sz); + (void)mutex_unlock(&av->mutex); } #endif } @@ -4303,16 +4325,16 @@ _int_malloc(mstate av, size_t bytes) if ( (victim = last(bin)) != bin) { if (victim == 0) /* initialization check */ - malloc_consolidate(av); + malloc_consolidate(av); else { - bck = victim->bk; - set_inuse_bit_at_offset(victim, nb); - bin->bk = bck; - bck->fd = bin; + bck = victim->bk; + set_inuse_bit_at_offset(victim, nb); + bin->bk = bck; + bck->fd = bin; - if (av != &main_arena) + if (av != &main_arena) victim->size |= NON_MAIN_ARENA; - check_malloced_chunk(av, victim, nb); + check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); @@ -4363,36 +4385,36 @@ _int_malloc(mstate av, size_t bytes) size = chunksize(victim); /* - If a small request, try to use last remainder if it is the - only chunk in unsorted bin. This helps promote locality for - runs of consecutive small requests. This is the only - exception to best-fit, and applies only when there is - no exact fit for a small chunk. + If a small request, try to use last remainder if it is the + only chunk in unsorted bin. This helps promote locality for + runs of consecutive small requests. This is the only + exception to best-fit, and applies only when there is + no exact fit for a small chunk. 
*/ if (in_smallbin_range(nb) && - bck == unsorted_chunks(av) && - victim == av->last_remainder && - (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { - - /* split and reattach remainder */ - remainder_size = size - nb; - remainder = chunk_at_offset(victim, nb); - unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; - av->last_remainder = remainder; - remainder->bk = remainder->fd = unsorted_chunks(av); + bck == unsorted_chunks(av) && + victim == av->last_remainder && + (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { + + /* split and reattach remainder */ + remainder_size = size - nb; + remainder = chunk_at_offset(victim, nb); + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; + av->last_remainder = remainder; + remainder->bk = remainder->fd = unsorted_chunks(av); if (!in_smallbin_range(remainder_size)) { remainder->fd_nextsize = NULL; remainder->bk_nextsize = NULL; } - set_head(victim, nb | PREV_INUSE | + set_head(victim, nb | PREV_INUSE | (av != &main_arena ? 
NON_MAIN_ARENA : 0)); - set_head(remainder, remainder_size | PREV_INUSE); - set_foot(remainder, remainder_size); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); - check_malloced_chunk(av, victim, nb); + check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); @@ -4406,10 +4428,10 @@ _int_malloc(mstate av, size_t bytes) /* Take now instead of binning if exact fit */ if (size == nb) { - set_inuse_bit_at_offset(victim, size); + set_inuse_bit_at_offset(victim, size); if (av != &main_arena) victim->size |= NON_MAIN_ARENA; - check_malloced_chunk(av, victim, nb); + check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) alloc_perturb (p, bytes); @@ -4419,30 +4441,30 @@ _int_malloc(mstate av, size_t bytes) /* place chunk in bin */ if (in_smallbin_range(size)) { - victim_index = smallbin_index(size); - bck = bin_at(av, victim_index); - fwd = bck->fd; + victim_index = smallbin_index(size); + bck = bin_at(av, victim_index); + fwd = bck->fd; } else { - victim_index = largebin_index(size); - bck = bin_at(av, victim_index); - fwd = bck->fd; + victim_index = largebin_index(size); + bck = bin_at(av, victim_index); + fwd = bck->fd; - /* maintain large bins in sorted order */ - if (fwd != bck) { + /* maintain large bins in sorted order */ + if (fwd != bck) { /* Or with inuse bit to speed comparisons */ - size |= PREV_INUSE; - /* if smaller than smallest, bypass loop below */ + size |= PREV_INUSE; + /* if smaller than smallest, bypass loop below */ assert((bck->bk->size & NON_MAIN_ARENA) == 0); if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) { - fwd = bck; - bck = bck->bk; + fwd = bck; + bck = bck->bk; victim->fd_nextsize = fwd->fd; victim->bk_nextsize = fwd->fd->bk_nextsize; fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim; - } - else { + } + else { assert((fwd->size & NON_MAIN_ARENA) == 
0); while ((unsigned long) size < fwd->size) { @@ -4461,7 +4483,7 @@ _int_malloc(mstate av, size_t bytes) victim->bk_nextsize->fd_nextsize = victim; } bck = fwd->bk; - } + } } else victim->fd_nextsize = victim->bk_nextsize = victim; } @@ -4487,32 +4509,32 @@ _int_malloc(mstate av, size_t bytes) /* skip scan if empty or largest chunk is too small */ if ((victim = first(bin)) != bin && - (unsigned long)(victim->size) >= (unsigned long)(nb)) { + (unsigned long)(victim->size) >= (unsigned long)(nb)) { victim = victim->bk_nextsize; - while (((unsigned long)(size = chunksize(victim)) < - (unsigned long)(nb))) - victim = victim->bk_nextsize; + while (((unsigned long)(size = chunksize(victim)) < + (unsigned long)(nb))) + victim = victim->bk_nextsize; /* Avoid removing the first entry for a size so that the skip list does not have to be rerouted. */ if (victim != last(bin) && victim->size == victim->fd->size) victim = victim->fd; - remainder_size = size - nb; - unlink(victim, bck, fwd); + remainder_size = size - nb; + unlink(victim, bck, fwd); - /* Exhaust */ - if (remainder_size < MINSIZE) { - set_inuse_bit_at_offset(victim, size); + /* Exhaust */ + if (remainder_size < MINSIZE) { + set_inuse_bit_at_offset(victim, size); if (av != &main_arena) victim->size |= NON_MAIN_ARENA; - } - /* Split */ - else { - remainder = chunk_at_offset(victim, nb); - /* We cannot assume the unsorted list is empty and therefore - have to perform a complete insert here. */ + } + /* Split */ + else { + remainder = chunk_at_offset(victim, nb); + /* We cannot assume the unsorted list is empty and therefore + have to perform a complete insert here. */ bck = unsorted_chunks(av); fwd = bck->fd; remainder->bk = bck; @@ -4524,11 +4546,11 @@ _int_malloc(mstate av, size_t bytes) remainder->fd_nextsize = NULL; remainder->bk_nextsize = NULL; } - set_head(victim, nb | PREV_INUSE | + set_head(victim, nb | PREV_INUSE | (av != &main_arena ? 
NON_MAIN_ARENA : 0)); - set_head(remainder, remainder_size | PREV_INUSE); - set_foot(remainder, remainder_size); - } + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + } check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) @@ -4558,20 +4580,20 @@ _int_malloc(mstate av, size_t bytes) /* Skip rest of block if there are no more set bits in this block. */ if (bit > map || bit == 0) { - do { - if (++block >= BINMAPSIZE) /* out of bins */ - goto use_top; - } while ( (map = av->binmap[block]) == 0); + do { + if (++block >= BINMAPSIZE) /* out of bins */ + goto use_top; + } while ( (map = av->binmap[block]) == 0); - bin = bin_at(av, (block << BINMAPSHIFT)); - bit = 1; + bin = bin_at(av, (block << BINMAPSHIFT)); + bit = 1; } /* Advance to bin with set bit. There must be one. */ while ((bit & map) == 0) { - bin = next_bin(bin); - bit <<= 1; - assert(bit != 0); + bin = next_bin(bin); + bit <<= 1; + assert(bit != 0); } /* Inspect the bin. It is likely to be non-empty */ @@ -4579,32 +4601,32 @@ _int_malloc(mstate av, size_t bytes) /* If a false alarm (empty bin), clear the bit. */ if (victim == bin) { - av->binmap[block] = map &= ~bit; /* Write through */ - bin = next_bin(bin); - bit <<= 1; + av->binmap[block] = map &= ~bit; /* Write through */ + bin = next_bin(bin); + bit <<= 1; } else { - size = chunksize(victim); + size = chunksize(victim); - /* We know the first chunk in this bin is big enough to use. */ - assert((unsigned long)(size) >= (unsigned long)(nb)); + /* We know the first chunk in this bin is big enough to use. 
*/ + assert((unsigned long)(size) >= (unsigned long)(nb)); - remainder_size = size - nb; + remainder_size = size - nb; - /* unlink */ - unlink(victim, bck, fwd); + /* unlink */ + unlink(victim, bck, fwd); - /* Exhaust */ - if (remainder_size < MINSIZE) { - set_inuse_bit_at_offset(victim, size); + /* Exhaust */ + if (remainder_size < MINSIZE) { + set_inuse_bit_at_offset(victim, size); if (av != &main_arena) victim->size |= NON_MAIN_ARENA; - } + } - /* Split */ - else { - remainder = chunk_at_offset(victim, nb); + /* Split */ + else { + remainder = chunk_at_offset(victim, nb); /* We cannot assume the unsorted list is empty and therefore have to perform a complete insert here. */ @@ -4615,19 +4637,19 @@ _int_malloc(mstate av, size_t bytes) bck->fd = remainder; fwd->bk = remainder; - /* advertise as last remainder */ - if (in_smallbin_range(nb)) - av->last_remainder = remainder; + /* advertise as last remainder */ + if (in_smallbin_range(nb)) + av->last_remainder = remainder; if (!in_smallbin_range(remainder_size)) { remainder->fd_nextsize = NULL; remainder->bk_nextsize = NULL; } - set_head(victim, nb | PREV_INUSE | + set_head(victim, nb | PREV_INUSE | (av != &main_arena ? 
NON_MAIN_ARENA : 0)); - set_head(remainder, remainder_size | PREV_INUSE); - set_foot(remainder, remainder_size); - } + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + } check_malloced_chunk(av, victim, nb); void *p = chunk2mem(victim); if (__builtin_expect (perturb_byte, 0)) @@ -5079,53 +5101,53 @@ static void malloc_consolidate(av) mstate av; #ifndef ATOMIC_FASTBINS *fb = 0; #endif - do { - check_inuse_chunk(av, p); - nextp = p->fd; - - /* Slightly streamlined version of consolidation code in free() */ - size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); - nextchunk = chunk_at_offset(p, size); - nextsize = chunksize(nextchunk); - - if (!prev_inuse(p)) { - prevsize = p->prev_size; - size += prevsize; - p = chunk_at_offset(p, -((long) prevsize)); - unlink(p, bck, fwd); - } - - if (nextchunk != av->top) { - nextinuse = inuse_bit_at_offset(nextchunk, nextsize); - - if (!nextinuse) { - size += nextsize; - unlink(nextchunk, bck, fwd); - } else + do { + check_inuse_chunk(av, p); + nextp = p->fd; + + /* Slightly streamlined version of consolidation code in free() */ + size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); + nextchunk = chunk_at_offset(p, size); + nextsize = chunksize(nextchunk); + + if (!prev_inuse(p)) { + prevsize = p->prev_size; + size += prevsize; + p = chunk_at_offset(p, -((long) prevsize)); + unlink(p, bck, fwd); + } + + if (nextchunk != av->top) { + nextinuse = inuse_bit_at_offset(nextchunk, nextsize); + + if (!nextinuse) { + size += nextsize; + unlink(nextchunk, bck, fwd); + } else clear_inuse_bit_at_offset(nextchunk, 0); - first_unsorted = unsorted_bin->fd; - unsorted_bin->fd = p; - first_unsorted->bk = p; + first_unsorted = unsorted_bin->fd; + unsorted_bin->fd = p; + first_unsorted->bk = p; - if (!in_smallbin_range (size)) { + if (!in_smallbin_range (size)) { p->fd_nextsize = NULL; p->bk_nextsize = NULL; } - set_head(p, size | PREV_INUSE); - p->bk = unsorted_bin; - p->fd = first_unsorted; - set_foot(p, size); - } + 
set_head(p, size | PREV_INUSE); + p->bk = unsorted_bin; + p->fd = first_unsorted; + set_foot(p, size); + } - else { - size += nextsize; - set_head(p, size | PREV_INUSE); - av->top = p; - } + else { + size += nextsize; + set_head(p, size | PREV_INUSE); + av->top = p; + } - } while ( (p = nextp) != 0); + } while ( (p = nextp) != 0); } } while (fb++ != maxfb); @@ -5201,82 +5223,82 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize, else { /* Try to expand forward into top */ if (next == av->top && - (unsigned long)(newsize = oldsize + nextsize) >= - (unsigned long)(nb + MINSIZE)) { - set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); - av->top = chunk_at_offset(oldp, nb); - set_head(av->top, (newsize - nb) | PREV_INUSE); + (unsigned long)(newsize = oldsize + nextsize) >= + (unsigned long)(nb + MINSIZE)) { + set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); + av->top = chunk_at_offset(oldp, nb); + set_head(av->top, (newsize - nb) | PREV_INUSE); check_inuse_chunk(av, oldp); - return chunk2mem(oldp); + return chunk2mem(oldp); } /* Try to expand forward into next chunk; split off remainder below */ else if (next != av->top && - !inuse(next) && - (unsigned long)(newsize = oldsize + nextsize) >= - (unsigned long)(nb)) { - newp = oldp; - unlink(next, bck, fwd); + !inuse(next) && + (unsigned long)(newsize = oldsize + nextsize) >= + (unsigned long)(nb)) { + newp = oldp; + unlink(next, bck, fwd); } /* allocate, copy, free */ else { - newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK); - if (newmem == 0) - return 0; /* propagate failure */ - - newp = mem2chunk(newmem); - newsize = chunksize(newp); - - /* - Avoid copy if newp is next chunk after oldp. - */ - if (newp == next) { - newsize += oldsize; - newp = oldp; - } - else { - /* - Unroll copy of <= 36 bytes (72 if 8byte sizes) - We know that contents have an odd number of - INTERNAL_SIZE_T-sized words; minimally 3. 
- */ - - copysize = oldsize - SIZE_SZ; - s = (INTERNAL_SIZE_T*)(chunk2mem(oldp)); - d = (INTERNAL_SIZE_T*)(newmem); - ncopies = copysize / sizeof(INTERNAL_SIZE_T); - assert(ncopies >= 3); - - if (ncopies > 9) - MALLOC_COPY(d, s, copysize); - - else { - *(d+0) = *(s+0); - *(d+1) = *(s+1); - *(d+2) = *(s+2); - if (ncopies > 4) { - *(d+3) = *(s+3); - *(d+4) = *(s+4); - if (ncopies > 6) { - *(d+5) = *(s+5); - *(d+6) = *(s+6); - if (ncopies > 8) { - *(d+7) = *(s+7); - *(d+8) = *(s+8); - } - } - } - } + newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK); + if (newmem == 0) + return 0; /* propagate failure */ + + newp = mem2chunk(newmem); + newsize = chunksize(newp); + + /* + Avoid copy if newp is next chunk after oldp. + */ + if (newp == next) { + newsize += oldsize; + newp = oldp; + } + else { + /* + Unroll copy of <= 36 bytes (72 if 8byte sizes) + We know that contents have an odd number of + INTERNAL_SIZE_T-sized words; minimally 3. + */ + + copysize = oldsize - SIZE_SZ; + s = (INTERNAL_SIZE_T*)(chunk2mem(oldp)); + d = (INTERNAL_SIZE_T*)(newmem); + ncopies = copysize / sizeof(INTERNAL_SIZE_T); + assert(ncopies >= 3); + + if (ncopies > 9) + MALLOC_COPY(d, s, copysize); + + else { + *(d+0) = *(s+0); + *(d+1) = *(s+1); + *(d+2) = *(s+2); + if (ncopies > 4) { + *(d+3) = *(s+3); + *(d+4) = *(s+4); + if (ncopies > 6) { + *(d+5) = *(s+5); + *(d+6) = *(s+6); + if (ncopies > 8) { + *(d+7) = *(s+7); + *(d+8) = *(s+8); + } + } + } + } #ifdef ATOMIC_FASTBINS - _int_free(av, oldp, 1); + _int_free(av, oldp, 1); #else - _int_free(av, oldp); + _int_free(av, oldp); #endif - check_inuse_chunk(av, newp); - return chunk2mem(newp); - } + check_inuse_chunk(av, newp); + return chunk2mem(newp); + } } } @@ -5342,11 +5364,11 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize, /* update statistics */ sum = mp_.mmapped_mem += newsize - oldsize; if (sum > (unsigned long)(mp_.max_mmapped_mem)) - mp_.max_mmapped_mem = sum; + mp_.max_mmapped_mem = sum; #ifdef NO_THREADS sum += 
main_arena.system_mem; if (sum > (unsigned long)(mp_.max_total_mem)) - mp_.max_total_mem = sum; + mp_.max_total_mem = sum; #endif return chunk2mem(newp); @@ -5360,11 +5382,11 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize, /* Must alloc, copy, free. */ newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK); if (newmem != 0) { - MALLOC_COPY(newmem, chunk2mem(oldp), oldsize - 2*SIZE_SZ); + MALLOC_COPY(newmem, chunk2mem(oldp), oldsize - 2*SIZE_SZ); #ifdef ATOMIC_FASTBINS - _int_free(av, oldp, 1); + _int_free(av, oldp, 1); #else - _int_free(av, oldp); + _int_free(av, oldp); #endif } } @@ -5440,7 +5462,7 @@ _int_memalign(mstate av, size_t alignment, size_t bytes) */ brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & - -((signed long) alignment)); + -((signed long) alignment)); if ((unsigned long)(brk - (char*)(p)) < MINSIZE) brk += alignment; @@ -5468,7 +5490,7 @@ _int_memalign(mstate av, size_t alignment, size_t bytes) p = newp; assert (newsize >= nb && - (((unsigned long)(chunk2mem(p))) % alignment) == 0); + (((unsigned long)(chunk2mem(p))) % alignment) == 0); } /* Also give back spare room at the end */ @@ -5518,9 +5540,9 @@ Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size; #endif { /* - Unroll clear of <= 36 bytes (72 if 8byte sizes) - We know that contents have an odd number of - INTERNAL_SIZE_T-sized words; minimally 3. + Unroll clear of <= 36 bytes (72 if 8byte sizes) + We know that contents have an odd number of + INTERNAL_SIZE_T-sized words; minimally 3. 
*/ d = (INTERNAL_SIZE_T*)mem; @@ -5529,24 +5551,24 @@ Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size; assert(nclears >= 3); if (nclears > 9) - MALLOC_ZERO(d, clearsize); + MALLOC_ZERO(d, clearsize); else { - *(d+0) = 0; - *(d+1) = 0; - *(d+2) = 0; - if (nclears > 4) { - *(d+3) = 0; - *(d+4) = 0; - if (nclears > 6) { - *(d+5) = 0; - *(d+6) = 0; - if (nclears > 8) { - *(d+7) = 0; - *(d+8) = 0; - } - } - } + *(d+0) = 0; + *(d+1) = 0; + *(d+2) = 0; + if (nclears > 4) { + *(d+3) = 0; + *(d+4) = 0; + if (nclears > 6) { + *(d+5) = 0; + *(d+6) = 0; + if (nclears > 8) { + *(d+7) = 0; + *(d+8) = 0; + } + } + } } } } @@ -5689,9 +5711,9 @@ mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[]; marray[i] = chunk2mem(p); if (i != n_elements-1) { if (element_size != 0) - size = element_size; + size = element_size; else - size = request2size(sizes[i]); + size = request2size(sizes[i]); remainder_size -= size; set_head(p, size | size_flags); p = chunk_at_offset(p, size); @@ -5779,9 +5801,9 @@ static int mTRIm(av, pad) mstate av; size_t pad; for (int i = 1; i < NBINS; ++i) if (i == 1 || i >= psindex) { - mbinptr bin = bin_at (av, i); + mbinptr bin = bin_at (av, i); - for (mchunkptr p = last (bin); p != bin; p = p->bk) + for (mchunkptr p = last (bin); p != bin; p = p->bk) { INTERNAL_SIZE_T size = chunksize (p); @@ -5964,7 +5986,7 @@ void mSTATs() fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop); fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait); fprintf(stderr, "locked total = %10ld\n", - stat_lock_direct + stat_lock_loop + stat_lock_wait); + stat_lock_direct + stat_lock_loop + stat_lock_wait); #endif #ifdef _LIBC ((_IO_FILE *) stderr)->_flags2 |= old_flags2; @@ -6153,12 +6175,12 @@ int mALLOPt(param_number, value) int param_number; int value; if (size > 0) { if (size < MINIMUM_MORECORE_SIZE) - size = MINIMUM_MORECORE_SIZE; + size = MINIMUM_MORECORE_SIZE; if (CurrentExecutionLevel() == kTaskLevel) - ptr = 
PoolAllocateResident(size + RM_PAGE_SIZE, 0); + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); if (ptr == 0) { - return (void *) MORECORE_FAILURE; + return (void *) MORECORE_FAILURE; } // save ptrs so they can be freed during cleanup our_os_pools[next_os_pool] = ptr; @@ -6188,8 +6210,8 @@ int mALLOPt(param_number, value) int param_number; int value; for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) if (*ptr) { - PoolDeallocate(*ptr); - *ptr = 0; + PoolDeallocate(*ptr); + *ptr = 0; } } diff --git a/math/s_fdim.c b/math/s_fdim.c index 5804e631c3..677fdcde1a 100644 --- a/math/s_fdim.c +++ b/math/s_fdim.c @@ -1,5 +1,5 @@ /* Return positive difference between arguments. - Copyright (C) 1997, 2004 Free Software Foundation, Inc. + Copyright (C) 1997, 2004, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997. @@ -18,6 +18,7 @@ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ +#include <errno.h> #include <math.h> double @@ -31,7 +32,14 @@ __fdim (double x, double y) /* Raise invalid flag. */ return x - y; - return x <= y ? 0 : x - y; + if (x <= y) + return 0.0; + + double r = x - y; + if (fpclassify (r) == FP_INFINITE) + __set_errno (ERANGE); + + return r; } weak_alias (__fdim, fdim) #ifdef NO_LONG_DOUBLE diff --git a/math/s_fdimf.c b/math/s_fdimf.c index 2f3ce303ae..737413a5f4 100644 --- a/math/s_fdimf.c +++ b/math/s_fdimf.c @@ -1,5 +1,5 @@ /* Return positive difference between arguments. - Copyright (C) 1997, 2004 Free Software Foundation, Inc. + Copyright (C) 1997, 2004, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997. @@ -18,6 +18,7 @@ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ +#include <errno.h> #include <math.h> float @@ -31,6 +32,13 @@ __fdimf (float x, float y) /* Raise invalid flag. 
*/ return x - y; - return x <= y ? 0 : x - y; + if (x <= y) + return 0.0f; + + float r = x - y; + if (fpclassify (r) == FP_INFINITE) + __set_errno (ERANGE); + + return r; } weak_alias (__fdimf, fdimf) diff --git a/math/s_fdiml.c b/math/s_fdiml.c index 70246bafbd..f3072b99a0 100644 --- a/math/s_fdiml.c +++ b/math/s_fdiml.c @@ -1,5 +1,5 @@ /* Return positive difference between arguments. - Copyright (C) 1997, 2004 Free Software Foundation, Inc. + Copyright (C) 1997, 2004, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997. @@ -18,6 +18,7 @@ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ +#include <errno.h> #include <math.h> long double @@ -31,6 +32,13 @@ __fdiml (long double x, long double y) /* Raise invalid flag. */ return x - y; - return x <= y ? 0 : x - y; + if (x <= y) + return 0.0f; + + long double r = x - y; + if (fpclassify (r) == FP_INFINITE) + __set_errno (ERANGE); + + return r; } weak_alias (__fdiml, fdiml) diff --git a/misc/sys/uio.h b/misc/sys/uio.h index 05d956bfd3..a32b7ed8e5 100644 --- a/misc/sys/uio.h +++ b/misc/sys/uio.h @@ -80,10 +80,10 @@ extern ssize_t pwritev (int __fd, __const struct iovec *__iovec, int __count, # else # ifdef __REDIRECT extern ssize_t __REDIRECT (preadv, (int __fd, __const struct iovec *__iovec, - int __count, __off_t __offset), + int __count, __off64_t __offset), preadv64) __wur; extern ssize_t __REDIRECT (pwritev, (int __fd, __const struct iovec *__iovec, - int __count, __off_t __offset), + int __count, __off64_t __offset), pwritev64) __wur; # else # define preadv preadv64 diff --git a/nptl/ChangeLog b/nptl/ChangeLog index 2f02a94701..6cd09c5d75 100644 --- a/nptl/ChangeLog +++ b/nptl/ChangeLog @@ -1,3 +1,31 @@ +2009-09-07 Andreas Schwab <schwab@redhat.com> + + * sysdeps/pthread/bits/libc-lock.h (BP_SYM): Remove space before paren. 
+ +2009-09-02 Suzuki K P <suzuki@in.ibm.com> + Joseph Myers <joseph@codesourcery.com> + + [BZ #7094] + * sysdeps/unix/sysv/linux/timer_create.c (timer_create): + Initialize the sigev_notify field for newly created timer to make sure + the timer gets deleted from the active timer's list upon timer_delete. + +2009-08-27 Andrew Stubbs <ams@codesourcery.com> + + * sysdeps/unix/sysv/linux/sh/lowlevellock.S (__lll_timedlock_wait): + Correct a logic error. + +2009-08-11 Ulrich Drepper <drepper@redhat.com> + + * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S: Add CFI + directives. + +2009-08-10 Ulrich Drepper <drepper@redhat.com> + + * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S: Add CFI + directives. + * sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S: Likewise. + 2009-07-27 Ulrich Drepper <drepper@redhat.com> [BZ #10418] diff --git a/nptl/sysdeps/pthread/bits/libc-lock.h b/nptl/sysdeps/pthread/bits/libc-lock.h index a597f3b5a6..0cad8aa899 100644 --- a/nptl/sysdeps/pthread/bits/libc-lock.h +++ b/nptl/sysdeps/pthread/bits/libc-lock.h @@ -529,7 +529,7 @@ extern int __pthread_atfork (void (*__prepare) (void), # if _LIBC # include <bp-sym.h> # else -# define BP_SYM (sym) sym +# define BP_SYM(sym) sym # endif weak_extern (BP_SYM (__pthread_mutex_init)) weak_extern (BP_SYM (__pthread_mutex_destroy)) diff --git a/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S b/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S index b80c369a3c..49b4e6d6e0 100644 --- a/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S +++ b/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S @@ -52,7 +52,7 @@ # define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \ stc gbr, tmp ; \ mov.w 99f, reg ; \ - add reg, tmp ; \ + add reg, tmp ; \ bra 98f ; \ mov.l @tmp, reg ; \ 99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \ @@ -61,7 +61,7 @@ # define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \ stc gbr, tmp ; \ mov.w 99f, reg ; \ - add reg, tmp ; \ + add reg, tmp ; \ mov.l @tmp, reg ; \ bra 98f ; \ mov #FUTEX_WAIT, tmp 
; \ @@ -71,7 +71,7 @@ # define LOAD_PRIVATE_FUTEX_WAKE(reg,tmp,tmp2) \ stc gbr, tmp ; \ mov.w 99f, reg ; \ - add reg, tmp ; \ + add reg, tmp ; \ mov.l @tmp, reg ; \ bra 98f ; \ mov #FUTEX_WAKE, tmp ; \ @@ -81,7 +81,7 @@ # define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \ stc gbr, tmp ; \ mov.w 99f, tmp2 ; \ - add tmp2, tmp ; \ + add tmp2, tmp ; \ mov.l @tmp, tmp2 ; \ bra 98f ; \ mov #FUTEX_PRIVATE_FLAG, tmp ; \ @@ -93,7 +93,7 @@ # define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \ stc gbr, tmp ; \ mov.w 99f, tmp2 ; \ - add tmp2, tmp ; \ + add tmp2, tmp ; \ mov.l @tmp, tmp2 ; \ bra 98f ; \ mov #FUTEX_PRIVATE_FLAG, tmp ; \ @@ -107,7 +107,7 @@ # define LOAD_FUTEX_WAIT_ABS(reg,tmp,tmp2) \ stc gbr, tmp ; \ mov.w 99f, tmp2 ; \ - add tmp2, tmp ; \ + add tmp2, tmp ; \ mov.l @tmp, tmp2 ; \ bra 98f ; \ mov #FUTEX_PRIVATE_FLAG, tmp ; \ @@ -123,7 +123,7 @@ # define LOAD_FUTEX_WAKE(reg,tmp,tmp2) \ stc gbr, tmp ; \ mov.w 99f, tmp2 ; \ - add tmp2, tmp ; \ + add tmp2, tmp ; \ mov.l @tmp, tmp2 ; \ bra 98f ; \ mov #FUTEX_PRIVATE_FLAG, tmp ; \ @@ -253,7 +253,7 @@ __lll_timedlock_wait: mov #2, r6 cmp/eq r6, r2 bf/s 2f - mov r2, r6 + mov r6, r2 1: mov #2, r6 @@ -327,7 +327,7 @@ __lll_timedlock_wait: tst r3, r3 bt 6f - + 1: /* Get current time. */ mov r15, r4 diff --git a/nptl/sysdeps/unix/sysv/linux/timer_create.c b/nptl/sysdeps/unix/sysv/linux/timer_create.c index a07234d7d1..cdf127cb44 100644 --- a/nptl/sysdeps/unix/sysv/linux/timer_create.c +++ b/nptl/sysdeps/unix/sysv/linux/timer_create.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2003,2004, 2007 Free Software Foundation, Inc. +/* Copyright (C) 2003,2004, 2007, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2003. @@ -167,6 +167,7 @@ timer_create (clock_id, evp, timerid) /* Copy the thread parameters the user provided. 
*/ newp->sival = evp->sigev_value; newp->thrfunc = evp->sigev_notify_function; + newp->sigev_notify = SIGEV_THREAD; /* We cannot simply copy the thread attributes since the implementation might keep internal information for diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S index 80fedd4ab1..35eb09cd0c 100644 --- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S +++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -30,6 +30,7 @@ .type __pthread_rwlock_rdlock,@function .align 16 __pthread_rwlock_rdlock: + cfi_startproc xorq %r10, %r10 /* Get the lock. */ @@ -167,6 +168,7 @@ __pthread_rwlock_rdlock: subq $MUTEX, %rdi #endif jmp 13b + cfi_endproc .size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock .globl pthread_rwlock_rdlock diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S index cf7f607d9c..03391d0fc2 100644 --- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S +++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -29,6 +29,7 @@ .type __pthread_rwlock_unlock,@function .align 16 __pthread_rwlock_unlock: + cfi_startproc /* Get the lock. 
*/ movl $1, %esi xorl %eax, %eax @@ -119,7 +120,7 @@ __pthread_rwlock_unlock: #endif callq __lll_unlock_wake jmp 8b - + cfi_endproc .size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock .globl pthread_rwlock_unlock diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S index 209c0e9a94..be6b8d8e20 100644 --- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S +++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. @@ -30,6 +30,7 @@ .type __pthread_rwlock_wrlock,@function .align 16 __pthread_rwlock_wrlock: + cfi_startproc xorq %r10, %r10 /* Get the lock. */ @@ -155,6 +156,7 @@ __pthread_rwlock_wrlock: subq $MUTEX, %rdi #endif jmp 13b + cfi_endproc .size __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock .globl pthread_rwlock_wrlock diff --git a/nptl_db/ChangeLog b/nptl_db/ChangeLog index 1ade1968a7..f79fc18e4b 100644 --- a/nptl_db/ChangeLog +++ b/nptl_db/ChangeLog @@ -1,3 +1,10 @@ +2009-08-23 Roland McGrath <roland@redhat.com> + + * td_ta_map_lwp2thr.c (__td_ta_lookup_th_unique): Move ta_ok check + and LOG call back to ... + (td_ta_map_lwp2thr): ... here. + Reported by Maciej W. Rozycki <macro@codesourcery.com>. + 2009-05-25 Aurelien Jarno <aurelien@aurel32.net> [BZ #10200] diff --git a/nptl_db/td_ta_map_lwp2thr.c b/nptl_db/td_ta_map_lwp2thr.c index 78cfcab769..4835f31f94 100644 --- a/nptl_db/td_ta_map_lwp2thr.c +++ b/nptl_db/td_ta_map_lwp2thr.c @@ -1,5 +1,5 @@ /* Which thread is running on an LWP? - Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc. + Copyright (C) 2003,2004,2007,2009 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or @@ -33,12 +33,6 @@ __td_ta_lookup_th_unique (const td_thragent_t *ta_arg, prgregset_t regs; psaddr_t addr; - LOG ("td_ta_map_lwp2thr"); - - /* Test whether the TA parameter is ok. */ - if (! ta_ok (ta)) - return TD_BADTA; - if (ta->ta_howto == ta_howto_unknown) { /* We need to read in from the inferior the instructions what to do. */ @@ -181,6 +175,12 @@ td_ta_map_lwp2thr (const td_thragent_t *ta_arg, { td_thragent_t *const ta = (td_thragent_t *) ta_arg; + LOG ("td_ta_map_lwp2thr"); + + /* Test whether the TA parameter is ok. */ + if (! ta_ok (ta)) + return TD_BADTA; + /* We cannot rely on thread registers and such information at all before __pthread_initialize_minimal has gotten far enough. They sometimes contain garbage that would confuse us, left by the kernel diff --git a/posix/Makefile b/posix/Makefile index 5af49dffd4..f19b121a17 100644 --- a/posix/Makefile +++ b/posix/Makefile @@ -82,6 +82,7 @@ tests := tstgetopt testfnm runtests runptests \ bug-regex17 bug-regex18 bug-regex19 bug-regex20 \ bug-regex21 bug-regex22 bug-regex23 bug-regex24 \ bug-regex25 bug-regex26 bug-regex27 bug-regex28 \ + bug-regex29 \ tst-nice tst-nanosleep tst-regex2 \ transbug tst-rxspencer tst-pcre tst-boost \ bug-ga1 tst-vfork1 tst-vfork2 tst-vfork3 tst-waitid \ diff --git a/posix/bug-regex29.c b/posix/bug-regex29.c new file mode 100644 index 0000000000..bd796c6c2a --- /dev/null +++ b/posix/bug-regex29.c @@ -0,0 +1,22 @@ +#include <regex.h> + +static int +do_test (void) +{ + regex_t r; + int e = regcomp(&r, "xy\\{4,5,7\\}zabc", 0); + char buf[100]; + regerror(e, &r, buf, sizeof (buf)); + printf ("e = %d (%s)\n", e, buf); + int res = e != REG_BADBR; + + e = regcomp(&r, "xy\\{4,5a\\}zabc", 0); + regerror(e, &r, buf, sizeof (buf)); + printf ("e = %d (%s)\n", e, buf); + res |= e != REG_BADBR; + + return res; +} + +#define TEST_FUNCTION do_test () +#include "../test-skeleton.c" diff --git a/posix/getconf.c b/posix/getconf.c 
index 59ccab606c..bd7dff7167 100644 --- a/posix/getconf.c +++ b/posix/getconf.c @@ -484,6 +484,7 @@ static const struct conf vars[] = #endif #ifdef _SC_LINE_MAX { "_POSIX2_LINE_MAX", _SC_LINE_MAX, SYSCONF }, + { "POSIX2_LINE_MAX", _SC_LINE_MAX, SYSCONF }, #endif #ifdef _SC_2_LOCALEDEF { "POSIX2_LOCALEDEF", _SC_2_LOCALEDEF, SYSCONF }, diff --git a/posix/regcomp.c b/posix/regcomp.c index 4843cfea33..446fed5445 100644 --- a/posix/regcomp.c +++ b/posix/regcomp.c @@ -2481,7 +2481,7 @@ parse_dup_op (bin_tree_t *elem, re_string_t *regexp, re_dfa_t *dfa, return elem; } - if (BE (end != -1 && start > end, 0)) + if (BE ((end != -1 && start > end) || token->type != OP_CLOSE_DUP_NUM, 0)) { /* First number greater than second. */ *err = REG_BADBR; diff --git a/posix/unistd.h b/posix/unistd.h index 24ec74e05e..a487883eb8 100644 --- a/posix/unistd.h +++ b/posix/unistd.h @@ -30,30 +30,58 @@ __BEGIN_DECLS /* These may be used to determine what facilities are present at compile time. Their values can be obtained at run time from `sysconf'. */ +#ifdef __USE_XOPEN2K8 /* POSIX Standard approved as ISO/IEC 9945-1 as of September 2008. */ -#define _POSIX_VERSION 200809L +# define _POSIX_VERSION 200809L +#elif defined __USE_XOPEN2K +/* POSIX Standard approved as ISO/IEC 9945-1 as of December 2001. */ +# define _POSIX_VERSION 200112L +#elif defined __USE_POSIX199506 +/* POSIX Standard approved as ISO/IEC 9945-1 as of June 1995. */ +# define _POSIX_VERSION 199506L +#elif defined __USE_POSIX199309 +/* POSIX Standard approved as ISO/IEC 9945-1 as of September 1993. */ +# define _POSIX_VERSION 199309L +#else +/* POSIX Standard approved as ISO/IEC 9945-1 as of September 1990. */ +# define _POSIX_VERSION 199009L +#endif /* These are not #ifdef __USE_POSIX2 because they are in the theoretically application-owned namespace. */ +#ifdef __USE_XOPEN2K8 +# define __POSIX2_THIS_VERSION 200809L +/* The utilities on GNU systems also correspond to this version. 
*/ +#elif defined __USE_XOPEN2K +/* The utilities on GNU systems also correspond to this version. */ +# define __POSIX2_THIS_VERSION 200112L +#elif defined __USE_POSIX199506 +/* The utilities on GNU systems also correspond to this version. */ +# define __POSIX2_THIS_VERSION 199506L +#else +/* The utilities on GNU systems also correspond to this version. */ +# define __POSIX2_THIS_VERSION 199209L +#endif + /* The utilities on GNU systems also correspond to this version. */ -#define _POSIX2_VERSION 200809L +#define _POSIX2_VERSION __POSIX2_THIS_VERSION /* If defined, the implementation supports the C Language Bindings Option. */ -#define _POSIX2_C_BIND 200809L +#define _POSIX2_C_BIND __POSIX2_THIS_VERSION /* If defined, the implementation supports the C Language Development Utilities Option. */ -#define _POSIX2_C_DEV 200809L +#define _POSIX2_C_DEV __POSIX2_THIS_VERSION /* If defined, the implementation supports the Software Development Utilities Option. */ -#define _POSIX2_SW_DEV 200809L +#define _POSIX2_SW_DEV __POSIX2_THIS_VERSION /* If defined, the implementation supports the creation of locales with the localedef utility. */ -#define _POSIX2_LOCALEDEF 200809L +#define _POSIX2_LOCALEDEF __POSIX2_THIS_VERSION /* X/Open version number to which the library conforms. It is selectable. */ #ifdef __USE_XOPEN2K8 diff --git a/stdlib/longlong.h b/stdlib/longlong.h index a2f38ae2a5..e7d6099c7e 100644 --- a/stdlib/longlong.h +++ b/stdlib/longlong.h @@ -1,6 +1,6 @@ /* longlong.h -- definitions for mixed size 32/64 bit arithmetic. Copyright (C) 1991, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, - 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. + 2002, 2003, 2004, 2005, 2006, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. 
@@ -918,7 +918,7 @@ UDItype __umulsidi3 (USItype, USItype); " or r1,%0" \ : "=r" (q), "=&z" (r) \ : "1" (n1), "r" (n0), "rm" (d), "r" (&__udiv_qrnnd_16) \ - : "r1", "r2", "r4", "r5", "r6", "pr"); \ + : "r1", "r2", "r4", "r5", "r6", "pr", "t"); \ } while (0) #define UDIV_TIME 80 @@ -926,7 +926,8 @@ UDItype __umulsidi3 (USItype, USItype); #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("clrt;subc %5,%1; subc %4,%0" \ : "=r" (sh), "=r" (sl) \ - : "0" (ah), "1" (al), "r" (bh), "r" (bl)) + : "0" (ah), "1" (al), "r" (bh), "r" (bl) \ + : "t") #endif /* __sh__ */ diff --git a/sysdeps/ia64/backtrace.c b/sysdeps/ia64/backtrace.c index 423fed80a8..5cefb86ae4 100644 --- a/sysdeps/ia64/backtrace.c +++ b/sysdeps/ia64/backtrace.c @@ -1,5 +1,5 @@ /* Return backtrace of current program state. - Copyright (C) 2003, 2004, 2005, 2007 Free Software Foundation, Inc. + Copyright (C) 2003, 2004, 2005, 2007, 2009 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Jakub Jelinek <jakub@redhat.com>, 2003. @@ -61,7 +61,13 @@ backtrace_helper (struct _Unwind_Context *ctx, void *a) /* We are first called with address in the __backtrace function. Skip it. */ if (arg->cnt != -1) - arg->array[arg->cnt] = (void *) unwind_getip (ctx); + { + arg->array[arg->cnt] = (void *) unwind_getip (ctx); + + /* Check whether we make any progress. 
*/ + if (arg->cnt > 0 && arg->array[arg->cnt - 1] == arg->array[arg->cnt]) + return _URC_END_OF_STACK; + } if (++arg->cnt == arg->size) return _URC_END_OF_STACK; return _URC_NO_REASON; diff --git a/sysdeps/powerpc/powerpc32/power6/memcpy.S b/sysdeps/powerpc/powerpc32/power6/memcpy.S index 156b0bd8cc..cafe9174fd 100644 --- a/sysdeps/powerpc/powerpc32/power6/memcpy.S +++ b/sysdeps/powerpc/powerpc32/power6/memcpy.S @@ -220,7 +220,7 @@ L(word_unaligned_short): subf 10,0,5 add 12,4,0 blt cr6,5f - srdi 7,6,16 + srwi 7,6,16 bgt cr6,3f sth 6,0(3) b 7f diff --git a/sysdeps/unix/sysv/linux/i386/bits/fcntl.h b/sysdeps/unix/sysv/linux/i386/bits/fcntl.h index 35ef665998..35dfb299d2 100644 --- a/sysdeps/unix/sysv/linux/i386/bits/fcntl.h +++ b/sysdeps/unix/sysv/linux/i386/bits/fcntl.h @@ -240,8 +240,8 @@ extern ssize_t tee (int __fdin, int __fdout, size_t __len, extern int fallocate (int __fd, int __mode, __off_t __offset, __off_t __len); # else # ifdef __REDIRECT -extern int __REDIRECT (fallocate, (int __fd, int __mode, __off_t __offset, - __off_t __len), +extern int __REDIRECT (fallocate, (int __fd, int __mode, __off64_t __offset, + __off64_t __len), fallocate64); # else # define fallocate fallocate64 diff --git a/sysdeps/unix/sysv/linux/ia64/bits/fcntl.h b/sysdeps/unix/sysv/linux/ia64/bits/fcntl.h index 6abc5ced65..92b96bd14c 100644 --- a/sysdeps/unix/sysv/linux/ia64/bits/fcntl.h +++ b/sysdeps/unix/sysv/linux/ia64/bits/fcntl.h @@ -234,8 +234,8 @@ extern ssize_t tee (int __fdin, int __fdout, size_t __len, extern int fallocate (int __fd, int __mode, __off_t __offset, __off_t __len); # else # ifdef __REDIRECT -extern int __REDIRECT (fallocate, (int __fd, int __mode, __off_t __offset, - __off_t __len), +extern int __REDIRECT (fallocate, (int __fd, int __mode, __off64_t __offset, + __off64_t __len), fallocate64); # else # define fallocate fallocate64 diff --git a/sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h b/sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h index 90b669ab60..493d5cba5f 
100644 --- a/sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h +++ b/sysdeps/unix/sysv/linux/powerpc/bits/fcntl.h @@ -240,8 +240,8 @@ extern ssize_t tee (int __fdin, int __fdout, size_t __len, extern int fallocate (int __fd, int __mode, __off_t __offset, __off_t __len); # else # ifdef __REDIRECT -extern int __REDIRECT (fallocate, (int __fd, int __mode, __off_t __offset, - __off_t __len), +extern int __REDIRECT (fallocate, (int __fd, int __mode, __off64_t __offset, + __off64_t __len), fallocate64); # else # define fallocate fallocate64 diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc32/syscalls.list b/sysdeps/unix/sysv/linux/powerpc/powerpc32/syscalls.list index 82025b4855..1233be671a 100644 --- a/sysdeps/unix/sysv/linux/powerpc/powerpc32/syscalls.list +++ b/sysdeps/unix/sysv/linux/powerpc/powerpc32/syscalls.list @@ -3,3 +3,6 @@ # System calls with wrappers. oldgetrlimit EXTRA getrlimit i:ip __old_getrlimit getrlimit@GLIBC_2.0 oldsetrlimit EXTRA setrlimit i:ip __old_setrlimit setrlimit@GLIBC_2.0 + +# Due to 64bit alignment there is a dummy second parameter +readahead - readahead i:iiiii __readahead readahead diff --git a/sysdeps/unix/sysv/linux/s390/bits/fcntl.h b/sysdeps/unix/sysv/linux/s390/bits/fcntl.h index ff5941df65..54c4c52751 100644 --- a/sysdeps/unix/sysv/linux/s390/bits/fcntl.h +++ b/sysdeps/unix/sysv/linux/s390/bits/fcntl.h @@ -260,8 +260,8 @@ extern ssize_t tee (int __fdin, int __fdout, size_t __len, extern int fallocate (int __fd, int __mode, __off_t __offset, __off_t __len); # else # ifdef __REDIRECT -extern int __REDIRECT (fallocate, (int __fd, int __mode, __off_t __offset, - __off_t __len), +extern int __REDIRECT (fallocate, (int __fd, int __mode, __off64_t __offset, + __off64_t __len), fallocate64); # else # define fallocate fallocate64 diff --git a/sysdeps/unix/sysv/linux/sh/bits/fcntl.h b/sysdeps/unix/sysv/linux/sh/bits/fcntl.h index 35ef665998..35dfb299d2 100644 --- a/sysdeps/unix/sysv/linux/sh/bits/fcntl.h +++ 
b/sysdeps/unix/sysv/linux/sh/bits/fcntl.h @@ -240,8 +240,8 @@ extern ssize_t tee (int __fdin, int __fdout, size_t __len, extern int fallocate (int __fd, int __mode, __off_t __offset, __off_t __len); # else # ifdef __REDIRECT -extern int __REDIRECT (fallocate, (int __fd, int __mode, __off_t __offset, - __off_t __len), +extern int __REDIRECT (fallocate, (int __fd, int __mode, __off64_t __offset, + __off64_t __len), fallocate64); # else # define fallocate fallocate64 diff --git a/sysdeps/unix/sysv/linux/sparc/bits/fcntl.h b/sysdeps/unix/sysv/linux/sparc/bits/fcntl.h index d59744a55e..56d9c004e7 100644 --- a/sysdeps/unix/sysv/linux/sparc/bits/fcntl.h +++ b/sysdeps/unix/sysv/linux/sparc/bits/fcntl.h @@ -259,8 +259,8 @@ extern ssize_t tee (int __fdin, int __fdout, size_t __len, extern int fallocate (int __fd, int __mode, __off_t __offset, __off_t __len); # else # ifdef __REDIRECT -extern int __REDIRECT (fallocate, (int __fd, int __mode, __off_t __offset, - __off_t __len), +extern int __REDIRECT (fallocate, (int __fd, int __mode, __off64_t __offset, + __off64_t __len), fallocate64); # else # define fallocate fallocate64 diff --git a/sysdeps/unix/sysv/linux/x86_64/bits/fcntl.h b/sysdeps/unix/sysv/linux/x86_64/bits/fcntl.h index bc0f4d687b..1d68a201a5 100644 --- a/sysdeps/unix/sysv/linux/x86_64/bits/fcntl.h +++ b/sysdeps/unix/sysv/linux/x86_64/bits/fcntl.h @@ -254,8 +254,8 @@ extern ssize_t tee (int __fdin, int __fdout, size_t __len, extern int fallocate (int __fd, int __mode, __off_t __offset, __off_t __len); # else # ifdef __REDIRECT -extern int __REDIRECT (fallocate, (int __fd, int __mode, __off_t __offset, - __off_t __len), +extern int __REDIRECT (fallocate, (int __fd, int __mode, __off64_t __offset, + __off64_t __len), fallocate64); # else # define fallocate fallocate64 @@ -1,4 +1,4 @@ /* This file just defines the current version number of libc. */ #define RELEASE "stable" -#define VERSION "2.10.1" +#define VERSION "2.10.2" |