author     Jakub Jelinek <jakub@redhat.com>   2006-10-25 19:13:42 +0000
committer  Jakub Jelinek <jakub@redhat.com>   2006-10-25 19:13:42 +0000
commit     21cb7ca55c2fdd7e9aca6c7a80ae0d7ca4f6c7da (patch)
tree       9bce2d28d077684abe0904fdfb3974e06ceb29f6 /elf
parent     16d1b47b4f3f9ae13535ea7a2c02bd207c069d5c (diff)
Updated to fedora-glibc-20061025T1857 (cvs/fedora-glibc-2_5_90-1)
Diffstat (limited to 'elf')
-rw-r--r--  elf/dl-close.c    154
-rw-r--r--  elf/dl-fptr.c       4
-rw-r--r--  elf/dl-libc.c       5
-rw-r--r--  elf/dl-load.c       8
-rw-r--r--  elf/dl-lookup.c     6
-rw-r--r--  elf/dl-minimal.c   10
-rw-r--r--  elf/dl-object.c    21
-rw-r--r--  elf/dl-open.c      69
-rw-r--r--  elf/dl-profile.c   14
-rw-r--r--  elf/dl-runtime.c   50
-rw-r--r--  elf/dl-sym.c       86
-rw-r--r--  elf/rtld.c         15
12 files changed, 343 insertions, 99 deletions
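
The hunks below replace the flat l_scope array in struct link_map with a reference-counted scope record, l_scoperec, so that lazy symbol lookups can keep using a scope while dlopen/dlclose swap in a new one. The record type itself lives in a header that is not part of this diff; the following is a minimal sketch of its assumed shape, with field names (scope, nusers, remove_after_use, notify) taken from how the hunks use them rather than from the real declaration.

/* Illustrative sketch only -- not the glibc declaration. */
#include <stdbool.h>
#include <stdlib.h>

struct r_scope_elem_sketch;                /* stands in for glibc's struct r_scope_elem */

struct r_scoperec_sketch
{
  int nusers;                              /* readers currently using 'scope' */
  bool remove_after_use;                   /* last reader disposes of the record ... */
  bool notify;                             /* ... by waking the waiting dlclose */
  struct r_scope_elem_sketch *scope[];     /* NULL-terminated search scope */
};

int main (void)
{
  /* A record with room for three scope entries plus the terminator. */
  struct r_scoperec_sketch *p
    = calloc (1, sizeof *p + 4 * sizeof (struct r_scope_elem_sketch *));
  p->scope[3] = NULL;                      /* calloc already zeroed it; kept explicit */
  free (p);
  return 0;
}

In the link map the record sits next to a small multiple-reader lock (l_scoperec_lock) and an inline variant with a fixed-size array (l_scoperec_mem), which is why several hunks below distinguish the heap-allocated case from &imap->l_scoperec_mem.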
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 6a2ad976a7..bfcceea4bc 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -19,6 +19,7 @@
#include <assert.h>
#include <dlfcn.h>
+#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
@@ -29,12 +30,17 @@
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
+#include <sysdep-cancel.h>
/* Type of the constructor functions. */
typedef void (*fini_t) (void);
+/* Special l_idx value used to indicate which objects remain loaded. */
+#define IDX_STILL_USED -1
+
+
#ifdef USE_TLS
/* Returns true we an non-empty was found. */
static bool
@@ -188,7 +194,7 @@ _dl_close (void *_map)
done[done_index] = 1;
used[done_index] = 1;
/* Signal the object is still needed. */
- l->l_idx = -1;
+ l->l_idx = IDX_STILL_USED;
/* Mark all dependencies as used. */
if (l->l_initfini != NULL)
@@ -196,7 +202,7 @@ _dl_close (void *_map)
struct link_map **lp = &l->l_initfini[1];
while (*lp != NULL)
{
- if ((*lp)->l_idx != -1)
+ if ((*lp)->l_idx != IDX_STILL_USED)
{
assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
@@ -217,7 +223,7 @@ _dl_close (void *_map)
{
struct link_map *jmap = l->l_reldeps[j];
- if (jmap->l_idx != -1)
+ if (jmap->l_idx != IDX_STILL_USED)
{
assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
@@ -310,8 +316,9 @@ _dl_close (void *_map)
/* Else used[i]. */
else if (imap->l_type == lt_loaded)
{
- if (imap->l_searchlist.r_list == NULL
- && imap->l_initfini != NULL)
+ struct r_scope_elem *new_list = NULL;
+
+ if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
{
/* The object is still used. But one of the objects we are
unloading right now is responsible for loading it. If
@@ -328,44 +335,119 @@ _dl_close (void *_map)
imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
imap->l_searchlist.r_nlist = cnt;
- for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
- /* This relies on l_scope[] entries being always set either
- to its own l_symbolic_searchlist address, or some map's
- l_searchlist address. */
- if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
- {
- struct link_map *tmap;
-
- tmap = (struct link_map *) ((char *) imap->l_scope[cnt]
- - offsetof (struct link_map,
- l_searchlist));
- assert (tmap->l_ns == ns);
- if (tmap->l_idx != -1)
- {
- imap->l_scope[cnt] = &imap->l_searchlist;
- break;
- }
- }
+ new_list = &imap->l_searchlist;
}
- else
+
+ /* Count the number of scopes which remain after the unload.
+ When we add the local search list count it. Always add
+ one for the terminating NULL pointer. */
+ size_t remain = (new_list != NULL) + 1;
+ bool removed_any = false;
+ for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
+ /* This relies on l_scope[] entries being always set either
+ to its own l_symbolic_searchlist address, or some map's
+ l_searchlist address. */
+ if (imap->l_scoperec->scope[cnt] != &imap->l_symbolic_searchlist)
+ {
+ struct link_map *tmap = (struct link_map *)
+ ((char *) imap->l_scoperec->scope[cnt]
+ - offsetof (struct link_map, l_searchlist));
+ assert (tmap->l_ns == ns);
+ if (tmap->l_idx == IDX_STILL_USED)
+ ++remain;
+ else
+ removed_any = true;
+ }
+ else
+ ++remain;
+
+ if (removed_any)
{
- unsigned int cnt = 0;
- while (imap->l_scope[cnt] != NULL)
+ /* Always allocate a new array for the scope. This is
+ necessary since we must be able to determine the last
+ user of the current array. If possible use the link map's
+ memory. */
+ size_t new_size;
+ struct r_scoperec *newp;
+ if (imap->l_scoperec != &imap->l_scoperec_mem
+ && remain < NINIT_SCOPE_ELEMS (imap)
+ && imap->l_scoperec_mem.nusers == 0)
+ {
+ new_size = NINIT_SCOPE_ELEMS (imap);
+ newp = &imap->l_scoperec_mem;
+ }
+ else
+ {
+ new_size = imap->l_scope_max;
+ newp = (struct r_scoperec *)
+ malloc (sizeof (struct r_scoperec)
+ + new_size * sizeof (struct r_scope_elem *));
+ if (newp == NULL)
+ _dl_signal_error (ENOMEM, "dlclose", NULL,
+ N_("cannot create scope list"));
+ }
+
+ newp->nusers = 0;
+ newp->remove_after_use = false;
+ newp->notify = false;
+
+ /* Copy over the remaining scope elements. */
+ remain = 0;
+ for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt)
+ {
+ if (imap->l_scoperec->scope[cnt]
+ != &imap->l_symbolic_searchlist)
+ {
+ struct link_map *tmap = (struct link_map *)
+ ((char *) imap->l_scoperec->scope[cnt]
+ - offsetof (struct link_map, l_searchlist));
+ if (tmap->l_idx != IDX_STILL_USED)
+ {
+ /* Remove the scope. Or replace with own map's
+ scope. */
+ if (new_list != NULL)
+ {
+ newp->scope[remain++] = new_list;
+ new_list = NULL;
+ }
+ continue;
+ }
+ }
+
+ newp->scope[remain++] = imap->l_scoperec->scope[cnt];
+ }
+ newp->scope[remain] = NULL;
+
+ struct r_scoperec *old = imap->l_scoperec;
+
+ if (SINGLE_THREAD_P)
+ imap->l_scoperec = newp;
+ else
{
- if (imap->l_scope[cnt] == &map->l_searchlist)
+ __rtld_mrlock_change (imap->l_scoperec_lock);
+ imap->l_scoperec = newp;
+ __rtld_mrlock_done (imap->l_scoperec_lock);
+
+ if (atomic_increment_val (&old->nusers) != 1)
{
- while ((imap->l_scope[cnt] = imap->l_scope[cnt + 1])
- != NULL)
- ++cnt;
- break;
+ old->remove_after_use = true;
+ old->notify = true;
+ if (atomic_decrement_val (&old->nusers) != 0)
+ __rtld_waitzero (old->nusers);
}
- ++cnt;
}
+
+ /* No user anymore, we can free it now. */
+ if (old != &imap->l_scoperec_mem)
+ free (old);
+
+ imap->l_scope_max = new_size;
}
/* The loader is gone, so mark the object as not having one.
- Note: l_idx != -1 -> object will be removed. */
- if (imap->l_loader != NULL && imap->l_loader->l_idx != -1)
+ Note: l_idx != IDX_STILL_USED -> object will be removed. */
+ if (imap->l_loader != NULL
+ && imap->l_loader->l_idx != IDX_STILL_USED)
imap->l_loader = NULL;
/* Remember where the first dynamically loaded object is. */
@@ -570,8 +652,8 @@ _dl_close (void *_map)
free (imap->l_initfini);
/* Remove the scope array if we allocated it. */
- if (imap->l_scope != imap->l_scope_mem)
- free (imap->l_scope);
+ if (imap->l_scoperec != &imap->l_scoperec_mem)
+ free (imap->l_scoperec);
if (imap->l_phdr_allocated)
free ((void *) imap->l_phdr);
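
The retire step in the dl-close.c hunk above amounts to: unpublish the old scope record, wait until the count of lock-free readers drops to zero, then free it. Here is a minimal stand-alone sketch of that hand-off, using C11 atomics and a pthread condition variable in place of glibc's internal __rtld_waitzero/__rtld_notify primitives (that substitution is mine, for illustration; the real record additionally carries the remove_after_use/notify flags so that a record nobody is waiting for can simply be freed by its last reader).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct scoperec { atomic_int nusers; };    /* glibc also has remove_after_use/notify */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;

/* A reader that picked the record up (and bumped nusers) before dlclose
   unpublished it; it finishes its lookup and drops its reference. */
static void *reader (void *arg)
{
  struct scoperec *rec = arg;
  usleep (100 * 1000);                     /* pretend to resolve a symbol */
  if (atomic_fetch_sub (&rec->nusers, 1) == 1)
    {
      /* We were the last user: wake the waiting dlclose. */
      pthread_mutex_lock (&lock);
      pthread_cond_broadcast (&drained);
      pthread_mutex_unlock (&lock);
    }
  return NULL;
}

int main (void)
{
  struct scoperec *old = calloc (1, sizeof *old);
  atomic_store (&old->nusers, 1);          /* one lookup is already in flight */
  pthread_t t;
  pthread_create (&t, NULL, reader, old);

  /* dlclose side: the record is already unlinked from the link map, so no
     new reader can find it.  Wait for in-flight readers, then free. */
  pthread_mutex_lock (&lock);
  while (atomic_load (&old->nusers) != 0)
    pthread_cond_wait (&drained, &lock);
  pthread_mutex_unlock (&lock);
  free (old);

  pthread_join (t, NULL);
  puts ("old scope record retired after its last user finished");
  return 0;
}

Build with cc -pthread; the reader releases its reference roughly 100 ms after dlclose starts waiting.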
diff --git a/elf/dl-fptr.c b/elf/dl-fptr.c
index 78beecfdcb..e068124d6f 100644
--- a/elf/dl-fptr.c
+++ b/elf/dl-fptr.c
@@ -1,5 +1,5 @@
/* Manage function descriptors. Generic version.
- Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
+ Copyright (C) 1999-2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -40,7 +40,7 @@
#ifndef COMPARE_AND_SWAP
# define COMPARE_AND_SWAP(ptr, old, new) \
- (atomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
+ (catomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
#endif
ElfW(Addr) _dl_boot_fptr_table [ELF_MACHINE_BOOT_FPTR_TABLE_LEN];
diff --git a/elf/dl-libc.c b/elf/dl-libc.c
index 1b995eda92..8b78a7a388 100644
--- a/elf/dl-libc.c
+++ b/elf/dl-libc.c
@@ -1,5 +1,5 @@
/* Handle loading and unloading shared objects for internal libc purposes.
- Copyright (C) 1999,2000,2001,2002,2004,2005 Free Software Foundation, Inc.
+ Copyright (C) 1999-2002,2004,2005,2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Zack Weinberg <zack@rabi.columbia.edu>, 1999.
@@ -133,7 +133,8 @@ do_dlsym_private (void *ptr)
struct do_dlsym_args *args = (struct do_dlsym_args *) ptr;
args->ref = NULL;
l = GLRO(dl_lookup_symbol_x) (args->name, args->map, &args->ref,
- args->map->l_scope, &vers, 0, 0, NULL);
+ args->map->l_scoperec->scope, &vers, 0, 0,
+ NULL);
args->loadbase = l;
}
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 36dc123c01..172fb2fc35 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1473,7 +1473,7 @@ cannot enable executable stack as shared object requires");
have to do this for the main map. */
if ((mode & RTLD_DEEPBIND) == 0
&& __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
- && &l->l_searchlist != l->l_scope[0])
+ && &l->l_searchlist != l->l_scoperec->scope[0])
{
/* Create an appropriate searchlist. It contains only this map.
This is the definition of DT_SYMBOLIC in SysVr4. */
@@ -1490,11 +1490,11 @@ cannot enable executable stack as shared object requires");
l->l_symbolic_searchlist.r_nlist = 1;
/* Now move the existing entries one back. */
- memmove (&l->l_scope[1], &l->l_scope[0],
- (l->l_scope_max - 1) * sizeof (l->l_scope[0]));
+ memmove (&l->l_scoperec->scope[1], &l->l_scoperec->scope[0],
+ (l->l_scope_max - 1) * sizeof (l->l_scoperec->scope[0]));
/* Now add the new entry. */
- l->l_scope[0] = &l->l_symbolic_searchlist;
+ l->l_scoperec->scope[0] = &l->l_symbolic_searchlist;
}
/* Remember whether this object must be initialized first. */
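
The dl-load.c hunk keeps the same DT_SYMBOLIC trick as before, just spelled through l_scoperec: shift the existing scope entries back by one slot (there is spare capacity up to l_scope_max) and put the object's own symbolic search list first. A toy version of that prepend, with invented names:

#include <stdio.h>
#include <string.h>

int main (void)
{
  /* Toy scope array with spare capacity, kept NULL-terminated, mirroring
     the memmove in the DT_SYMBOLIC hunk above (capacity = l_scope_max). */
  const char *scope[4] = { "global", "local", NULL, NULL };
  size_t cap = sizeof scope / sizeof scope[0];

  /* Shift everything one slot back and drop in the new first entry. */
  memmove (&scope[1], &scope[0], (cap - 1) * sizeof scope[0]);
  scope[0] = "symbolic";

  for (size_t i = 0; scope[i] != NULL; ++i)
    printf ("scope[%zu] = %s\n", i, scope[i]);
  return 0;
}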
diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
index 7cfcc620a7..72381698db 100644
--- a/elf/dl-lookup.c
+++ b/elf/dl-lookup.c
@@ -207,7 +207,11 @@ _dl_debug_bindings (const char *undef_name, struct link_map *undef_map,
/* Search loaded objects' symbol tables for a definition of the symbol
- UNDEF_NAME, perhaps with a requested version for the symbol. */
+ UNDEF_NAME, perhaps with a requested version for the symbol.
+
+ We must never have calls to the audit functions inside this function
+ or in any function which gets called. If this would happen the audit
+ code might create a thread which can throw off all the scope locking. */
lookup_t
internal_function
_dl_lookup_symbol_x (const char *undef_name, struct link_map *undef_map,
diff --git a/elf/dl-minimal.c b/elf/dl-minimal.c
index 868d3bd2ed..8e78709b5a 100644
--- a/elf/dl-minimal.c
+++ b/elf/dl-minimal.c
@@ -1,5 +1,6 @@
/* Minimal replacements for basic facilities used in the dynamic linker.
- Copyright (C) 1995-1998,2000-2002,2004,2005 Free Software Foundation, Inc.
+ Copyright (C) 1995-1998,2000-2002,2004,2005,2006
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -128,14 +129,13 @@ free (void *ptr)
void * weak_function
realloc (void *ptr, size_t n)
{
- void *new;
if (ptr == NULL)
return malloc (n);
assert (ptr == alloc_last_block);
+ size_t old_size = alloc_ptr - alloc_last_block;
alloc_ptr = alloc_last_block;
- new = malloc (n);
- assert (new == ptr);
- return new;
+ void *new = malloc (n);
+ return new != ptr ? memcpy (new, ptr, old_size) : new;
}
/* Avoid signal frobnication in setjmp/longjmp. Keeps things smaller. */
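
The dl-minimal.c change above makes the bootstrap realloc stop assuming that re-malloc'ing the last block returns the same address; if the bump allocator had to move (for instance onto a fresh page), the old contents are now copied over. A toy bump allocator showing the same only-the-last-block realloc rule, with invented names (it is not the glibc implementation):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy bump allocator: a fixed arena, a cursor, and the start of the most
   recently handed-out block.  Only that last block can be resized. */
static char arena[1024];
static char *alloc_ptr = arena;
static char *alloc_last_block;

static void *toy_malloc (size_t n)
{
  alloc_last_block = alloc_ptr;
  alloc_ptr += (n + 15) & ~(size_t) 15;    /* keep 16-byte alignment */
  assert (alloc_ptr <= arena + sizeof arena);
  return alloc_last_block;
}

static void *toy_realloc (void *ptr, size_t n)
{
  if (ptr == NULL)
    return toy_malloc (n);
  assert (ptr == alloc_last_block);        /* only the last block may grow */
  size_t old_size = alloc_ptr - alloc_last_block;
  alloc_ptr = alloc_last_block;            /* hand the old block back ... */
  void *new = toy_malloc (n);              /* ... and carve out a larger one */
  /* In this toy arena the block never moves; the memcpy mirrors the
     defensive copy the patched glibc realloc performs when it does. */
  return new != ptr ? memcpy (new, ptr, old_size) : new;
}

int main (void)
{
  char *p = toy_realloc (NULL, 8);
  strcpy (p, "scope");
  p = toy_realloc (p, 64);                 /* grow the same block in place */
  puts (p);
  return 0;
}

The assert documents the allocator's contract: only the most recently returned block may be resized, which is all ld.so's bootstrap code ever needs.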
diff --git a/elf/dl-object.c b/elf/dl-object.c
index 86f7a8e4d9..c5dae9ef11 100644
--- a/elf/dl-object.c
+++ b/elf/dl-object.c
@@ -1,5 +1,5 @@
/* Storage management for the chain of loaded shared objects.
- Copyright (C) 1995-2002, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1995-2002, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -82,8 +82,14 @@ _dl_new_object (char *realname, const char *libname, int type,
/* Use the 'l_scope_mem' array by default for the the 'l_scope'
information. If we need more entries we will allocate a large
array dynamically. */
- new->l_scope = new->l_scope_mem;
- new->l_scope_max = sizeof (new->l_scope_mem) / sizeof (new->l_scope_mem[0]);
+ new->l_scoperec = &new->l_scoperec_mem;
+ new->l_scope_max = (sizeof (new->l_scope_realmem.scope_elems)
+ / sizeof (new->l_scope_realmem.scope_elems[0]));
+
+ /* No need to initialize the scope lock if the initializer is zero. */
+#if _RTLD_MRLOCK_INITIALIZER != 0
+ __rtld_mrlock_initialize (new->l_scoperec_mem.lock);
+#endif
/* Counter for the scopes we have to handle. */
idx = 0;
@@ -98,7 +104,8 @@ _dl_new_object (char *realname, const char *libname, int type,
l->l_next = new;
/* Add the global scope. */
- new->l_scope[idx++] = &GL(dl_ns)[nsid]._ns_loaded->l_searchlist;
+ new->l_scoperec->scope[idx++]
+ = &GL(dl_ns)[nsid]._ns_loaded->l_searchlist;
}
else
GL(dl_ns)[nsid]._ns_loaded = new;
@@ -114,15 +121,15 @@ _dl_new_object (char *realname, const char *libname, int type,
loader = loader->l_loader;
/* Insert the scope if it isn't the global scope we already added. */
- if (idx == 0 || &loader->l_searchlist != new->l_scope[0])
+ if (idx == 0 || &loader->l_searchlist != new->l_scoperec->scope[0])
{
if ((mode & RTLD_DEEPBIND) != 0 && idx != 0)
{
- new->l_scope[1] = new->l_scope[0];
+ new->l_scoperec->scope[1] = new->l_scoperec->scope[0];
idx = 0;
}
- new->l_scope[idx] = &loader->l_searchlist;
+ new->l_scoperec->scope[idx] = &loader->l_searchlist;
}
new->l_local_scope[0] = &new->l_searchlist;
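
The preprocessor guard in the dl-object.c hunk is a small idiom: a new link map arrives zero-filled, so if the lock's initializer constant is all-zero the explicit initialization call can be compiled out entirely. A hypothetical lock type (names invented, not glibc's _RTLD_MRLOCK machinery) showing the same pattern:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical lock whose unlocked state happens to be all-zero. */
typedef struct { int state; } toy_mrlock;
#define TOY_MRLOCK_INITIALIZER 0

#if TOY_MRLOCK_INITIALIZER != 0
static void toy_mrlock_initialize (toy_mrlock *l)
{
  l->state = TOY_MRLOCK_INITIALIZER;
}
#endif

struct object
{
  toy_mrlock lock;
  /* ... more fields ... */
};

int main (void)
{
  /* The object is allocated zero-filled, so the lock is already in its
     initial state ... */
  struct object *obj = calloc (1, sizeof *obj);

  /* ... and runtime initialization is only compiled in when the initial
     state is not the all-zero pattern. */
#if TOY_MRLOCK_INITIALIZER != 0
  toy_mrlock_initialize (&obj->lock);
#endif

  printf ("lock state = %d\n", obj->lock.state);
  free (obj);
  return 0;
}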
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 8d057f82eb..85b9637305 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -31,6 +31,7 @@
#include <ldsodefs.h>
#include <bp-sym.h>
#include <caller.h>
+#include <sysdep-cancel.h>
#include <dl-dst.h>
@@ -343,7 +344,7 @@ dl_open_worker (void *a)
start the profiling. */
struct link_map *old_profile_map = GL(dl_profile_map);
- _dl_relocate_object (l, l->l_scope, 1, 1);
+ _dl_relocate_object (l, l->l_scoperec->scope, 1, 1);
if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
{
@@ -356,7 +357,7 @@ dl_open_worker (void *a)
}
else
#endif
- _dl_relocate_object (l, l->l_scope, lazy, 0);
+ _dl_relocate_object (l, l->l_scoperec->scope, lazy, 0);
}
if (l == new)
@@ -374,11 +375,13 @@ dl_open_worker (void *a)
not been loaded here and now. */
if (imap->l_init_called && imap->l_type == lt_loaded)
{
- struct r_scope_elem **runp = imap->l_scope;
+ struct r_scope_elem **runp = imap->l_scoperec->scope;
size_t cnt = 0;
while (*runp != NULL)
{
+ if (*runp == &new->l_searchlist)
+ break;
++cnt;
++runp;
}
@@ -391,35 +394,63 @@ dl_open_worker (void *a)
{
/* The 'r_scope' array is too small. Allocate a new one
dynamically. */
- struct r_scope_elem **newp;
- size_t new_size = imap->l_scope_max * 2;
+ size_t new_size;
+ struct r_scoperec *newp;
- if (imap->l_scope == imap->l_scope_mem)
+ if (imap->l_scoperec != &imap->l_scoperec_mem
+ && imap->l_scope_max < NINIT_SCOPE_ELEMS (imap)
+ && imap->l_scoperec_mem.nusers == 0)
{
- newp = (struct r_scope_elem **)
- malloc (new_size * sizeof (struct r_scope_elem *));
- if (newp == NULL)
- _dl_signal_error (ENOMEM, "dlopen", NULL,
- N_("cannot create scope list"));
- imap->l_scope = memcpy (newp, imap->l_scope,
- cnt * sizeof (imap->l_scope[0]));
+ new_size = NINIT_SCOPE_ELEMS (imap);
+ newp = &imap->l_scoperec_mem;
}
else
{
- newp = (struct r_scope_elem **)
- realloc (imap->l_scope,
- new_size * sizeof (struct r_scope_elem *));
+ new_size = imap->l_scope_max * 2;
+ newp = (struct r_scoperec *)
+ malloc (sizeof (struct r_scoperec)
+ + new_size * sizeof (struct r_scope_elem *));
if (newp == NULL)
_dl_signal_error (ENOMEM, "dlopen", NULL,
N_("cannot create scope list"));
- imap->l_scope = newp;
+ }
+
+ newp->nusers = 0;
+ newp->remove_after_use = false;
+ newp->notify = false;
+ memcpy (newp->scope, imap->l_scoperec->scope,
+ cnt * sizeof (imap->l_scoperec->scope[0]));
+ struct r_scoperec *old = imap->l_scoperec;
+
+ if (old == &imap->l_scoperec_mem)
+ imap->l_scoperec = newp;
+ else if (SINGLE_THREAD_P)
+ {
+ imap->l_scoperec = newp;
+ free (old);
+ }
+ else
+ {
+ __rtld_mrlock_change (imap->l_scoperec_lock);
+ imap->l_scoperec = newp;
+ __rtld_mrlock_done (imap->l_scoperec_lock);
+
+ atomic_increment (&old->nusers);
+ old->remove_after_use = true;
+ if (atomic_decrement_val (&old->nusers) == 0)
+ /* No user, we can free it here and now. */
+ free (old);
}
imap->l_scope_max = new_size;
}
- imap->l_scope[cnt++] = &new->l_searchlist;
- imap->l_scope[cnt] = NULL;
+ /* First terminate the extended list. Otherwise a thread
+ might use the new last element and then use the garbage
+ at offset IDX+1. */
+ imap->l_scoperec->scope[cnt + 1] = NULL;
+ atomic_write_barrier ();
+ imap->l_scoperec->scope[cnt] = &new->l_searchlist;
}
#if USE_TLS
/* Only add TLS memory if this object is loaded now and
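
The tail of the dl-open.c hunk above appends to a NULL-terminated scope array that lock-free readers may be walking at the same time: the new terminator at index cnt+1 is written first, then a write barrier, then the new element at index cnt becomes visible. A stand-alone sketch of the same publish order, using a C11 release store where the patch uses atomic_write_barrier (my substitution, not glibc's macro):

#include <stdatomic.h>
#include <stdio.h>

#define MAX 8

/* NULL-terminated array that concurrent readers traverse without locks. */
static const char *_Atomic scope[MAX];

/* Append ELEM at index CNT (the current terminator slot).  Readers must
   never see ELEM without a terminator behind it, so the new terminator is
   written first and the element is published last with release ordering. */
static void append_scope (size_t cnt, const char *elem)
{
  atomic_store_explicit (&scope[cnt + 1], NULL, memory_order_relaxed);
  /* Release store: the new NULL above is visible to any reader that can
     already observe the new element. */
  atomic_store_explicit (&scope[cnt], elem, memory_order_release);
}

int main (void)
{
  atomic_store (&scope[0], "global");
  atomic_store (&scope[1], "local");       /* scope[2] is already NULL */
  append_scope (2, "dlopened");

  for (size_t i = 0; ; ++i)
    {
      const char *s = atomic_load_explicit (&scope[i], memory_order_acquire);
      if (s == NULL)
        break;
      puts (s);
    }
  return 0;
}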
diff --git a/elf/dl-profile.c b/elf/dl-profile.c
index 41214c1b08..47033f32ef 100644
--- a/elf/dl-profile.c
+++ b/elf/dl-profile.c
@@ -1,5 +1,5 @@
/* Profiling of shared libraries.
- Copyright (C) 1997-2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1997-2002, 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
Based on the BSD mcount implementation.
@@ -509,24 +509,24 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
size_t newfromidx;
to_index = (data[narcs].self_pc
/ (HASHFRACTION * sizeof (*tos)));
- newfromidx = atomic_exchange_and_add (&fromidx, 1) + 1;
+ newfromidx = catomic_exchange_and_add (&fromidx, 1) + 1;
froms[newfromidx].here = &data[narcs];
froms[newfromidx].link = tos[to_index];
tos[to_index] = newfromidx;
- atomic_increment (&narcs);
+ catomic_increment (&narcs);
}
/* If we still have no entry stop searching and insert. */
if (*topcindex == 0)
{
- uint_fast32_t newarc = atomic_exchange_and_add (narcsp, 1);
+ uint_fast32_t newarc = catomic_exchange_and_add (narcsp, 1);
/* In rare cases it could happen that all entries in FROMS are
occupied. So we cannot count this anymore. */
if (newarc >= fromlimit)
goto done;
- *topcindex = atomic_exchange_and_add (&fromidx, 1) + 1;
+ *topcindex = catomic_exchange_and_add (&fromidx, 1) + 1;
fromp = &froms[*topcindex];
fromp->here = &data[newarc];
@@ -534,7 +534,7 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
data[newarc].self_pc = selfpc;
data[newarc].count = 0;
fromp->link = 0;
- atomic_increment (&narcs);
+ catomic_increment (&narcs);
break;
}
@@ -547,7 +547,7 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
}
/* Increment the counter. */
- atomic_increment (&fromp->here->count);
+ catomic_increment (&fromp->here->count);
done:
;
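
The atomic_* to catomic_* switch in dl-profile.c (and in dl-fptr.c above) uses glibc-internal macros that fall back to a plain, cheaper read-modify-write when the process is known to be single-threaded. A rough illustration of the idea, with an invented single_threaded flag standing in for glibc's internal bookkeeping:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for glibc's knowledge of whether another thread exists. */
static bool single_threaded = true;

/* "Conditionally atomic" increment: skip the expensive atomic RMW (the
   lock prefix on x86) when no other thread can race with us. */
static void catomic_increment_sketch (atomic_uint *mem)
{
  if (single_threaded)
    atomic_store_explicit (mem,
                           atomic_load_explicit (mem, memory_order_relaxed) + 1,
                           memory_order_relaxed);
  else
    atomic_fetch_add (mem, 1);
}

int main (void)
{
  atomic_uint narcs = 0;
  catomic_increment_sketch (&narcs);       /* cheap path, no contention */
  single_threaded = false;
  catomic_increment_sketch (&narcs);       /* real atomic increment */
  printf ("narcs = %u\n", atomic_load (&narcs));
  return 0;
}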
diff --git a/elf/dl-runtime.c b/elf/dl-runtime.c
index f92cbe26bd..8bf5b89eb6 100644
--- a/elf/dl-runtime.c
+++ b/elf/dl-runtime.c
@@ -1,5 +1,5 @@
/* On-demand PLT fixup for shared objects.
- Copyright (C) 1995-2002,2003,2004,2005 Free Software Foundation, Inc.
+ Copyright (C) 1995-2002,2003,2004,2005,2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,6 +24,7 @@
#include <unistd.h>
#include <sys/param.h>
#include <ldsodefs.h>
+#include <sysdep-cancel.h>
#include "dynamic-link.h"
#if (!defined ELF_MACHINE_NO_RELA && !defined ELF_MACHINE_PLT_REL) \
@@ -92,16 +93,36 @@ _dl_fixup (
version = NULL;
}
+ struct r_scoperec *scoperec = l->l_scoperec;
+ if (l->l_type == lt_loaded && !SINGLE_THREAD_P)
+ {
+ __rtld_mrlock_lock (l->l_scoperec_lock);
+ scoperec = l->l_scoperec;
+ atomic_increment (&scoperec->nusers);
+ __rtld_mrlock_unlock (l->l_scoperec_lock);
+ }
+
result = _dl_lookup_symbol_x (strtab + sym->st_name, l, &sym,
- l->l_scope, version, ELF_RTYPE_CLASS_PLT,
+ scoperec->scope, version,
+ ELF_RTYPE_CLASS_PLT,
DL_LOOKUP_ADD_DEPENDENCY, NULL);
+ if (l->l_type == lt_loaded && !SINGLE_THREAD_P
+ && atomic_decrement_val (&scoperec->nusers) == 0
+ && __builtin_expect (scoperec->remove_after_use, 0))
+ {
+ if (scoperec->notify)
+ __rtld_notify (scoperec->nusers);
+ else
+ free (scoperec);
+ }
+
/* Currently result contains the base load address (or link map)
of the object that defines sym. Now add in the symbol
offset. */
value = DL_FIXUP_MAKE_VALUE (result,
- sym ? LOOKUP_VALUE_ADDRESS (result)
- + sym->st_value : 0);
+ sym ? (LOOKUP_VALUE_ADDRESS (result)
+ + sym->st_value) : 0);
}
else
{
@@ -174,11 +195,30 @@ _dl_profile_fixup (
version = NULL;
}
+ struct r_scoperec *scoperec = l->l_scoperec;
+ if (l->l_type == lt_loaded && !SINGLE_THREAD_P)
+ {
+ __rtld_mrlock_lock (l->l_scoperec_lock);
+ scoperec = l->l_scoperec;
+ atomic_increment (&scoperec->nusers);
+ __rtld_mrlock_unlock (l->l_scoperec_lock);
+ }
+
result = _dl_lookup_symbol_x (strtab + refsym->st_name, l, &defsym,
- l->l_scope, version,
+ scoperec->scope, version,
ELF_RTYPE_CLASS_PLT,
DL_LOOKUP_ADD_DEPENDENCY, NULL);
+ if (l->l_type == lt_loaded && !SINGLE_THREAD_P
+ && atomic_decrement_val (&scoperec->nusers) == 0
+ && __builtin_expect (scoperec->remove_after_use, 0))
+ {
+ if (scoperec->notify)
+ __rtld_notify (scoperec->nusers);
+ else
+ free (scoperec);
+ }
+
/* Currently result contains the base load address (or link map)
of the object that defines sym. Now add in the symbol
offset. */
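
The pattern added to both fixup paths above is: grab the current scope record under a brief multiple-reader lock, pin it with nusers, drop the lock, do the (possibly slow) lookup, then unpin and clean up if dlopen retired the record in the meantime. A simplified stand-alone version, with a pthread_rwlock_t standing in for __rtld_mrlock (an assumption for illustration; the real primitive is glibc-internal):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified scope record: a pin count plus the "free me when the last
   pin drops" flag the dl-runtime.c hunks test after the lookup. */
struct scoperec
{
  atomic_int nusers;
  atomic_bool remove_after_use;
};

struct link_map_sketch
{
  pthread_rwlock_t scoperec_lock;          /* stands in for __rtld_mrlock */
  struct scoperec *_Atomic scoperec;
};

/* Fast-path reader (the _dl_fixup pattern): load the current record and
   pin it under a brief shared lock so it cannot be freed under us. */
static struct scoperec *pin_scoperec (struct link_map_sketch *l)
{
  pthread_rwlock_rdlock (&l->scoperec_lock);
  struct scoperec *rec = atomic_load (&l->scoperec);
  atomic_fetch_add (&rec->nusers, 1);
  pthread_rwlock_unlock (&l->scoperec_lock);
  return rec;
}

/* Unpin after the lookup; the last user frees a retired record. */
static void unpin_scoperec (struct scoperec *rec)
{
  if (atomic_fetch_sub (&rec->nusers, 1) == 1
      && atomic_load (&rec->remove_after_use))
    free (rec);
}

/* Writer (the dlopen pattern): publish a new record, then retire the old
   one; if a reader still pins it, that reader performs the free. */
static void swap_scoperec (struct link_map_sketch *l, struct scoperec *newrec)
{
  pthread_rwlock_wrlock (&l->scoperec_lock);
  struct scoperec *old = atomic_exchange (&l->scoperec, newrec);
  pthread_rwlock_unlock (&l->scoperec_lock);

  atomic_fetch_add (&old->nusers, 1);      /* pin while flagging it */
  atomic_store (&old->remove_after_use, true);
  unpin_scoperec (old);
}

int main (void)
{
  struct link_map_sketch l;
  pthread_rwlock_init (&l.scoperec_lock, NULL);
  atomic_store (&l.scoperec, (struct scoperec *) calloc (1, sizeof (struct scoperec)));

  struct scoperec *rec = pin_scoperec (&l);                      /* lazy fixup begins */
  swap_scoperec (&l, (struct scoperec *) calloc (1, sizeof (struct scoperec)));
  /* ... the symbol lookup would walk rec->scope here ... */
  unpin_scoperec (rec);                    /* last pin: the retired record is freed */

  free (atomic_load (&l.scoperec));
  pthread_rwlock_destroy (&l.scoperec_lock);
  puts ("retired scope record freed by its last user");
  return 0;
}

Build with cc -pthread. The SINGLE_THREAD_P checks in the real code simply skip all of this when no second thread can exist.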
diff --git a/elf/dl-sym.c b/elf/dl-sym.c
index d2b0ec0dab..34d75a1a67 100644
--- a/elf/dl-sym.c
+++ b/elf/dl-sym.c
@@ -17,6 +17,7 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
+#include <assert.h>
#include <stddef.h>
#include <setjmp.h>
#include <libintl.h>
@@ -24,6 +25,7 @@
#include <dlfcn.h>
#include <ldsodefs.h>
#include <dl-hash.h>
+#include <sysdep-cancel.h>
#ifdef USE_TLS
# include <dl-tls.h>
#endif
@@ -58,6 +60,30 @@ _dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
#endif
+struct call_dl_lookup_args
+{
+ /* Arguments to do_dlsym. */
+ struct link_map *map;
+ const char *name;
+ struct r_scope_elem **scope;
+ struct r_found_version *vers;
+ int flags;
+
+ /* Return values of do_dlsym. */
+ lookup_t loadbase;
+ const ElfW(Sym) **refp;
+};
+
+static void
+call_dl_lookup (void *ptr)
+{
+ struct call_dl_lookup_args *args = (struct call_dl_lookup_args *) ptr;
+ args->map = GLRO(dl_lookup_symbol_x) (args->name, args->map, args->refp,
+ args->scope, args->vers, 0,
+ args->flags, NULL);
+}
+
+
static void *
internal_function
do_sym (void *handle, const char *name, void *who,
@@ -84,10 +110,62 @@ do_sym (void *handle, const char *name, void *who,
}
if (handle == RTLD_DEFAULT)
- /* Search the global scope. */
- result = GLRO(dl_lookup_symbol_x) (name, match, &ref, match->l_scope,
- vers, 0, flags|DL_LOOKUP_ADD_DEPENDENCY,
- NULL);
+ {
+ /* Search the global scope. We have the simple case where
+ we look up in the scope of an object which was part of
+ the initial binary. And then the more complex part
+ where the object is dynamically loaded and the scope
+ array can change. */
+ if (match->l_type != lt_loaded || SINGLE_THREAD_P)
+ result = GLRO(dl_lookup_symbol_x) (name, match, &ref,
+ match->l_scoperec->scope, vers, 0,
+ flags | DL_LOOKUP_ADD_DEPENDENCY,
+ NULL);
+ else
+ {
+ __rtld_mrlock_lock (match->l_scoperec_lock);
+ struct r_scoperec *scoperec = match->l_scoperec;
+ atomic_increment (&scoperec->nusers);
+ __rtld_mrlock_unlock (match->l_scoperec_lock);
+
+ struct call_dl_lookup_args args;
+ args.name = name;
+ args.map = match;
+ args.scope = scoperec->scope;
+ args.vers = vers;
+ args.flags = flags | DL_LOOKUP_ADD_DEPENDENCY;
+ args.refp = &ref;
+
+ const char *objname;
+ const char *errstring = NULL;
+ bool malloced;
+ int err = GLRO(dl_catch_error) (&objname, &errstring, &malloced,
+ call_dl_lookup, &args);
+
+ if (atomic_decrement_val (&scoperec->nusers) == 0
+ && __builtin_expect (scoperec->remove_after_use, 0))
+ {
+ if (scoperec->notify)
+ __rtld_notify (scoperec->nusers);
+ else
+ free (scoperec);
+ }
+
+ if (__builtin_expect (errstring != NULL, 0))
+ {
+ /* The lookup was unsuccessful. Rethrow the error. */
+ char *errstring_dup = strdupa (errstring);
+ char *objname_dup = strdupa (objname);
+ if (malloced)
+ free ((char *) errstring);
+
+ GLRO(dl_signal_error) (err, objname_dup, NULL, errstring_dup);
+ /* NOTREACHED */
+ }
+
+ result = args.map;
+ }
+ }
else if (handle == RTLD_NEXT)
{
if (__builtin_expect (match == GL(dl_ns)[LM_ID_BASE]._ns_loaded, 0))
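
The RTLD_DEFAULT path above runs the lookup through GLRO(dl_catch_error) so that the nusers pin can be dropped even when the lookup throws, and only then rethrows the saved error (copied with strdupa so it survives the free). A minimal catch/release/rethrow frame in that spirit, using setjmp/longjmp and invented names rather than glibc's _dl_catch_error/_dl_signal_error internals:

#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static jmp_buf catch_buf;
static const char *pending_error;

/* "Throw" an error out of the current callback. */
static void throw_error (const char *msg)
{
  pending_error = msg;
  longjmp (catch_buf, 1);
}

/* Run FN; return NULL on success, the error string otherwise. */
static const char *catch_error (void (*fn) (void *), void *arg)
{
  if (setjmp (catch_buf) != 0)
    return pending_error;
  fn (arg);
  return NULL;
}

static void failing_lookup (void *arg)
{
  (void) arg;
  throw_error ("undefined symbol: frobnicate");   /* invented example */
}

int main (void)
{
  int nusers = 1;                          /* the pin we must drop either way */
  const char *err = catch_error (failing_lookup, NULL);

  --nusers;                                /* release the pin first ... */
  if (err != NULL)
    {
      /* ... then propagate; dl-sym.c duplicates the string onto the stack
         before freeing it so it survives the second throw. */
      fprintf (stderr, "dlsym failed: %s\n", err);
      return 1;
    }
  return 0;
}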
diff --git a/elf/rtld.c b/elf/rtld.c
index 7746377f37..ace3a3099d 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -609,7 +609,7 @@ relocate_doit (void *a)
{
struct relocate_args *args = (struct relocate_args *) a;
- _dl_relocate_object (args->l, args->l->l_scope, args->lazy, 0);
+ _dl_relocate_object (args->l, args->l->l_scoperec->scope, args->lazy, 0);
}
static void
@@ -1963,8 +1963,8 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
lookup_t result;
result = _dl_lookup_symbol_x (INTUSE(_dl_argv)[i], main_map,
- &ref, main_map->l_scope, NULL,
- ELF_RTYPE_CLASS_PLT,
+ &ref, main_map->l_scoperec->scope,
+ NULL, ELF_RTYPE_CLASS_PLT,
DL_LOOKUP_ADD_DEPENDENCY, NULL);
loadbase = LOOKUP_VALUE_ADDRESS (result);
@@ -2006,8 +2006,8 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
{
/* Mark the link map as not yet relocated again. */
GL(dl_rtld_map).l_relocated = 0;
- _dl_relocate_object (&GL(dl_rtld_map), main_map->l_scope,
- 0, 0);
+ _dl_relocate_object (&GL(dl_rtld_map),
+ main_map->l_scoperec->scope, 0, 0);
}
}
#define VERNEEDTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGIDX (DT_VERNEED))
@@ -2227,7 +2227,7 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
}
if (l != &GL(dl_rtld_map))
- _dl_relocate_object (l, l->l_scope, GLRO(dl_lazy),
+ _dl_relocate_object (l, l->l_scoperec->scope, GLRO(dl_lazy),
consider_profiling);
#ifdef USE_TLS
@@ -2303,7 +2303,8 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
HP_TIMING_NOW (start);
/* Mark the link map as not yet relocated again. */
GL(dl_rtld_map).l_relocated = 0;
- _dl_relocate_object (&GL(dl_rtld_map), main_map->l_scope, 0, 0);
+ _dl_relocate_object (&GL(dl_rtld_map), main_map->l_scoperec->scope,
+ 0, 0);
HP_TIMING_NOW (stop);
HP_TIMING_DIFF (add, start, stop);
HP_TIMING_ACCUM_NT (relocate_time, add);