author     Ulrich Drepper <drepper@redhat.com>    2005-03-18 11:11:42 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2005-03-18 11:11:42 +0000
commit     20fe49b93a8807b7e91732d5a1b21a2d99472793
tree       03326dfe9fea0cad1bbb77d4f546584de6c60b5d
parent     193af754dc157708224817fb8ff8914f2fc5e6e1
        * include/link.h (struct link_map): Remove l_opencount.  Add l_removed.
        Change type of l_idx to int.
        * elf/dl-close.c: Basically rewrite.  Do not use l_opencount to
        determine whether a DSO has to be unloaded.  Instead compute this
        in this function.
        * elf/dl-deps.c: No need to manipulate l_opencount anymore.
        * elf/dl-lookup.c: Likewise.
        * elf/rtld.c: Likewise.
        * elf/dl-open.c: Likewise.  Use l_init_called to determine whether
        object was just loaded.
        * elf/dl-fini.c: Bump l_direct_opencount instead of l_opencount.
        * elf/dl-load.c (_dl_map_object_from_fd): Do not recognize DSO which
        is about to be unloaded as a match.
        (_dl_map_object): Likewise.
        * elf/do-lookup.h (do_lookup_x): Do not look into DSO which is about
        to be unloaded.
        * elf/circleload1.c: Don't use l_opencount anymore.
        * elf/neededtest.c: Likewise.
        * elf/neededtest2.c: Likewise.
        * elf/neededtest3.c: Likewise.
        * elf/neededtest4.c: Likewise.
        * elf/unload.c: Likewise.
        * elf/unload2.c: Likewise.
        * elf/loadtest.c: Likewise.

        * elf/rtld.c: Preloading errors are now never fatal.

2005-03-08  Jakub Jelinek  <jakub@redhat.com>

        * elf/Makefile: Add rules to build and run unload5 test.
        * elf/unload5.c: New file.

2005-03-08  Jakub Jelinek  <jakub@redhat.com>

        * elf/Makefile: Add rules to build and run unload4 test.
        * elf/unload4.c: New file.
        * elf/unload4mod1.c: New file.
        * elf/unload4mod2.c: New file.
        * elf/unload4mod3.c: New file.
        * elf/unload4mod4.c: New file.
Diffstat (limited to 'elf')
-rw-r--r--  elf/Makefile      |  16
-rw-r--r--  elf/circleload1.c |   2
-rw-r--r--  elf/dl-close.c    | 349
-rw-r--r--  elf/dl-deps.c     |   2
-rw-r--r--  elf/dl-fini.c     |   8
-rw-r--r--  elf/dl-load.c     |   5
-rw-r--r--  elf/dl-lookup.c   |  19
-rw-r--r--  elf/dl-open.c     | 185
-rw-r--r--  elf/do-lookup.h   |   4
-rw-r--r--  elf/loadtest.c    |   8
-rw-r--r--  elf/neededtest.c  |   2
-rw-r--r--  elf/neededtest2.c |   2
-rw-r--r--  elf/neededtest3.c |   2
-rw-r--r--  elf/neededtest4.c |   2
-rw-r--r--  elf/rtld.c        | 126
-rw-r--r--  elf/unload.c      |   4
-rw-r--r--  elf/unload2.c     |   4
17 files changed, 322 insertions(+), 418 deletions(-)
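
Note that the include/link.h change listed in the log falls outside the 'elf' limit of this diffstat, so it does not appear in the diff below. As orientation only, the bookkeeping fields the patch touches can be sketched like this (an illustrative excerpt, not the real struct link_map layout):

/* Illustrative excerpt only; the real struct link_map in include/link.h
   has many more members.  Field names follow the ChangeLog above.  */
struct link_map_bookkeeping
{
  /* The per-object reference counter l_opencount is gone; whether a DSO
     can be unloaded is now computed inside _dl_close itself.  */

  unsigned int l_direct_opencount;  /* Explicit dlopen references.  */
  unsigned int l_init_called:1;     /* Constructors have run, i.e. the object
                                       was not loaded by the dlopen call
                                       currently in progress.  */
  unsigned int l_removed:1;         /* New flag: marked for removal; symbol
                                       lookups and object matching must skip
                                       this map.  */
  int l_idx;                        /* Now signed: scratch index used by
                                       _dl_close for its MAPS array.  */
};
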
diff --git a/elf/Makefile b/elf/Makefile
index 6822d1f02c..210b2061e3 100644
--- a/elf/Makefile
+++ b/elf/Makefile
@@ -85,6 +85,7 @@ distribute := rtld-Rules \
check-textrel.c dl-sysdep.h test-dlopenrpathmod.c \
tst-deep1mod1.c tst-deep1mod2.c tst-deep1mod3.c \
unload3mod1.c unload3mod2.c unload3mod3.c unload3mod4.c \
+ unload4mod1.c unload4mod2.c unload4mod3.c unload4mod4.c \
tst-auditmod1.c
CFLAGS-dl-runtime.c = -fexceptions -fasynchronous-unwind-tables
@@ -160,7 +161,7 @@ tests += loadtest restest1 preloadtest loadfail multiload origtest resolvfail \
tst-tls10 tst-tls11 tst-tls12 tst-tls13 tst-tls14 tst-align \
tst-align2 $(tests-execstack-$(have-z-execstack)) tst-dlmodcount \
tst-dlopenrpath tst-deep1 tst-dlmopen1 tst-dlmopen2 tst-dlmopen3 \
- unload3 tst-audit1 tst-global1
+ unload3 unload4 unload5 tst-audit1 tst-global1
# reldep9
test-srcs = tst-pathopt
tests-vis-yes = vismain
@@ -196,7 +197,8 @@ modules-names = testobj1 testobj2 testobj3 testobj4 testobj5 testobj6 \
$(modules-execstack-$(have-z-execstack)) \
tst-dlopenrpathmod tst-deep1mod1 tst-deep1mod2 tst-deep1mod3 \
tst-dlmopen1mod tst-auditmod1 \
- unload3mod1 unload3mod2 unload3mod3 unload3mod4
+ unload3mod1 unload3mod2 unload3mod3 unload3mod4 \
+ unload4mod1 unload4mod2 unload4mod3 unload4mod4
ifeq (yes,$(have-initfini-array))
modules-names += tst-array2dep
endif
@@ -431,6 +433,8 @@ $(objpfx)reldep9mod3.so: $(objpfx)reldep9mod1.so $(objpfx)reldep9mod2.so
$(objpfx)unload3mod1.so: $(objpfx)unload3mod3.so
$(objpfx)unload3mod2.so: $(objpfx)unload3mod3.so
$(objpfx)unload3mod3.so: $(objpfx)unload3mod4.so
+$(objpfx)unload4mod1.so: $(objpfx)unload4mod2.so $(objpfx)unload4mod3.so
+$(objpfx)unload4mod2.so: $(objpfx)unload4mod4.so $(objpfx)unload4mod3.so
LDFLAGS-tst-tlsmod5.so = -nostdlib
LDFLAGS-tst-tlsmod6.so = -nostdlib
@@ -471,6 +475,7 @@ circlemod3a.so-no-z-defs = yes
reldep8mod2.so-no-z-defs = yes
reldep9mod1.so-no-z-defs = yes
unload3mod4.so-no-z-defs = yes
+unload4mod1.so-no-z-defs = yes
# filtmod1.so has a special rule
$(filter-out $(objpfx)filtmod1.so, $(test-modules)): $(objpfx)%.so: $(objpfx)%.os
@@ -691,6 +696,13 @@ $(objpfx)unload3: $(libdl)
$(objpfx)unload3.out: $(objpfx)unload3mod1.so $(objpfx)unload3mod2.so \
$(objpfx)unload3mod3.so $(objpfx)unload3mod4.so
+$(objpfx)unload4: $(libdl)
+$(objpfx)unload4.out: $(objpfx)unload4mod1.so $(objpfx)unload4mod3.so
+
+$(objpfx)unload5: $(libdl)
+$(objpfx)unload5.out: $(objpfx)unload3mod1.so $(objpfx)unload3mod2.so \
+ $(objpfx)unload3mod3.so $(objpfx)unload3mod4.so
+
ifdef libdl
$(objpfx)tst-tls9-static: $(common-objpfx)dlfcn/libdl.a
$(objpfx)tst-tls9-static.out: $(objpfx)tst-tlsmod5.so $(objpfx)tst-tlsmod6.so
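
The new test sources (unload4.c, unload5.c and the unload4mod*.c modules) are new files and are not shown in this diff; unload5 reuses the unload3 modules, as the rules above show. Going only by the link rules for the unload4 modules, a hypothetical driver for that diamond-shaped dependency graph might look roughly like the sketch below; it is not the actual elf/unload4.c, and the symbol name is an assumption.

/* Hypothetical sketch, NOT the actual elf/unload4.c (which is a new file
   and not part of this diff).  The dependency graph implied by the
   Makefile rules above is:
     unload4mod1.so -> unload4mod2.so, unload4mod3.so
     unload4mod2.so -> unload4mod4.so, unload4mod3.so
   The symbol name "foo" is an assumption for illustration only.  */
#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  void *h = dlopen ("unload4mod1.so", RTLD_LAZY);
  if (h == NULL)
    {
      printf ("dlopen failed: %s\n", dlerror ());
      return 1;
    }

  int (*fn) (int) = (int (*) (int)) dlsym (h, "foo");
  if (fn == NULL)
    {
      printf ("dlsym failed: %s\n", dlerror ());
      return 1;
    }
  printf ("foo (16) = %d\n", fn (16));

  /* Closing mod1 should also unload mod2, mod3 and mod4 once nothing else
     references them; the rewritten _dl_close computes this from
     reachability instead of stored l_opencount values.  */
  dlclose (h);
  return 0;
}
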
diff --git a/elf/circleload1.c b/elf/circleload1.c
index 42fa0b5bc2..f5f886a1da 100644
--- a/elf/circleload1.c
+++ b/elf/circleload1.c
@@ -27,7 +27,7 @@ check_loaded_objects (const char **loaded)
for (lm = _r_debug.r_map; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_opencount);
+ printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 6681c5d3e7..51b958dfa7 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -102,19 +102,9 @@ remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
void
_dl_close (void *_map)
{
- struct reldep_list
- {
- struct link_map **rellist;
- unsigned int nrellist;
- unsigned int nhandled;
- struct reldep_list *next;
- bool handled[0];
- } *reldeps = NULL;
- struct link_map **list;
struct link_map *map = _map;
Lmid_t ns = map->l_ns;
unsigned int i;
- unsigned int *new_opencount;
#ifdef USE_TLS
bool any_tls = false;
#endif
@@ -125,160 +115,127 @@ _dl_close (void *_map)
/* Nope. Do nothing. */
return;
- if (__builtin_expect (map->l_opencount, 1) == 0)
+ if (__builtin_expect (map->l_direct_opencount, 1) == 0)
GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));
/* Acquire the lock. */
__rtld_lock_lock_recursive (GL(dl_load_lock));
/* One less direct use. */
- assert (map->l_direct_opencount > 0);
--map->l_direct_opencount;
/* Decrement the reference count. */
- if (map->l_opencount > 1 || map->l_type != lt_loaded)
+ if (map->l_direct_opencount > 1 || map->l_type != lt_loaded)
{
- /* Decrement the object's reference counter, not the dependencies'. */
- --map->l_opencount;
-
/* There are still references to this object. Do nothing more. */
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
- _dl_debug_printf ("\nclosing file=%s; opencount=%u\n",
- map->l_name, map->l_opencount);
-
- /* If the direct use counter reaches zero we have to decrement
- all the dependencies' usage counter. */
- if (map->l_direct_opencount == 0)
- for (i = 1; i < map->l_searchlist.r_nlist; ++i)
- --map->l_searchlist.r_list[i]->l_opencount;
+ _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
+ map->l_name, map->l_direct_opencount);
__rtld_lock_unlock_recursive (GL(dl_load_lock));
return;
}
- list = map->l_initfini;
-
- /* Compute the new l_opencount values. */
- i = map->l_searchlist.r_nlist;
- if (__builtin_expect (i == 0, 0))
- /* This can happen if we handle relocation dependencies for an
- object which wasn't loaded directly. */
- for (i = 1; list[i] != NULL; ++i)
- ;
+#define NWORDS(n) (((n) + 8 * sizeof (unsigned long int) - 1) \
+ / (sizeof (unsigned long int)))
+#define SETBIT(a, n) a[(n) / sizeof (unsigned long int)] \
+ |= 1 << ((n) % (sizeof (unsigned long int)))
+#define ISSET(a, n) (a[(n) / sizeof (unsigned long int)] \
+ & 1 << ((n) % (sizeof (unsigned long int))))
+ const unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
+ unsigned long int used[NWORDS (nloaded)];
+ unsigned long int done[NWORDS (nloaded)];
+ struct link_map *maps[nloaded];
+
+ /* Run over the list and assign indices to the link maps and enter
+ them into the MAPS array. */
+ int idx = 0;
+ for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
+ {
+ l->l_idx = idx;
+ maps[idx] = l;
+ ++idx;
+ }
+ assert (idx == nloaded);
- unsigned int nopencount = i;
- new_opencount = (unsigned int *) alloca (i * sizeof (unsigned int));
+ /* Prepare the bitmaps. */
+ memset (used, '\0', sizeof (used));
+ memset (done, '\0', sizeof (done));
- for (i = 0; list[i] != NULL; ++i)
+ /* Keep track of the lowest index link map we have covered already. */
+ int done_index = -1;
+ while (++done_index < nloaded)
{
- list[i]->l_idx = i;
- new_opencount[i] = list[i]->l_opencount;
- }
- --new_opencount[0];
- for (i = 1; list[i] != NULL; ++i)
- if ((list[i]->l_flags_1 & DF_1_NODELETE) == 0
- /* Decrement counter. */
- && (assert (new_opencount[i] > 0), --new_opencount[i] == 0))
- {
- void mark_removed (struct link_map *remmap)
- {
- /* Test whether this object was also loaded directly. */
- if (remmap->l_searchlist.r_list != NULL
- && remmap->l_direct_opencount > 0)
- {
- /* In this case we have to decrement all the dependencies of
- this object. They are all in MAP's dependency list. */
- unsigned int j;
- struct link_map **dep_list = remmap->l_searchlist.r_list;
-
- for (j = 1; j < remmap->l_searchlist.r_nlist; ++j)
- if (! (dep_list[j]->l_flags_1 & DF_1_NODELETE)
- || ! dep_list[j]->l_init_called)
+ struct link_map *l = maps[done_index];
+
+ if (ISSET (done, done_index))
+ /* Already handled. */
+ continue;
+
+ /* Check whether this object is still used. */
+ if (l->l_type == lt_loaded
+ && l->l_direct_opencount == 0
+ && (l->l_flags_1 & DF_1_NODELETE) == 0
+ && !ISSET (used, done_index))
+ continue;
+
+ /* We need this object and we handle it now. */
+ SETBIT (done, done_index);
+ SETBIT (used, done_index);
+
+ /* Mark all dependencies as used. */
+ if (l->l_initfini != NULL)
+ {
+ struct link_map **lp = &l->l_initfini[1];
+ while (*lp != NULL)
+ {
+ assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
+
+ if (!ISSET (used, (*lp)->l_idx))
{
- assert (dep_list[j]->l_idx < map->l_searchlist.r_nlist);
- assert (new_opencount[dep_list[j]->l_idx] > 0);
- if (--new_opencount[dep_list[j]->l_idx] == 0)
- {
- assert (dep_list[j]->l_type == lt_loaded);
- mark_removed (dep_list[j]);
- }
+ SETBIT (used, (*lp)->l_idx);
+ if ((*lp)->l_idx - 1 < done_index)
+ done_index = (*lp)->l_idx - 1;
}
- }
- if (remmap->l_reldeps != NULL)
+ ++lp;
+ }
+ }
+ /* And the same for relocation dependencies. */
+ if (l->l_reldeps != NULL)
+ for (unsigned int j = 0; j < l->l_reldepsact; ++j)
+ {
+ struct link_map *jmap = l->l_reldeps[j];
+
+ assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
+
+ if (!ISSET (used, jmap->l_idx))
{
- unsigned int j;
- for (j = 0; j < remmap->l_reldepsact; ++j)
- {
- struct link_map *depmap = remmap->l_reldeps[j];
-
- /* Find out whether this object is in our list. */
- if (depmap->l_idx < nopencount
- && list[depmap->l_idx] == depmap)
- {
- /* Yes, it is. If is has a search list, make a
- recursive call to handle this. */
- if (depmap->l_searchlist.r_list != NULL)
- {
- assert (new_opencount[depmap->l_idx] > 0);
- if (--new_opencount[depmap->l_idx] == 0)
- {
- /* This one is now gone, too. */
- assert (depmap->l_type == lt_loaded);
- mark_removed (depmap);
- }
- }
- else
- {
- /* Otherwise we have to handle the dependency
- deallocation here. */
- unsigned int k;
- for (k = 0; depmap->l_initfini[k] != NULL; ++k)
- {
- struct link_map *rl = depmap->l_initfini[k];
-
- if (rl->l_idx < nopencount
- && list[rl->l_idx] == rl)
- {
- assert (new_opencount[rl->l_idx] > 0);
- if (--new_opencount[rl->l_idx] == 0)
- {
- /* Another module to remove. */
- assert (rl->l_type == lt_loaded);
- mark_removed (rl);
- }
- }
- else
- {
- assert (rl->l_opencount > 0);
- if (--rl->l_opencount == 0)
- mark_removed (rl);
- }
- }
- }
- }
- }
+ SETBIT (used, jmap->l_idx);
+ if (jmap->l_idx - 1 < done_index)
+ done_index = jmap->l_idx - 1;
}
}
-
- mark_removed (list[i]);
- }
- assert (new_opencount[0] == 0);
+ }
/* Call all termination functions at once. */
#ifdef SHARED
bool do_audit = GLRO(dl_naudit) > 0 && !GL(dl_ns)[ns]._ns_loaded->l_auditing;
#endif
- for (i = 0; list[i] != NULL; ++i)
+ bool unload_any = false;
+ unsigned int first_loaded = ~0;
+ for (i = 0; i < nloaded; ++i)
{
- struct link_map *imap = list[i];
+ struct link_map *imap = maps[i];
/* All elements must be in the same namespace. */
assert (imap->l_ns == ns);
- if (new_opencount[i] == 0 && imap->l_type == lt_loaded
- && (imap->l_flags_1 & DF_1_NODELETE) == 0)
+ if (!ISSET (used, i))
{
+ assert (imap->l_type == lt_loaded
+ && (imap->l_flags_1 & DF_1_NODELETE) == 0);
+
/* Call its termination function. Do not do it for
half-cooked objects. */
if (imap->l_init_called)
@@ -324,42 +281,59 @@ _dl_close (void *_map)
}
#endif
- /* This object must not be used anymore. We must remove the
- reference from the scope. */
- unsigned int j;
- struct link_map **searchlist = map->l_searchlist.r_list;
- unsigned int nsearchlist = map->l_searchlist.r_nlist;
+ /* This object must not be used anymore. */
+ imap->l_removed = 1;
-#ifndef NDEBUG
- bool found = false;
-#endif
- for (j = 0; j < nsearchlist; ++j)
- if (imap == searchlist[j])
- {
- /* This is the object to remove. Copy all the
- following ones. */
- while (++j < nsearchlist)
- searchlist[j - 1] = searchlist[j];
+ /* We indeed have an object to remove. */
+ unload_any = true;
- searchlist[j - 1] = NULL;
-
- --map->l_searchlist.r_nlist;
-
-#ifndef NDEBUG
- found = true;
-#endif
- break;
- }
- assert (found);
+ /* Remember where the first dynamically loaded object is. */
+ if (i < first_loaded)
+ first_loaded = i;
}
+ /* Else ISSET (used, i). */
+ else if (imap->l_type == lt_loaded)
+ {
+ if (imap->l_searchlist.r_list == NULL
+ && imap->l_initfini != NULL)
+ {
+ /* The object is still used. But the object we are
+ unloading right now is responsible for loading it. If
+ the current object does not have its own scope yet we
+ have to create one. This has to be done before running
+ the finalizers.
+
+ To do this count the number of dependencies. */
+ unsigned int cnt;
+ for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
+ ;
+
+ /* We simply reuse the l_initfini list. */
+ imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
+ imap->l_searchlist.r_nlist = cnt;
+
+ for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
+ if (imap->l_scope[cnt] == &map->l_searchlist)
+ {
+ imap->l_scope[cnt] = &imap->l_searchlist;
+ break;
+ }
+ }
- /* Store the new l_opencount value. */
- imap->l_opencount = new_opencount[i];
+ /* The loader is gone, so mark the object as not having one. */
+ if (imap->l_loader != NULL && !ISSET (used, imap->l_loader->l_idx))
+ imap->l_loader = NULL;
- /* Just a sanity check. */
- assert (imap->l_type == lt_loaded || imap->l_opencount > 0);
+ /* Remember where the first dynamically loaded object is. */
+ if (i < first_loaded)
+ first_loaded = i;
+ }
}
+ /* If there are no objects to unload, do nothing further. */
+ if (!unload_any)
+ goto out;
+
#ifdef SHARED
/* Auditing checkpoint: we will start deleting objects. */
if (__builtin_expect (do_audit, 0))
@@ -393,12 +367,12 @@ _dl_close (void *_map)
/* Check each element of the search list to see if all references to
it are gone. */
- for (i = 0; list[i] != NULL; ++i)
+ for (i = first_loaded; i < nloaded; ++i)
{
- struct link_map *imap = list[i];
- if (imap->l_opencount == 0 && imap->l_type == lt_loaded)
+ struct link_map *imap = maps[i];
+ if (!ISSET (used, i))
{
- struct libname_list *lnp;
+ assert (imap->l_type == lt_loaded);
/* That was the last reference, and this was a dlopen-loaded
object. We can unmap it. */
@@ -527,34 +501,7 @@ _dl_close (void *_map)
if (imap->l_origin != (char *) -1)
free ((char *) imap->l_origin);
- /* If the object has relocation dependencies save this
- information for latter. */
- if (__builtin_expect (imap->l_reldeps != NULL, 0))
- {
- struct reldep_list *newrel;
-
- newrel = (struct reldep_list *) alloca (sizeof (*reldeps)
- + (imap->l_reldepsact
- * sizeof (bool)));
- newrel->rellist = imap->l_reldeps;
- newrel->nrellist = imap->l_reldepsact;
- newrel->next = reldeps;
-
- newrel->nhandled = imap->l_reldepsact;
- unsigned int j;
- for (j = 0; j < imap->l_reldepsact; ++j)
- {
- /* Find out whether this object is in our list. */
- if (imap->l_reldeps[j]->l_idx < nopencount
- && list[imap->l_reldeps[j]->l_idx] == imap->l_reldeps[j])
- /* Yes, it is. */
- newrel->handled[j] = true;
- else
- newrel->handled[j] = false;
- }
-
- reldeps = newrel;
- }
+ free (imap->l_reldeps);
/* Print debugging message. */
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
@@ -564,7 +511,8 @@ _dl_close (void *_map)
/* This name always is allocated. */
free (imap->l_name);
/* Remove the list with all the names of the shared object. */
- lnp = imap->l_libname;
+
+ struct libname_list *lnp = imap->l_libname;
do
{
struct libname_list *this = lnp;
@@ -575,8 +523,7 @@ _dl_close (void *_map)
while (lnp != NULL);
/* Remove the searchlists. */
- if (imap != map)
- free (imap->l_initfini);
+ free (imap->l_initfini);
/* Remove the scope array if we allocated it. */
if (imap->l_scope != imap->l_scope_mem)
@@ -630,26 +577,8 @@ _dl_close (void *_map)
r->r_state = RT_CONSISTENT;
_dl_debug_state ();
- /* Now we can perhaps also remove the modules for which we had
- dependencies because of symbol lookup. */
- while (__builtin_expect (reldeps != NULL, 0))
- {
- while (reldeps->nrellist-- > 0)
- /* Some of the relocation dependencies might be on the
- dependency list of the object we are closing right now.
- They were already handled. Do not close them again. */
- if (reldeps->nrellist < reldeps->nhandled
- && ! reldeps->handled[reldeps->nrellist])
- _dl_close (reldeps->rellist[reldeps->nrellist]);
-
- free (reldeps->rellist);
-
- reldeps = reldeps->next;
- }
-
- free (list);
-
/* Release the lock. */
+ out:
__rtld_lock_unlock_recursive (GL(dl_load_lock));
}
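
The heart of the rewrite is the pass above: rather than decrementing stored l_opencount values, _dl_close now snapshots every link map in the namespace, marks the maps that are still reachable (directly opened, non-lt_loaded, DF_1_NODELETE, or a regular or relocation dependency of a reachable map), and unloads everything left unmarked. The done_index rewind revisits maps whose index is lower than the point the sweep has already passed. Below is a standalone toy version of just that marking pass, using simplified data and conventional bit macros; note that the patch's own NWORDS/SETBIT/ISSET divide by sizeof (unsigned long int) rather than 8 * sizeof, which uses only a few bits per word but stays in bounds because NWORDS over-allocates by the same factor.

/* Standalone toy version of the marking pass, not ld.so code.  The real
   function also keeps objects that are not lt_loaded or that carry
   DF_1_NODELETE; those checks are omitted here.  */
#include <stdio.h>
#include <string.h>

#define NMAPS 5

struct toy_map
{
  const char *name;
  unsigned int direct_opencount;   /* Explicit dlopen references.  */
  int deps[NMAPS];                 /* Indices of dependencies, -1 terminated.  */
};

/* Map 0 is the object being dlclose'd (its direct count already dropped
   to zero).  Map 4 is still dlopen'd and depends on map 2, which in turn
   depends on map 3, so 2 and 3 must survive even though 0 pulled them in.  */
static struct toy_map maps[NMAPS] =
{
  { "mod0", 0, { 1, 2, -1 } },
  { "mod1", 0, { -1 } },
  { "mod2", 0, { 3, -1 } },
  { "mod3", 0, { -1 } },
  { "mod4", 1, { 2, -1 } },
};

#define BITS_PER_WORD (8 * sizeof (unsigned long int))
#define NWORDS(n) (((n) + BITS_PER_WORD - 1) / BITS_PER_WORD)
#define SETBIT(a, n) ((a)[(n) / BITS_PER_WORD] |= 1UL << ((n) % BITS_PER_WORD))
#define ISSET(a, n) (((a)[(n) / BITS_PER_WORD] >> ((n) % BITS_PER_WORD)) & 1)

int
main (void)
{
  unsigned long int used[NWORDS (NMAPS)];
  unsigned long int done[NWORDS (NMAPS)];
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Same shape as the loop in the new _dl_close: sweep forward and, when
     a dependency with a lower index becomes used, rewind DONE_INDEX so
     that map gets (re)visited.  */
  int done_index = -1;
  while (++done_index < NMAPS)
    {
      if (ISSET (done, done_index))
        continue;

      struct toy_map *l = &maps[done_index];
      if (l->direct_opencount == 0 && !ISSET (used, done_index))
        /* Not referenced so far; a later object may still rescue it.  */
        continue;

      SETBIT (done, done_index);
      SETBIT (used, done_index);

      for (int *dep = l->deps; *dep != -1; ++dep)
        if (!ISSET (used, *dep))
          {
            SETBIT (used, *dep);
            if (*dep - 1 < done_index)
              done_index = *dep - 1;
          }
    }

  for (int i = 0; i < NMAPS; ++i)
    printf ("%s: %s\n", maps[i].name,
            ISSET (used, i) ? "still used" : "can be unloaded");
  return 0;
}
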
diff --git a/elf/dl-deps.c b/elf/dl-deps.c
index a95993057c..50d7a0e71c 100644
--- a/elf/dl-deps.c
+++ b/elf/dl-deps.c
@@ -566,8 +566,6 @@ Filters not supported with LD_TRACE_PRELINKING"));
{
/* A direct or transitive dependency is also on the list
of relocation dependencies. Remove the latter. */
- --map->l_reldeps[i]->l_opencount;
-
for (j = i + 1; j < map->l_reldepsact; ++j)
map->l_reldeps[j - 1] = map->l_reldeps[j];
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index b3282089a9..a01b998ef0 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -98,9 +98,9 @@ _dl_fini (void)
maps[i++] = l;
- /* Bump l_opencount of all objects so that they are not
- dlclose()ed from underneath us. */
- ++l->l_opencount;
+ /* Bump l_direct_opencount of all objects so that they are
+ not dlclose()ed from underneath us. */
+ ++l->l_direct_opencount;
}
assert (cnt != LM_ID_BASE || i == nloaded);
assert (cnt == LM_ID_BASE || i == nloaded || i == nloaded - 1);
@@ -237,7 +237,7 @@ _dl_fini (void)
}
/* Correct the previous increment. */
- --l->l_opencount;
+ --l->l_direct_opencount;
}
}
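
The temporary bump of l_direct_opencount here serves the same purpose the old l_opencount bump did: it prevents a destructor from dlclosing an object out from under _dl_fini while it walks its snapshot of the link maps. A contrived module illustrating that hazard (purely hypothetical, not part of this patch):

/* Purely illustrative module, not part of this patch; "someothermod.so"
   is a made-up name.  Build: gcc -shared -fPIC -o mod.so mod.c -ldl.
   Without the temporary l_direct_opencount bump in _dl_fini, a destructor
   like this could drop the last reference to another object while
   _dl_fini is still walking its snapshot of the link maps.  */
#include <dlfcn.h>

static void *other_handle;

static void __attribute__ ((constructor))
init (void)
{
  other_handle = dlopen ("someothermod.so", RTLD_LAZY);
}

static void __attribute__ ((destructor))
fini (void)
{
  if (other_handle != NULL)
    dlclose (other_handle);
}
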
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 97e1e0089c..d9c2da2ac7 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -849,7 +849,7 @@ _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
/* Look again to see if the real name matched another already loaded. */
for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
- if (l->l_ino == st.st_ino && l->l_dev == st.st_dev)
+ if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
{
/* The object is already loaded.
Just bump its reference count and return it. */
@@ -1914,7 +1914,8 @@ _dl_map_object (struct link_map *loader, const char *name, int preloaded,
/* If the requested name matches the soname of a loaded object,
use that object. Elide this check for names that have not
yet been opened. */
- if (__builtin_expect (l->l_faked, 0) != 0)
+ if (__builtin_expect (l->l_faked, 0) != 0
+ || __builtin_expect (l->l_removed, 0) != 0)
continue;
if (!_dl_name_match_p (name, l))
{
diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
index 24fc89af10..b3695aeb99 100644
--- a/elf/dl-lookup.c
+++ b/elf/dl-lookup.c
@@ -91,11 +91,6 @@ add_dependency (struct link_map *undef_map, struct link_map *map)
/* Make sure nobody can unload the object while we are at it. */
__rtld_lock_lock_recursive (GL(dl_load_lock));
- /* Don't create cross-reference between modules which are
- dynamically loaded by the same dlopen() call. */
- if (undef_map->l_opencount == 0 && map->l_opencount == 0)
- goto out;
-
/* Avoid references to objects which cannot be unloaded anyway. */
if (map->l_type != lt_loaded
|| (map->l_flags_1 & DF_1_NODELETE) != 0)
@@ -107,7 +102,6 @@ add_dependency (struct link_map *undef_map, struct link_map *map)
if (undef_map->l_type != lt_loaded
|| (undef_map->l_flags_1 & DF_1_NODELETE) != 0)
{
- ++map->l_opencount;
map->l_flags_1 |= DF_1_NODELETE;
goto out;
}
@@ -172,19 +166,6 @@ add_dependency (struct link_map *undef_map, struct link_map *map)
if (__builtin_expect (act < undef_map->l_reldepsmax, 1))
undef_map->l_reldeps[undef_map->l_reldepsact++] = map;
- if (map->l_searchlist.r_list != NULL)
- /* And increment the counter in the referenced object. */
- ++map->l_opencount;
- else
- /* We have to bump the counts for all dependencies since so far
- this object was only a normal or transitive dependency.
- Now it might be closed with _dl_close() directly. */
- for (list = map->l_initfini; *list != NULL; ++list)
- ++(*list)->l_opencount;
-
- /* As if it is opened through _dl_open. */
- ++map->l_direct_opencount;
-
/* Display information if we are debugging. */
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
_dl_debug_printf ("\
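
For context, the "relocation dependencies" that add_dependency records in l_reldeps are bindings to an object that is not among the referencing object's DT_NEEDED dependencies, typically because the provider is only visible through the global scope. The hunk above drops the reference-count bumping for such dependencies; they are now honored by the reachability pass in dl-close.c instead. A contrived setup that produces one is sketched below as three tiny source files shown together; file and symbol names are made up.

/* provider.c:  gcc -shared -fPIC -o provider.so provider.c  */
int
provided_symbol (void)
{
  return 42;
}

/* consumer.c:  gcc -shared -fPIC -o consumer.so consumer.c
   (undefined references are allowed in shared objects by default, much
   like the *-no-z-defs test modules in the Makefile earlier in this
   patch).  */
extern int provided_symbol (void);

int
call_it (void)
{
  return provided_symbol ();
}

/* main.c:  gcc -o main main.c -ldl; error checking omitted for brevity.
   The lazy binding of provided_symbol inside consumer.so resolves through
   the global scope to provider.so, which is not a DT_NEEDED dependency of
   consumer.so, so ld.so records it as a relocation dependency and keeps
   provider.so loaded until consumer.so itself is unloaded.  */
#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  dlopen ("./provider.so", RTLD_NOW | RTLD_GLOBAL);
  void *cons = dlopen ("./consumer.so", RTLD_LAZY);
  int (*fn) (void) = (int (*) (void)) dlsym (cons, "call_it");
  printf ("%d\n", fn ());
  return 0;
}
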
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 0e06ca15d5..199c75553e 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -281,23 +281,14 @@ dl_open_worker (void *a)
{
/* Let the user know about the opencount. */
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
- _dl_debug_printf ("opening file=%s [%lu]; opencount=%u\n\n",
- new->l_name, new->l_ns, new->l_opencount + 1);
+ _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
+ new->l_name, new->l_ns, new->l_direct_opencount + 1);
/* If the user requested the object to be in the global namespace
but it is not so far, add it now. */
if ((mode & RTLD_GLOBAL) && new->l_global == 0)
(void) add_to_global (new);
- if (new->l_direct_opencount == 1)
- /* This is the only direct reference. Increment all the
- dependencies' reference counter. */
- for (i = 0; i < new->l_searchlist.r_nlist; ++i)
- ++new->l_searchlist.r_list[i]->l_opencount;
- else
- /* Increment just the reference counter of the object. */
- ++new->l_opencount;
-
assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
return;
@@ -386,94 +377,92 @@ dl_open_worker (void *a)
l = l->l_prev;
}
- /* Increment the open count for all dependencies. If the file is
- not loaded as a dependency here add the search list of the newly
- loaded object to the scope. */
+ /* If the file is not loaded now as a dependency, add the search
+ list of the newly loaded object to the scope. */
for (i = 0; i < new->l_searchlist.r_nlist; ++i)
- if (++new->l_searchlist.r_list[i]->l_opencount > 1
- && new->l_real->l_searchlist.r_list[i]->l_type == lt_loaded)
- {
- struct link_map *imap = new->l_searchlist.r_list[i];
- struct r_scope_elem **runp = imap->l_scope;
- size_t cnt = 0;
-
- while (*runp != NULL)
- {
- /* This can happen if imap was just loaded, but during
- relocation had l_opencount bumped because of relocation
- dependency. Avoid duplicates in l_scope. */
- if (__builtin_expect (*runp == &new->l_searchlist, 0))
- break;
-
- ++cnt;
- ++runp;
- }
-
- if (*runp != NULL)
- /* Avoid duplicates. */
- continue;
-
- if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
- {
- /* The 'r_scope' array is too small. Allocate a new one
- dynamically. */
- struct r_scope_elem **newp;
- size_t new_size = imap->l_scope_max * 2;
-
- if (imap->l_scope == imap->l_scope_mem)
- {
- newp = (struct r_scope_elem **)
- malloc (new_size * sizeof (struct r_scope_elem *));
- if (newp == NULL)
- _dl_signal_error (ENOMEM, "dlopen", NULL,
- N_("cannot create scope list"));
- imap->l_scope = memcpy (newp, imap->l_scope,
- cnt * sizeof (imap->l_scope[0]));
- }
- else
- {
- newp = (struct r_scope_elem **)
- realloc (imap->l_scope,
- new_size * sizeof (struct r_scope_elem *));
- if (newp == NULL)
- _dl_signal_error (ENOMEM, "dlopen", NULL,
- N_("cannot create scope list"));
- imap->l_scope = newp;
- }
-
- imap->l_scope_max = new_size;
- }
-
- imap->l_scope[cnt++] = &new->l_searchlist;
- imap->l_scope[cnt] = NULL;
- }
+ {
+ struct link_map *imap = new->l_searchlist.r_list[i];
+
+ /* If the initializer has been called already, the object has
+ not been loaded here and now. */
+ if (imap->l_init_called && imap->l_type == lt_loaded)
+ {
+ struct r_scope_elem **runp = imap->l_scope;
+ size_t cnt = 0;
+
+ while (*runp != NULL)
+ {
+ ++cnt;
+ ++runp;
+ }
+
+ if (*runp != NULL)
+ /* Avoid duplicates. */
+ continue;
+
+ if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
+ {
+ /* The 'r_scope' array is too small. Allocate a new one
+ dynamically. */
+ struct r_scope_elem **newp;
+ size_t new_size = imap->l_scope_max * 2;
+
+ if (imap->l_scope == imap->l_scope_mem)
+ {
+ newp = (struct r_scope_elem **)
+ malloc (new_size * sizeof (struct r_scope_elem *));
+ if (newp == NULL)
+ _dl_signal_error (ENOMEM, "dlopen", NULL,
+ N_("cannot create scope list"));
+ imap->l_scope = memcpy (newp, imap->l_scope,
+ cnt * sizeof (imap->l_scope[0]));
+ }
+ else
+ {
+ newp = (struct r_scope_elem **)
+ realloc (imap->l_scope,
+ new_size * sizeof (struct r_scope_elem *));
+ if (newp == NULL)
+ _dl_signal_error (ENOMEM, "dlopen", NULL,
+ N_("cannot create scope list"));
+ imap->l_scope = newp;
+ }
+
+ imap->l_scope_max = new_size;
+ }
+
+ imap->l_scope[cnt++] = &new->l_searchlist;
+ imap->l_scope[cnt] = NULL;
+ }
#if USE_TLS
- else if (new->l_searchlist.r_list[i]->l_opencount == 1
- /* Only if the module defines thread local data. */
- && __builtin_expect (new->l_searchlist.r_list[i]->l_tls_blocksize
- > 0, 0))
- {
- /* Now that we know the object is loaded successfully add
- modules containing TLS data to the slot info table. We
- might have to increase its size. */
- _dl_add_to_slotinfo (new->l_searchlist.r_list[i]);
-
- if (new->l_searchlist.r_list[i]->l_need_tls_init)
- {
- new->l_searchlist.r_list[i]->l_need_tls_init = 0;
+ /* Only add TLS memory if this object is loaded now and
+ therefore is not yet initialized. */
+ else if (! imap->l_init_called
+ /* Only if the module defines thread local data. */
+ && __builtin_expect (imap->l_tls_blocksize > 0, 0))
+ {
+ /* Now that we know the object is loaded successfully add
+ modules containing TLS data to the slot info table. We
+ might have to increase its size. */
+ _dl_add_to_slotinfo (imap);
+
+ if (imap->l_need_tls_init)
+ {
+ imap->l_need_tls_init = 0;
# ifdef SHARED
- /* Update the slot information data for at least the
- generation of the DSO we are allocating data for. */
- _dl_update_slotinfo (new->l_searchlist.r_list[i]->l_tls_modid);
+ /* Update the slot information data for at least the
+ generation of the DSO we are allocating data for. */
+ _dl_update_slotinfo (imap->l_tls_modid);
# endif
- GL(dl_init_static_tls) (new->l_searchlist.r_list[i]);
- assert (new->l_searchlist.r_list[i]->l_need_tls_init == 0);
- }
+ GL(dl_init_static_tls) (imap);
+ assert (imap->l_need_tls_init == 0);
+ }
- /* We have to bump the generation counter. */
- any_tls = true;
- }
+ /* We have to bump the generation counter. */
+ any_tls = true;
+ }
+ }
/* Bump the generation number if necessary. */
if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
@@ -504,8 +493,8 @@ TLS generation counter wrapped! Please report this."));
/* Let the user know about the opencount. */
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
- _dl_debug_printf ("opening file=%s [%lu]; opencount=%u\n\n",
- new->l_name, new->l_ns, new->l_opencount);
+ _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
+ new->l_name, new->l_ns, new->l_direct_opencount);
}
@@ -581,12 +570,6 @@ no more namespaces available for dlmopen()"));
state if relocation failed, for example. */
if (args.map)
{
- /* Increment open counters for all objects since this
- sometimes has not happened yet. */
- if (args.map->l_searchlist.r_list[0]->l_opencount == 0)
- for (unsigned int i = 0; i < args.map->l_searchlist.r_nlist; ++i)
- ++args.map->l_searchlist.r_list[i]->l_opencount;
-
#ifdef USE_TLS
/* Maybe some of the modules which were loaded use TLS.
Since it will be removed in the following _dl_close call
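
The scope-growing code kept above (now gated on l_init_called rather than on an l_opencount transition) follows a simple pattern: l_scope is a NULL-terminated pointer array that starts in the static l_scope_mem buffer and, once full, is moved to the heap and doubled, with malloc plus memcpy on the first growth and realloc afterwards. A standalone toy version of just that pattern:

/* Standalone toy version of the scope-growth pattern above, with dummy
   pointer types instead of struct r_scope_elem.  */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct scope_holder
{
  void **scope;        /* NULL-terminated array, like l_scope.  */
  size_t scope_max;    /* Allocated slots, like l_scope_max.  */
  void *scope_mem[4];  /* Initial static storage, like l_scope_mem.  */
};

static void
add_to_scope (struct scope_holder *h, void *elem)
{
  /* Find the end, avoiding duplicates as dl-open.c does.  */
  size_t cnt = 0;
  while (h->scope[cnt] != NULL)
    {
      if (h->scope[cnt] == elem)
        return;
      ++cnt;
    }

  if (cnt + 1 >= h->scope_max)
    {
      size_t new_size = h->scope_max * 2;
      void **newp;

      if (h->scope == h->scope_mem)
        {
          /* First growth: the array still lives in static storage, so it
             cannot be realloc'ed; copy it to the heap instead.  */
          newp = malloc (new_size * sizeof (void *));
          if (newp == NULL)
            abort ();
          memcpy (newp, h->scope, cnt * sizeof (void *));
        }
      else
        {
          newp = realloc (h->scope, new_size * sizeof (void *));
          if (newp == NULL)
            abort ();
        }

      h->scope = newp;
      h->scope_max = new_size;
    }

  h->scope[cnt++] = elem;
  h->scope[cnt] = NULL;
}

int
main (void)
{
  struct scope_holder h = { .scope_max = 4 };
  h.scope = h.scope_mem;    /* Starts out pointing at the static buffer.  */

  static int dummy[8];
  for (int i = 0; i < 8; ++i)
    add_to_scope (&h, &dummy[i]);

  printf ("slots: %zu, moved to heap: %s\n", h.scope_max,
          h.scope == h.scope_mem ? "no" : "yes");
  if (h.scope != h.scope_mem)
    free (h.scope);
  return 0;
}
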
diff --git a/elf/do-lookup.h b/elf/do-lookup.h
index c89638980e..62755ea013 100644
--- a/elf/do-lookup.h
+++ b/elf/do-lookup.h
@@ -52,6 +52,10 @@ do_lookup_x (const char *undef_name, unsigned long int hash,
if ((type_class & ELF_RTYPE_CLASS_COPY) && map->l_type == lt_executable)
continue;
+ /* Do not look into objects which are going to be removed. */
+ if (map->l_removed)
+ continue;
+
/* Print some debugging info if wanted. */
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SYMBOLS, 0))
_dl_debug_printf ("symbol=%s; lookup in file=%s [%lu]\n",
diff --git a/elf/loadtest.c b/elf/loadtest.c
index 6b8f4bb7d0..ee106ea152 100644
--- a/elf/loadtest.c
+++ b/elf/loadtest.c
@@ -73,8 +73,8 @@ static const struct
#define OUT \
for (map = _r_debug.r_map; map != NULL; map = map->l_next) \
if (map->l_type == lt_loaded) \
- printf ("name = \"%s\", opencount = %d\n", \
- map->l_name, (int) map->l_opencount); \
+ printf ("name = \"%s\", direct_opencount = %d\n", \
+ map->l_name, (int) map->l_direct_opencount); \
fflush (stdout)
@@ -183,8 +183,8 @@ main (int argc, char *argv[])
for (map = _r_debug.r_map; map != NULL; map = map->l_next)
if (map->l_type == lt_loaded)
{
- printf ("name = \"%s\", opencount = %d\n",
- map->l_name, (int) map->l_opencount);
+ printf ("name = \"%s\", direct_opencount = %d\n",
+ map->l_name, (int) map->l_direct_opencount);
result = 1;
}
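
This OUT macro, together with the check_loaded_objects helpers in circleload1.c above and the neededtest programs below, is how the tests observe the new counter. A standalone equivalent is sketched below; like the tests, it relies on glibc-internal headers.

/* Standalone equivalent of the OUT macro, which, like the tests
   themselves, only builds against glibc's internal headers: the installed
   <link.h> exposes just a handful of public struct link_map members, not
   l_type or l_direct_opencount.  */
#include <link.h>
#include <stdio.h>

int
main (void)
{
  for (struct link_map *map = _r_debug.r_map; map != NULL; map = map->l_next)
    if (map->l_type == lt_loaded)
      printf ("name = \"%s\", direct_opencount = %d\n",
              map->l_name, (int) map->l_direct_opencount);
  return 0;
}
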
diff --git a/elf/neededtest.c b/elf/neededtest.c
index e6e99bfc6d..6c7a952066 100644
--- a/elf/neededtest.c
+++ b/elf/neededtest.c
@@ -27,7 +27,7 @@ check_loaded_objects (const char **loaded)
for (lm = _r_debug.r_map; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_opencount);
+ printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
diff --git a/elf/neededtest2.c b/elf/neededtest2.c
index cf111bc303..b682f15792 100644
--- a/elf/neededtest2.c
+++ b/elf/neededtest2.c
@@ -27,7 +27,7 @@ check_loaded_objects (const char **loaded)
for (lm = _r_debug.r_map; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_opencount);
+ printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
diff --git a/elf/neededtest3.c b/elf/neededtest3.c
index 38b3c6c6b7..ea1dcf4794 100644
--- a/elf/neededtest3.c
+++ b/elf/neededtest3.c
@@ -27,7 +27,7 @@ check_loaded_objects (const char **loaded)
for (lm = _r_debug.r_map; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_opencount);
+ printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
diff --git a/elf/neededtest4.c b/elf/neededtest4.c
index 04ab10e4c9..7514bed499 100644
--- a/elf/neededtest4.c
+++ b/elf/neededtest4.c
@@ -27,7 +27,7 @@ check_loaded_objects (const char **loaded)
for (lm = _r_debug.r_map; lm; lm = lm->l_next)
{
if (lm->l_name && lm->l_name[0])
- printf(" %s, count = %d\n", lm->l_name, (int) lm->l_opencount);
+ printf(" %s, count = %d\n", lm->l_name, (int) lm->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_name)
{
int match = 0;
diff --git a/elf/rtld.c b/elf/rtld.c
index fbb4e4c3f2..56cd39f3c6 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -267,7 +267,6 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
#endif
_dl_setup_hash (&GL(dl_rtld_map));
GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
- GL(dl_rtld_map).l_opencount = 1;
GL(dl_rtld_map).l_map_start = (ElfW(Addr)) _begin;
GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
GL(dl_rtld_map).l_text_end = (ElfW(Addr)) _etext;
@@ -764,14 +763,48 @@ _dl_initial_error_catch_tsd (void)
}
#endif
+
+static unsigned int
+do_preload (char *fname, struct link_map *main_map, const char *where)
+{
+ const char *objname;
+ const char *err_str = NULL;
+ struct map_args args;
+
+ args.str = fname;
+ args.loader = main_map;
+ args.is_preloaded = 1;
+ args.mode = 0;
+
+ unsigned int old_nloaded = GL(dl_ns)[LM_ID_BASE]._ns_nloaded;
+
+ (void) _dl_catch_error (&objname, &err_str, map_doit, &args);
+ if (__builtin_expect (err_str != NULL, 0))
+ {
+ _dl_error_printf ("\
+ERROR: ld.so: object '%s' from %s cannot be preloaded: ignored.\n",
+ fname, where);
+ /* No need to call free, this is still before
+ the libc's malloc is used. */
+ }
+ else if (GL(dl_ns)[LM_ID_BASE]._ns_nloaded != old_nloaded)
+ /* It is no duplicate. */
+ return 1;
+
+ /* Nothing loaded. */
+ return 0;
+}
+
#if defined SHARED && defined _LIBC_REENTRANT \
&& defined __rtld_lock_default_lock_recursive
-static void rtld_lock_default_lock_recursive (void *lock)
+static void
+rtld_lock_default_lock_recursive (void *lock)
{
__rtld_lock_default_lock_recursive (lock);
}
-static void rtld_lock_default_unlock_recursive (void *lock)
+static void
+rtld_lock_default_unlock_recursive (void *lock)
{
__rtld_lock_default_unlock_recursive (lock);
}
@@ -792,8 +825,6 @@ dl_main (const ElfW(Phdr) *phdr,
{
const ElfW(Phdr) *ph;
enum mode mode;
- struct link_map **preloads;
- unsigned int npreloads;
struct link_map *main_map;
size_t file_size;
char *file;
@@ -1024,8 +1055,6 @@ of this helper program; chances are you did not intend to run this program.\n\
main_map->l_text_end = 0;
/* Perhaps the executable has no PT_LOAD header entries at all. */
main_map->l_map_start = ~0;
- /* We opened the file, account for it. */
- ++main_map->l_opencount;
/* And it was opened directly. */
++main_map->l_direct_opencount;
@@ -1449,8 +1478,9 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
/* We have two ways to specify objects to preload: via environment
variable and via the file /etc/ld.so.preload. The latter can also
be used when security is enabled. */
- preloads = NULL;
- npreloads = 0;
+ assert (GL(dl_rtld_map).l_next == NULL);
+ struct link_map **preloads = NULL;
+ unsigned int npreloads = 0;
if (__builtin_expect (preloadlist != NULL, 0))
{
@@ -1469,14 +1499,7 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
if (p[0] != '\0'
&& (__builtin_expect (! INTUSE(__libc_enable_secure), 1)
|| strchr (p, '/') == NULL))
- {
- struct link_map *new_map = _dl_map_object (main_map, p, 1,
- lt_library, 0, 0,
- LM_ID_BASE);
- if (++new_map->l_opencount == 1)
- /* It is no duplicate. */
- ++npreloads;
- }
+ npreloads += do_preload (p, main_map, "LD_PRELOAD");
HP_TIMING_NOW (stop);
HP_TIMING_DIFF (diff, start, stop);
@@ -1548,41 +1571,14 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n",
runp = file;
while ((p = strsep (&runp, ": \t\n")) != NULL)
if (p[0] != '\0')
- {
- const char *objname;
- const char *err_str = NULL;
- struct map_args args;
-
- args.str = p;
- args.loader = main_map;
- args.is_preloaded = 1;
- args.mode = 0;
-
- (void) _dl_catch_error (&objname, &err_str, map_doit,
- &args);
- if (__builtin_expect (err_str != NULL, 0))
- {
- _dl_error_printf ("\
-ERROR: ld.so: object '%s' from %s cannot be preloaded: ignored.\n",
- p, preload_file);
- /* No need to call free, this is still before
- the libc's malloc is used. */
- }
- else if (++args.map->l_opencount == 1)
- /* It is no duplicate. */
- ++npreloads;
- }
+ npreloads += do_preload (p, main_map, preload_file);
}
if (problem != NULL)
{
char *p = strndupa (problem, file_size - (problem - file));
- struct link_map *new_map = _dl_map_object (main_map, p, 1,
- lt_library, 0, 0,
- LM_ID_BASE);
- if (++new_map->l_opencount == 1)
- /* It is no duplicate. */
- ++npreloads;
+
+ npreloads += do_preload (p, main_map, preload_file);
}
HP_TIMING_NOW (stop);
@@ -1594,7 +1590,7 @@ ERROR: ld.so: object '%s' from %s cannot be preloaded: ignored.\n",
}
}
- if (__builtin_expect (npreloads, 0) != 0)
+ if (__builtin_expect (GL(dl_rtld_map).l_next != NULL, 0))
{
/* Set up PRELOADS with a vector of the preloaded libraries. */
struct link_map *l;
@@ -1694,14 +1690,9 @@ ERROR: ld.so: object '%s' from %s cannot be preloaded: ignored.\n",
HP_TIMING_DIFF (diff, start, stop);
HP_TIMING_ACCUM_NT (load_time, diff);
- /* Mark all objects as being in the global scope and set the open
- counter. */
+ /* Mark all objects as being in the global scope. */
for (i = main_map->l_searchlist.r_nlist; i > 0; )
- {
- --i;
- main_map->l_searchlist.r_list[i]->l_global = 1;
- ++main_map->l_searchlist.r_list[i]->l_opencount;
- }
+ main_map->l_searchlist.r_list[--i]->l_global = 1;
#ifndef MAP_ANON
/* We are done mapping things, so close the zero-fill descriptor. */
@@ -1711,18 +1702,22 @@ ERROR: ld.so: object '%s' from %s cannot be preloaded: ignored.\n",
/* Remove _dl_rtld_map from the chain. */
GL(dl_rtld_map).l_prev->l_next = GL(dl_rtld_map).l_next;
- if (GL(dl_rtld_map).l_next)
+ if (GL(dl_rtld_map).l_next != NULL)
GL(dl_rtld_map).l_next->l_prev = GL(dl_rtld_map).l_prev;
- if (__builtin_expect (GL(dl_rtld_map).l_opencount > 1, 1))
+ for (i = 1; i < main_map->l_searchlist.r_nlist; ++i)
+ if (main_map->l_searchlist.r_list[i] == &GL(dl_rtld_map))
+ break;
+
+ bool rtld_multiple_ref = false;
+ if (__builtin_expect (i < main_map->l_searchlist.r_nlist, 1))
{
/* Some DT_NEEDED entry referred to the interpreter object itself, so
put it back in the list of visible objects. We insert it into the
chain in symbol search order because gdb uses the chain's order as
its symbol search order. */
- i = 1;
- while (main_map->l_searchlist.r_list[i] != &GL(dl_rtld_map))
- ++i;
+ rtld_multiple_ref = true;
+
GL(dl_rtld_map).l_prev = main_map->l_searchlist.r_list[i - 1];
if (__builtin_expect (mode, normal) == normal)
{
@@ -1894,7 +1889,7 @@ ERROR: ld.so: object '%s' from %s cannot be preloaded: ignored.\n",
}
else
{
- /* If LD_WARN is set warn about undefined symbols. */
+ /* If LD_WARN is set, warn about undefined symbols. */
if (GLRO(dl_lazy) >= 0 && GLRO(dl_verbose))
{
/* We have to do symbol dependency testing. */
@@ -1904,7 +1899,7 @@ ERROR: ld.so: object '%s' from %s cannot be preloaded: ignored.\n",
args.lazy = GLRO(dl_lazy);
l = main_map;
- while (l->l_next)
+ while (l->l_next != NULL)
l = l->l_next;
do
{
@@ -1915,10 +1910,11 @@ ERROR: ld.so: object '%s' from %s cannot be preloaded: ignored.\n",
&args);
}
l = l->l_prev;
- } while (l);
+ }
+ while (l != NULL);
if ((GLRO(dl_debug_mask) & DL_DEBUG_PRELINK)
- && GL(dl_rtld_map).l_opencount > 1)
+ && rtld_multiple_ref)
{
/* Mark the link map as not yet relocated again. */
GL(dl_rtld_map).l_relocated = 0;
@@ -2198,7 +2194,7 @@ ERROR: ld.so: object '%s' from %s cannot be preloaded: ignored.\n",
/* We must prepare the profiling. */
_dl_start_profile ();
- if (GL(dl_rtld_map).l_opencount > 1)
+ if (rtld_multiple_ref)
{
/* There was an explicit ref to the dynamic linker as a shared lib.
Re-relocate ourselves with user-controlled symbol definitions. */
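
With do_preload, both the LD_PRELOAD loop and the /etc/ld.so.preload loop report a failed preload and carry on ("Preloading errors are now never fatal", per the log). The sketch below is a user-level analogue of that warn-and-continue policy; it uses dlopen rather than the internal map_doit/_dl_catch_error machinery and, unlike do_preload, does not filter out objects that were already loaded.

/* User-level analogue of the non-fatal preload handling in rtld.c:
   try each colon/whitespace-separated name, warn on failure, keep going.
   Build: gcc -o preload-demo preload-demo.c -ldl (-ldl needed only on
   older glibc).  */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <string.h>

static unsigned int
try_preload (char *list, const char *where)
{
  unsigned int nloaded = 0;
  char *runp = list;
  char *p;

  while ((p = strsep (&runp, ": \t\n")) != NULL)
    if (p[0] != '\0')
      {
        if (dlopen (p, RTLD_LAZY | RTLD_GLOBAL) == NULL)
          fprintf (stderr,
                   "object '%s' from %s cannot be preloaded (%s): ignored.\n",
                   p, where, dlerror ());
        else
          ++nloaded;
      }
  return nloaded;
}

int
main (void)
{
  char list[] = "libm.so.6 does-not-exist.so";
  printf ("%u object(s) loaded\n", try_preload (list, "the demo list"));
  return 0;
}
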
diff --git a/elf/unload.c b/elf/unload.c
index 4fd82b7e3a..ffb33482c0 100644
--- a/elf/unload.c
+++ b/elf/unload.c
@@ -12,8 +12,8 @@
#define OUT \
for (map = _r_debug.r_map; map != NULL; map = map->l_next) \
if (map->l_type == lt_loaded) \
- printf ("name = \"%s\", opencount = %d\n", \
- map->l_name, (int) map->l_opencount); \
+ printf ("name = \"%s\", direct_opencount = %d\n", \
+ map->l_name, (int) map->l_direct_opencount); \
fflush (stdout)
typedef struct
diff --git a/elf/unload2.c b/elf/unload2.c
index 7a38053433..e14c6f06af 100644
--- a/elf/unload2.c
+++ b/elf/unload2.c
@@ -9,8 +9,8 @@
#define OUT \
for (map = _r_debug.r_map; map != NULL; map = map->l_next) \
if (map->l_type == lt_loaded) \
- printf ("name = \"%s\", opencount = %d\n", \
- map->l_name, (int) map->l_opencount); \
+ printf ("name = \"%s\", direct_opencount = %d\n", \
+ map->l_name, (int) map->l_direct_opencount); \
fflush (stdout)
int