path: root/elf/dl-tls.c
author    Samuel Thibault <samuel.thibault@ens-lyon.org>  2016-08-20 20:20:41 +0200
committer Samuel Thibault <samuel.thibault@ens-lyon.org>  2016-08-20 20:20:41 +0200
commit    75d37d0a7b6ddc923aaf58727f7b74547ffe85ac (patch)
tree      9ad50fad65a95b5296bf35053a009921483e5444 /elf/dl-tls.c
parent    a1bda1e53b60b8f67de6e1f0e6ac2f5fee2d1e5b (diff)
parent    10ca877fa1bcccdd6c136060ec3804a426b7d291 (diff)
Merge commit 'refs/top-bases/t/hurdsig-boot-fix' into t/hurdsig-boot-fix
Diffstat (limited to 'elf/dl-tls.c')
-rw-r--r--  elf/dl-tls.c | 219
1 file changed, 139 insertions(+), 80 deletions(-)
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index dbaea0aa91..20c7e33c41 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -1,5 +1,5 @@
/* Thread-local storage handling in the ELF dynamic linker. Generic version.
- Copyright (C) 2002-2014 Free Software Foundation, Inc.
+ Copyright (C) 2002-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -23,6 +23,7 @@
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
+#include <atomic.h>
#include <tls.h>
#include <dl-tls.h>
@@ -34,14 +35,12 @@
/* Out-of-memory handler. */
-#ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
_dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
-#endif
size_t
@@ -105,6 +104,33 @@ _dl_next_tls_modid (void)
}
+size_t
+internal_function
+_dl_count_modids (void)
+{
+ /* It is rare that we have gaps; see elf/dl-open.c (_dl_open) where
+ we fail to load a module and unload it leaving a gap. If we don't
+ have gaps then the number of modids is the current maximum so
+ return that. */
+ if (__glibc_likely (!GL(dl_tls_dtv_gaps)))
+ return GL(dl_tls_max_dtv_idx);
+
+ /* We have gaps and are forced to count the non-NULL entries. */
+ size_t n = 0;
+ struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);
+ while (runp != NULL)
+ {
+ for (size_t i = 0; i < runp->len; ++i)
+ if (runp->slotinfo[i].map != NULL)
+ ++n;
+
+ runp = runp->next;
+ }
+
+ return n;
+}
+
+
#ifdef SHARED
void
internal_function
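
The new _dl_count_modids above takes the fast path when no module has ever been unloaded, and otherwise walks the slotinfo list counting non-NULL entries. A minimal stand-alone sketch of that gap-aware count; the slot_list type and SLOTS_PER_NODE are illustrative stand-ins, not glibc's dtv_slotinfo_list:

#include <stddef.h>
#include <stdio.h>

#define SLOTS_PER_NODE 4

struct slot_list
{
  void *slots[SLOTS_PER_NODE];   /* NULL marks a gap (unloaded module).  */
  struct slot_list *next;
};

static size_t
count_used_slots (const struct slot_list *runp)
{
  size_t n = 0;
  for (; runp != NULL; runp = runp->next)
    for (size_t i = 0; i < SLOTS_PER_NODE; ++i)
      if (runp->slots[i] != NULL)
        ++n;
  return n;
}

int
main (void)
{
  int a, b, c;
  struct slot_list tail = { { &c, NULL, NULL, NULL }, NULL };
  struct slot_list head = { { &a, NULL, &b, NULL }, &tail };  /* gap at index 1 */
  printf ("%zu modules in use\n", count_used_slots (&head));  /* prints 3 */
  return 0;
}
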
@@ -370,6 +396,53 @@ _dl_allocate_tls_storage (void)
}
+#ifndef SHARED
+extern dtv_t _dl_static_dtv[];
+# define _dl_initial_dtv (&_dl_static_dtv[1])
+#endif
+
+static dtv_t *
+_dl_resize_dtv (dtv_t *dtv)
+{
+ /* Resize the dtv. */
+ dtv_t *newp;
+ /* Load GL(dl_tls_max_dtv_idx) atomically since it may be written to by
+ other threads concurrently. */
+ size_t newsize
+ = atomic_load_acquire (&GL(dl_tls_max_dtv_idx)) + DTV_SURPLUS;
+ size_t oldsize = dtv[-1].counter;
+
+ if (dtv == GL(dl_initial_dtv))
+ {
+ /* This is the initial dtv that was either statically allocated in
+ __libc_setup_tls or allocated during rtld startup using the
+ dl-minimal.c malloc instead of the real malloc. We can't free
+ it, we have to abandon the old storage. */
+
+ newp = malloc ((2 + newsize) * sizeof (dtv_t));
+ if (newp == NULL)
+ oom ();
+ memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
+ }
+ else
+ {
+ newp = realloc (&dtv[-1],
+ (2 + newsize) * sizeof (dtv_t));
+ if (newp == NULL)
+ oom ();
+ }
+
+ newp[0].counter = newsize;
+
+ /* Clear the newly allocated part. */
+ memset (newp + 2 + oldsize, '\0',
+ (newsize - oldsize) * sizeof (dtv_t));
+
+ /* Return the generation counter. */
+ return &newp[1];
+}
+
+
void *
internal_function
_dl_allocate_tls_init (void *result)
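
The layout _dl_resize_dtv manages is worth spelling out: the raw allocation has 2 + newsize cells, and the pointer handed back points at the second one, so callers see the capacity at dtv[-1], the generation counter at dtv[0], and per-module entries from dtv[1] upward. A sketch of that growable header-at-negative-index vector, using plain size_t cells instead of dtv_t and omitting glibc's special branch for the statically allocated initial dtv (which cannot be passed to realloc):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static size_t *
resize_vec (size_t *vec, size_t newsize)
{
  size_t oldsize = vec != NULL ? vec[-1] : 0;
  size_t *raw = realloc (vec != NULL ? &vec[-1] : NULL,
                         (2 + newsize) * sizeof (size_t));
  if (raw == NULL)
    abort ();
  raw[0] = newsize;              /* capacity, read back as vec[-1] */
  if (vec == NULL)
    raw[1] = 0;                  /* fresh vector: zero the generation slot */
  /* Zero only the freshly added tail, preserving existing entries.  */
  memset (raw + 2 + oldsize, 0, (newsize - oldsize) * sizeof (size_t));
  return &raw[1];                /* entries are indexed from [1] upward */
}

int
main (void)
{
  size_t *vec = resize_vec (NULL, 4);
  vec[1] = 42;                   /* entry for "module 1" */
  vec = resize_vec (vec, 8);     /* grow; existing entries survive */
  printf ("capacity %zu, entry %zu\n", vec[-1], vec[1]);
  free (&vec[-1]);
  return 0;
}
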
@@ -383,6 +456,16 @@ _dl_allocate_tls_init (void *result)
size_t total = 0;
size_t maxgen = 0;
+ /* Check if the current dtv is big enough. */
+ if (dtv[-1].counter < GL(dl_tls_max_dtv_idx))
+ {
+ /* Resize the dtv. */
+ dtv = _dl_resize_dtv (dtv);
+
+ /* Install this new dtv in the thread data structures. */
+ INSTALL_DTV (result, &dtv[-1]);
+ }
+
/* We have to prepare the dtv for all currently loaded modules using
TLS. For those which are dynamically loaded we add the values
indicating deferred allocation. */
@@ -407,19 +490,17 @@ _dl_allocate_tls_init (void *result)
/* Keep track of the maximum generation number. This might
not be the generation counter. */
+ assert (listp->slotinfo[cnt].gen <= GL(dl_tls_generation));
maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);
+ dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
+ dtv[map->l_tls_modid].pointer.is_static = false;
+
if (map->l_tls_offset == NO_TLS_OFFSET
|| map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
- {
- /* For dynamically loaded modules we simply store
- the value indicating deferred allocation. */
- dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
- dtv[map->l_tls_modid].pointer.is_static = false;
- continue;
- }
+ continue;
- assert (map->l_tls_modid == cnt);
+ assert (map->l_tls_modid == total + cnt);
assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
@@ -431,8 +512,6 @@ _dl_allocate_tls_init (void *result)
#endif
/* Copy the initialization image and clear the BSS part. */
- dtv[map->l_tls_modid].pointer.val = dest;
- dtv[map->l_tls_modid].pointer.is_static = true;
memset (__mempcpy (dest, map->l_tls_initimage,
map->l_tls_initimage_size), '\0',
map->l_tls_blocksize - map->l_tls_initimage_size);
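
The copy-then-clear idiom above relies on __mempcpy returning the byte just past the copied TLS initialization image, so the following memset zeroes exactly the .tbss tail of the block. A stand-alone equivalent with plain memcpy (the glibc-internal __mempcpy merely fuses the two address computations):

#include <string.h>

static void
init_tls_block (void *dest, const void *initimage,
                size_t image_size, size_t block_size)
{
  /* block_size >= image_size, as asserted in the hunk above.  */
  memcpy (dest, initimage, image_size);
  memset ((char *) dest + image_size, '\0', block_size - image_size);
}
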
@@ -464,11 +543,6 @@ _dl_allocate_tls (void *mem)
rtld_hidden_def (_dl_allocate_tls)
-#ifndef SHARED
-extern dtv_t _dl_static_dtv[];
-# define _dl_initial_dtv (&_dl_static_dtv[1])
-#endif
-
void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
@@ -600,13 +674,16 @@ _dl_update_slotinfo (unsigned long int req_modid)
struct link_map *map = listp->slotinfo[cnt].map;
if (map == NULL)
{
- /* If this modid was used at some point the memory
- might still be allocated. */
- if (! dtv[total + cnt].pointer.is_static
- && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
+ if (dtv[-1].counter >= total + cnt)
{
- free (dtv[total + cnt].pointer.val);
+ /* If this modid was used at some point the memory
+ might still be allocated. */
+ if (! dtv[total + cnt].pointer.is_static
+ && (dtv[total + cnt].pointer.val
+ != TLS_DTV_UNALLOCATED))
+ free (dtv[total + cnt].pointer.val);
dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
+ dtv[total + cnt].pointer.is_static = false;
}
continue;
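
The guard added above (dtv[-1].counter >= total + cnt) matters because a slot for an unloaded module may lie beyond the end of this thread's dtv, and indexing past the recorded capacity would touch unallocated memory. A sketch of the guard with toy types; NULL plays the role of TLS_DTV_UNALLOCATED here (in glibc it is a distinct sentinel, not NULL):

#include <stdlib.h>
#include <stdbool.h>

struct entry { void *val; bool is_static; };

static void
clear_stale_slot (struct entry *dtv, size_t capacity, size_t modid)
{
  if (capacity < modid)
    return;                       /* dtv never grew this far: nothing to free */
  if (!dtv[modid].is_static && dtv[modid].val != NULL)
    free (dtv[modid].val);        /* dynamically allocated block, now stale */
  dtv[modid].val = NULL;
  dtv[modid].is_static = false;
}
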
@@ -617,41 +694,10 @@ _dl_update_slotinfo (unsigned long int req_modid)
assert (total + cnt == modid);
if (dtv[-1].counter < modid)
{
- /* Reallocate the dtv. */
- dtv_t *newp;
- size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
- size_t oldsize = dtv[-1].counter;
-
- assert (map->l_tls_modid <= newsize);
-
- if (dtv == GL(dl_initial_dtv))
- {
- /* This is the initial dtv that was allocated
- during rtld startup using the dl-minimal.c
- malloc instead of the real malloc. We can't
- free it, we have to abandon the old storage. */
-
- newp = malloc ((2 + newsize) * sizeof (dtv_t));
- if (newp == NULL)
- oom ();
- memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
- }
- else
- {
- newp = realloc (&dtv[-1],
- (2 + newsize) * sizeof (dtv_t));
- if (newp == NULL)
- oom ();
- }
+ /* Resize the dtv. */
+ dtv = _dl_resize_dtv (dtv);
- newp[0].counter = newsize;
-
- /* Clear the newly allocated part. */
- memset (newp + 2 + oldsize, '\0',
- (newsize - oldsize) * sizeof (dtv_t));
-
- /* Point dtv to the generation counter. */
- dtv = &newp[1];
+ assert (modid <= dtv[-1].counter);
/* Install this new dtv in the thread data
structures. */
@@ -670,10 +716,8 @@ _dl_update_slotinfo (unsigned long int req_modid)
memalign and not malloc. */
free (dtv[modid].pointer.val);
- /* This module is loaded dynamically- We defer memory
- allocation. */
- dtv[modid].pointer.is_static = false;
dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
+ dtv[modid].pointer.is_static = false;
if (modid == req_modid)
the_map = map;
@@ -711,36 +755,41 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
the_map = listp->slotinfo[idx].map;
}
- again:
/* Make sure that, if a dlopen running in parallel forces the
variable into static storage, we'll wait until the address in the
static TLS block is set up, and use that. If we're undecided
yet, make sure we make the decision holding the lock as well. */
- if (__builtin_expect (the_map->l_tls_offset
- != FORCED_DYNAMIC_TLS_OFFSET, 0))
+ if (__glibc_unlikely (the_map->l_tls_offset
+ != FORCED_DYNAMIC_TLS_OFFSET))
{
__rtld_lock_lock_recursive (GL(dl_load_lock));
- if (__builtin_expect (the_map->l_tls_offset == NO_TLS_OFFSET, 1))
+ if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
{
the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
__rtld_lock_unlock_recursive (GL(dl_load_lock));
}
- else
+ else if (__glibc_likely (the_map->l_tls_offset
+ != FORCED_DYNAMIC_TLS_OFFSET))
{
+#if TLS_TCB_AT_TP
+ void *p = (char *) THREAD_SELF - the_map->l_tls_offset;
+#elif TLS_DTV_AT_TP
+ void *p = (char *) THREAD_SELF + the_map->l_tls_offset + TLS_PRE_TCB_SIZE;
+#else
+# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
+#endif
__rtld_lock_unlock_recursive (GL(dl_load_lock));
- if (__builtin_expect (the_map->l_tls_offset
- != FORCED_DYNAMIC_TLS_OFFSET, 1))
- {
- void *p = dtv[GET_ADDR_MODULE].pointer.val;
- if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
- goto again;
- return (char *) p + GET_ADDR_OFFSET;
- }
+ dtv[GET_ADDR_MODULE].pointer.is_static = true;
+ dtv[GET_ADDR_MODULE].pointer.val = p;
+
+ return (char *) p + GET_ADDR_OFFSET;
}
+ else
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
void *p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
- dtv[GET_ADDR_MODULE].pointer.is_static = false;
+ assert (!dtv[GET_ADDR_MODULE].pointer.is_static);
return (char *) p + GET_ADDR_OFFSET;
}
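
The two static-TLS address computations selected by the preprocessor conditionals above depend on where the thread pointer sits relative to the TLS blocks. A sketch of both variants with a hypothetical thread pointer and an illustrative pre-TCB size; the real TLS_PRE_TCB_SIZE is architecture-specific:

#include <stddef.h>

#define SKETCH_TLS_PRE_TCB_SIZE 0x700   /* illustrative value only */

/* TCB at TP (e.g. x86): static TLS blocks live below the thread
   pointer, so the module's offset is subtracted.  */
static void *
static_tls_addr_tcb_at_tp (char *thread_self, ptrdiff_t l_tls_offset)
{
  return thread_self - l_tls_offset;
}

/* DTV at TP (e.g. PowerPC): blocks live above the thread pointer,
   past the pre-TCB area, so the offset is added.  */
static void *
static_tls_addr_dtv_at_tp (char *thread_self, ptrdiff_t l_tls_offset)
{
  return thread_self + l_tls_offset + SKETCH_TLS_PRE_TCB_SIZE;
}
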
@@ -755,12 +804,22 @@ update_get_addr (GET_ADDR_ARGS)
void *p = dtv[GET_ADDR_MODULE].pointer.val;
- if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
return tls_get_addr_tail (GET_ADDR_PARAM, dtv, the_map);
return (void *) p + GET_ADDR_OFFSET;
}
+/* For all machines that have a non-macro version of __tls_get_addr, we
+ want to use rtld_hidden_proto/rtld_hidden_def in order to call the
+ internal alias for __tls_get_addr from ld.so. This avoids a PLT entry
+ in ld.so for __tls_get_addr. */
+
+#ifndef __tls_get_addr
+extern void * __tls_get_addr (GET_ADDR_ARGS);
+rtld_hidden_proto (__tls_get_addr)
+rtld_hidden_def (__tls_get_addr)
+#endif
/* The generic dynamic and local dynamic model cannot be used in
statically linked applications. */
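
The fast path that the next hunk tunes with __glibc_unlikely works by comparing the thread's cached generation counter (dtv[0].counter) against the global one before doing a direct dtv lookup. A self-contained sketch of that protocol; the types, the stub slow path, and NULL standing in for TLS_DTV_UNALLOCATED are all toys, not glibc's API:

#include <stddef.h>
#include <stdio.h>

struct toy_entry { void *val; };

static size_t global_generation = 1;    /* bumped on each (toy) dlopen */

/* Stub: glibc's update_get_addr/tls_get_addr_tail would refresh the
   dtv and allocate the module's block here.  */
static void *
slow_path (size_t module, size_t offset)
{
  (void) module; (void) offset;
  return NULL;
}

static void *
toy_tls_get_addr (struct toy_entry *dtv, size_t thread_gen,
                  size_t module, size_t offset)
{
  if (thread_gen != global_generation)  /* dtv may lack new modules */
    return slow_path (module, offset);

  void *p = dtv[module].val;
  if (p == NULL)                        /* toy TLS_DTV_UNALLOCATED */
    return slow_path (module, offset);  /* allocate on first access */

  return (char *) p + offset;           /* common case: one load, one add */
}

int
main (void)
{
  static char block[16];
  struct toy_entry dtv[2] = { { NULL }, { block } };
  printf ("%p\n", toy_tls_get_addr (dtv, global_generation, 1, 4));
  return 0;
}
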
@@ -769,12 +828,12 @@ __tls_get_addr (GET_ADDR_ARGS)
{
dtv_t *dtv = THREAD_DTV ();
- if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
+ if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
return update_get_addr (GET_ADDR_PARAM);
void *p = dtv[GET_ADDR_MODULE].pointer.val;
- if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
return tls_get_addr_tail (GET_ADDR_PARAM, dtv, NULL);
return (char *) p + GET_ADDR_OFFSET;
@@ -787,12 +846,12 @@ __tls_get_addr (GET_ADDR_ARGS)
void *
_dl_tls_get_addr_soft (struct link_map *l)
{
- if (__builtin_expect (l->l_tls_modid == 0, 0))
+ if (__glibc_unlikely (l->l_tls_modid == 0))
/* This module has no TLS segment. */
return NULL;
dtv_t *dtv = THREAD_DTV ();
- if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
+ if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
{
/* This thread's DTV is not completely current,
but it might already cover this module. */
@@ -817,7 +876,7 @@ _dl_tls_get_addr_soft (struct link_map *l)
}
void *data = dtv[l->l_tls_modid].pointer.val;
- if (__builtin_expect (data == TLS_DTV_UNALLOCATED, 0))
+ if (__glibc_unlikely (data == TLS_DTV_UNALLOCATED))
/* The DTV is current, but this thread has not yet needed
to allocate this module's segment. */
data = NULL;
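
Unlike __tls_get_addr, the "soft" variant above never allocates as a side effect: it reports the block only if this thread has already materialized it, and returns NULL otherwise. A toy illustration of that contract, with NULL again standing in for TLS_DTV_UNALLOCATED:

#include <stddef.h>
#include <stdio.h>

struct toy_entry { void *val; };

/* Soft lookup: may return NULL; never triggers allocation.  */
static void *
get_addr_soft (const struct toy_entry *dtv, size_t modid)
{
  return dtv[modid].val;
}

int
main (void)
{
  struct toy_entry dtv[4] = { { NULL } };
  static char block[8];
  dtv[2].val = block;
  printf ("mod 1: %p (never touched)\n", get_addr_soft (dtv, 1));
  printf ("mod 2: %p (already allocated)\n", get_addr_soft (dtv, 2));
  return 0;
}
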