Diffstat (limited to 'nptl/allocatestack.c')
 nptl/allocatestack.c | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index ddf91e5c10..f30c88f301 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -103,7 +103,7 @@ static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40MiBi by default.  */
 static size_t stack_cache_actsize;
 
 /* Mutex protecting this variable.  */
-static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;
+static int stack_cache_lock = LLL_LOCK_INITIALIZER;
 
 /* List of queued stack frames.  */
 static LIST_HEAD (stack_cache);
@@ -139,7 +139,7 @@ get_cached_stack (size_t *sizep, void **memp)
   struct pthread *result = NULL;
   list_t *entry;
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Search the cache for a matching entry.  We search for the
      smallest stack which has at least the required size.  Note that
@@ -172,7 +172,7 @@ get_cached_stack (size_t *sizep, void **memp)
       || __builtin_expect (result->stackblock_size > 4 * size, 0))
     {
       /* Release the lock.  */
-      lll_unlock (stack_cache_lock);
+      lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
       return NULL;
     }
@@ -187,7 +187,7 @@ get_cached_stack (size_t *sizep, void **memp)
   stack_cache_actsize -= result->stackblock_size;
 
   /* Release the lock early.  */
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   /* Report size and location of the stack to the caller.  */
   *sizep = result->stackblock_size;
@@ -400,12 +400,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 
 
       /* Prepare to modify global data.  */
-      lll_lock (stack_cache_lock);
+      lll_lock (stack_cache_lock, LLL_PRIVATE);
 
       /* And add to the list of stacks in use.  */
       list_add (&pd->list, &__stack_user);
 
-      lll_unlock (stack_cache_lock);
+      lll_unlock (stack_cache_lock, LLL_PRIVATE);
     }
   else
     {
@@ -544,12 +544,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 
 
       /* Prepare to modify global data.  */
-      lll_lock (stack_cache_lock);
+      lll_lock (stack_cache_lock, LLL_PRIVATE);
 
       /* And add to the list of stacks in use.  */
       list_add (&pd->list, &stack_used);
 
-      lll_unlock (stack_cache_lock);
+      lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
 
       /* There might have been a race.  Another thread might have
@@ -598,12 +598,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 	    mprot_error:
 	      err = errno;
 
-	      lll_lock (stack_cache_lock);
+	      lll_lock (stack_cache_lock, LLL_PRIVATE);
 
 	      /* Remove the thread from the list.  */
 	      list_del (&pd->list);
 
-	      lll_unlock (stack_cache_lock);
+	      lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
 	      /* Get rid of the TLS block we allocated.  */
 	      _dl_deallocate_tls (TLS_TPADJ (pd), false);
@@ -699,7 +699,7 @@ void
 internal_function
 __deallocate_stack (struct pthread *pd)
 {
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Remove the thread from the list of threads with user defined
      stacks.  */
@@ -715,7 +715,7 @@ __deallocate_stack (struct pthread *pd)
   /* Free the memory associated with the ELF TLS.  */
   _dl_deallocate_tls (TLS_TPADJ (pd), false);
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 }
 
 
@@ -732,7 +732,7 @@ __make_stacks_executable (void **stack_endp)
   const size_t pagemask = ~(__getpagesize () - 1);
 #endif
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   list_t *runp;
   list_for_each (runp, &stack_used)
@@ -761,7 +761,7 @@ __make_stacks_executable (void **stack_endp)
 	break;
     }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return err;
 }
@@ -837,7 +837,7 @@ __find_thread_by_id (pid_t tid)
 {
   struct pthread *result = NULL;
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Iterate over the list with system-allocated threads first.  */
   list_t *runp;
@@ -869,7 +869,7 @@ __find_thread_by_id (pid_t tid)
     }
 
 out:
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return result;
 }
@@ -920,7 +920,7 @@ attribute_hidden
 __nptl_setxid (struct xid_command *cmdp)
 {
   int result;
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   __xidcmd = cmdp;
   cmdp->cntr = 0;
@@ -966,7 +966,7 @@ __nptl_setxid (struct xid_command *cmdp)
       result = -1;
     }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return result;
 }
@@ -995,7 +995,7 @@ void
 attribute_hidden
 __pthread_init_static_tls (struct link_map *map)
 {
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Iterate over the list with system-allocated threads first.  */
   list_t *runp;
@@ -1006,7 +1006,7 @@ __pthread_init_static_tls (struct link_map *map)
   list_for_each (runp, &__stack_user)
     init_one_static_tls (list_entry (runp, struct pthread, list), map);
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 }
 
 
@@ -1014,7 +1014,7 @@ void
 attribute_hidden
 __wait_lookup_done (void)
 {
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   struct pthread *self = THREAD_SELF;
 
@@ -1063,5 +1063,5 @@ __wait_lookup_done (void)
       while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
     }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 }
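
Every hunk above makes the same two-part change: the lock word loses its
lll_lock_t typedef and becomes a plain int, and each lll_lock/lll_unlock on
stack_cache_lock gains an explicit LLL_PRIVATE argument. stack_cache_lock is
only ever contended between threads of a single process, so the kernel can be
told (via FUTEX_PRIVATE_FLAG on Linux) to skip the cross-process futex hash
lookup on the wait/wake paths. The sketch below is a minimal illustration of
that idiom, not glibc's actual lll_lock (which is an arch-specific assembler
fast path); the demo_* names are hypothetical, and it assumes Linux with C11
atomics.

#include <stdatomic.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Stand-in for stack_cache_lock: 0 = unlocked, 1 = locked,
   2 = locked with (possible) waiters.  */
static atomic_int demo_lock;

static void
demo_lll_lock (atomic_int *futexp)
{
  int expected = 0;
  /* Fast path: uncontended acquire with one CAS, no syscall.  */
  if (atomic_compare_exchange_strong (futexp, &expected, 1))
    return;
  /* Slow path: mark the lock contended and sleep in the kernel.
     FUTEX_PRIVATE_FLAG is what passing LLL_PRIVATE ultimately
     selects: the futex is hashed per-process, not via shared
     memory mappings.  */
  while (atomic_exchange (futexp, 2) != 0)
    syscall (SYS_futex, futexp, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
             2, NULL, NULL, 0);
}

static void
demo_lll_unlock (atomic_int *futexp)
{
  /* Enter the kernel only if a waiter may be sleeping.  */
  if (atomic_exchange (futexp, 0) == 2)
    syscall (SYS_futex, futexp, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
             1, NULL, NULL, 0);
}

The reason the argument is explicit at every call site rather than baked into
the macro is that not every low-level lock in NPTL is process-private:
process-shared synchronization objects must keep using the shared futex form,
so each caller states which scope applies.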