author	Ulrich Drepper <drepper@redhat.com>	2003-08-03 06:47:02 +0000
committer	Ulrich Drepper <drepper@redhat.com>	2003-08-03 06:47:02 +0000
commit	f1205aa71fb0131eb9d1e2efdd5fbcfaa85ad27c (patch)
tree	7459ac5d9ff530a22b4035697530a49e49ce3ff7 /nptl/allocatestack.c
parent	0cbc6c4eba0d6c957917e036a0822c84b92443fc (diff)
Update.

	* sysdeps/pthread/createthread.c (do_clone): If __ASSUME_CLONE_STOPPED
	is not defined, do explicit synchronization.
	(create_thread): Do not lock pd->lock here.  If __ASSUME_CLONE_STOPPED
	is not defined also unlock pd->lock for non-debugging case in case
	it is necessary.
	* pthread_create.c (start_thread): Always get and release pd->lock
	if __ASSUME_CLONE_STOPPED is not defined.
	(start_thread_debug): Removed.  Adjust users.
	* allocatestack.c (allocate_stack): Always initialize lock if
	__ASSUME_CLONE_STOPPED is not defined.
	* Makefile (tests): Add tst-sched1.
	* tst-sched1.c: New file.
Diffstat (limited to 'nptl/allocatestack.c')
-rw-r--r--	nptl/allocatestack.c	13
1 file changed, 10 insertions(+), 3 deletions(-)
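The handshake the log entry describes can be pictured with ordinary pthreads
primitives.  The following is a minimal, self-contained sketch, not the actual
createthread.c/pthread_create.c code: pthread_mutex_t stands in for NPTL's
low-level lll_ lock, and the names pd_lock and start_thread_sketch are made up
for illustration.  The creator holds the per-thread lock across thread
creation (substituting for the missing CLONE_STOPPED kernel support), and the
new thread's first action is to get and release that lock, so it cannot run
user code until the creator has finished setting its scheduling parameters.

/* Sketch of the explicit synchronization used when CLONE_STOPPED is
   not available.  pthread_mutex_t stands in for the low-level lock;
   all names here are illustrative, not the real NPTL internals.  */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pd_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
start_thread_sketch (void *arg)
{
  /* Like start_thread in the patch: get and release pd->lock once.
     This blocks until the creator has finished its setup.  */
  pthread_mutex_lock (&pd_lock);
  pthread_mutex_unlock (&pd_lock);

  puts ("child: creator setup is complete, running user code");
  return NULL;
}

int
main (void)
{
  pthread_t th;

  /* Like do_clone in the patch: hold the lock across creation so the
     stillborn child parks itself immediately.  */
  pthread_mutex_lock (&pd_lock);

  if (pthread_create (&th, NULL, start_thread_sketch, NULL) != 0)
    return 1;

  /* ... set scheduling parameters on the still-parked child ... */

  pthread_mutex_unlock (&pd_lock);
  return pthread_join (th, NULL);
}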
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 729f3b8542..6ada1fe138 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -308,7 +308,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       /* The first TSD block is included in the TCB.  */
       pd->specific[0] = pd->specific_1stblock;
 
-#if LLL_LOCK_INITIALIZER != 0
+#if defined __ASSUME_CLONE_STOPPED && LLL_LOCK_INITIALIZER != 0
       /* Initialize the lock.  */
       pd->lock = LLL_LOCK_INITIALIZER;
 #endif
@@ -451,7 +451,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 	     descriptor.  */
 	  pd->specific[0] = pd->specific_1stblock;
 
-#if LLL_LOCK_INITIALIZER != 0
+#if defined __ASSUME_CLONE_STOPPED && LLL_LOCK_INITIALIZER != 0
 	  /* Initialize the lock.  */
 	  pd->lock = LLL_LOCK_INITIALIZER;
 #endif
@@ -564,6 +564,13 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 	}
     }
 
+#ifndef __ASSUME_CLONE_STOPPED
+  /* Initialize the lock.  We have to do this unconditionally if the
+     CLONE_STOPPED flag is not available since then the stillborn
+     thread could be canceled while the lock is taken.  */
+  pd->lock = LLL_LOCK_INITIALIZER;
+#endif
+
   /* We place the thread descriptor at the end of the stack.  */
   *pdp = pd;
 
@@ -744,7 +751,7 @@ __pthread_init_static_tls (struct link_map *map)
 
   /* Now the list with threads using user-allocated stacks.  */
   list_for_each (runp, &__stack_user)
-      init_one_static_tls (list_entry (runp, struct pthread, list), map);
+    init_one_static_tls (list_entry (runp, struct pthread, list), map);
 
   lll_unlock (stack_cache_lock);
 }
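The unconditional reinitialization in the third hunk guards against
descriptor reuse: allocate_stack hands back cached stack blocks, and without
CLONE_STOPPED a stillborn thread can be canceled while pd->lock is taken,
leaving a stale value behind in the cached descriptor.  Below is a
self-contained sketch of that hazard under assumed names: pd_sketch, the
one-element cache array, and allocate_stack_sketch are illustrative
stand-ins, not the real stack-cache machinery.

/* Sketch of the reuse hazard addressed in allocate_stack.  */
#include <stdio.h>

#define LLL_LOCK_INITIALIZER 0		/* assumed stand-in value */

struct pd_sketch
{
  int lock;
};

static struct pd_sketch cache[1];	/* stand-in for the stack cache */

static struct pd_sketch *
allocate_stack_sketch (void)
{
  struct pd_sketch *pd = &cache[0];	/* reuse a cached descriptor */

  /* Without CLONE_STOPPED the stillborn thread may have died while
     the lock was taken, so reset the lock on every reuse instead of
     only when LLL_LOCK_INITIALIZER != 0.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  return pd;
}

int
main (void)
{
  cache[0].lock = 1;		/* simulate a lock left taken */
  struct pd_sketch *pd = allocate_stack_sketch ();
  printf ("lock after reuse: %d\n", pd->lock);	/* prints 0 */
  return 0;
}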