summaryrefslogtreecommitdiff
path: root/pthread/pt-alloc.c
diff options
context:
space:
mode:
authorSamuel Thibault <samuel.thibault@ens-lyon.org>2016-09-19 01:44:07 +0200
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2016-09-19 01:44:07 +0200
commita87bf9a8eab3af79798131b60c1f7f92f995df8c (patch)
treecb83315d4b195ad5db94e898311f36cd5fa7cdff /pthread/pt-alloc.c
parentcea6489e43e9756f93f025291154a65fc35aa12c (diff)
Fix exposition of pthread functions
* pthread/pt-alloc.c (__pthread_alloc): Use __pthread_rwlock_wrlock and __pthread_rwlock_unlock instead of pthread_rwlock_wrlock and pthread_rwlock_unlock. * pthread/pt-create.c (__pthread_create_internal): Use __pthread_rwlock_rdlock and __pthread_rwlock_unlock instead of pthread_rwlock_rdlock and pthread_rwlock_unlock. * pthread/pt-dealloc.c (__pthread_dealloc): Use __pthread_cond_broadcast, __pthread_mutex_lock, and __pthread_mutex_unlock instead of pthread_cond_broadcast, pthread_mutex_lock, and pthread_mutex_unlock. * pthread/pt-exit.c (__pthread_exit): Use __pthread_setcancelstate and __pthread_cond_broadcast instead of pthread_setcancelstate and pthread_cond_broadcast. * pthread/pt-internal.h (__pthread_getid, __pthread_setid): Use __pthread_rwlock_rdlock, __pthread_rwlock_wrlock, and __pthread_rwlock_unlock instead of pthread_rwlock_rdlock, pthread_rwlock_wrlock, and pthread_rwlock_unlock. * pthread/pt-join.c (pthread_join): Use __pthread_cond_wait instead of pthread_cond_wait. * sysdeps/hurd/pt-key-delete.c (pthread_key_delete): Use __pthread_rwlock_rdlock and __pthread_rwlock_unlock instead of pthread_rwlock_rdlock and pthread_rwlock_unlock.
Diffstat (limited to 'pthread/pt-alloc.c')
-rw-r--r--  pthread/pt-alloc.c  14
1 file changed, 7 insertions, 7 deletions
diff --git a/pthread/pt-alloc.c b/pthread/pt-alloc.c
index 4860f48..21063d5 100644
--- a/pthread/pt-alloc.c
+++ b/pthread/pt-alloc.c
@@ -139,7 +139,7 @@ __pthread_alloc (struct __pthread **pthread)
}
retry:
- pthread_rwlock_wrlock (&__pthread_threads_lock);
+ __pthread_rwlock_wrlock (&__pthread_threads_lock);
if (__pthread_num_threads < __pthread_max_threads)
{
@@ -148,7 +148,7 @@ __pthread_alloc (struct __pthread **pthread)
new->thread = 1 + __pthread_num_threads++;
__pthread_threads[new->thread - 1] = NULL;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
*pthread = new;
return 0;
@@ -157,7 +157,7 @@ __pthread_alloc (struct __pthread **pthread)
else if (__pthread_num_threads >= PTHREAD_THREADS_MAX)
{
/* We have reached the limit on the number of threads per process. */
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
free (new);
return EAGAIN;
@@ -169,7 +169,7 @@ __pthread_alloc (struct __pthread **pthread)
memory allocation, since that's a potentially blocking operation. */
max_threads = __pthread_max_threads;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* Allocate a new lookup table that's twice as large. */
new_max_threads
@@ -181,13 +181,13 @@ __pthread_alloc (struct __pthread **pthread)
return ENOMEM;
}
- pthread_rwlock_wrlock (&__pthread_threads_lock);
+ __pthread_rwlock_wrlock (&__pthread_threads_lock);
/* Check if nobody else has already enlarged the table. */
if (max_threads != __pthread_max_threads)
{
/* Yep, they did. */
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* Free the newly allocated table and try again to allocate a slot. */
free (threads);
@@ -210,7 +210,7 @@ __pthread_alloc (struct __pthread **pthread)
new->thread = 1 + __pthread_num_threads++;
__pthread_threads[new->thread - 1] = NULL;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
free (old_threads);