author     Samuel Thibault <samuel.thibault@ens-lyon.org>   2016-09-19 01:44:07 +0200
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>   2016-09-19 01:44:07 +0200
commit     a87bf9a8eab3af79798131b60c1f7f92f995df8c (patch)
tree       cb83315d4b195ad5db94e898311f36cd5fa7cdff
parent     cea6489e43e9756f93f025291154a65fc35aa12c (diff)
Fix exposure of pthread functions
* pthread/pt-alloc.c (__pthread_alloc): Use __pthread_rwlock_wrlock and
__pthread_rwlock_unlock instead of pthread_rwlock_wrlock and
pthread_rwlock_unlock.
* pthread/pt-create.c (__pthread_create_internal): Use
__pthread_rwlock_rdlock and __pthread_rwlock_unlock instead of
pthread_rwlock_rdlock and pthread_rwlock_unlock.
* pthread/pt-dealloc.c (__pthread_dealloc): Use __pthread_cond_broadcast,
__pthread_mutex_lock, and __pthread_mutex_unlock instead of
pthread_cond_broadcast, pthread_mutex_lock, and pthread_mutex_unlock.
* pthread/pt-exit.c (__pthread_exit): Use __pthread_setcancelstate and
__pthread_cond_broadcast instead of pthread_setcancelstate and
pthread_cond_broadcast.
* pthread/pt-internal.h (__pthread_getid, __pthread_setid): Use
__pthread_rwlock_rdlock, __pthread_rwlock_wrlock, and
__pthread_rwlock_unlock instead of pthread_rwlock_rdlock,
pthread_rwlock_wrlock, and pthread_rwlock_unlock.
* pthread/pt-join.c (pthread_join): Use __pthread_cond_wait instead of
pthread_cond_wait.
* sysdeps/hurd/pt-key-delete.c (pthread_key_delete): Use
__pthread_rwlock_rdlock and __pthread_rwlock_unlock instead of
pthread_rwlock_rdlock and pthread_rwlock_unlock.
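The change routes libpthread's internal call sites through the __-prefixed
names rather than the public pthread_* symbols, presumably so that
library-internal locking and signalling do not depend on the externally
visible (and potentially interposed) entry points. Below is a minimal
sketch of the general pattern, under the assumption that the public name is
exported as an alias of the internal definition; the example_* names and
the GCC alias attribute are illustrative only, not the actual libpthread
macros or symbols.

/* Sketch of the internal-name/public-alias pattern (illustrative names).  */

/* Internal entry point: this is what other parts of the library call.  */
int
__example_lock_acquire (int *lock)
{
  *lock = 1;			/* Stand-in for the real locking logic.  */
  return 0;
}

/* The public symbol is an alias of the internal one (GCC attribute syntax
   here; the real tree uses its own alias macros).  */
int example_lock_acquire (int *lock)
  __attribute__ ((alias ("__example_lock_acquire")));

static int example_state_lock;

/* Internal call sites, as changed by this commit, use the __ name directly,
   so code interposing example_lock_acquire cannot divert them.  */
int
__example_internal_caller (void)
{
  return __example_lock_acquire (&example_state_lock);
}

User code keeps calling the public name; only the library-internal callers
switch to the __-prefixed one.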
-rw-r--r--  pthread/pt-alloc.c            14
-rw-r--r--  pthread/pt-create.c            4
-rw-r--r--  pthread/pt-dealloc.c           6
-rw-r--r--  pthread/pt-exit.c              6
-rw-r--r--  pthread/pt-internal.h          8
-rw-r--r--  pthread/pt-join.c              2
-rw-r--r--  sysdeps/hurd/pt-key-delete.c   4
7 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/pthread/pt-alloc.c b/pthread/pt-alloc.c
index 4860f48..21063d5 100644
--- a/pthread/pt-alloc.c
+++ b/pthread/pt-alloc.c
@@ -139,7 +139,7 @@ __pthread_alloc (struct __pthread **pthread)
}
retry:
- pthread_rwlock_wrlock (&__pthread_threads_lock);
+ __pthread_rwlock_wrlock (&__pthread_threads_lock);
if (__pthread_num_threads < __pthread_max_threads)
{
@@ -148,7 +148,7 @@ __pthread_alloc (struct __pthread **pthread)
new->thread = 1 + __pthread_num_threads++;
__pthread_threads[new->thread - 1] = NULL;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
*pthread = new;
return 0;
@@ -157,7 +157,7 @@ __pthread_alloc (struct __pthread **pthread)
else if (__pthread_num_threads >= PTHREAD_THREADS_MAX)
{
/* We have reached the limit on the number of threads per process. */
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
free (new);
return EAGAIN;
@@ -169,7 +169,7 @@ __pthread_alloc (struct __pthread **pthread)
memory allocation, since that's a potentially blocking operation. */
max_threads = __pthread_max_threads;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* Allocate a new lookup table that's twice as large. */
new_max_threads
@@ -181,13 +181,13 @@ __pthread_alloc (struct __pthread **pthread)
return ENOMEM;
}
- pthread_rwlock_wrlock (&__pthread_threads_lock);
+ __pthread_rwlock_wrlock (&__pthread_threads_lock);
/* Check if nobody else has already enlarged the table. */
if (max_threads != __pthread_max_threads)
{
/* Yep, they did. */
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* Free the newly allocated table and try again to allocate a slot. */
free (threads);
@@ -210,7 +210,7 @@ __pthread_alloc (struct __pthread **pthread)
new->thread = 1 + __pthread_num_threads++;
__pthread_threads[new->thread - 1] = NULL;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
free (old_threads);
diff --git a/pthread/pt-create.c b/pthread/pt-create.c
index 417605a..c9e0730 100644
--- a/pthread/pt-create.c
+++ b/pthread/pt-create.c
@@ -202,9 +202,9 @@ __pthread_create_internal (struct __pthread **thread,
could use __thread_setid, however, we only lock for reading as no
other thread should be using this entry (we also assume that the
store is atomic). */
- pthread_rwlock_rdlock (&__pthread_threads_lock);
+ __pthread_rwlock_rdlock (&__pthread_threads_lock);
__pthread_threads[pthread->thread - 1] = pthread;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* At this point it is possible to guess our pthread ID. We have to
make sure that all functions taking a pthread_t argument can
diff --git a/pthread/pt-dealloc.c b/pthread/pt-dealloc.c
index e324800..f44aefa 100644
--- a/pthread/pt-dealloc.c
+++ b/pthread/pt-dealloc.c
@@ -49,14 +49,14 @@ __pthread_dealloc (struct __pthread *pthread)
by the standards. */
__pthread_mutex_lock (&pthread->state_lock);
if (pthread->state != PTHREAD_EXITED)
- pthread_cond_broadcast (&pthread->state_cond);
+ __pthread_cond_broadcast (&pthread->state_cond);
__pthread_mutex_unlock (&pthread->state_lock);
/* We do not actually deallocate the thread structure, but add it to
a list of re-usable thread structures. */
- pthread_mutex_lock (&__pthread_free_threads_lock);
+ __pthread_mutex_lock (&__pthread_free_threads_lock);
__pthread_enqueue (&__pthread_free_threads, pthread);
- pthread_mutex_unlock (&__pthread_free_threads_lock);
+ __pthread_mutex_unlock (&__pthread_free_threads_lock);
/* Setting PTHREAD->STATE to PTHREAD_TERMINATED makes this TCB
available for reuse. After that point, we can no longer assume
diff --git a/pthread/pt-exit.c b/pthread/pt-exit.c
index 3427de5..b078db2 100644
--- a/pthread/pt-exit.c
+++ b/pthread/pt-exit.c
@@ -39,14 +39,14 @@ __pthread_exit (void *status)
/* Run any cancelation handlers. According to POSIX, the
cancellation cleanup handlers should be called with cancellation
disabled. */
- pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
+ __pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
for (handlers = __pthread_get_cleanup_stack ();
*handlers;
*handlers = (*handlers)->__next)
(*handlers)->__handler ((*handlers)->__arg);
- pthread_setcancelstate (oldstate, &oldstate);
+ __pthread_setcancelstate (oldstate, &oldstate);
/* Decrease the number of threads. We use an atomic operation to
make sure that only the last thread calls `exit'. */
@@ -86,7 +86,7 @@ __pthread_exit (void *status)
/* Broadcast the condition. This will wake up threads that are
waiting to join us. */
- pthread_cond_broadcast (&self->state_cond);
+ __pthread_cond_broadcast (&self->state_cond);
__pthread_mutex_unlock (&self->state_lock);
break;
diff --git a/pthread/pt-internal.h b/pthread/pt-internal.h
index 18b5b4c..c1b6c59 100644
--- a/pthread/pt-internal.h
+++ b/pthread/pt-internal.h
@@ -191,15 +191,15 @@ extern pthread_rwlock_t __pthread_threads_lock;
#define __pthread_getid(thread) \
({ struct __pthread *__t; \
- pthread_rwlock_rdlock (&__pthread_threads_lock); \
+ __pthread_rwlock_rdlock (&__pthread_threads_lock); \
__t = __pthread_threads[thread - 1]; \
- pthread_rwlock_unlock (&__pthread_threads_lock); \
+ __pthread_rwlock_unlock (&__pthread_threads_lock); \
__t; })
#define __pthread_setid(thread, pthread) \
- pthread_rwlock_wrlock (&__pthread_threads_lock); \
+ __pthread_rwlock_wrlock (&__pthread_threads_lock); \
__pthread_threads[thread - 1] = pthread; \
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* Similar to pthread_self, but returns the thread descriptor instead
of the thread ID. */
diff --git a/pthread/pt-join.c b/pthread/pt-join.c
index 122d130..9730afc 100644
--- a/pthread/pt-join.c
+++ b/pthread/pt-join.c
@@ -43,7 +43,7 @@ pthread_join (pthread_t thread, void **status)
/* Rely on pthread_cond_wait being a cancellation point to make
pthread_join one too. */
while (pthread->state == PTHREAD_JOINABLE)
- pthread_cond_wait (&pthread->state_cond, &pthread->state_lock);
+ __pthread_cond_wait (&pthread->state_cond, &pthread->state_lock);
pthread_cleanup_pop (0);
diff --git a/sysdeps/hurd/pt-key-delete.c b/sysdeps/hurd/pt-key-delete.c
index 9d88647..8b2c8bb 100644
--- a/sysdeps/hurd/pt-key-delete.c
+++ b/sysdeps/hurd/pt-key-delete.c
@@ -40,7 +40,7 @@ pthread_key_delete (pthread_key_t key)
__pthread_key_destructors[key] = PTHREAD_KEY_INVALID;
__pthread_key_invalid_count ++;
- pthread_rwlock_rdlock (&__pthread_threads_lock);
+ __pthread_rwlock_rdlock (&__pthread_threads_lock);
for (i = 0; i < __pthread_num_threads; ++i)
{
struct __pthread *t;
@@ -55,7 +55,7 @@ pthread_key_delete (pthread_key_t key)
if (t->thread_specifics)
hurd_ihash_remove (t->thread_specifics, key);
}
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
}
__pthread_mutex_unlock (&__pthread_key_lock);