author     Samuel Thibault <samuel.thibault@ens-lyon.org>   2016-09-21 00:05:53 +0200
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>   2016-09-21 00:05:53 +0200
commit     1f32bdc2e2f54cf4279b720909ff9c2d64a1c406 (patch)
tree       03761c564ebe347a2924667ac4b4d4fabbb58dfe /pthread
parent     a9d69ae8ab389dc9f47aef519dac4f422b4120ef (diff)
parent     1d49ccdd73c182ad9f280d21d5a5e88bd59db871 (diff)
Merge branch 'master' into master-glibc-2.23
Diffstat (limited to 'pthread')
-rw-r--r--  pthread/pt-alloc.c     | 14
-rw-r--r--  pthread/pt-create.c    |  6
-rw-r--r--  pthread/pt-dealloc.c   |  6
-rw-r--r--  pthread/pt-exit.c      |  6
-rw-r--r--  pthread/pt-internal.h  |  8
-rw-r--r--  pthread/pt-join.c      |  2
6 files changed, 21 insertions(+), 21 deletions(-)
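The changes below all follow one pattern: internal libpthread code stops calling the public pthread_* entry points and uses the reserved __pthread_* names instead. Public names may be interposed or even redefined by the application, so implementation-internal calls must go through the __-prefixed symbols. A minimal sketch of the convention, assuming glibc's usual strong_alias macro (the alias mechanism shown is an assumption, not part of this commit):

    /* Internal entry point: the __-prefixed name is reserved for the
       implementation, so a user-defined pthread_cond_broadcast cannot
       intercept calls made from inside libpthread.  */
    int
    __pthread_cond_broadcast (pthread_cond_t *cond)
    {
      /* ... wake all waiters ... */
      return 0;
    }

    /* Export the public name as an alias of the internal one.  */
    strong_alias (__pthread_cond_broadcast, pthread_cond_broadcast)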
diff --git a/pthread/pt-alloc.c b/pthread/pt-alloc.c
index 4860f48..21063d5 100644
--- a/pthread/pt-alloc.c
+++ b/pthread/pt-alloc.c
@@ -139,7 +139,7 @@ __pthread_alloc (struct __pthread **pthread)
}
retry:
- pthread_rwlock_wrlock (&__pthread_threads_lock);
+ __pthread_rwlock_wrlock (&__pthread_threads_lock);
if (__pthread_num_threads < __pthread_max_threads)
{
@@ -148,7 +148,7 @@ __pthread_alloc (struct __pthread **pthread)
new->thread = 1 + __pthread_num_threads++;
__pthread_threads[new->thread - 1] = NULL;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
*pthread = new;
return 0;
@@ -157,7 +157,7 @@ __pthread_alloc (struct __pthread **pthread)
else if (__pthread_num_threads >= PTHREAD_THREADS_MAX)
{
/* We have reached the limit on the number of threads per process. */
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
free (new);
return EAGAIN;
@@ -169,7 +169,7 @@ __pthread_alloc (struct __pthread **pthread)
memory allocation, since that's a potentially blocking operation. */
max_threads = __pthread_max_threads;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* Allocate a new lookup table that's twice as large. */
new_max_threads
@@ -181,13 +181,13 @@ __pthread_alloc (struct __pthread **pthread)
return ENOMEM;
}
- pthread_rwlock_wrlock (&__pthread_threads_lock);
+ __pthread_rwlock_wrlock (&__pthread_threads_lock);
/* Check if nobody else has already enlarged the table. */
if (max_threads != __pthread_max_threads)
{
/* Yep, they did. */
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* Free the newly allocated table and try again to allocate a slot. */
free (threads);
@@ -210,7 +210,7 @@ __pthread_alloc (struct __pthread **pthread)
new->thread = 1 + __pthread_num_threads++;
__pthread_threads[new->thread - 1] = NULL;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
free (old_threads);
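Beyond the renames, the hunks above show pt-alloc.c's optimistic table-growth scheme: the write lock is dropped before the potentially blocking allocation, retaken afterwards, and the size is re-checked in case another thread grew the table first. A condensed standalone sketch of that retry pattern (table, table_lock, and grow_table are hypothetical names, not taken from pt-alloc.c):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
    static void **table;        /* assumed already allocated ...  */
    static size_t table_size;   /* ... with table_size > 0        */

    /* Double the table, tolerating concurrent growers.  */
    static int
    grow_table (void)
    {
      for (;;)
        {
          pthread_rwlock_rdlock (&table_lock);
          size_t seen = table_size;
          pthread_rwlock_unlock (&table_lock);

          /* Allocate outside the lock: malloc may block.  */
          void **bigger = calloc (seen * 2, sizeof *bigger);
          if (bigger == NULL)
            return ENOMEM;

          pthread_rwlock_wrlock (&table_lock);
          if (seen != table_size)
            {
              /* Somebody else already enlarged it; start over.  */
              pthread_rwlock_unlock (&table_lock);
              free (bigger);
              continue;
            }
          memcpy (bigger, table, seen * sizeof *bigger);
          void **old = table;
          table = bigger;
          table_size = seen * 2;
          pthread_rwlock_unlock (&table_lock);
          free (old);
          return 0;
        }
    }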
diff --git a/pthread/pt-create.c b/pthread/pt-create.c
index d88afae..c9e0730 100644
--- a/pthread/pt-create.c
+++ b/pthread/pt-create.c
@@ -107,7 +107,7 @@ __pthread_create_internal (struct __pthread **thread,
if (!stacksize)
{
struct rlimit rlim;
- getrlimit(RLIMIT_STACK, &rlim);
+ __getrlimit(RLIMIT_STACK, &rlim);
if (rlim.rlim_cur != RLIM_INFINITY)
stacksize = rlim.rlim_cur;
if (!stacksize)
@@ -202,9 +202,9 @@ __pthread_create_internal (struct __pthread **thread,
could use __thread_setid, however, we only lock for reading as no
other thread should be using this entry (we also assume that the
store is atomic). */
- pthread_rwlock_rdlock (&__pthread_threads_lock);
+ __pthread_rwlock_rdlock (&__pthread_threads_lock);
__pthread_threads[pthread->thread - 1] = pthread;
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* At this point it is possible to guess our pthread ID. We have to
make sure that all functions taking a pthread_t argument can
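Besides switching the ID-table store to __pthread_rwlock_*, the hunk above replaces getrlimit with the internal __getrlimit when sizing the default stack. The computation itself is plain POSIX; a self-contained sketch (the 2 MiB fallback is an illustrative assumption, not the value libpthread uses):

    #include <stddef.h>
    #include <sys/resource.h>

    /* Derive a default thread stack size from RLIMIT_STACK, falling
       back to a fixed size when the soft limit is unlimited.  */
    static size_t
    default_stack_size (void)
    {
      struct rlimit rlim;
      size_t stacksize = 0;

      if (getrlimit (RLIMIT_STACK, &rlim) == 0
          && rlim.rlim_cur != RLIM_INFINITY)
        stacksize = rlim.rlim_cur;
      if (stacksize == 0)
        stacksize = 2 * 1024 * 1024;  /* assumed fallback, illustration only */
      return stacksize;
    }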
diff --git a/pthread/pt-dealloc.c b/pthread/pt-dealloc.c
index e324800..f44aefa 100644
--- a/pthread/pt-dealloc.c
+++ b/pthread/pt-dealloc.c
@@ -49,14 +49,14 @@ __pthread_dealloc (struct __pthread *pthread)
by the standards. */
__pthread_mutex_lock (&pthread->state_lock);
if (pthread->state != PTHREAD_EXITED)
- pthread_cond_broadcast (&pthread->state_cond);
+ __pthread_cond_broadcast (&pthread->state_cond);
__pthread_mutex_unlock (&pthread->state_lock);
/* We do not actually deallocate the thread structure, but add it to
a list of re-usable thread structures. */
- pthread_mutex_lock (&__pthread_free_threads_lock);
+ __pthread_mutex_lock (&__pthread_free_threads_lock);
__pthread_enqueue (&__pthread_free_threads, pthread);
- pthread_mutex_unlock (&__pthread_free_threads_lock);
+ __pthread_mutex_unlock (&__pthread_free_threads_lock);
/* Setting PTHREAD->STATE to PTHREAD_TERMINATED makes this TCB
available for reuse. After that point, we can no longer assume
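pt-dealloc.c does not free an exited thread's structure; it pushes it onto a mutex-guarded free list so a later pthread_create can reuse the TCB. A minimal sketch of that recycling pattern (struct tcb and both function names are hypothetical):

    #include <pthread.h>
    #include <stddef.h>

    struct tcb { struct tcb *next; /* thread bookkeeping ... */ };

    static pthread_mutex_t free_list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct tcb *free_list;

    /* Retire a TCB: push it on the free list under the lock.  */
    static void
    tcb_retire (struct tcb *t)
    {
      pthread_mutex_lock (&free_list_lock);
      t->next = free_list;
      free_list = t;
      pthread_mutex_unlock (&free_list_lock);
    }

    /* Reuse one if available; NULL means the caller must allocate.  */
    static struct tcb *
    tcb_reuse (void)
    {
      pthread_mutex_lock (&free_list_lock);
      struct tcb *t = free_list;
      if (t != NULL)
        free_list = t->next;
      pthread_mutex_unlock (&free_list_lock);
      return t;
    }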
diff --git a/pthread/pt-exit.c b/pthread/pt-exit.c
index 3427de5..b078db2 100644
--- a/pthread/pt-exit.c
+++ b/pthread/pt-exit.c
@@ -39,14 +39,14 @@ __pthread_exit (void *status)
/* Run any cancellation handlers. According to POSIX, the
cancellation cleanup handlers should be called with cancellation
disabled. */
- pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
+ __pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
for (handlers = __pthread_get_cleanup_stack ();
*handlers;
*handlers = (*handlers)->__next)
(*handlers)->__handler ((*handlers)->__arg);
- pthread_setcancelstate (oldstate, &oldstate);
+ __pthread_setcancelstate (oldstate, &oldstate);
/* Decrease the number of threads. We use an atomic operation to
make sure that only the last thread calls `exit'. */
@@ -86,7 +86,7 @@ __pthread_exit (void *status)
/* Broadcast the condition. This will wake up threads that are
waiting to join us. */
- pthread_cond_broadcast (&self->state_cond);
+ __pthread_cond_broadcast (&self->state_cond);
__pthread_mutex_unlock (&self->state_lock);
break;
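As the comment in pt-exit.c notes, POSIX requires the cancellation cleanup handlers to run with cancellation disabled, hence the disable/restore bracket around the handler loop. The same bracket in portable application code (a sketch; run_uncancelable is a hypothetical helper):

    #include <pthread.h>

    /* Run FN with cancellation disabled, then restore the caller's
       previous cancelability state.  */
    static void
    run_uncancelable (void (*fn) (void *), void *arg)
    {
      int oldstate;

      pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
      fn (arg);
      pthread_setcancelstate (oldstate, &oldstate);
    }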
diff --git a/pthread/pt-internal.h b/pthread/pt-internal.h
index 18b5b4c..c1b6c59 100644
--- a/pthread/pt-internal.h
+++ b/pthread/pt-internal.h
@@ -191,15 +191,15 @@ extern pthread_rwlock_t __pthread_threads_lock;
#define __pthread_getid(thread) \
({ struct __pthread *__t; \
- pthread_rwlock_rdlock (&__pthread_threads_lock); \
+ __pthread_rwlock_rdlock (&__pthread_threads_lock); \
__t = __pthread_threads[thread - 1]; \
- pthread_rwlock_unlock (&__pthread_threads_lock); \
+ __pthread_rwlock_unlock (&__pthread_threads_lock); \
__t; })
#define __pthread_setid(thread, pthread) \
- pthread_rwlock_wrlock (&__pthread_threads_lock); \
+ __pthread_rwlock_wrlock (&__pthread_threads_lock); \
__pthread_threads[thread - 1] = pthread; \
- pthread_rwlock_unlock (&__pthread_threads_lock);
+ __pthread_rwlock_unlock (&__pthread_threads_lock);
/* Similar to pthread_self, but returns the thread descriptor instead
of the thread ID. */
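The __pthread_getid macro above relies on a GCC statement expression (({ ... })) so the locked table lookup can yield a value. The same shape in isolation (slots and slot_get are hypothetical):

    #include <pthread.h>

    static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;
    static void *slots[64];

    /* Statement expression: the whole block evaluates to __v, the
       slot contents read under the reader lock.  */
    #define slot_get(i)                            \
      ({ void *__v;                                \
         pthread_rwlock_rdlock (&slots_lock);      \
         __v = slots[(i)];                         \
         pthread_rwlock_unlock (&slots_lock);      \
         __v; })

Note that __pthread_setid, as a plain multi-statement macro, is the kind of construct usually wrapped in do { ... } while (0) so it behaves as a single statement after an unbraced if.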
diff --git a/pthread/pt-join.c b/pthread/pt-join.c
index 122d130..9730afc 100644
--- a/pthread/pt-join.c
+++ b/pthread/pt-join.c
@@ -43,7 +43,7 @@ pthread_join (pthread_t thread, void **status)
/* Rely on pthread_cond_wait being a cancellation point to make
pthread_join one too. */
while (pthread->state == PTHREAD_JOINABLE)
- pthread_cond_wait (&pthread->state_cond, &pthread->state_lock);
+ __pthread_cond_wait (&pthread->state_cond, &pthread->state_lock);
pthread_cleanup_pop (0);
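pt-join.c depends on pthread_cond_wait being a cancellation point so that pthread_join becomes one too, which is why the wait loop sits inside a pthread_cleanup_push/pthread_cleanup_pop pair. The canonical user-level form of that cancellable wait (unlock_mutex and wait_for_exit are hypothetical names):

    #include <pthread.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t state_cond = PTHREAD_COND_INITIALIZER;
    static int joinable = 1;

    /* Cleanup handler: keep the mutex consistent if the wait is
       cancelled mid-loop.  */
    static void
    unlock_mutex (void *arg)
    {
      pthread_mutex_unlock ((pthread_mutex_t *) arg);
    }

    static void
    wait_for_exit (void)
    {
      pthread_mutex_lock (&state_lock);
      pthread_cleanup_push (unlock_mutex, &state_lock);
      /* pthread_cond_wait is a cancellation point, so this loop is
         itself cancellable.  */
      while (joinable)
        pthread_cond_wait (&state_cond, &state_lock);
      pthread_cleanup_pop (1);   /* pop and run: unlocks the mutex */
    }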