author     Adhemerval Zanella <adhemerval.zanella@linaro.org>  2017-01-25 17:08:51 -0200
committer  Adhemerval Zanella <adhemerval.zanella@linaro.org>  2017-12-20 11:32:54 -0200
commit     4735850f7a4f07f05ee9ff405238332f4bb3ab50
tree       b16aba7381f9bc06cc2a903a36bd9c7d647e845c
parent     8bfb461e20380dbc579f6dad4c54bb8a55afc82a
nptl: Consolidate pthread_{timed,try}join{_np}
This patch consolidates pthread_join and the GNU extensions to avoid code
duplication.  The functions pthread_join, pthread_tryjoin_np, and
pthread_timedjoin_np are now based on pthread_timedjoin_ex.  It also fixes
some inconsistencies in ESRCH, EINVAL, and EDEADLK handling (where the
implementations differed from each other) and in the cleanup handler (which
now always uses a CAS).

Checked on i686-linux-gnu and x86_64-linux-gnu.

	* nptl/pthreadP.h (__pthread_timedjoin_np): Define.
	* nptl/pthread_join.c (pthread_join): Use __pthread_timedjoin_np.
	* nptl/pthread_tryjoin.c (pthread_tryjoin): Likewise.
	* nptl/pthread_timedjoin.c (cleanup): Use CAS on argument setting.
	(pthread_timedjoin_np): Define internal symbol and common code from
	pthread_join.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h (__lll_timedwait_tid):
	Remove superfluous checks.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (__lll_timedwait_tid):
	Likewise.

Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
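The wrapper files themselves are not part of this diff, but the consolidation
pattern is straightforward: each public entry point forwards to the common
__pthread_timedjoin_ex defined in the new file below, varying only the abstime
and block arguments.  The following is a simplified sketch, not the literal
contents of the other touched files; the internal aliasing, includes
(pthreadP.h, errno.h), and the exact EBUSY pre-check may differ in detail.

/* Sketch only.  Blocking join: no timeout, block == true.  */
int
pthread_join (pthread_t threadid, void **thread_return)
{
  return __pthread_timedjoin_ex (threadid, thread_return, NULL, true);
}

/* Sketch only.  Non-blocking join: report EBUSY while the thread is still
   running, otherwise reuse the common code with block == false.  */
int
pthread_tryjoin_np (pthread_t threadid, void **thread_return)
{
  struct pthread *pd = (struct pthread *) threadid;
  if (pd->tid != 0)
    return EBUSY;
  return __pthread_timedjoin_ex (threadid, thread_return, NULL, false);
}

/* Sketch only.  Timed join: pass the absolute timeout through.  */
int
pthread_timedjoin_np (pthread_t threadid, void **thread_return,
                      const struct timespec *abstime)
{
  return __pthread_timedjoin_ex (threadid, thread_return, abstime, true);
}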
Diffstat (limited to 'nptl/pthread_join_common.c')
-rw-r--r--  nptl/pthread_join_common.c  115
1 file changed, 115 insertions, 0 deletions
diff --git a/nptl/pthread_join_common.c b/nptl/pthread_join_common.c
new file mode 100644
index 0000000000..59c54457b6
--- /dev/null
+++ b/nptl/pthread_join_common.c
@@ -0,0 +1,115 @@
+/* Common definition for pthread_{timed,try}join{_np}.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include "pthreadP.h"
+#include <atomic.h>
+#include <stap-probe.h>
+
+static void
+cleanup (void *arg)
+{
+ /* If we already changed the waiter ID, reset it. The call cannot
+ fail for any reason but the thread not having done that yet so
+ there is no reason for a loop. */
+ struct pthread *self = THREAD_SELF;
+ atomic_compare_exchange_weak_acquire (&arg, &self, NULL);
+}
+
+int
+__pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
+ const struct timespec *abstime, bool block)
+{
+ struct pthread *pd = (struct pthread *) threadid;
+
+ /* Make sure the descriptor is valid. */
+ if (INVALID_NOT_TERMINATED_TD_P (pd))
+ /* Not a valid thread handle. */
+ return ESRCH;
+
+ /* Is the thread joinable? */
+ if (IS_DETACHED (pd))
+ /* We cannot wait for the thread. */
+ return EINVAL;
+
+ struct pthread *self = THREAD_SELF;
+ int result = 0;
+
+ LIBC_PROBE (pthread_join, 1, threadid);
+
+ if ((pd == self
+ || (self->joinid == pd
+ && (pd->cancelhandling
+ & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
+ | TERMINATED_BITMASK)) == 0))
+ && !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
+ /* This is a deadlock situation. The threads are waiting for each
+ other to finish. Note that this is a "may" error. To be 100%
+ sure we catch this error we would have to lock the data
+ structures but it is not necessary. In the unlikely case that
+ two threads are really caught in this situation they will
+ deadlock. It is the programmer's problem to figure this
+ out. */
+ return EDEADLK;
+
+ /* Wait for the thread to finish. If it is already locked something
+ is wrong. There can only be one waiter. */
+ else if (__glibc_unlikely (atomic_compare_exchange_weak_acquire (&pd->joinid,
+ &self,
+ NULL)))
+ /* There is already somebody waiting for the thread. */
+ return EINVAL;
+
+ if (block)
+ {
+ /* During the wait we change to asynchronous cancellation. If we
+ are cancelled the thread we are waiting for must be marked as
+ un-wait-ed for again. */
+ pthread_cleanup_push (cleanup, &pd->joinid);
+
+ int oldtype = CANCEL_ASYNC ();
+
+ if (abstime != NULL)
+ result = lll_timedwait_tid (pd->tid, abstime);
+ else
+ lll_wait_tid (pd->tid);
+
+ CANCEL_RESET (oldtype);
+
+ pthread_cleanup_pop (0);
+ }
+
+ if (__glibc_likely (result == 0))
+ {
+ /* We mark the thread as terminated and as joined. */
+ pd->tid = -1;
+
+ /* Store the return value if the caller is interested. */
+ if (thread_return != NULL)
+ *thread_return = pd->result;
+
+ /* Free the TCB. */
+ __free_tcb (pd);
+ }
+ else
+ pd->joinid = NULL;
+
+ LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd->result);
+
+ return result;
+}
+hidden_def (__pthread_timedjoin_ex)
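
For context, a small caller exercising the consolidated entry points shows the
user-visible error handling this code has to provide.  This test program is not
part of the patch; the worker thread and its sleep timings are made up for
illustration, so the EBUSY and ETIMEDOUT results depend on scheduling.  Build
with -pthread.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static void *
worker (void *arg)
{
  /* Sleep long enough that the non-blocking and timed joins below can
     observe a still-running thread.  */
  sleep (2);
  return (void *) 42L;
}

int
main (void)
{
  pthread_t th;
  void *res;
  int err;

  pthread_create (&th, NULL, worker, NULL);

  /* Non-blocking join: the worker is still running, so EBUSY is expected.  */
  err = pthread_tryjoin_np (th, &res);
  printf ("tryjoin while running: %s\n", strerror (err));

  /* Timed join with an absolute CLOCK_REALTIME deadline that is too short:
     ETIMEDOUT is expected.  */
  struct timespec abstime;
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 1;
  err = pthread_timedjoin_np (th, &res, &abstime);
  printf ("timedjoin (1s): %s\n", strerror (err));

  /* Joining the calling thread itself is reported as EDEADLK.  */
  err = pthread_join (pthread_self (), &res);
  printf ("self join: %s\n", strerror (err));

  /* A plain blocking join eventually succeeds and returns the result.  */
  err = pthread_join (th, &res);
  printf ("join: %s, result %ld\n", strerror (err), (long) res);

  return 0;
}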