path: root/nptl/pthread_spin_lock.c
/* pthread_spin_lock -- lock a spin lock.  Generic version.
   Copyright (C) 2012-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <atomic.h>
#include "pthreadP.h"

/* A machine-specific version can define SPIN_LOCK_READS_BETWEEN_CMPXCHG
   to the number of plain reads that it's optimal to spin on between uses
   of atomic_compare_and_exchange_val_acq.  If spinning forever is optimal
   then use -1.  If no plain reads here would ever be optimal, use 0.  */
#ifndef SPIN_LOCK_READS_BETWEEN_CMPXCHG
# warning machine-dependent file should define SPIN_LOCK_READS_BETWEEN_CMPXCHG
# define SPIN_LOCK_READS_BETWEEN_CMPXCHG 1000
#endif
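
/* As an illustration (values invented here, not taken from any real
   port), a machine-specific sysdeps file could override the default
   along these lines:

     #define SPIN_LOCK_READS_BETWEEN_CMPXCHG 100    spin on 100 plain
                                                    reads per cmpxchg
     #define SPIN_LOCK_READS_BETWEEN_CMPXCHG -1     spin on plain reads
                                                    forever
     #define SPIN_LOCK_READS_BETWEEN_CMPXCHG 0      no plain reads; go
                                                    straight to cmpxchg  */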

int
pthread_spin_lock (pthread_spinlock_t *lock)
{
  /* atomic_exchange usually takes fewer instructions than
     atomic_compare_and_exchange.  On the other hand,
     atomic_compare_and_exchange potentially generates less bus traffic
     when the lock is locked.
     We assume that the first try will usually succeed, so we use
     atomic_exchange; for the subsequent tries we use
     atomic_compare_and_exchange.  */
  if (atomic_exchange_acq (lock, 1) == 0)
    return 0;

  do
    {
      /* The lock is contended and we need to wait.  Going straight back
	 to cmpxchg is not a good idea on many targets, as that will force
	 expensive memory synchronization among processors and penalize
	 other running threads.
	 On the other hand, we do want to update memory state on the local
	 core once in a while, to avoid spinning indefinitely until some
	 event happens to update local memory as a side effect.  */
      if (SPIN_LOCK_READS_BETWEEN_CMPXCHG >= 0)
	{
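	  /* Plain (non-atomic) reads of *lock are sufficient here:
	     pthread_spinlock_t is a volatile int in glibc, so the
	     compiler cannot hoist the load out of the loop.  */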
	  int wait = SPIN_LOCK_READS_BETWEEN_CMPXCHG;

	  while (*lock != 0 && wait > 0)
	    --wait;
	}
      else
	{
	  while (*lock != 0)
	    ;
	}
    }
  while (atomic_compare_and_exchange_val_acq (lock, 1, 0) != 0);

  return 0;
}
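
/* Caller-side usage sketch, using the standard POSIX spin lock API
   (shown for illustration only):

     pthread_spinlock_t lock;
     pthread_spin_init (&lock, PTHREAD_PROCESS_PRIVATE);
     pthread_spin_lock (&lock);
     ... critical section ...
     pthread_spin_unlock (&lock);
     pthread_spin_destroy (&lock);  */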