summaryrefslogtreecommitdiff
path: root/viengoos/mutex.h
blob: 245cb3a8a0e4369c8aa0a161e630ffe177d23840 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
/* mutex.h - Small, simple LIFO mutex implementation.
   Copyright (C) 2007 Free Software Foundation, Inc.
   Written by Neal H. Walfield <neal@gnu.org>.

   This file is part of the GNU Hurd.

   GNU Hurd is free software: you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   GNU Hurd is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with GNU Hurd.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _MUTEX_H
#define _MUTEX_H

#ifdef USE_L4
# include <l4/thread.h>
#endif
#include <atomic.h>
#include <assert.h>
#include <hurd/lock.h>

#ifdef USE_L4
typedef l4_thread_id_t ss_mutex_t;
#else
typedef int ss_mutex_t;
#endif

/* Used by the atomic operations.  */
extern void abort (void);

/* Acquire LOCK, blocking until it is available.  CALLER and LINE
   identify the call site for the lock trace.  On L4, *LOCK holds the
   owner's thread id (l4_nilthread when free); waiters sleep in
   __ss_lock_wait and are woken by the unlocker.  */
static inline void
ss_mutex_lock (__const char *caller, int line, ss_mutex_t *lock)
{
#ifdef USE_L4
  l4_thread_id_t owner;

  for (;;)
    {
      /* Unconditionally install our id; the previous value tells us
	 whether the lock was free.  */
      owner = atomic_exchange_acq (lock, l4_myself ());
      if (owner == l4_nilthread)
	{
	  ss_mutex_trace_add (SS_MUTEX_LOCK, caller, line, lock);
	  return;
	}

      ss_mutex_trace_add (SS_MUTEX_LOCK_WAIT, caller, line, lock);

      /* Relocking a mutex we already hold would deadlock; dump the
	 trace first to aid debugging, then assert.  */
      if (owner == l4_myself ())
	ss_lock_trace_dump (lock);
      assert (owner != l4_myself ());

      __ss_lock_wait (l4_anylocalthread);
    }
#else
# warning Unimplemented on this platform.
#endif
}

/* Call-site wrapper: captures __func__ and __LINE__ for the trace.  */
#define ss_mutex_lock(__sml_lockp)					\
  do									\
    {									\
      debug (5, "ss_mutex_lock (%p)", __sml_lockp);			\
      ss_mutex_lock (__func__, __LINE__, __sml_lockp);			\
    }									\
  while (0)

/* Release LOCK.  CALLER and LINE identify the call site for the lock
   trace.  On L4, if another thread stored its id in *LOCK while we
   held the mutex, that thread is sleeping in __ss_lock_wait and must
   be woken.  */
static inline void
ss_mutex_unlock (__const char *caller, int line, ss_mutex_t *lock)
{
#ifdef USE_L4
  l4_thread_id_t waiter;

  /* NOTE(review): acquire semantics on an unlock path looks
     suspicious — release semantics would be the conventional
     ordering; confirm against the atomic.h primitives available.  */
  waiter = atomic_exchange_acq (lock, l4_nilthread);
  ss_mutex_trace_add (SS_MUTEX_UNLOCK, caller, line, lock);
  if (waiter == l4_myself ())
    /* *LOCK still held our own id, so no one queued behind us.  */
    return;

  /* A nil value here means the mutex was not locked — double
     unlock.  Dump the trace to aid debugging, then assert.  */
  if (waiter == l4_nilthread)
    ss_lock_trace_dump (lock);
  assert (waiter != l4_nilthread);

  /* Signal the waiter.  */
  __ss_lock_wakeup (waiter);
#else
# warning Unimplemented on this platform.
#endif
}

/* Call-site wrapper: captures __func__ and __LINE__ for the trace.  */
#define ss_mutex_unlock(__smu_lockp)					\
  do									\
    {									\
      debug (5, "ss_mutex_unlock (%p)", __smu_lockp);			\
      ss_mutex_unlock (__func__, __LINE__, __smu_lockp);		\
    }									\
  while (0)

/* Try to acquire LOCK without blocking.  CALLER and LINE identify the
   call site for the lock trace.  Returns true if the lock was
   acquired, false if it was already held.  */
static inline bool
ss_mutex_trylock (__const char *caller, int line, ss_mutex_t *lock)
{
#ifdef USE_L4
  l4_thread_id_t owner;

  /* Install our id only if the lock is currently free; the return
     value is the previous owner.  */
  owner = atomic_compare_and_exchange_val_acq (lock, l4_myself (),
					       l4_nilthread);
  if (owner == l4_nilthread)
    {
      ss_mutex_trace_add (SS_MUTEX_TRYLOCK, caller, line, lock);
      return true;
    }

  /* Tracing of failed trylocks is deliberately disabled: they are
     frequent and would flood the trace buffer.  */
  // ss_mutex_trace_add (SS_MUTEX_TRYLOCK_BLOCKED, caller, line, lock);

  return false;
#else
# warning Unimplemented on this platform.
  return true;
#endif
}

/* Call-site wrapper: captures __func__ and __LINE__ for the trace and
   logs the result.  */
#define ss_mutex_trylock(__sml_lockp)					\
  ({									\
    bool __sml_r = ss_mutex_trylock (__func__, __LINE__, __sml_lockp);	\
    debug (5, "ss_mutex_trylock (%p) -> %s",				\
	   __sml_lockp, __sml_r ? "t" : "f");				\
    __sml_r;								\
  })

#endif