/*
 * Copyright (c) 2017 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * Real-time mutual exclusion locks.
 *
 * A real-time mutex is similar to a regular mutex, except priority
 * inheritance is unconditionally enabled.
 */

#ifndef _KERN_RTMUTEX_H
#define _KERN_RTMUTEX_H

#include <assert.h>
#include <stdint.h>

#include <kern/error.h>
#include <kern/macros.h>
#include <kern/rtmutex_i.h>
#include <kern/rtmutex_types.h>

struct rtmutex;

#define rtmutex_assert_locked(rtmutex) assert((rtmutex)->owner != 0)

/*
 * Initialize a real-time mutex.
 */
static inline void
rtmutex_init(struct rtmutex *rtmutex)
{
    rtmutex->owner = 0;
}

/*
 * Attempt to lock the given real-time mutex.
 *
 * This function may not sleep.
 *
 * Return 0 on success, ERROR_BUSY if the mutex is already locked.
 */
static inline int
rtmutex_trylock(struct rtmutex *rtmutex)
{
    uintptr_t prev_owner;

    prev_owner = rtmutex_lock_fast(rtmutex);

    if (unlikely(prev_owner != 0)) {
        return ERROR_BUSY;
    }

    return 0;
}
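
/*
 * Example: opportunistically taking the mutex from code that may not sleep,
 * falling back to other work when it's busy. This is only a usage sketch;
 * example_lock, example_do_work and example_defer_work are placeholder
 * names, not part of this interface.
 *
 *  int error;
 *
 *  error = rtmutex_trylock(&example_lock);
 *
 *  if (error) {
 *      example_defer_work();
 *  } else {
 *      example_do_work();
 *      rtmutex_unlock(&example_lock);
 *  }
 */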

/*
 * Lock a real-time mutex.
 *
 * If the mutex is already locked, the calling thread sleeps until the
 * mutex is unlocked, and its priority is propagated as needed to prevent
 * unbounded priority inversion.
 *
 * A mutex can only be locked once.
 */
static inline void
rtmutex_lock(struct rtmutex *rtmutex)
{
    uintptr_t prev_owner;

    prev_owner = rtmutex_lock_fast(rtmutex);

    if (unlikely(prev_owner != 0)) {
        rtmutex_lock_slow(rtmutex);
    }
}

/*
 * Lock a real-time mutex, with a sleep bounded by the given time, in ticks.
 *
 * Same as rtmutex_lock(), except that the calling thread doesn't wait for
 * the mutex past the given time boundary.
 *
 * Return 0 on success, an error code if the mutex couldn't be acquired
 * before the time boundary.
 */
static inline int
rtmutex_timedlock(struct rtmutex *rtmutex, uint64_t ticks)
{
    uintptr_t prev_owner;

    prev_owner = rtmutex_lock_fast(rtmutex);

    if (unlikely(prev_owner != 0)) {
        return rtmutex_timedlock_slow(rtmutex, ticks);
    }

    return 0;
}
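
/*
 * Example: bounded wait for the mutex. This is only a usage sketch;
 * example_lock, example_deadline_ticks and example_shared_counter are
 * placeholder names, and the exact error returned on expiration is
 * determined by rtmutex_timedlock_slow().
 *
 *  int error;
 *
 *  error = rtmutex_timedlock(&example_lock, example_deadline_ticks);
 *
 *  if (error) {
 *      return error;
 *  }
 *
 *  example_shared_counter++;
 *  rtmutex_unlock(&example_lock);
 */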

/*
 * Unlock a real-time mutex.
 *
 * The mutex must be locked, and must have been locked by the calling
 * thread.
 */
static inline void
rtmutex_unlock(struct rtmutex *rtmutex)
{
    uintptr_t prev_owner;

    prev_owner = rtmutex_unlock_fast(rtmutex);

    if (unlikely(prev_owner & RTMUTEX_CONTENDED)) {
        rtmutex_unlock_slow(rtmutex);
    }
}
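
/*
 * Example: protecting a critical section. The mutex must be initialized
 * with rtmutex_init() before first use. This is only a usage sketch;
 * example_lock and example_counter are placeholder names.
 *
 *  static struct rtmutex example_lock;
 *  static unsigned long example_counter;
 *
 *  rtmutex_init(&example_lock);
 *
 *  rtmutex_lock(&example_lock);
 *  example_counter++;
 *  rtmutex_unlock(&example_lock);
 */
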
#endif /* _KERN_RTMUTEX_H */