summaryrefslogtreecommitdiff
path: root/kern/mutex/mutex_adaptive.c
blob: ffc47169f51fd52807de803f9a0d079b5f2aecf1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
/*
 * Copyright (c) 2017 Agustina Arzille.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <kern/atomic.h>
#include <kern/mutex.h>
#include <kern/mutex_types.h>
#include <kern/sleepq.h>
#include <kern/thread.h>
#include <machine/cpu.h>

/*
 * Extract the owning thread from an owner word.
 *
 * The low bit of the owner word is the contention flag; everything
 * above it is the owning thread pointer (NULL when unowned).
 */
static struct thread *
mutex_adaptive_get_thread(uintptr_t owner)
{
    uintptr_t addr;

    addr = owner & ~((uintptr_t)MUTEX_ADAPTIVE_CONTENDED);
    return (struct thread *)addr;
}

/*
 * Atomically set the contention flag in the owner word.
 *
 * Release ordering makes the caller's prior writes visible to any
 * thread that subsequently observes the flag with acquire semantics.
 */
static void
mutex_adaptive_set_contended(struct mutex *mtx)
{
    atomic_or(&mtx->owner, MUTEX_ADAPTIVE_CONTENDED, ATOMIC_RELEASE);
}

/*
 * Check whether the thread encoded in the given owner word still owns
 * the mutex, ignoring the contention flag on both sides.
 *
 * The relaxed load is sufficient here: callers only use the result to
 * decide whether to keep spinning/waiting, and re-check in a loop.
 */
static inline bool
mutex_adaptive_is_owner(struct mutex *mutex, uintptr_t owner)
{
    uintptr_t current = atomic_load(&mutex->owner, ATOMIC_RELAXED);

    return mutex_adaptive_get_thread(current)
           == mutex_adaptive_get_thread(owner);
}

/*
 * Slow path of the adaptive mutex lock operation.
 *
 * Called once the fast path (an uncontended compare-and-swap, presumably
 * in the header — not visible in this file) has failed. The calling
 * thread marks the mutex contended, then repeatedly attempts to install
 * itself as owner; while another thread holds the mutex, it adaptively
 * either busy-waits (owner currently running on a CPU) or sleeps on the
 * mutex sleep queue (owner not running).
 *
 * NOTE(review): assert() is used below but <assert.h> isn't among the
 * includes visible in this chunk — verify it's pulled in transitively.
 */
void
mutex_adaptive_lock_slow(struct mutex *mutex)
{
    uintptr_t self, owner;
    struct sleepq *sleepq;
    unsigned long flags;

    self = (uintptr_t)thread_self();

    /* Lending the sleep queue also serializes waiters on this mutex. */
    sleepq = sleepq_lend(mutex, false, &flags);

    mutex_adaptive_set_contended(mutex);

    for (;;) {
        /*
         * Try to take ownership: succeeds only if the owner word is
         * exactly "contended, no owner". On success, the stored value
         * keeps the contended bit set since this thread can't know yet
         * whether other waiters remain (resolved below).
         */
        owner = atomic_cas_acquire(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED,
                                   self | MUTEX_ADAPTIVE_CONTENDED);
        /* The contended bit was set above and is never cleared while
           waiters may exist, so it must still be observed here. */
        assert(owner & MUTEX_ADAPTIVE_CONTENDED);

        if (mutex_adaptive_get_thread(owner) == NULL) {
            /* The CAS succeeded : this thread is now the owner. */
            break;
        }

        /*
         * The owner may not return from the unlock function if a thread is
         * spinning on it.
         */
        while (mutex_adaptive_is_owner(mutex, owner)) {
            if (thread_is_running(mutex_adaptive_get_thread(owner))) {
                /* Owner is on a CPU: spin, it should release soon. */
                cpu_pause();
            } else {
                /* Owner is blocked: sleep until signalled by unlock. */
                sleepq_wait(sleepq, "mutex");
            }
        }
    }

    /*
     * A potentially spinning thread wouldn't be accounted in the sleep queue,
     * but the only potentially spinning thread is the new owner.
     */
    if (sleepq_empty(sleepq)) {
        /* No waiters left: clear the contended bit so the next unlock
           can take the fast path. Relaxed is enough while holding the
           sleep queue, which orders access with other waiters. */
        atomic_store(&mutex->owner, self, ATOMIC_RELAXED);
    }

    sleepq_return(sleepq, flags);
}

/*
 * Slow path of the adaptive mutex unlock operation.
 *
 * Entered when the owner word had the contended bit set at unlock time.
 * Ownership is released immediately; the rest of the function only
 * wakes up a sleeping waiter, if any, while carefully avoiding a
 * use-after-return race with a waiter that is spinning on this thread
 * (see mutex_adaptive_lock_slow).
 */
void
mutex_adaptive_unlock_slow(struct mutex *mutex)
{
    uintptr_t owner;
    struct sleepq *sleepq;
    unsigned long flags;

    /*
     * Release ownership but keep the contended bit, publishing the
     * critical section's writes to the acquiring CAS in the lock
     * slow path. From this point on, a spinning waiter can take the
     * mutex without any further action from this thread.
     */
    atomic_store(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED, ATOMIC_RELEASE);

    for (;;) {
        owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);

        /*
         * This only happens if another thread was able to become the new
         * owner, in which case that thread isn't spinning on the current
         * thread, i.e. there is no need for an additional reference.
         */
        if (owner != MUTEX_ADAPTIVE_CONTENDED) {
            break;
        }

        /*
         * Avoid contending with incoming threads that are about to spin/wait
         * on the mutex. This is particularly expensive with queued locks.
         *
         * Also, this call returns NULL if another thread is currently spinning
         * on the current thread, in which case the latter doesn't return,
         * averting the need for an additional reference.
         */
        sleepq = sleepq_tryacquire(mutex, false, &flags);

        if (sleepq != NULL) {
            /* Wake one sleeping waiter; it will retry the acquire CAS. */
            sleepq_signal(sleepq);
            sleepq_release(sleepq, flags);
            break;
        }

        /*
         * Acquiring the sleep queue may fail because of contention on
         * unrelated objects. Retry.
         */
    }
}