/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	/*
	 * COMPARE AND SWAP: atomically compare "old" with *lock.
	 * If they are equal, store "new" in *lock; if not, load the
	 * current value of *lock into "old".  Either way the value
	 * that *lock held when the instruction executed is returned,
	 * so the caller tests for success by comparing the return
	 * value against the "old" it passed in.
	 */
	asm volatile ("cs %0,%3,0(%4)"
		      : "=d" (old), "=m" (*lock)
		      : "0" (old), "d" (new), "a" (lock), "m" (*lock)
		      : "cc", "memory" );
	return old;
}
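
/*
 * For readers who do not know the CS instruction: in portable C the
 * routine above behaves like the sketch below, except that CS does
 * the whole thing atomically (illustrative only, not part of this
 * header):
 *
 *	unsigned int cs_sketch(volatile unsigned int *lock,
 *			       unsigned int old, unsigned int new)
 *	{
 *		unsigned int prev = *lock;
 *		if (prev == old)
 *			*lock = new;
 *		return prev;
 *	}
 */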

/*
 * Simple spin lock operations.  There are two variants, one that
 * clears IRQs on the local processor and one that does not.
 *
 * We make no fairness assumptions; fair locks would have a cost.
 */

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} __attribute__ ((aligned (4))) spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
#define spin_lock_init(lp)	do { (lp)->lock = 0; } while(0)
#define spin_unlock_wait(lp)	do { barrier(); } while(((volatile spinlock_t *)(lp))->lock)
#define spin_is_locked(x)	((x)->lock != 0)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc);
extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc);

static inline void _raw_spin_lock(spinlock_t *lp)
{
	/* The lock value is the caller's return address with bit 0
	 * set, so it is always nonzero and identifies the lock
	 * holder when debugging. */
	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);

	if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
		_raw_spin_lock_wait(lp, pc);
}
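
/*
 * The wait/retry helpers declared above live out of line in the s390
 * lib code.  A minimal sketch of what _raw_spin_lock_wait has to do,
 * assuming a plain spin with no backoff (the real implementation may
 * differ):
 *
 *	void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
 *	{
 *		do {
 *			while (lp->lock != 0)
 *				;
 *		} while (_raw_compare_and_swap(&lp->lock, 0, pc) != 0);
 *	}
 */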

static inline int _raw_spin_trylock(spinlock_t *lp)
{
	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);

	/* One immediate attempt; any retries are deferred to the
	 * out-of-line helper. */
	if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
		return 1;
	return _raw_spin_trylock_retry(lp, pc);
}

static inline void _raw_spin_unlock(spinlock_t *lp)
{
	/* CS rather than a plain store, so the unlock is itself a
	 * serializing operation. */
	_raw_compare_and_swap(&lp->lock, lp->lock, 0);
}
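
/*
 * Typical use of the raw primitives (illustrative; in real code these
 * are reached through the generic spin_lock()/spin_unlock() wrappers):
 *
 *	static spinlock_t mylock = SPIN_LOCK_UNLOCKED;
 *
 *	_raw_spin_lock(&mylock);
 *	(critical section)
 *	_raw_spin_unlock(&mylock);
 */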
		
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to take an
 * irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile unsigned int lock;
	volatile unsigned long owner_pc;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
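
/*
 * Lock word encoding, as implied by the operations below: bit 31
 * (0x80000000) is the write-lock bit, bits 0-30 count the active
 * readers.  For example:
 *
 *	0x00000000	unlocked
 *	0x00000003	held by three readers
 *	0x80000000	held by one writer
 */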

#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

/**
 * read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(rwlock_t *lp);
extern int _raw_read_trylock_retry(rwlock_t *lp);
extern void _raw_write_lock_wait(rwlock_t *lp);
extern int _raw_write_trylock_retry(rwlock_t *lp);

static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	/* The expected value has the write-lock bit masked off, so
	 * the CS fails if a writer holds the lock (or if we raced
	 * with another update) and we take the slow path. */
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned int old, cmp;

	old = rw->lock;
	/* Standard CS retry loop: keep trying to decrement the
	 * reader count until the swap succeeds without interference. */
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}

static inline void _raw_write_lock(rwlock_t *rw)
{
	/* A writer gets in only when the lock word is zero, that is,
	 * no readers and no other writer. */
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	/* Swap the write-lock bit back to zero. */
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
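
/*
 * The "mixing" described in the NOTE above, sketched with the generic
 * wrappers built on these primitives (illustrative only):
 *
 *	static rwlock_t mylock = RW_LOCK_UNLOCKED;
 *	unsigned long flags;
 *
 *	Readers, including those running in interrupt context, can use
 *	the plain forms:
 *
 *		read_lock(&mylock);
 *		(read-side critical section)
 *		read_unlock(&mylock);
 *
 *	Any writer must use the irq-safe forms:
 *
 *		write_lock_irqsave(&mylock, flags);
 *		(write-side critical section)
 *		write_unlock_irqrestore(&mylock, flags);
 */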

static inline int _raw_read_trylock(rwlock_t *rw)
{
	unsigned int old;
	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int _raw_write_trylock(rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}
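
/*
 * Trylock usage sketch (illustrative): make one attempt and branch on
 * the result instead of spinning.
 *
 *	if (_raw_write_trylock(&mylock)) {
 *		(the lock was free and is now write-held)
 *		_raw_write_unlock(&mylock);
 *	}
 */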

#endif /* __ASM_SPINLOCK_H */