1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
|
/* client-inhibit.c - Inhibit RPCs on a capability client.
Copyright (C) 2004 Free Software Foundation, Inc.
Written by Marcus Brinkmann <marcus@gnu.org>
This file is part of the GNU Hurd.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#if HAVE_CONFIG_H
#include <config.h>
#endif
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "cap-server-intern.h"
/* Inhibit all RPCs on the capability client CLIENT (which must not
   be locked) in the capability bucket BUCKET.  On success, CLIENT's
   state is _HURD_CAP_STATE_RED and all RPCs on it are blocked until
   _hurd_cap_client_resume is called.

   Returns 0 on success, or the error from hurd_cond_wait (meaning
   this thread was canceled while waiting) on failure.

   You _must_ follow up with a hurd_cap_client_resume operation, and
   hold at least one reference to the object continuously until you
   did so.  */
error_t
_hurd_cap_client_inhibit (hurd_cap_bucket_t bucket, _hurd_cap_client_t client)
{
  error_t err;

  /* First take the bucket-wide lock for conditions on capability
     client states.  Lock order throughout this function is
     client_cond_lock before client->lock.  */
  pthread_mutex_lock (&bucket->client_cond_lock);

  /* Then lock the client to check its state.  */
  pthread_mutex_lock (&client->lock);

  /* First wait until any other inhibitor has resumed the capability
     client.  This ensures that capability client inhibitions are
     fully serialized (per capability client).  */
  while (client->state != _HURD_CAP_STATE_GREEN)
    {
      /* Drop the client lock before sleeping; hurd_cond_wait
	 releases and reacquires client_cond_lock itself.  */
      pthread_mutex_unlock (&client->lock);
      err = hurd_cond_wait (&bucket->client_cond,
			    &bucket->client_cond_lock);
      if (err)
	{
	  /* We have been canceled.  Nothing to undo: we never made
	     any state change of our own.  */
	  pthread_mutex_unlock (&bucket->client_cond_lock);
	  return err;
	}
      pthread_mutex_lock (&client->lock);
    }

  /* Now it is our turn to inhibit the capability client.  Record our
     own thread so the pending-RPC scan below can exempt us.  */
  client->cond_waiter = pthread_self ();

  if (_hurd_cap_client_cond_busy (client))
    {
      _hurd_cap_list_item_t pending_rpc = client->pending_rpcs;

      /* There are still pending RPCs (beside us).  Cancel them,
	 sparing only our own thread.  */
      while (pending_rpc)
	{
	  if (pending_rpc->thread != client->cond_waiter)
	    pthread_cancel (pending_rpc->thread);

	  pending_rpc = pending_rpc->next;
	}

      /* Indicate that we would like to know when they have gone:
	 YELLOW tells the last departing RPC to flip the state to RED
	 and signal us.  */
      client->state = _HURD_CAP_STATE_YELLOW;

      /* The last one will shut the door.  */
      do
	{
	  pthread_mutex_unlock (&client->lock);
	  err = hurd_cond_wait (&bucket->client_cond,
				&bucket->client_cond_lock);
	  if (err)
	    {
	      /* We have been canceled ourselves.  Give up: restore
		 GREEN so that other queued inhibitors are not blocked
		 forever.  NOTE(review): this write happens without
		 client->lock held (it was dropped above; only
		 client_cond_lock is held here), unlike every other
		 state access in this function — presumably safe under
		 the module's locking discipline, but verify against
		 the state readers elsewhere in this module.  */
	      client->state = _HURD_CAP_STATE_GREEN;
	      pthread_mutex_unlock (&bucket->client_cond_lock);
	      return err;
	    }
	  pthread_mutex_lock (&client->lock);
	}
      while (client->state != _HURD_CAP_STATE_RED);
    }
  else
    /* No pending RPCs beside us: we can shut the door ourselves.  */
    client->state = _HURD_CAP_STATE_RED;

  /* Now all pending RPCs have been canceled and are completed (except
     us), and all incoming RPCs are inhibited.  */
  pthread_mutex_unlock (&client->lock);
  pthread_mutex_unlock (&bucket->client_cond_lock);

  return 0;
}
/* Resume RPCs on the capability client CLIENT in the bucket BUCKET
   and wake-up all waiters.  This undoes _hurd_cap_client_inhibit by
   returning CLIENT to the _HURD_CAP_STATE_GREEN state.  */
void
_hurd_cap_client_resume (hurd_cap_bucket_t bucket, _hurd_cap_client_t client)
{
  pthread_mutex_lock (&bucket->client_cond_lock);
  pthread_mutex_lock (&bucket->lock);

  /* NOTE(review): the state is written here under bucket->lock,
     while _hurd_cap_client_inhibit reads it under client->lock —
     presumably intentional per the module's locking discipline;
     confirm against the other state accessors.  */
  client->state = _HURD_CAP_STATE_GREEN;

  /* Broadcast the change to all potential waiters.  */
  pthread_cond_broadcast (&bucket->client_cond);

  pthread_mutex_unlock (&bucket->lock);
  pthread_mutex_unlock (&bucket->client_cond_lock);
}
/* End RPCs on the capability client CLIENT in the bucket BUCKET and
   wake-up all waiters.  Unlike _hurd_cap_client_resume, this moves
   CLIENT to the terminal _HURD_CAP_STATE_BLACK state; it is intended
   only for clients whose task has died (see the note below).  */
void
_hurd_cap_client_end (hurd_cap_bucket_t bucket, _hurd_cap_client_t client)
{
  pthread_mutex_lock (&bucket->client_cond_lock);
  pthread_mutex_lock (&bucket->lock);

  client->state = _HURD_CAP_STATE_BLACK;

  /* Broadcast the change to all potential waiters.  Even though the
     task is dead now, there is a race condition where we will process
     one spurious incoming RPC which is blocked on the inhibited
     state.  So we wake up such threads, they will then go away
     quickly.

     Note that this does not work reliably for still living clients:
     They may bombard us with RPCs and thus keep the reference count
     of the client in the bucket table above 0 all the time, even in
     the _HURD_CAP_STATE_BLACK state.  This is the reason that this
     interface is only for internal use (by
     _hurd_cap_client_death).  */
  pthread_cond_broadcast (&bucket->client_cond);

  pthread_mutex_unlock (&bucket->lock);
  pthread_mutex_unlock (&bucket->client_cond_lock);
}
|