/*
 * Copyright (c) 2011-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
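
/*
 * Proxy up-call handling for NFS server sockets.
 *
 * Rather than running nfsrv_rcv() directly from the socket up-call (on the
 * interface thread), incoming up-calls are queued to a small set of worker
 * threads that dequeue the arguments and call nfsrv_rcv().  See the comment
 * above nfsrv_uc_proxy() below.
 */
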
#include <stdint.h>
#include <sys/param.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <kern/debug.h>
#include <kern/thread.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>

#ifdef NFS_UC_DEBUG
#define DPRINT(fmt, ...) printf(fmt,## __VA_ARGS__)
#else
#define DPRINT(fmt, ...)
#endif

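/*
 * Per-socket up-call state, hung off the nfsrv_sock (slp->ns_ua) and passed
 * as the argument to the socket up-call.  While queued, it sits on one of
 * the per-thread up-call queues selected by nua_qi.
 */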
struct nfsrv_uc_arg {
	TAILQ_ENTRY(nfsrv_uc_arg) nua_svcq;
	socket_t nua_so;
	struct nfsrv_sock *nua_slp;
	int nua_waitflag; /* Should always be MBUF_DONTWAIT */
	uint32_t nua_flags;
	uint32_t nua_qi;
};
#define NFS_UC_QUEUED 0x0001

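/*
 * Sockets are hashed by their nfsrv_sock pointer onto one of the up-call
 * queues below (the low bits are shifted off since the pointers are
 * aligned), so a given socket's up-calls are always serviced by the same
 * thread.  Note the modulus is the number of threads that actually started,
 * not NFS_UC_HASH_SZ.
 */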
#define NFS_UC_HASH_SZ 7
#define NFS_UC_HASH(x) ((((uint32_t)(uintptr_t)(x)) >> 3) % nfsrv_uc_thread_count)

TAILQ_HEAD(nfsrv_uc_q, nfsrv_uc_arg);

static struct nfsrv_uc_queue {
	lck_mtx_t *ucq_lock;
	struct nfsrv_uc_q ucq_queue[1];
	thread_t ucq_thd;
	uint32_t ucq_flags;
} nfsrv_uc_queue_tbl[NFS_UC_HASH_SZ];
#define NFS_UC_QUEUE_SLEEPING 0x0001

static lck_grp_t *nfsrv_uc_group;
static lck_mtx_t *nfsrv_uc_shutdown_lock;
static volatile int nfsrv_uc_shutdown = 0;
static int32_t nfsrv_uc_thread_count;

extern kern_return_t thread_terminate(thread_t);

#ifdef NFS_UC_Q_DEBUG
int nfsrv_uc_use_proxy = 1;
uint32_t nfsrv_uc_queue_limit;
uint32_t nfsrv_uc_queue_max_seen;
volatile uint32_t nfsrv_uc_queue_count;
#endif

/*
 * Thread that dequeues up-calls and runs the nfsrv_rcv routine
 */
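/*
 * Each thread owns one entry of nfsrv_uc_queue_tbl[]: it sleeps until
 * nfsrv_uc_proxy() queues work for it (or shutdown is signaled), pops one
 * nfsrv_uc_arg at a time under ucq_lock, and hands it to nfsrv_rcv()
 * outside the lock.
 */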
static void
nfsrv_uc_thread(void *arg, wait_result_t wr __unused)
{
	int qi = (int)(uintptr_t)arg;
	int error;
	struct nfsrv_uc_arg *ep = NULL;
	struct nfsrv_uc_queue *myqueue = &nfsrv_uc_queue_tbl[qi];

	DPRINT("nfsrv_uc_thread %d started\n", qi);
	while (!nfsrv_uc_shutdown) {
		lck_mtx_lock(myqueue->ucq_lock);

		while (!nfsrv_uc_shutdown && TAILQ_EMPTY(myqueue->ucq_queue)) {
			myqueue->ucq_flags |= NFS_UC_QUEUE_SLEEPING;
			error = msleep(myqueue, myqueue->ucq_lock, PSOCK, "nfsd_upcall_handler", NULL);
			myqueue->ucq_flags &= ~NFS_UC_QUEUE_SLEEPING;
			if (error) {
				printf("nfsrv_uc_thread received error %d\n", error);
			}
		}
		if (nfsrv_uc_shutdown) {
			lck_mtx_unlock(myqueue->ucq_lock);
			break;
		}

		ep = TAILQ_FIRST(myqueue->ucq_queue);
		DPRINT("nfsrv_uc_thread:%d dequeue %p from %p\n", qi, ep, myqueue);

		TAILQ_REMOVE(myqueue->ucq_queue, ep, nua_svcq);

		ep->nua_flags &= ~NFS_UC_QUEUED;

		lck_mtx_unlock(myqueue->ucq_lock);

#ifdef NFS_UC_Q_DEBUG
		OSDecrementAtomic(&nfsrv_uc_queue_count);
#endif

		DPRINT("calling nfsrv_rcv for %p\n", (void *)ep->nua_slp);
		nfsrv_rcv(ep->nua_so, (void *)ep->nua_slp, ep->nua_waitflag);
	}

	lck_mtx_lock(nfsrv_uc_shutdown_lock);
	nfsrv_uc_thread_count--;
	wakeup(&nfsrv_uc_thread_count);
	lck_mtx_unlock(nfsrv_uc_shutdown_lock);

	thread_terminate(current_thread());
}

/*
 * Dequeue a closed nfsrv_sock, if needed, from the up-call queue.
 * Called from nfsrv_zapsock.
 */
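/*
 * When the socket was still queued, its up-call argument (slp->ns_ua) is
 * also freed here under the queue lock, since a queued entry can race with
 * nfsrv_uc_thread.
 */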
void
nfsrv_uc_dequeue(struct nfsrv_sock *slp)
{
	struct nfsrv_uc_arg *ap = slp->ns_ua;
	struct nfsrv_uc_queue *myqueue;

	/*
	 * We assume that the socket up-calls have been stopped and the socket
	 * is shutting down, so there is no need to acquire the lock just to
	 * check that the flag is cleared.
	 */
	if (ap == NULL || (ap->nua_flags & NFS_UC_QUEUED) == 0)
		return;
	myqueue = &nfsrv_uc_queue_tbl[ap->nua_qi];
	/* If we're queued we might race with nfsrv_uc_thread */
	lck_mtx_lock(myqueue->ucq_lock);
	if (ap->nua_flags & NFS_UC_QUEUED) {
		printf("nfsrv_uc_dequeue remove %p\n", ap);
		TAILQ_REMOVE(myqueue->ucq_queue, ap, nua_svcq);
		ap->nua_flags &= ~NFS_UC_QUEUED;
#ifdef NFS_UC_Q_DEBUG
		OSDecrementAtomic(&nfsrv_uc_queue_count);
#endif
	}
	FREE(slp->ns_ua, M_TEMP);
	slp->ns_ua = NULL;
	lck_mtx_unlock(myqueue->ucq_lock);
}

/*
 * Allocate and initialize globals for nfsrv_sock up-call support.
 */
void
nfsrv_uc_init(void)
{
	int i;

	nfsrv_uc_group = lck_grp_alloc_init("nfs_upcall_locks", LCK_GRP_ATTR_NULL);
	for (i = 0; i < NFS_UC_HASH_SZ; i++) {
		TAILQ_INIT(nfsrv_uc_queue_tbl[i].ucq_queue);
		nfsrv_uc_queue_tbl[i].ucq_lock = lck_mtx_alloc_init(nfsrv_uc_group, LCK_ATTR_NULL);
		nfsrv_uc_queue_tbl[i].ucq_thd = THREAD_NULL;
		nfsrv_uc_queue_tbl[i].ucq_flags = 0;
	}
	nfsrv_uc_shutdown_lock = lck_mtx_alloc_init(nfsrv_uc_group, LCK_ATTR_NULL);
}

/*
 * Start up-call threads to service nfsrv_sock(s).
 * Called from the first call to nfsrv_uc_addsock.
 */
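/*
 * Waits for any previous shutdown to finish (each exiting up-call thread
 * wakes &nfsrv_uc_thread_count) before spawning new threads.  The number of
 * threads that actually start becomes the modulus used by NFS_UC_HASH().
 */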
static void
nfsrv_uc_start(void)
{
	int32_t i;
	int error;

#ifdef NFS_UC_Q_DEBUG
	if (!nfsrv_uc_use_proxy)
		return;
#endif
	DPRINT("nfsrv_uc_start\n");

	/* Wait until previous shutdown finishes */
	lck_mtx_lock(nfsrv_uc_shutdown_lock);
	while (nfsrv_uc_shutdown || nfsrv_uc_thread_count > 0)
		msleep(&nfsrv_uc_thread_count, nfsrv_uc_shutdown_lock, PSOCK, "nfsd_upcall_shutdown_wait", NULL);

	/* Start up-call threads */
	for (i = 0; i < NFS_UC_HASH_SZ; i++) {
		error = kernel_thread_start(nfsrv_uc_thread, (void *)(uintptr_t)i, &nfsrv_uc_queue_tbl[nfsrv_uc_thread_count].ucq_thd);
		if (!error) {
			nfsrv_uc_thread_count++;
		} else {
			printf("nfsd: Could not start nfsrv_uc_thread: %d\n", error);
		}
	}
	if (nfsrv_uc_thread_count == 0) {
		printf("nfsd: Could not start nfsd proxy up-call service. Falling back\n");
		goto out;
	}

out:
#ifdef NFS_UC_Q_DEBUG
	nfsrv_uc_queue_count = 0;
	nfsrv_uc_queue_max_seen = 0;
#endif
	lck_mtx_unlock(nfsrv_uc_shutdown_lock);
}

/*
 * Stop the up-call threads.
 * Called from nfsrv_uc_cleanup.
 */
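/*
 * Shutdown handshake: set nfsrv_uc_shutdown, wake each queue so its thread
 * notices, wait for nfsrv_uc_thread_count to drop to zero, then drop the
 * thread references and clear the shutdown flag so the service can be
 * restarted.
 */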
static void
nfsrv_uc_stop(void)
{
	int32_t i;
	int32_t thread_count = nfsrv_uc_thread_count;

	DPRINT("Entering nfsrv_uc_stop\n");

	/* Signal up-call threads to stop */
	nfsrv_uc_shutdown = 1;
	for (i = 0; i < thread_count; i++) {
		lck_mtx_lock(nfsrv_uc_queue_tbl[i].ucq_lock);
		wakeup(&nfsrv_uc_queue_tbl[i]);
		lck_mtx_unlock(nfsrv_uc_queue_tbl[i].ucq_lock);
	}

	/* Wait until they are done shutting down */
	lck_mtx_lock(nfsrv_uc_shutdown_lock);
	while (nfsrv_uc_thread_count > 0)
		msleep(&nfsrv_uc_thread_count, nfsrv_uc_shutdown_lock, PSOCK, "nfsd_upcall_shutdown_stop", NULL);

	/* Deallocate old threads; nfsrv_uc_thread_count is zero by now, so use the saved count */
	for (i = 0; i < thread_count; i++) {
		if (nfsrv_uc_queue_tbl[i].ucq_thd != THREAD_NULL)
			thread_deallocate(nfsrv_uc_queue_tbl[i].ucq_thd);
		nfsrv_uc_queue_tbl[i].ucq_thd = THREAD_NULL;
	}

	/* Enable restarting */
	nfsrv_uc_shutdown = 0;
	lck_mtx_unlock(nfsrv_uc_shutdown_lock);
}

/*
 * Shutdown up-calls for nfsrv_socks.
 * Make sure nothing is queued on the up-call queues, then shut down
 * the up-call threads.
 * Called from nfssvc_cleanup.
 */
void
nfsrv_uc_cleanup(void)
{
	int i;

	DPRINT("Entering nfsrv_uc_cleanup\n");

	/*
	 * Everything should be dequeued at this point, or will be as sockets
	 * are closed, but to be safe we'll make sure.
	 */
	for (i = 0; i < NFS_UC_HASH_SZ; i++) {
		struct nfsrv_uc_queue *queue = &nfsrv_uc_queue_tbl[i];

		lck_mtx_lock(queue->ucq_lock);
		while (!TAILQ_EMPTY(queue->ucq_queue)) {
			struct nfsrv_uc_arg *ep = TAILQ_FIRST(queue->ucq_queue);
			TAILQ_REMOVE(queue->ucq_queue, ep, nua_svcq);
			ep->nua_flags &= ~NFS_UC_QUEUED;
		}
		lck_mtx_unlock(queue->ucq_lock);
	}

	nfsrv_uc_stop();
}

/*
 * This is the NFS up-call routine for server sockets.
 * We used to set nfsrv_rcv as the up-call routine, but that turned out to
 * be too much work for the interface thread, so we just queue the
 * arguments that we would have gotten for nfsrv_rcv and let a worker
 * thread dequeue them and pass them on to nfsrv_rcv.
 */
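/*
 * nfsrv_uc_addsock() registers this routine with sock_setupcall(), passing
 * the socket's nfsrv_uc_arg.  When the socket layer invokes the up-call we
 * record the socket and wait flag, append the argument to its hash queue
 * (unless it is already queued), and wake that queue's worker thread.
 */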
static void
nfsrv_uc_proxy(socket_t so, void *arg, int waitflag)
{
	struct nfsrv_uc_arg *uap = (struct nfsrv_uc_arg *)arg;
	int qi;
	struct nfsrv_uc_queue *myqueue;

	if (uap == NULL)
		return;	/* Nothing to do without an up-call argument */

	qi = uap->nua_qi;
	myqueue = &nfsrv_uc_queue_tbl[qi];

	lck_mtx_lock(myqueue->ucq_lock);
	DPRINT("nfsrv_uc_proxy called for %p (%p)\n", uap, uap->nua_slp);
	DPRINT("\tUp-call queued on %d for wakeup of %p\n", qi, myqueue);
	if (uap->nua_flags & NFS_UC_QUEUED) {
		lck_mtx_unlock(myqueue->ucq_lock);
		return;	/* Already queued */
	}

	uap->nua_so = so;
	uap->nua_waitflag = waitflag;

	TAILQ_INSERT_TAIL(myqueue->ucq_queue, uap, nua_svcq);

	uap->nua_flags |= NFS_UC_QUEUED;
	if (myqueue->ucq_flags & NFS_UC_QUEUE_SLEEPING)
		wakeup(myqueue);

#ifdef NFS_UC_Q_DEBUG
	{
		uint32_t count = OSIncrementAtomic(&nfsrv_uc_queue_count);

		/* This is a bit racy but it's just for debug */
		if (count > nfsrv_uc_queue_max_seen)
			nfsrv_uc_queue_max_seen = count;

		if (nfsrv_uc_queue_limit && count > nfsrv_uc_queue_limit) {
			panic("nfsd up-call queue limit exceeded\n");
		}
	}
#endif
	lck_mtx_unlock(myqueue->ucq_lock);
}

/*
 * Set the up-call routine on the socket associated with the passed-in
 * nfsrv_sock.
 * Assumes nfsd_mutex is held.
 */
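/*
 * If the proxy threads are running, a per-socket nfsrv_uc_arg is allocated
 * and nfsrv_uc_proxy is installed as the socket up-call; otherwise we fall
 * back to calling nfsrv_rcv directly from the socket up-call, as was done
 * before the proxy threads existed.
 */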
void
nfsrv_uc_addsock(struct nfsrv_sock *slp, int start)
{
	int on = 1;
	struct nfsrv_uc_arg *arg;

	if (start && nfsrv_uc_thread_count == 0)
		nfsrv_uc_start();

	/*
	 * We don't take a lock since, once we're up, nfsrv_uc_thread_count
	 * does not change until shutdown, and by then we should not be adding
	 * sockets that generate up-calls.
	 */
	if (nfsrv_uc_thread_count) {
		MALLOC(arg, struct nfsrv_uc_arg *, sizeof (struct nfsrv_uc_arg), M_TEMP, M_WAITOK | M_ZERO);
		if (arg == NULL)
			goto direct;

		slp->ns_ua = arg;
		arg->nua_slp = slp;
		arg->nua_qi = NFS_UC_HASH(slp);

		sock_setupcall(slp->ns_so, nfsrv_uc_proxy, arg);
	} else {
direct:
		slp->ns_ua = NULL;
		DPRINT("setting nfsrv_rcv up-call\n");
		sock_setupcall(slp->ns_so, nfsrv_rcv, slp);
	}

	/* just playin' it safe */
	sock_setsockopt(slp->ns_so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));

	return;
}