1 | /* | |
2 | * Copyright (c) 2007-2020 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #include <nfs/nfs_conf.h> | |
30 | #if CONFIG_NFS | |
31 | ||
32 | /************* | |
33 | * These functions implement RPCSEC_GSS security for the NFS client and server. | |
34 | * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5 | |
35 | * protection as described in Internet RFC 2203 and 2623. | |
36 | * | |
37 | * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful. | |
38 | * It requires the client and server negotiate a secure connection as part of a | |
39 | * security context. The context state is maintained in client and server structures. | |
40 | * On the client side, each user of an NFS mount is assigned their own context, | |
41 | * identified by UID, on their first use of the mount, and it persists until the | |
42 | * unmount or until the context is renewed. Each user context has a corresponding | |
43 | * server context which the server maintains until the client destroys it, or | |
44 | * until the context expires. | |
45 | * | |
46 | * The client and server contexts are set up dynamically. When a user attempts | |
47 | * to send an NFS request, if there is no context for the user, then one is | |
48 | * set up via an exchange of NFS null procedure calls as described in RFC 2203. | |
49 | * During this exchange, the client and server pass a security token that is | |
50 | * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate | |
51 | * the user to the server (and vice-versa). The client and server also receive | |
52 | * a unique session key that can be used to digitally sign the credentials and | |
53 | * verifier or optionally to provide data integrity and/or privacy. | |
54 | * | |
55 | * Once the context is complete, the client and server enter a normal data | |
56 | * exchange phase - beginning with the NFS request that prompted the context | |
57 | * creation. During this phase, the client's RPC header contains an RPCSEC_GSS | |
58 | * credential and verifier, and the server returns a verifier as well. | |
59 | * For simple authentication, the verifier contains a signed checksum of the | |
60 | * RPC header, including the credential. The server's verifier has a signed | |
61 | * checksum of the current sequence number. | |
62 | * | |
63 | * Each client call contains a sequence number that nominally increases by one | |
64 | * on each request. The sequence number is intended to prevent replay attacks. | |
65 | * Since the protocol can be used over UDP, there is some allowance for | |
66 | * out-of-sequence requests, so the server checks whether the sequence numbers | |
67 | * are within a sequence "window". If a sequence number is outside the lower | |
68 | * bound of the window, the server silently drops the request. This has some | |
69 | * implications for retransmission. If a request needs to be retransmitted, the | |
70 | * client must bump the sequence number even if the request XID is unchanged. | |
71 | * | |
72 | * When the NFS mount is unmounted, the client sends a "destroy" credential | |
73 | * to delete the server's context for each user of the mount. Since it's | |
74 | * possible for the client to crash or disconnect without sending the destroy | |
75 | * message, the server has a thread that reaps contexts that have been idle | |
76 | * too long. | |
77 | */ | |
78 | ||
79 | #include <stdint.h> | |
80 | #include <sys/param.h> | |
81 | #include <sys/systm.h> | |
82 | #include <sys/proc.h> | |
83 | #include <sys/kauth.h> | |
84 | #include <sys/kernel.h> | |
85 | #include <sys/mount_internal.h> | |
86 | #include <sys/vnode.h> | |
87 | #include <sys/ubc.h> | |
88 | #include <sys/malloc.h> | |
89 | #include <sys/kpi_mbuf.h> | |
90 | #include <sys/ucred.h> | |
91 | ||
92 | #include <kern/host.h> | |
93 | #include <kern/task.h> | |
94 | #include <libkern/libkern.h> | |
95 | ||
96 | #include <mach/task.h> | |
97 | #include <mach/host_special_ports.h> | |
98 | #include <mach/host_priv.h> | |
99 | #include <mach/thread_act.h> | |
100 | #include <mach/mig_errors.h> | |
101 | #include <mach/vm_map.h> | |
102 | #include <vm/vm_map.h> | |
103 | #include <vm/vm_kern.h> | |
104 | #include <gssd/gssd_mach.h> | |
105 | ||
106 | #include <nfs/rpcv2.h> | |
107 | #include <nfs/nfsproto.h> | |
108 | #include <nfs/nfs.h> | |
109 | #include <nfs/nfsnode.h> | |
110 | #include <nfs/nfs_gss.h> | |
111 | #include <nfs/nfsmount.h> | |
112 | #include <nfs/xdr_subs.h> | |
113 | #include <nfs/nfsm_subs.h> | |
114 | #include <nfs/nfs_gss.h> | |
115 | #include <mach_assert.h> | |
116 | #include <kern/assert.h> | |
117 | ||
#define ASSERT(EX) assert(EX)

/* Max number of times we retry the Mach upcall to gssd before giving up */
#define NFS_GSS_MACH_MAX_RETRIES 3

#define NFS_GSS_DBG(...) NFS_DBG(NFS_FAC_GSS, 7, ## __VA_ARGS__)
#define NFS_GSS_ISDBG (NFS_DEBUG_FACILITY & NFS_FAC_GSS)


#if CONFIG_NFS_SERVER
/* Hash table of server-side GSS contexts and the lock protecting it */
u_long nfs_gss_svc_ctx_hash;
struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
static LCK_GRP_DECLARE(nfs_gss_svc_grp, "rpcsec_gss_svc");
static LCK_MTX_DECLARE(nfs_gss_svc_ctx_mutex, &nfs_gss_svc_grp);
/* Idle time (seconds) before a server context is reaped; sysctl-tunable */
uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
/* Context TTL in nanoseconds; never less than twice the pending-context window */
#define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
#endif /* CONFIG_NFS_SERVER */

#if CONFIG_NFS_CLIENT
/* Lock group for per-context mutexes on the client side */
LCK_GRP_DECLARE(nfs_gss_clnt_grp, "rpcsec_gss_clnt");
#endif /* CONFIG_NFS_CLIENT */

#define KRB5_MAX_MIC_SIZE 128
/* DER-encoded OID for the Kerberos v5 GSS mechanism (1.2.840.113554.1.2.2) */
uint8_t krb5_mech_oid[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
/* Scratch zero bytes used to emit XDR padding */
static uint8_t xdrpad[] = { 0x00, 0x00, 0x00, 0x00};
142 | ||
#if CONFIG_NFS_CLIENT
/* Client-side context management (see definitions below) */
static int nfs_gss_clnt_ctx_find(struct nfsreq *);
static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, size_t *);
static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t);
void nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *);
static void nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *);
static int nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *, struct nfs_gss_clnt_ctx **);
static void nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *);
static void nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t);
#endif /* CONFIG_NFS_CLIENT */

#if CONFIG_NFS_SERVER
/* Server-side context management */
static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
static void nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
static void nfs_gss_svc_ctx_timer(void *, void *);
static int nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);

/* This is only used by server code */
static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
#endif /* CONFIG_NFS_SERVER */

/* Mach port and VM helpers shared by client and server */
static void host_release_special_port(mach_port_t);
static mach_port_t host_copy_special_port(mach_port_t);
static void nfs_gss_mach_alloc_buffer(u_char *, size_t, vm_map_copy_t *);
static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);

/* mbuf chain utilities */
static int nfs_gss_mchain_length(mbuf_t);
static int nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);

#if CONFIG_NFS_SERVER
/* Reaper state: timer callout plus context accounting */
thread_call_t nfs_gss_svc_ctx_timer_call;
int nfs_gss_timer_on = 0;
uint32_t nfs_gss_ctx_count = 0;
const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
#endif /* CONFIG_NFS_SERVER */
182 | ||
/*
 * Initialization when NFS starts.
 * Sets up the server-side context hash table and the reaper timer call;
 * there is no client-side state to initialize here.
 */
void
nfs_gss_init(void)
{
#if CONFIG_NFS_SERVER
	/* Hash table of server GSS contexts, keyed by context handle */
	nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);

	/* Deferred call used to reap idle/expired server contexts */
	nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
#endif /* CONFIG_NFS_SERVER */
}
195 | ||
196 | /* | |
197 | * Common RPCSEC_GSS support routines | |
198 | */ | |
199 | ||
200 | static errno_t | |
201 | rpc_gss_prepend_32(mbuf_t *mb, uint32_t value) | |
202 | { | |
203 | int error; | |
204 | uint32_t *data; | |
205 | ||
206 | #if 0 | |
207 | data = mbuf_data(*mb); | |
208 | /* | |
209 | * If a wap token comes back and is not aligned | |
210 | * get a new buffer (which should be aligned) to put the | |
211 | * length in. | |
212 | */ | |
213 | if ((uintptr_t)data & 0x3) { | |
214 | mbuf_t nmb; | |
215 | ||
216 | error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &nmb); | |
217 | if (error) { | |
218 | return error; | |
219 | } | |
220 | mbuf_setnext(nmb, *mb); | |
221 | *mb = nmb; | |
222 | } | |
223 | #endif | |
224 | error = mbuf_prepend(mb, sizeof(uint32_t), MBUF_WAITOK); | |
225 | if (error) { | |
226 | return error; | |
227 | } | |
228 | ||
229 | data = mbuf_data(*mb); | |
230 | *data = txdr_unsigned(value); | |
231 | ||
232 | return 0; | |
233 | } | |
234 | ||
235 | /* | |
236 | * Prepend the sequence number to the xdr encode argumen or result | |
237 | * Sequence number is prepended in its own mbuf. | |
238 | * | |
239 | * On successful return mbp_head will point to the old mbuf chain | |
240 | * prepended with a new mbuf that has the sequence number. | |
241 | */ | |
242 | ||
243 | static errno_t | |
244 | rpc_gss_data_create(mbuf_t *mbp_head, uint32_t seqnum) | |
245 | { | |
246 | int error; | |
247 | mbuf_t mb; | |
248 | struct nfsm_chain nmc; | |
249 | struct nfsm_chain *nmcp = &nmc; | |
250 | uint8_t *data; | |
251 | ||
252 | error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &mb); | |
253 | if (error) { | |
254 | return error; | |
255 | } | |
256 | data = mbuf_data(mb); | |
257 | #if 0 | |
258 | /* Reserve space for prepending */ | |
259 | len = mbuf_maxlen(mb); | |
260 | len = (len & ~0x3) - NFSX_UNSIGNED; | |
261 | printf("%s: data = %p, len = %d\n", __func__, data, (int)len); | |
262 | error = mbuf_setdata(mb, data + len, 0); | |
263 | if (error || mbuf_trailingspace(mb)) { | |
264 | printf("%s: data = %p trailingspace = %d error = %d\n", __func__, mbuf_data(mb), (int)mbuf_trailingspace(mb), error); | |
265 | } | |
266 | #endif | |
267 | /* Reserve 16 words for prepending */ | |
268 | error = mbuf_setdata(mb, data + 16 * sizeof(uint32_t), 0); | |
269 | nfsm_chain_init(nmcp, mb); | |
270 | nfsm_chain_add_32(error, nmcp, seqnum); | |
271 | nfsm_chain_build_done(error, nmcp); | |
272 | if (error) { | |
273 | return EINVAL; | |
274 | } | |
275 | mbuf_setnext(nmcp->nmc_mcur, *mbp_head); | |
276 | *mbp_head = nmcp->nmc_mhead; | |
277 | ||
278 | return 0; | |
279 | } | |
280 | ||
281 | /* | |
282 | * Create an rpc_gss_integ_data_t given an argument or result in mb_head. | |
283 | * On successful return mb_head will point to the rpc_gss_integ_data_t of length len. | |
284 | * Note mb_head will now point to a 4 byte sequence number. len does not include | |
285 | * any extra xdr padding. | |
286 | * Returns 0 on success, else an errno_t | |
287 | */ | |
288 | ||
289 | static errno_t | |
290 | rpc_gss_integ_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len) | |
291 | { | |
292 | uint32_t error; | |
293 | uint32_t major; | |
294 | uint32_t length; | |
295 | gss_buffer_desc mic; | |
296 | struct nfsm_chain nmc; | |
297 | ||
298 | /* Length of the argument or result */ | |
299 | length = nfs_gss_mchain_length(*mb_head); | |
300 | if (len) { | |
301 | *len = length; | |
302 | } | |
303 | error = rpc_gss_data_create(mb_head, seqnum); | |
304 | if (error) { | |
305 | return error; | |
306 | } | |
307 | ||
308 | /* | |
309 | * length is the length of the rpc_gss_data | |
310 | */ | |
311 | length += NFSX_UNSIGNED; /* Add the sequence number to the length */ | |
312 | major = gss_krb5_get_mic_mbuf(&error, ctx, 0, *mb_head, 0, length, &mic); | |
313 | if (major != GSS_S_COMPLETE) { | |
314 | printf("gss_krb5_get_mic_mbuf failed %d\n", error); | |
315 | return error; | |
316 | } | |
317 | ||
318 | error = rpc_gss_prepend_32(mb_head, length); | |
319 | if (error) { | |
320 | return error; | |
321 | } | |
322 | ||
323 | nfsm_chain_dissect_init(error, &nmc, *mb_head); | |
324 | /* Append GSS mic token by advancing rpc_gss_data_t length + NFSX_UNSIGNED (size of the length field) */ | |
325 | nfsm_chain_adv(error, &nmc, length + NFSX_UNSIGNED); | |
326 | nfsm_chain_finish_mbuf(error, &nmc); // Force the mic into its own sub chain. | |
327 | nfsm_chain_add_32(error, &nmc, mic.length); | |
328 | nfsm_chain_add_opaque(error, &nmc, mic.value, mic.length); | |
329 | nfsm_chain_build_done(error, &nmc); | |
330 | gss_release_buffer(NULL, &mic); | |
331 | ||
332 | // printmbuf("rpc_gss_integ_data_create done", *mb_head, 0, 0); | |
333 | assert(nmc.nmc_mhead == *mb_head); | |
334 | ||
335 | return error; | |
336 | } | |
337 | ||
/*
 * Create an rpc_gss_priv_data_t out of the supplied raw arguments or results in mb_head.
 * On successful return mb_head will point to a wrap token of length len.
 * Note len does not include any xdr padding.
 * Returns 0 on success, else an errno_t
 */
static errno_t
rpc_gss_priv_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len)
{
	uint32_t error;
	uint32_t major;
	struct nfsm_chain nmc;
	uint32_t pad;
	uint32_t length;

	/* Prepend the sequence number so the whole rpc_gss_data gets wrapped */
	error = rpc_gss_data_create(mb_head, seqnum);
	if (error) {
		return error;
	}

	/* Wrap (encrypt + integrity-protect) the chain in place */
	length = nfs_gss_mchain_length(*mb_head);
	major = gss_krb5_wrap_mbuf(&error, ctx, 1, 0, mb_head, 0, length, NULL);
	if (major != GSS_S_COMPLETE) {
		return error;
	}

	/* Recompute length: wrapping changed the size of the chain */
	length = nfs_gss_mchain_length(*mb_head);
	if (len) {
		*len = length;
	}
	pad = nfsm_pad(length);

	/* Prepend the opaque length of rep rpc_gss_priv_data */
	error = rpc_gss_prepend_32(mb_head, length);

	if (error) {
		return error;
	}
	if (pad) {
		nfsm_chain_dissect_init(error, &nmc, *mb_head);
		/* Advance the opaque size of length and length data */
		nfsm_chain_adv(error, &nmc, NFSX_UNSIGNED + length);
		nfsm_chain_finish_mbuf(error, &nmc);
		/* Trailing XDR pad bytes go in their own sub-chain */
		nfsm_chain_add_opaque_nopad(error, &nmc, xdrpad, pad);
		nfsm_chain_build_done(error, &nmc);
	}

	return error;
}
387 | ||
388 | #if CONFIG_NFS_CLIENT | |
389 | ||
/*
 * Restore the argument or result from an rpc_gss_integ_data mbuf chain.
 * We have a four byte sequence number, len arguments, and an opaque
 * encoded mic, possibly followed by some pad bytes. The mic and possible
 * pad bytes are on their own sub mbuf chains.
 *
 * On successful return mb_head is the chain of the xdr args or results sans
 * the sequence number and mic and return 0. Otherwise return an errno.
 */
static errno_t
rpc_gss_integ_data_restore(gss_ctx_id_t ctx __unused, mbuf_t *mb_head, size_t len)
{
	mbuf_t mb = *mb_head;
	mbuf_t tail = NULL, next;

	/* Chop of the opaque length and seq number */
	mbuf_adj(mb, 2 * NFSX_UNSIGNED);

	/* should only be one, ... but */
	for (; mb; mb = next) {
		next = mbuf_next(mb);
		if (mbuf_len(mb) == 0) {
			/* Free only this (empty) mbuf, not the rest of the chain */
			mbuf_free(mb);
		} else {
			break;
		}
	}
	*mb_head = mb;

	/*
	 * Walk forward exactly len bytes of payload; the chain is expected
	 * to break on an mbuf boundary at the end of the args/results.
	 */
	for (; mb && len; mb = mbuf_next(mb)) {
		tail = mb;
		if (mbuf_len(mb) <= len) {
			len -= mbuf_len(mb);
		} else {
			/* Payload does not end on an mbuf boundary — malformed */
			return EBADRPC;
		}
	}
	/* drop the mic */
	if (tail) {
		mbuf_setnext(tail, NULL);
		mbuf_freem(mb);
	}

	return 0;
}
436 | ||
/*
 * Restore the argument or result from an rpc_gss_priv_data mbuf chain.
 * mb_head points to the wrap token of length len.
 *
 * On successful return mb_head is our original xdr arg or result and
 * the return value is 0. Otherwise return an errno.
 */
static errno_t
rpc_gss_priv_data_restore(gss_ctx_id_t ctx, mbuf_t *mb_head, size_t len)
{
	uint32_t major, error;
	mbuf_t mb = *mb_head, next;
	size_t plen, length;
	gss_qop_t qop = GSS_C_QOP_REVERSE;

	/* Chop of the opaque length */
	mbuf_adj(mb, NFSX_UNSIGNED);
	/* If we have padding, drop it */
	plen = nfsm_pad(len);
	if (plen) {
		mbuf_t tail = NULL;

		/*
		 * Walk to the end of the wrap token; the pad bytes are
		 * expected to sit on their own sub-chain after it.
		 */
		for (length = 0; length < len && mb; mb = mbuf_next(mb)) {
			tail = mb;
			length += mbuf_len(mb);
		}
		if ((length != len) || (mb == NULL) || (tail == NULL)) {
			return EBADRPC;
		}

		mbuf_freem(mb);
		mbuf_setnext(tail, NULL);
	}

	/* Unwrap (decrypt + verify) the token in place */
	major = gss_krb5_unwrap_mbuf(&error, ctx, mb_head, 0, len, NULL, &qop);
	if (major != GSS_S_COMPLETE) {
		printf("gss_krb5_unwrap_mbuf failed. major = %d minor = %d\n", (int)major, error);
		return error;
	}
	mb = *mb_head;

	/* Drop the sequence number */
	mbuf_adj(mb, NFSX_UNSIGNED);
	/* The seqnum was in its own mbuf, which should now be empty */
	assert(mbuf_len(mb) == 0);

	/* Chop of any empty mbufs */
	for (mb = *mb_head; mb; mb = next) {
		next = mbuf_next(mb);
		if (mbuf_len(mb) == 0) {
			mbuf_free(mb);
		} else {
			break;
		}
	}
	*mb_head = mb;

	return 0;
}
495 | ||
/*
 * Find the context for a particular user.
 *
 * If the context doesn't already exist
 * then create a new context for this user.
 *
 * Note that the code allows superuser (uid == 0)
 * to adopt the context of another user.
 *
 * We'll match on the audit session ids, since those
 * processes will have access to the same credential cache.
 */

/* Pull the audit session id / audit user id straight out of the cred */
#define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
#define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)

/*
 * Compile-time-checked narrowing cast: if 'type' is narrower than the
 * type of 'intval', the divisor becomes 0 and the build fails with a
 * division-by-zero diagnostic.
 */
#define SAFE_CAST_INTTYPE( type, intval ) \
	( (type)(intval)/(sizeof(type) < sizeof(intval) ? 0 : 1) )
514 | ||
515 | uid_t | |
516 | nfs_cred_getasid2uid(kauth_cred_t cred) | |
517 | { | |
518 | uid_t result = SAFE_CAST_INTTYPE(uid_t, kauth_cred_getasid(cred)); | |
519 | return result; | |
520 | } | |
521 | ||
522 | /* | |
523 | * Debugging | |
524 | */ | |
525 | static void | |
526 | nfs_gss_clnt_ctx_dump(struct nfsmount *nmp) | |
527 | { | |
528 | struct nfs_gss_clnt_ctx *cp; | |
529 | ||
530 | lck_mtx_lock(&nmp->nm_lock); | |
531 | NFS_GSS_DBG("Enter\n"); | |
532 | TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { | |
533 | lck_mtx_lock(&cp->gss_clnt_mtx); | |
534 | printf("context %d/%d: refcnt = %d, flags = %x\n", | |
535 | kauth_cred_getasid(cp->gss_clnt_cred), | |
536 | kauth_cred_getauid(cp->gss_clnt_cred), | |
537 | cp->gss_clnt_refcnt, cp->gss_clnt_flags); | |
538 | lck_mtx_unlock(&cp->gss_clnt_mtx); | |
539 | } | |
540 | NFS_GSS_DBG("Exit\n"); | |
541 | lck_mtx_unlock(&nmp->nm_lock); | |
542 | } | |
543 | ||
544 | static char * | |
545 | nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *buf, int len) | |
546 | { | |
547 | char *np; | |
548 | size_t nlen; | |
549 | const char *server = ""; | |
550 | ||
551 | if (nmp && nmp->nm_mountp) { | |
552 | server = vfs_statfs(nmp->nm_mountp)->f_mntfromname; | |
553 | } | |
554 | ||
555 | if (cp == NULL) { | |
556 | snprintf(buf, len, "[%s] NULL context", server); | |
557 | return buf; | |
558 | } | |
559 | ||
560 | if (cp->gss_clnt_principal && !cp->gss_clnt_display) { | |
561 | np = (char *)cp->gss_clnt_principal; | |
562 | nlen = cp->gss_clnt_prinlen; | |
563 | } else { | |
564 | np = cp->gss_clnt_display; | |
565 | nlen = np ? strlen(cp->gss_clnt_display) : 0; | |
566 | } | |
567 | if (nlen) { | |
568 | snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen > INT_MAX ? INT_MAX : (int)nlen, np, | |
569 | kauth_cred_getasid(cp->gss_clnt_cred), | |
570 | kauth_cred_getuid(cp->gss_clnt_cred), | |
571 | cp->gss_clnt_principal ? "" : "[from default cred] "); | |
572 | } else { | |
573 | snprintf(buf, len, "[%s] using default %d/%d ", server, | |
574 | kauth_cred_getasid(cp->gss_clnt_cred), | |
575 | kauth_cred_getuid(cp->gss_clnt_cred)); | |
576 | } | |
577 | return buf; | |
578 | } | |
579 | ||
/* Size of the on-stack buffer used with nfs_gss_clnt_ctx_name() */
#define NFS_CTXBUFSZ 80
/* Format the request's context (or cp if given) into the local CTXBUF */
#define NFS_GSS_CTX(req, cp) nfs_gss_clnt_ctx_name((req)->r_nmp, cp ? cp : (req)->r_gss_ctx, CTXBUF, sizeof(CTXBUF))

/* Dump the mount's context list when GSS debugging is enabled */
#define NFS_GSS_CLNT_CTX_DUMP(nmp) \
	do { \
		if (NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x2)) \
	                nfs_gss_clnt_ctx_dump((nmp)); \
	} while (0)
588 | ||
589 | static int | |
590 | nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2) | |
591 | { | |
592 | if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2)) { | |
593 | return 1; | |
594 | } | |
595 | return 0; | |
596 | } | |
597 | ||
598 | /* | |
599 | * Busy the mount for each principal set on the mount | |
600 | * so that the automounter will not unmount the file | |
601 | * system underneath us. With out this, if an unmount | |
602 | * occurs the principal that is set for an audit session | |
603 | * will be lost and we may end up with a different identity. | |
604 | * | |
605 | * Note setting principals on the mount is a bad idea. This | |
606 | * really should be handle by KIM (Kerberos Identity Management) | |
607 | * so that defaults can be set by service identities. | |
608 | */ | |
609 | ||
610 | static int | |
611 | nfs_gss_clnt_mnt_ref(struct nfsmount *nmp) | |
612 | { | |
613 | int error; | |
614 | vnode_t rvp; | |
615 | ||
616 | if (nmp == NULL || | |
617 | !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) { | |
618 | return EINVAL; | |
619 | } | |
620 | ||
621 | error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL); | |
622 | if (!error) { | |
623 | error = vnode_ref(rvp); | |
624 | vnode_put(rvp); | |
625 | } | |
626 | ||
627 | return error; | |
628 | } | |
629 | ||
630 | /* | |
631 | * Unbusy the mount. See above comment, | |
632 | */ | |
633 | ||
634 | static int | |
635 | nfs_gss_clnt_mnt_rele(struct nfsmount *nmp) | |
636 | { | |
637 | int error; | |
638 | vnode_t rvp; | |
639 | ||
640 | if (nmp == NULL || | |
641 | !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) { | |
642 | return EINVAL; | |
643 | } | |
644 | ||
645 | error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL); | |
646 | if (!error) { | |
647 | vnode_rele(rvp); | |
648 | vnode_put(rvp); | |
649 | } | |
650 | ||
651 | return error; | |
652 | } | |
653 | ||
/* When set, root may co-opt another user's valid context (see below) */
int nfs_root_steals_ctx = 0;

/*
 * Find the client GSS context matching req's credential (by audit
 * session id) and optional principal; create and initialize a new one
 * if no usable context exists.
 *
 * On success req->r_gss_ctx references the context and 0 is returned.
 * Returns NFSERR_EAUTH for a negatively-cached (unexpired invalid)
 * context, or an errno on failure.
 *
 * Locking: nm_lock protects the context list; each context's
 * gss_clnt_mtx protects its flags/refcount and is taken inside nm_lock.
 */
static int
nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, size_t plen, uint32_t nt)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp, *tcp;
	struct nfsreq *treq;
	int error = 0;
	struct timeval now;
	char CTXBUF[NFS_CTXBUFSZ];

	/* Scratch request used solely to unref a stale context at the end */
	treq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
	treq->r_nmp = nmp;

	microuptime(&now);
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
		lck_mtx_lock(&cp->gss_clnt_mtx);
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			/* Context is being torn down; skip it */
			NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
			    NFS_GSS_CTX(req, cp),
			    cp->gss_clnt_refcnt);
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			continue;
		}
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) {
			/* Move-to-front: keep recently used contexts cheap to find */
			if (nmp->nm_gsscl.tqh_first != cp) {
				TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
				TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
			}
			if (principal) {
				/*
				 * If we have a principal, but it does not match the current cred
				 * mark it for removal
				 */
				if (cp->gss_clnt_prinlen != plen || cp->gss_clnt_prinnt != nt ||
				    bcmp(cp->gss_clnt_principal, principal, plen) != 0) {
					cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
					cp->gss_clnt_refcnt++;
					lck_mtx_unlock(&cp->gss_clnt_mtx);
					NFS_GSS_DBG("Marking %s for deletion because %s does not match\n",
					    NFS_GSS_CTX(req, cp), principal);
					NFS_GSS_DBG("len = (%zu,%zu), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen,
					    cp->gss_clnt_prinnt, nt);
					/* Remember it on treq so it gets unref'd/destroyed below */
					treq->r_gss_ctx = cp;
					cp = NULL;
					break;
				}
			}
			if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
				/*
				 * If we're still being used and we're not expired
				 * just return and don't bother gssd again. Note if
				 * gss_clnt_nctime is zero it is about to be set to now.
				 */
				if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec || cp->gss_clnt_nctime == 0) {
					NFS_GSS_DBG("Context %s (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n",
					    NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec);
					lck_mtx_unlock(&cp->gss_clnt_mtx);
					lck_mtx_unlock(&nmp->nm_lock);
					NFS_ZFREE(nfs_req_zone, treq);
					return NFSERR_EAUTH;
				}
				if (cp->gss_clnt_refcnt) {
					struct nfs_gss_clnt_ctx *ncp;
					/*
					 * If this context has references, we can't use it so we mark it for
					 * destruction and create a new context based on this one in the
					 * same manner as renewing one.
					 */
					cp->gss_clnt_flags |= GSS_CTX_DESTROY;
					NFS_GSS_DBG("Context %s has expired but we still have %d references\n",
					    NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt);
					error = nfs_gss_clnt_ctx_copy(cp, &ncp);
					lck_mtx_unlock(&cp->gss_clnt_mtx);
					if (error) {
						lck_mtx_unlock(&nmp->nm_lock);
						NFS_ZFREE(nfs_req_zone, treq);
						return error;
					}
					cp = ncp;
					break;
				} else {
					/* No references: reuse this context, re-initializing it below */
					if (cp->gss_clnt_nctime) {
						nmp->nm_ncentries--;
					}
					lck_mtx_unlock(&cp->gss_clnt_mtx);
					TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
					break;
				}
			}
			/* Found a valid context to return */
			cp->gss_clnt_refcnt++;
			req->r_gss_ctx = cp;
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_ZFREE(nfs_req_zone, treq);
			return 0;
		}
		lck_mtx_unlock(&cp->gss_clnt_mtx);
	}

	if (!cp && nfs_root_steals_ctx && principal == NULL && kauth_cred_getuid(req->r_cred) == 0) {
		/*
		 * If superuser is trying to get access, then co-opt
		 * the first valid context in the list.
		 * XXX Ultimately, we need to allow superuser to
		 * go ahead and attempt to set up its own context
		 * in case one is set up for it.
		 */
		TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
			if (!(cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY))) {
				nfs_gss_clnt_ctx_ref(req, cp);
				lck_mtx_unlock(&nmp->nm_lock);
				NFS_GSS_DBG("Root stole context %s\n", NFS_GSS_CTX(req, NULL));
				NFS_ZFREE(nfs_req_zone, treq);
				return 0;
			}
		}
	}

	NFS_GSS_DBG("Context %s%sfound in Neg Cache @ %ld\n",
	    NFS_GSS_CTX(req, cp),
	    cp == NULL ? " not " : "",
	    cp == NULL ? 0L : cp->gss_clnt_nctime);

	/*
	 * Not found - create a new context
	 */

	if (cp == NULL) {
		MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK | M_ZERO);
		if (cp == NULL) {
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_ZFREE(nfs_req_zone, treq);
			return ENOMEM;
		}
		cp->gss_clnt_cred = req->r_cred;
		kauth_cred_ref(cp->gss_clnt_cred);
		lck_mtx_init(&cp->gss_clnt_mtx, &nfs_gss_clnt_grp, LCK_ATTR_NULL);
		/* Allow an error message to be printed right away (see GSS_PRINT_DELAY) */
		cp->gss_clnt_ptime = now.tv_sec - GSS_PRINT_DELAY;
		if (principal) {
			MALLOC(cp->gss_clnt_principal, uint8_t *, plen + 1, M_TEMP, M_WAITOK | M_ZERO);
			memcpy(cp->gss_clnt_principal, principal, plen);
			cp->gss_clnt_prinlen = plen;
			cp->gss_clnt_prinnt = nt;
			cp->gss_clnt_flags |= GSS_CTX_STICKY;
			/* Busy the mount so the automounter keeps it (see nfs_gss_clnt_mnt_ref) */
			if (!nfs_gss_clnt_mnt_ref(nmp)) {
				cp->gss_clnt_flags |= GSS_CTX_USECOUNT;
			}
		}
	} else {
		uint32_t oldflags = cp->gss_clnt_flags;
		nfs_gss_clnt_ctx_clean(cp);
		if (principal) {
			/*
			 * If we have a principal and we found a matching audit
			 * session, then to get here, the principal had to match.
			 * In walking the context list if it has a principal
			 * or the principal is not set then we mark the context
			 * for destruction and set cp to NULL and we fall to the
			 * if clause above. If the context still has references
			 * again we copy the context which will preserve the principal
			 * and we end up here with the correct principal set.
			 * If we don't have references then the principal must have
			 * matched and we will fall through here.
			 */
			cp->gss_clnt_flags |= GSS_CTX_STICKY;

			/*
			 * We are preserving old flags if it set, and we take a ref if not set.
			 * Also, because of the short circuit we will not take extra refs here.
			 */
			if ((oldflags & GSS_CTX_USECOUNT) || !nfs_gss_clnt_mnt_ref(nmp)) {
				cp->gss_clnt_flags |= GSS_CTX_USECOUNT;
			}
		}
	}

	cp->gss_clnt_thread = current_thread();
	nfs_gss_clnt_ctx_ref(req, cp);
	TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
	lck_mtx_unlock(&nmp->nm_lock);

	error = nfs_gss_clnt_ctx_init_retry(req, cp); // Initialize new context
	if (error) {
		NFS_GSS_DBG("nfs_gss_clnt_ctx_init_retry returned %d for %s\n", error, NFS_GSS_CTX(req, cp));
		nfs_gss_clnt_ctx_unref(req);
	}

	/* Remove any old matching context that had a different principal */
	nfs_gss_clnt_ctx_unref(treq);
	NFS_ZFREE(nfs_req_zone, treq);
	return error;
}
850 | ||
851 | static int | |
852 | nfs_gss_clnt_ctx_find(struct nfsreq *req) | |
853 | { | |
854 | return nfs_gss_clnt_ctx_find_principal(req, NULL, 0, 0); | |
855 | } | |
856 | ||
/*
 * Inserts an RPCSEC_GSS credential into an RPC header.
 * After the credential is inserted, the code continues
 * to build the verifier which contains a signed checksum
 * of the RPC header.
 *
 * Returns 0 on success, or an errno-style error.  May sleep waiting
 * for context setup or for a free slot in the sequence window.
 */

int
nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args)
{
	struct nfs_gss_clnt_ctx *cp;
	uint32_t seqnum = 0;
	uint32_t major;
	uint32_t error = 0;
	int slpflag, recordmark = 0, offset;
	struct gss_seq *gsp;
	gss_buffer_desc mic;

	/* Allow PCATCH (interruptible sleep) only on "intr" mounts with a thread */
	slpflag = (PZERO - 1);
	if (req->r_nmp) {
		slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
		/* TCP transports need a record mark word before the RPC header */
		recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);
	}

retry:
	if (req->r_gss_ctx == NULL) {
		/*
		 * Find the context for this user.
		 * If no context is found, one will
		 * be created.
		 */
		error = nfs_gss_clnt_ctx_find(req);
		if (error) {
			return error;
		}
	}
	cp = req->r_gss_ctx;

	/*
	 * If the context thread isn't null, then the context isn't
	 * yet complete and is for the exclusive use of the thread
	 * doing the context setup. Wait until the context thread
	 * is null.
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
		cp->gss_clnt_flags |= GSS_NEEDCTX;
		/* PDROP: msleep releases gss_clnt_mtx before returning */
		msleep(cp, &cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
		slpflag &= ~PCATCH;
		if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
			return error;
		}
		/* Drop our ref and re-lookup: the context may have been replaced */
		nfs_gss_clnt_ctx_unref(req);
		goto retry;
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);

	if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
		/*
		 * Get a sequence number for this request.
		 * Check whether the oldest request in the window is complete.
		 * If it's still pending, then wait until it's done before
		 * we allocate a new sequence number and allow this request
		 * to proceed.
		 */
		lck_mtx_lock(&cp->gss_clnt_mtx);
		while (win_getbit(cp->gss_clnt_seqbits,
		    ((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
			cp->gss_clnt_flags |= GSS_NEEDSEQ;
			/* PDROP: lock is released during the sleep */
			msleep(cp, &cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
			slpflag &= ~PCATCH;
			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
				return error;
			}
			lck_mtx_lock(&cp->gss_clnt_mtx);
			if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
				/* Renewed while we were waiting */
				lck_mtx_unlock(&cp->gss_clnt_mtx);
				nfs_gss_clnt_ctx_unref(req);
				goto retry;
			}
		}
		seqnum = ++cp->gss_clnt_seqnum;
		win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin);
		lck_mtx_unlock(&cp->gss_clnt_mtx);

		/* Remember this sequence number so the reply verifier can be matched */
		MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK | M_ZERO);
		if (gsp == NULL) {
			return ENOMEM;
		}
		gsp->gss_seqnum = seqnum;
		SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext);
	}

	/* Insert the credential (RFC 2203 rpc_gss_cred_t, version 1) */
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
	nfsm_chain_add_32(error, nmc, 5 * NFSX_UNSIGNED + cp->gss_clnt_handle_len);
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS_VERS_1);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_proc);
	nfsm_chain_add_32(error, nmc, seqnum);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_service);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len);
	if (cp->gss_clnt_handle_len > 0) {
		if (cp->gss_clnt_handle == NULL) {
			return EBADRPC;
		}
		nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);
	}
	if (error) {
		return error;
	}
	/*
	 * Now add the verifier
	 */
	if (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
	    cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) {
		/*
		 * If the context is still being created
		 * then use a null verifier.
		 */
		nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);    // flavor
		nfsm_chain_add_32(error, nmc, 0);               // length
		nfsm_chain_build_done(error, nmc);
		if (!error) {
			nfs_gss_append_chain(nmc, args);
		}
		return error;
	}

	/* Compute the MIC over the RPC header (skipping any TCP record mark) */
	offset = recordmark ? NFSX_UNSIGNED : 0; // record mark
	nfsm_chain_build_done(error, nmc);

	major = gss_krb5_get_mic_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, 0, nmc->nmc_mhead, offset, 0, &mic);
	if (major != GSS_S_COMPLETE) {
		printf("gss_krb5_get_mic_buf failed %d\n", error);
		return error;
	}

	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);      // flavor
	nfsm_chain_add_32(error, nmc, mic.length);      // length
	nfsm_chain_add_opaque(error, nmc, mic.value, mic.length);
	(void)gss_release_buffer(NULL, &mic);
	nfsm_chain_build_done(error, nmc);
	if (error) {
		return error;
	}

	/*
	 * Now we may have to compute integrity or encrypt the call args
	 * per RFC 2203 Section 5.3.2
	 */
	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
		if (args) {
			nfs_gss_append_chain(nmc, args);
		}
		break;
	case RPCSEC_GSS_SVC_INTEGRITY:
		/*
		 * r_gss_arglen is the length of args mbuf going into the routine.
		 * Its used to find the mic if we need to restore the args.
		 */
		/* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_integ_data_t */
		assert(req->r_mrest == args);
		nfsm_chain_finish_mbuf(error, nmc);
		if (error) {
			return error;
		}
		error = rpc_gss_integ_data_create(cp->gss_clnt_ctx_id, &args, seqnum, &req->r_gss_arglen);
		if (error) {
			break;
		}
		/* args now points at the wrapped chain; remember where it starts */
		req->r_mrest = args;
		req->r_gss_argoff = nfsm_chain_offset(nmc);
		nfs_gss_append_chain(nmc, args);
		break;
	case RPCSEC_GSS_SVC_PRIVACY:
		/*
		 * r_gss_arglen is the length of the wrap token sans any padding length.
		 * Its used to find any XDR padding of the wrap token.
		 */
		/* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_priv_data_t */
		assert(req->r_mrest == args);
		nfsm_chain_finish_mbuf(error, nmc);
		if (error) {
			return error;
		}
		error = rpc_gss_priv_data_create(cp->gss_clnt_ctx_id, &args, seqnum, &req->r_gss_arglen);
		if (error) {
			break;
		}
		/* args now points at the encrypted chain; remember where it starts */
		req->r_mrest = args;
		req->r_gss_argoff = nfsm_chain_offset(nmc);
		nfs_gss_append_chain(nmc, args);
		break;
	default:
		return EINVAL;
	}

	return error;
}
1058 | ||
/*
 * When receiving a reply, the client checks the verifier
 * returned by the server. Check that the verifier is the
 * correct type, then extract the sequence number checksum
 * from the token in the credential and compare it with a
 * computed checksum of the sequence number in the request
 * that was sent.
 *
 * On success *accepted_statusp holds the RPC accepted status and,
 * for integrity/privacy services, nmc is repositioned at the start
 * of the verified (or decrypted) results.
 */
int
nfs_gss_clnt_verf_get(
	struct nfsreq *req,
	struct nfsm_chain *nmc,
	uint32_t verftype,
	uint32_t verflen,
	uint32_t *accepted_statusp)
{
	gss_buffer_desc cksum;
	uint32_t seqnum = 0;
	uint32_t major;
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsm_chain nmc_tmp;
	struct gss_seq *gsp;
	uint32_t reslen;
	int error = 0;
	mbuf_t results_mbuf, prev_mbuf, pad_mbuf;
	size_t ressize, offset;

	reslen = 0;
	*accepted_statusp = 0;

	if (cp == NULL) {
		return NFSERR_EAUTH;
	}
	/*
	 * If it's not an RPCSEC_GSS verifier, then it has to
	 * be a null verifier that resulted from either
	 * a CONTINUE_NEEDED reply during context setup or
	 * from the reply to an AUTH_UNIX call from a dummy
	 * context that resulted from a fallback to sec=sys.
	 */
	if (verftype != RPCSEC_GSS) {
		if (verftype != RPCAUTH_NULL) {
			return NFSERR_EAUTH;
		}
		/* A null verifier is only legal before the context is complete */
		if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
			return NFSERR_EAUTH;
		}
		if (verflen > 0) {
			nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
		}
		nfsm_chain_get_32(error, nmc, *accepted_statusp);
		return error;
	}

	/*
	 * If we received an RPCSEC_GSS verifier but the
	 * context isn't yet complete, then it must be
	 * the context complete message from the server.
	 * The verifier will contain an encrypted checksum
	 * of the window but we don't have the session key
	 * yet so we can't decrypt it. Stash the verifier
	 * and check it later in nfs_gss_clnt_ctx_init() when
	 * the context is complete.
	 */
	if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
		if (verflen > KRB5_MAX_MIC_SIZE) {
			return EBADRPC;
		}
		MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK | M_ZERO);
		if (cp->gss_clnt_verf == NULL) {
			return ENOMEM;
		}
		cp->gss_clnt_verflen = verflen;
		nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf);
		nfsm_chain_get_32(error, nmc, *accepted_statusp);
		return error;
	}

	/* Bound the MIC size before allocating a buffer for it */
	if (verflen > KRB5_MAX_MIC_SIZE) {
		return EBADRPC;
	}
	cksum.length = verflen;
	MALLOC(cksum.value, void *, verflen, M_TEMP, M_WAITOK);

	/*
	 * Get the gss mic
	 */
	nfsm_chain_get_opaque(error, nmc, verflen, cksum.value);
	if (error) {
		FREE(cksum.value, M_TEMP);
		goto nfsmout;
	}

	/*
	 * Search the request sequence numbers for this reply, starting
	 * with the most recent, looking for a checksum that matches
	 * the one in the verifier returned by the server.
	 */
	SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
		gss_buffer_desc seqnum_buf;
		uint32_t network_seqnum = htonl(gsp->gss_seqnum);

		seqnum_buf.length = sizeof(network_seqnum);
		seqnum_buf.value = &network_seqnum;
		major = gss_krb5_verify_mic(NULL, cp->gss_clnt_ctx_id, &seqnum_buf, &cksum, NULL);
		if (major == GSS_S_COMPLETE) {
			break;
		}
	}
	FREE(cksum.value, M_TEMP);
	if (gsp == NULL) {
		/* No sent sequence number matched the server's MIC */
		return NFSERR_EAUTH;
	}

	/*
	 * Get the RPC accepted status
	 */
	nfsm_chain_get_32(error, nmc, *accepted_statusp);
	if (*accepted_statusp != RPC_SUCCESS) {
		return 0;
	}

	/*
	 * Now we may have to check integrity or decrypt the results
	 * per RFC 2203 Section 5.3.2
	 */
	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
		/* nothing to do */
		break;
	case RPCSEC_GSS_SVC_INTEGRITY:
		/*
		 * Here's what we expect in the integrity results from RFC 2203:
		 *
		 * - length of seq num + results (4 bytes)
		 * - sequence number (4 bytes)
		 * - results (variable bytes)
		 * - length of checksum token
		 * - checksum of seqnum + results
		 */

		nfsm_chain_get_32(error, nmc, reslen);          // length of results
		if (reslen > NFS_MAXPACKET) {
			error = EBADRPC;
			goto nfsmout;
		}

		/* Advance and fetch the mic */
		nmc_tmp = *nmc;
		nfsm_chain_adv(error, &nmc_tmp, reslen);        // skip over the results
		nfsm_chain_get_32(error, &nmc_tmp, cksum.length);
		if (cksum.length > KRB5_MAX_MIC_SIZE) {
			error = EBADRPC;
			goto nfsmout;
		}
		MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK);
		nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value);
		//XXX chop off the cksum?

		/* Call verify mic */
		offset = nfsm_chain_offset(nmc);
		major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, nmc->nmc_mhead, offset, reslen, &cksum, NULL);
		FREE(cksum.value, M_TEMP);
		if (major != GSS_S_COMPLETE) {
			printf("client results: gss_krb5_verify_mic_mbuf failed %d\n", error);
			error = EBADRPC;
			goto nfsmout;
		}

		/*
		 * Get the sequence number prepended to the results
		 * and compare it against the header.
		 */
		nfsm_chain_get_32(error, nmc, seqnum);
		if (gsp->gss_seqnum != seqnum) {
			error = EBADRPC;
			goto nfsmout;
		}
#if 0
		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
			if (seqnum == gsp->gss_seqnum) {
				break;
			}
		}
		if (gsp == NULL) {
			error = EBADRPC;
			goto nfsmout;
		}
#endif
		break;
	case RPCSEC_GSS_SVC_PRIVACY:
		/*
		 * Here's what we expect in the privacy results:
		 *
		 * opaque encoding of the wrap token
		 * - length of wrap token
		 * - wrap token
		 */
		prev_mbuf = nmc->nmc_mcur;
		nfsm_chain_get_32(error, nmc, reslen);          // length of results
		if (reslen == 0 || reslen > NFS_MAXPACKET) {
			error = EBADRPC;
			goto nfsmout;
		}

		/* Get the wrap token (current mbuf in the chain starting at the current offset) */
		offset = nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur);

		/* split out the wrap token */
		ressize = reslen;
		error = gss_normalize_mbuf(nmc->nmc_mcur, offset, &ressize, &results_mbuf, &pad_mbuf, 0);
		if (error) {
			goto nfsmout;
		}

		/* Any XDR padding was split into its own mbuf; discard it */
		if (pad_mbuf) {
			assert(nfsm_pad(reslen) == mbuf_len(pad_mbuf));
			mbuf_free(pad_mbuf);
		}

		major = gss_krb5_unwrap_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, &results_mbuf, 0, ressize, NULL, NULL);
		if (major) {
			printf("%s unwraped failed %d\n", __func__, error);
			goto nfsmout;
		}

		/* Now replace the wrapped arguments with the unwrapped ones */
		mbuf_setnext(prev_mbuf, results_mbuf);
		nmc->nmc_mcur = results_mbuf;
		nmc->nmc_ptr = mbuf_data(results_mbuf);
		nmc->nmc_left = mbuf_len(results_mbuf);

		/*
		 * Get the sequence number prepended to the results
		 * and compare it against the header
		 */
		nfsm_chain_get_32(error, nmc, seqnum);
		if (gsp->gss_seqnum != seqnum) {
			printf("%s bad seqnum\n", __func__);
			error = EBADRPC;
			goto nfsmout;
		}
#if 0
		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
			if (seqnum == gsp->gss_seqnum) {
				break;
			}
		}
		if (gsp == NULL) {
			error = EBADRPC;
			goto nfsmout;
		}
#endif
		break;
	}
nfsmout:
	return error;
}
1317 | ||
/*
 * An RPCSEC_GSS request with no integrity or privacy consists
 * of just the header mbufs followed by the arg mbufs.
 *
 * However, with integrity or privacy the original mbufs have had
 * mbufs prepended and appended to them, which means we have to do
 * some work to restore the arg mbuf chain to its previous state in
 * case we need to retransmit.
 *
 * The location and length of the args is marked by two fields
 * in the request structure: r_gss_argoff and r_gss_arglen,
 * which are stashed when the NFS request is built.
 *
 * Returns 0 on success or an errno / NFS auth error.
 */
int
nfs_gss_clnt_args_restore(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsm_chain mchain, *nmc = &mchain;
	int error = 0, merr;

	if (cp == NULL) {
		return NFSERR_EAUTH;
	}

	/* A context still being set up has nothing wrapped yet */
	if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0) {
		return ENEEDAUTH;
	}

	/* Nothing to restore for SVC_NONE */
	if (cp->gss_clnt_service == RPCSEC_GSS_SVC_NONE) {
		return 0;
	}

	nfsm_chain_dissect_init(error, nmc, req->r_mhead);      // start at RPC header
	nfsm_chain_adv(error, nmc, req->r_gss_argoff);          // advance to args
	if (error) {
		return error;
	}

	/* Unwrap the args in place, per the service level used to wrap them */
	if (cp->gss_clnt_service == RPCSEC_GSS_SVC_INTEGRITY) {
		error = rpc_gss_integ_data_restore(cp->gss_clnt_ctx_id, &req->r_mrest, req->r_gss_arglen);
	} else {
		error = rpc_gss_priv_data_restore(cp->gss_clnt_ctx_id, &req->r_mrest, req->r_gss_arglen);
	}

	/* Re-link the restored args after the RPC header */
	merr = mbuf_setnext(nmc->nmc_mcur, req->r_mrest); /* Should always succeed */
	assert(merr == 0);

	return error ? error : merr;
}
1368 | ||
/*
 * This function sets up a new context on the client.
 * Context setup alternates upcalls to the gssd with NFS nullproc calls
 * to the server. Each of these calls exchanges an opaque token, obtained
 * via the gssd's calls into the GSS-API on either the client or the server.
 * This cycle of calls ends when the client's upcall to the gssd and the
 * server's response both return GSS_S_COMPLETE. At this point, the client
 * should have its session key and a handle that it can use to refer to its
 * new context on the server.
 *
 * Returns 0 on success; ENEEDAUTH if the caller should retry later
 * (see nfs_gss_clnt_ctx_init_retry); other errors mark the context
 * invalid.
 */
static int
nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsmount *nmp = req->r_nmp;
	gss_buffer_desc cksum, window;
	uint32_t network_seqnum;
	int client_complete = 0;
	int server_complete = 0;
	int error = 0;
	int retrycnt = 0;
	uint32_t major;

	/* Initialize a new client context */

	if (cp->gss_clnt_svcname == NULL) {
		cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp, &cp->gss_clnt_svcnt, &cp->gss_clnt_svcnamlen);
		if (cp->gss_clnt_svcname == NULL) {
			error = NFSERR_EAUTH;
			goto nfsmout;
		}
	}

	cp->gss_clnt_proc = RPCSEC_GSS_INIT;

	/* Map the mount's auth flavor to the RPCSEC_GSS service level */
	cp->gss_clnt_service =
	    req->r_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE :
	    req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
	    req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;

	/*
	 * Now loop around alternating gss_init_sec_context and
	 * gss_accept_sec_context upcalls to the gssd on the client
	 * and server side until the context is complete - or fails.
	 */
	for (;;) {
retry:
		/* Upcall to the gss_init_sec_context in the gssd */
		error = nfs_gss_clnt_gssd_upcall(req, cp, retrycnt);
		if (error) {
			goto nfsmout;
		}

		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			client_complete = 1;
			NFS_GSS_DBG("Client complete\n");
			if (server_complete) {
				break;
			}
		} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
			/*
			 * We may have gotten here because the accept sec context
			 * from the server failed and sent back a GSS token that
			 * encapsulates a kerberos error token per RFC 1964/4121
			 * with a status of GSS_S_CONTINUE_NEEDED. That caused us
			 * to loop to the above up call and received the now
			 * decoded errors.
			 */
			retrycnt++;
			cp->gss_clnt_gssd_flags |= GSSD_RESTART;
			NFS_GSS_DBG("Retrying major = %x minor = %d\n", cp->gss_clnt_major, (int)cp->gss_clnt_minor);
			goto retry;
		}

		/*
		 * Pass the token to the server.
		 */
		error = nfs_gss_clnt_ctx_callserver(req, cp);
		if (error) {
			if (error == ENEEDAUTH &&
			    (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
			    cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT)) {
				/*
				 * We got here because the server had a problem
				 * trying to establish a context and sent back an
				 * indication of a context problem at the rpc sec
				 * layer. Perhaps gss_accept_sec_context succeeded
				 * in user space, but the kernel could not handle
				 * the etype to generate the mic for the verifier
				 * of the rpc_sec window size.
				 */
				retrycnt++;
				cp->gss_clnt_gssd_flags |= GSSD_RESTART;
				NFS_GSS_DBG("Retrying major = %x minor = %d\n", cp->gss_clnt_major, (int)cp->gss_clnt_minor);
				goto retry;
			}
			goto nfsmout;
		}
		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			NFS_GSS_DBG("Server complete\n");
			server_complete = 1;
			if (client_complete) {
				break;
			}
		} else if (cp->gss_clnt_major == GSS_S_CONTINUE_NEEDED) {
			cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;
		} else {
			/* Server didn't like us. Try something else */
			retrycnt++;
			cp->gss_clnt_gssd_flags |= GSSD_RESTART;
			NFS_GSS_DBG("Retrying major = %x minor = %d\n", cp->gss_clnt_major, (int)cp->gss_clnt_minor);
		}
	}

	/*
	 * The context is apparently established successfully
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
	lck_mtx_unlock(&cp->gss_clnt_mtx);
	cp->gss_clnt_proc = RPCSEC_GSS_DATA;

	/*
	 * Verify the stashed context-complete verifier: it is a MIC over
	 * the server's sequence window, which we could not check until
	 * the session key was established.
	 */
	network_seqnum = htonl(cp->gss_clnt_seqwin);
	window.length = sizeof(cp->gss_clnt_seqwin);
	window.value = &network_seqnum;
	cksum.value = cp->gss_clnt_verf;
	cksum.length = cp->gss_clnt_verflen;
	major = gss_krb5_verify_mic((uint32_t *)&error, cp->gss_clnt_ctx_id, &window, &cksum, NULL);
	cp->gss_clnt_verflen = 0;
	FREE(cp->gss_clnt_verf, M_TEMP);
	cp->gss_clnt_verf = NULL;
	if (major != GSS_S_COMPLETE) {
		printf("%s: could not verify window\n", __func__);
		error = NFSERR_EAUTH;
		goto nfsmout;
	}

	/*
	 * Set an initial sequence number somewhat randomized.
	 * Start small so we don't overflow GSS_MAXSEQ too quickly.
	 * Add the size of the sequence window so seqbits arithmetic
	 * doesn't go negative.
	 */
	cp->gss_clnt_seqnum = (random() & 0xffff) + cp->gss_clnt_seqwin;

	/*
	 * Allocate a bitmap to keep track of which requests
	 * are pending within the sequence number window.
	 */
	MALLOC(cp->gss_clnt_seqbits, uint32_t *,
	    nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK | M_ZERO);
	if (cp->gss_clnt_seqbits == NULL) {
		error = NFSERR_EAUTH;
	}

nfsmout:
	/*
	 * If the error is ENEEDAUTH we're not done, so no need
	 * to wake up other threads again. This thread will retry in
	 * the find or renew routines.
	 */
	if (error == ENEEDAUTH) {
		NFS_GSS_DBG("Returning ENEEDAUTH\n");
		return error;
	}

	/*
	 * If there's an error, just mark it as invalid.
	 * It will be removed when the reference count
	 * drops to zero.
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	if (error) {
		cp->gss_clnt_flags |= GSS_CTX_INVAL;
	}

	/*
	 * Wake any threads waiting to use the context
	 */
	cp->gss_clnt_thread = NULL;
	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
		wakeup(cp);
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);

	NFS_GSS_DBG("Returning error = %d\n", error);
	return error;
}
1557 | ||
/*
 * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
 * But if there's a failure in trying to establish the context it keeps
 * retrying at progressively longer intervals in case the failure is
 * due to some transient condition. For instance, the server might be
 * failing the context setup because directory services is not coming
 * up in a timely fashion.
 *
 * Returns 0 once the context is established; otherwise marks the
 * context invalid, wakes any waiters, and returns the error.
 */
static int
nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsmount *nmp = req->r_nmp;
	struct timeval now;
	time_t waituntil;
	int error, slpflag;
	int retries = 0;
	int timeo = NFS_TRYLATERDEL;    // initial backoff, doubled each retry

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
		goto bad;
	}

	/* For an "intr" mount allow a signal to interrupt the retries */
	slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;

	/* ENEEDAUTH means "transient failure, try again after a delay" */
	while ((error = nfs_gss_clnt_ctx_init(req, cp)) == ENEEDAUTH) {
		microuptime(&now);
		waituntil = now.tv_sec + timeo;
		while (now.tv_sec < waituntil) {
			/* Sleep in ~1 second increments, checking for signals */
			tsleep(NULL, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", hz);
			slpflag = 0;
			error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
			if (error) {
				goto bad;
			}
			microuptime(&now);
		}

		retries++;
		/* If it's a soft mount just give up after a while */
		if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (retries > nmp->nm_retry)) {
			error = ETIMEDOUT;
			goto bad;
		}
		/* Exponential backoff, capped at 60 seconds */
		timeo *= 2;
		if (timeo > 60) {
			timeo = 60;
		}
	}

	if (error == 0) {
		return 0;       // success
	}
bad:
	/*
	 * Give up on this context
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	cp->gss_clnt_flags |= GSS_CTX_INVAL;

	/*
	 * Wake any threads waiting to use the context
	 */
	cp->gss_clnt_thread = NULL;
	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
		wakeup(cp);
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);

	return error;
}
1631 | ||
/*
 * Call the NFS server using a null procedure for context setup.
 * Even though it's a null procedure and nominally has no arguments
 * RFC 2203 requires that the GSS-API token be passed as an argument
 * and received as a reply.
 *
 * On success the server's handle, major/minor status, sequence window,
 * and reply token are stored in the context.  The outgoing token buffer
 * (gss_clnt_token) is consumed and freed; a new one is allocated for
 * the server's reply token.
 */
static int
nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status;
	/* Save current status so gssd can expand errors from this exchange */
	uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
	int sz;

	if (nfs_mount_gone(req->r_nmp)) {
		return ENXIO;
	}
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	/* Build the argument: just the opaque GSS token */
	sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen);
	nfsm_chain_build_alloc_init(error, &nmreq, sz);
	nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen);
	if (cp->gss_clnt_tokenlen > 0) {
		nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
	}
	nfsm_chain_build_done(error, &nmreq);
	if (error) {
		goto nfsmout;
	}

	/* Call the server */
	error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred,
	    (req->r_flags & R_OPTMASK), cp, &nmrep, &status);
	/* The outgoing token has been sent; free it regardless of outcome */
	if (cp->gss_clnt_token != NULL) {
		FREE(cp->gss_clnt_token, M_TEMP);
		cp->gss_clnt_token = NULL;
	}
	if (!error) {
		error = status;
	}
	if (error) {
		goto nfsmout;
	}

	/* Get the server's reply */

	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len);
	if (cp->gss_clnt_handle != NULL) {
		FREE(cp->gss_clnt_handle, M_TEMP);
		cp->gss_clnt_handle = NULL;
	}
	/* Reject absent or oversized context handles */
	if (cp->gss_clnt_handle_len > 0 && cp->gss_clnt_handle_len < GSS_MAX_CTX_HANDLE_LEN) {
		MALLOC(cp->gss_clnt_handle, u_char *, cp->gss_clnt_handle_len, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_handle == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_handle_len, cp->gss_clnt_handle);
	} else {
		error = EBADRPC;
	}
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_major);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen);
	if (error) {
		goto nfsmout;
	}
	/* Reject absent or oversized reply tokens */
	if (cp->gss_clnt_tokenlen > 0 && cp->gss_clnt_tokenlen < GSS_MAX_TOKEN_LEN) {
		MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_token == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_tokenlen, cp->gss_clnt_token);
	} else {
		error = EBADRPC;
	}

	/*
	 * Make sure any unusual errors are expanded and logged by gssd
	 */
	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
		printf("nfs_gss_clnt_ctx_callserver: gss_clnt_major = %d\n", cp->gss_clnt_major);
		nfs_gss_clnt_log_error(req, cp, major, minor);
	}

nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	return error;
}
1726 | ||
1727 | /* | |
1728 | * We construct the service principal as a gss hostbased service principal of | |
1729 | * the form nfs@<server>, unless the servers principal was passed down in the | |
1730 | * mount arguments. If the arguments don't specify the service principal, the | |
 * server name is extracted from the location passed in the mount argument if
1732 | * available. Otherwise assume a format of <server>:<path> in the | |
1733 | * mntfromname. We don't currently support url's or other bizarre formats like | |
1734 | * path@server. Mount_url will convert the nfs url into <server>:<path> when | |
1735 | * calling mount, so this works out well in practice. | |
1736 | * | |
1737 | */ | |
1738 | ||
1739 | static uint8_t * | |
1740 | nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, size_t *len) | |
1741 | { | |
1742 | char *svcname, *d, *server; | |
1743 | int lindx, sindx; | |
1744 | ||
1745 | if (nfs_mount_gone(nmp)) { | |
1746 | return NULL; | |
1747 | } | |
1748 | ||
1749 | if (nmp->nm_sprinc) { | |
1750 | *len = strlen(nmp->nm_sprinc) + 1; | |
1751 | MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK); | |
1752 | *nt = GSSD_HOSTBASED; | |
1753 | if (svcname == NULL) { | |
1754 | return NULL; | |
1755 | } | |
1756 | strlcpy(svcname, nmp->nm_sprinc, *len); | |
1757 | ||
1758 | return (uint8_t *)svcname; | |
1759 | } | |
1760 | ||
1761 | *nt = GSSD_HOSTBASED; | |
1762 | if (nmp->nm_locations.nl_numlocs && !(NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x1))) { | |
1763 | lindx = nmp->nm_locations.nl_current.nli_loc; | |
1764 | sindx = nmp->nm_locations.nl_current.nli_serv; | |
1765 | server = nmp->nm_locations.nl_locations[lindx]->nl_servers[sindx]->ns_name; | |
1766 | *len = (uint32_t)strlen(server); | |
1767 | } else { | |
1768 | /* Older binaries using older mount args end up here */ | |
1769 | server = vfs_statfs(nmp->nm_mountp)->f_mntfromname; | |
1770 | NFS_GSS_DBG("nfs getting gss svcname from %s\n", server); | |
1771 | d = strchr(server, ':'); | |
1772 | *len = (uint32_t)(d ? (d - server) : strlen(server)); | |
1773 | } | |
1774 | ||
1775 | *len += 5; /* "nfs@" plus null */ | |
1776 | MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK); | |
1777 | strlcpy(svcname, "nfs", *len); | |
1778 | strlcat(svcname, "@", *len); | |
1779 | strlcat(svcname, server, *len); | |
1780 | NFS_GSS_DBG("nfs svcname = %s\n", svcname); | |
1781 | ||
1782 | return (uint8_t *)svcname; | |
1783 | } | |
1784 | ||
1785 | /* | |
1786 | * Get a mach port to talk to gssd. | |
1787 | * gssd lives in the root bootstrap, so we call gssd's lookup routine | |
1788 | * to get a send right to talk to a new gssd instance that launchd has launched | |
1789 | * based on the cred's uid and audit session id. | |
1790 | */ | |
1791 | ||
1792 | static mach_port_t | |
1793 | nfs_gss_clnt_get_upcall_port(kauth_cred_t credp) | |
1794 | { | |
1795 | mach_port_t gssd_host_port, uc_port = IPC_PORT_NULL; | |
1796 | kern_return_t kr; | |
1797 | au_asid_t asid; | |
1798 | uid_t uid; | |
1799 | ||
1800 | kr = host_get_gssd_port(host_priv_self(), &gssd_host_port); | |
1801 | if (kr != KERN_SUCCESS) { | |
1802 | printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr, kr); | |
1803 | return IPC_PORT_NULL; | |
1804 | } | |
1805 | if (!IPC_PORT_VALID(gssd_host_port)) { | |
1806 | printf("nfs_gss_get_upcall_port: gssd port not valid\n"); | |
1807 | return IPC_PORT_NULL; | |
1808 | } | |
1809 | ||
1810 | asid = kauth_cred_getasid(credp); | |
1811 | uid = kauth_cred_getauid(credp); | |
1812 | if (uid == AU_DEFAUDITID) { | |
1813 | uid = kauth_cred_getuid(credp); | |
1814 | } | |
1815 | kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port); | |
1816 | if (kr != KERN_SUCCESS) { | |
1817 | printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n", kr, kr); | |
1818 | } | |
1819 | host_release_special_port(gssd_host_port); | |
1820 | ||
1821 | return uc_port; | |
1822 | } | |
1823 | ||
1824 | ||
1825 | static void | |
1826 | nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t major, uint32_t minor) | |
1827 | { | |
1828 | #define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK) | |
1829 | struct nfsmount *nmp = req->r_nmp; | |
1830 | char who[] = "client"; | |
1831 | uint32_t gss_error = GETMAJERROR(cp->gss_clnt_major); | |
1832 | const char *procn = "unkown"; | |
1833 | proc_t proc; | |
1834 | pid_t pid = -1; | |
1835 | struct timeval now; | |
1836 | ||
1837 | if (req->r_thread) { | |
1838 | proc = (proc_t)get_bsdthreadtask_info(req->r_thread); | |
1839 | if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) { | |
1840 | proc = NULL; | |
1841 | } | |
1842 | if (proc) { | |
1843 | if (*proc->p_comm) { | |
1844 | procn = proc->p_comm; | |
1845 | } | |
1846 | pid = proc->p_pid; | |
1847 | } | |
1848 | } else { | |
1849 | procn = "kernproc"; | |
1850 | pid = 0; | |
1851 | } | |
1852 | ||
1853 | microuptime(&now); | |
1854 | if ((cp->gss_clnt_major != major || cp->gss_clnt_minor != minor || | |
1855 | cp->gss_clnt_ptime + GSS_PRINT_DELAY < now.tv_sec) && | |
1856 | (nmp->nm_state & NFSSTA_MOUNTED)) { | |
1857 | /* | |
1858 | * Will let gssd do some logging in hopes that it can translate | |
1859 | * the minor code. | |
1860 | */ | |
1861 | if (cp->gss_clnt_minor && cp->gss_clnt_minor != minor) { | |
1862 | (void) mach_gss_log_error( | |
1863 | cp->gss_clnt_mport, | |
1864 | vfs_statfs(nmp->nm_mountp)->f_mntfromname, | |
1865 | kauth_cred_getuid(cp->gss_clnt_cred), | |
1866 | who, | |
1867 | cp->gss_clnt_major, | |
1868 | cp->gss_clnt_minor); | |
1869 | } | |
1870 | gss_error = gss_error ? gss_error : cp->gss_clnt_major; | |
1871 | ||
1872 | /* | |
1873 | *%%% It would be really nice to get the terminal from the proc or auditinfo_addr struct and print that here. | |
1874 | */ | |
1875 | printf("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n", | |
1876 | cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred), | |
1877 | procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor); | |
1878 | cp->gss_clnt_ptime = now.tv_sec; | |
1879 | switch (gss_error) { | |
1880 | case 7: printf("NFS: gssd does not have credentials for session %d/%d, (kinit)?\n", | |
1881 | kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred)); | |
1882 | break; | |
1883 | case 11: printf("NFS: gssd has expired credentals for session %d/%d, (kinit)?\n", | |
1884 | kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred)); | |
1885 | break; | |
1886 | } | |
1887 | } else { | |
1888 | NFS_GSS_DBG("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n", | |
1889 | cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred), | |
1890 | procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor); | |
1891 | } | |
1892 | } | |
1893 | ||
1894 | /* | |
1895 | * Make an upcall to the gssd using Mach RPC | |
1896 | * The upcall is made using a host special port. | |
1897 | * This allows launchd to fire up the gssd in the | |
1898 | * user's session. This is important, since gssd | |
1899 | * must have access to the user's credential cache. | |
1900 | */ | |
static int
nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t retrycnt)
{
	kern_return_t kr;
	gssd_byte_buffer octx = NULL;           /* exported lucid context returned by gssd */
	uint32_t lucidlen = 0;
	void *lucid_ctx_buffer;
	int retry_cnt = 0;                      /* retries after gssd (MIG server) death */
	vm_map_copy_t itoken = NULL;            /* input token, as an out-of-line buffer */
	gssd_byte_buffer otoken = NULL;         /* output token returned by gssd */
	mach_msg_type_number_t otokenlen;
	int error = 0;
	uint8_t *principal = NULL;
	size_t plen = 0;
	int32_t nt = GSSD_STRING_NAME;
	vm_map_copy_t pname = NULL;             /* principal name, out-of-line */
	vm_map_copy_t svcname = NULL;           /* service name, out-of-line */
	char display_name[MAX_DISPLAY_STR] = "";
	uint32_t ret_flags;
	struct nfsmount *nmp = req->r_nmp;
	/* Snapshot pre-call status for rate-limited error logging below */
	uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
	uint32_t selected = (uint32_t)-1;       /* index of the etype gssd ended up using */
	struct nfs_etype etype;

	if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
		return ENXIO;
	}

	if (cp->gss_clnt_gssd_flags & GSSD_RESTART) {
		/* Restarting context setup from scratch: toss any partial state */
		if (cp->gss_clnt_token) {
			FREE(cp->gss_clnt_token, M_TEMP);
		}
		cp->gss_clnt_token = NULL;
		cp->gss_clnt_tokenlen = 0;
		cp->gss_clnt_proc = RPCSEC_GSS_INIT;
		/* Server's handle isn't valid. Don't reuse */
		cp->gss_clnt_handle_len = 0;
		if (cp->gss_clnt_handle != NULL) {
			FREE(cp->gss_clnt_handle, M_TEMP);
			cp->gss_clnt_handle = NULL;
		}
	}

	NFS_GSS_DBG("Retrycnt = %d nm_etype.count = %d\n", retrycnt, nmp->nm_etype.count);
	/* Each retry burns one etype; out of etypes means we give up */
	if (retrycnt >= nmp->nm_etype.count) {
		return EACCES;
	}

	/* Copy the mount etypes to an ordered set of etypes to try */
	etype = nmp->nm_etype;

	/*
	 * If we've already selected an etype, lets put that first in our
	 * array of etypes to try, since overwhelmingly, that is likely
	 * to be the etype we want.
	 */
	if (etype.selected < etype.count) {
		etype.etypes[0] = nmp->nm_etype.etypes[etype.selected];
		for (uint32_t i = 0; i < etype.selected; i++) {
			etype.etypes[i + 1] = nmp->nm_etype.etypes[i];
		}
		for (uint32_t i = etype.selected + 1; i < etype.count; i++) {
			etype.etypes[i] = nmp->nm_etype.etypes[i];
		}
	}

	/* Remove the ones we've already tried (shift left by retrycnt) */
	for (uint32_t i = retrycnt; i < etype.count; i++) {
		etype.etypes[i - retrycnt] = etype.etypes[i];
	}
	etype.count = etype.count - retrycnt;

	NFS_GSS_DBG("etype count = %d preferred etype = %d\n", etype.count, etype.etypes[0]);

	/*
	 * NFS currently only supports default principals or
	 * principals based on the uid of the caller, unless
	 * the principal to use for the mounting cred was specified
	 * in the mount arguments. If the realm to use was specified
	 * then we will send that up as the principal; since the realm is
	 * preceded by an "@", gssd will try to select the default
	 * principal for that realm.
	 */

	if (cp->gss_clnt_principal && cp->gss_clnt_prinlen) {
		principal = cp->gss_clnt_principal;
		plen = cp->gss_clnt_prinlen;
		nt = cp->gss_clnt_prinnt;
	} else if (nmp->nm_principal && IS_VALID_CRED(nmp->nm_mcred) && req->r_cred == nmp->nm_mcred) {
		plen = (uint32_t)strlen(nmp->nm_principal);
		principal = (uint8_t *)nmp->nm_principal;
		cp->gss_clnt_prinnt = nt = GSSD_USER;
	} else if (nmp->nm_realm) {
		plen = (uint32_t)strlen(nmp->nm_realm);
		principal = (uint8_t *)nmp->nm_realm;
		nt = GSSD_USER;
	}

	/* Lazily acquire the per-user gssd send right */
	if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
		cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred);
		if (cp->gss_clnt_mport == IPC_PORT_NULL) {
			goto out;
		}
	}

	/* Package the variable-length inputs as out-of-line mach buffers */
	if (plen) {
		nfs_gss_mach_alloc_buffer(principal, plen, &pname);
	}
	if (cp->gss_clnt_svcnamlen) {
		nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
	}
	if (cp->gss_clnt_tokenlen) {
		nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
	}

	/* Always want to export the lucid context */
	cp->gss_clnt_gssd_flags |= GSSD_LUCID_CONTEXT;

retry:
	kr = mach_gss_init_sec_context_v3(
		cp->gss_clnt_mport,
		GSSD_KRB5_MECH,
		(gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
		kauth_cred_getuid(cp->gss_clnt_cred),
		nt,
		(gssd_byte_buffer)pname, (mach_msg_type_number_t) plen,
		cp->gss_clnt_svcnt,
		(gssd_byte_buffer)svcname, (mach_msg_type_number_t) cp->gss_clnt_svcnamlen,
		GSSD_MUTUAL_FLAG,
		(gssd_etype_list)etype.etypes, (mach_msg_type_number_t)etype.count,
		&cp->gss_clnt_gssd_flags,
		&cp->gss_clnt_context,
		&cp->gss_clnt_cred_handle,
		&ret_flags,
		&octx, (mach_msg_type_number_t *) &lucidlen,
		&otoken, &otokenlen,
		/* Only ask for the display name if we don't already have one */
		cp->gss_clnt_display ? NULL : display_name,
		&cp->gss_clnt_major,
		&cp->gss_clnt_minor);

	/* Clear the RESTART flag */
	cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;
	if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
		/* We're done with the gssd handles */
		cp->gss_clnt_context = 0;
		cp->gss_clnt_cred_handle = 0;
	}

	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
		if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
		    retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES &&
		    !vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) {
			/*
			 * The message was sent, so the out-of-line buffers were
			 * consumed by the kernel; rebuild them before retrying.
			 */
			if (plen) {
				nfs_gss_mach_alloc_buffer(principal, plen, &pname);
			}
			if (cp->gss_clnt_svcnamlen) {
				nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
			}
			if (cp->gss_clnt_tokenlen > 0) {
				nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
			}
			goto retry;
		}

		/* Upcall failed for good; drop the gssd port so it's re-looked-up next time */
		host_release_special_port(cp->gss_clnt_mport);
		cp->gss_clnt_mport = IPC_PORT_NULL;
		goto out;
	}

	/* Cache the display name returned by the first successful upcall */
	if (cp->gss_clnt_display == NULL && *display_name != '\0') {
		size_t dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1; /* Add extra byte to include '\0' */

		if (dlen < MAX_DISPLAY_STR) {
			MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK);
			if (cp->gss_clnt_display == NULL) {
				goto skip;
			}
			bcopy(display_name, cp->gss_clnt_display, dlen);
		} else {
			goto skip;
		}
	}
skip:
	/*
	 * Make sure any unusual errors are expanded and logged by gssd
	 *
	 * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes.
	 */
	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
		NFS_GSS_DBG("Up call returned error\n");
		nfs_gss_clnt_log_error(req, cp, major, minor);
		/* Server's handle isn't valid. Don't reuse */
		cp->gss_clnt_handle_len = 0;
		if (cp->gss_clnt_handle != NULL) {
			FREE(cp->gss_clnt_handle, M_TEMP);
			cp->gss_clnt_handle = NULL;
		}
	}

	if (lucidlen > 0) {
		if (lucidlen > MAX_LUCIDLEN) {
			printf("nfs_gss_clnt_gssd_upcall: bad context length (%d)\n", lucidlen);
			vm_map_copy_discard((vm_map_copy_t) octx);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			goto out;
		}
		/* NOTE(review): MALLOC result is not NULL-checked here — presumably
		 * M_WAITOK is assumed to never fail; confirm against MALLOC semantics. */
		MALLOC(lucid_ctx_buffer, void *, lucidlen, M_TEMP, M_WAITOK | M_ZERO);
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer);
		if (error) {
			vm_map_copy_discard((vm_map_copy_t) otoken);
			goto out;
		}

		/* Replace any previous kernel crypto context with the new lucid one */
		if (cp->gss_clnt_ctx_id) {
			gss_krb5_destroy_context(cp->gss_clnt_ctx_id);
		}
		cp->gss_clnt_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen);
		if (cp->gss_clnt_ctx_id == NULL) {
			printf("Failed to make context from lucid_ctx_buffer\n");
			goto out;
		}
		/* Record which of the mount's etypes the context actually uses */
		for (uint32_t i = 0; i < nmp->nm_etype.count; i++) {
			if (nmp->nm_etype.etypes[i] == cp->gss_clnt_ctx_id->gss_cryptor.etype) {
				selected = i;
				break;
			}
		}
	}

	/* Free context token used as input */
	if (cp->gss_clnt_token) {
		FREE(cp->gss_clnt_token, M_TEMP);
	}
	cp->gss_clnt_token = NULL;
	cp->gss_clnt_tokenlen = 0;

	if (otokenlen > 0) {
		/* Set context token to gss output token */
		MALLOC(cp->gss_clnt_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_token == NULL) {
			printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			return ENOMEM;
		}
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token);
		if (error) {
			printf("Could not copyout gss token\n");
			FREE(cp->gss_clnt_token, M_TEMP);
			cp->gss_clnt_token = NULL;
			return NFSERR_EAUTH;
		}
		cp->gss_clnt_tokenlen = otokenlen;
	}

	if (selected != (uint32_t)-1) {
		nmp->nm_etype.selected = selected;
		NFS_GSS_DBG("etype selected = %d\n", nmp->nm_etype.etypes[selected]);
	}
	NFS_GSS_DBG("Up call succeeded major = %d\n", cp->gss_clnt_major);
	return 0;

out:
	/* Common failure path: drop token and server handle, report EAUTH */
	if (cp->gss_clnt_token) {
		FREE(cp->gss_clnt_token, M_TEMP);
	}
	cp->gss_clnt_token = NULL;
	cp->gss_clnt_tokenlen = 0;
	/* Server's handle isn't valid. Don't reuse */
	cp->gss_clnt_handle_len = 0;
	if (cp->gss_clnt_handle != NULL) {
		FREE(cp->gss_clnt_handle, M_TEMP);
		cp->gss_clnt_handle = NULL;
	}

	/* NOTE(review): debug message lacks a trailing '\n' */
	NFS_GSS_DBG("Up call returned NFSERR_EAUTH");
	return NFSERR_EAUTH;
}
2180 | ||
2181 | /* | |
2182 | * Invoked at the completion of an RPC call that uses an RPCSEC_GSS | |
2183 | * credential. The sequence number window that the server returns | |
2184 | * at context setup indicates the maximum number of client calls that | |
2185 | * can be outstanding on a context. The client maintains a bitmap that | |
2186 | * represents the server's window. Each pending request has a bit set | |
2187 | * in the window bitmap. When a reply comes in or times out, we reset | |
2188 | * the bit in the bitmap and if there are any other threads waiting for | |
2189 | * a context slot we notify the waiting thread(s). | |
2190 | * | |
2191 | * Note that if a request is retransmitted, it will have a single XID | |
2192 | * but it may be associated with multiple sequence numbers. So we | |
2193 | * may have to reset multiple sequence number bits in the window bitmap. | |
2194 | */ | |
void
nfs_gss_clnt_rpcdone(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct gss_seq *gsp, *ngsp;
	int i = 0;

	if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
		return; // no context - don't bother
	}
	/*
	 * Reset the bit for this request in the
	 * sequence number window to indicate it's done.
	 * We do this even if the request timed out.
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	gsp = SLIST_FIRST(&req->r_gss_seqlist);
	/* Only clear the bit if this seqnum still falls inside the server's window */
	if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin)) {
		win_resetbit(cp->gss_clnt_seqbits,
		    gsp->gss_seqnum % cp->gss_clnt_seqwin);
	}

	/*
	 * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries
	 * (retransmissions can accumulate one entry per send).
	 */
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) {
		if (++i > GSS_CLNT_SEQLISTMAX) {
			SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext);
			FREE(gsp, M_TEMP);
		}
	}

	/*
	 * If there's a thread waiting for
	 * the window to advance, wake it up.
	 */
	if (cp->gss_clnt_flags & GSS_NEEDSEQ) {
		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
		wakeup(cp);
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);
}
2237 | ||
2238 | /* | |
2239 | * Create a reference to a context from a request | |
2240 | * and bump the reference count | |
2241 | */ | |
2242 | void | |
2243 | nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) | |
2244 | { | |
2245 | req->r_gss_ctx = cp; | |
2246 | ||
2247 | lck_mtx_lock(&cp->gss_clnt_mtx); | |
2248 | cp->gss_clnt_refcnt++; | |
2249 | lck_mtx_unlock(&cp->gss_clnt_mtx); | |
2250 | } | |
2251 | ||
2252 | /* | |
2253 | * Remove a context reference from a request | |
2254 | * If the reference count drops to zero, and the | |
2255 | * context is invalid, destroy the context | |
2256 | */ | |
void
nfs_gss_clnt_ctx_unref(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	int on_neg_cache = 0;   /* context was already counted in the negative cache */
	int neg_cache = 0;      /* context should now be entered into the negative cache */
	int destroy = 0;        /* context should be torn down */
	struct timeval now;
	char CTXBUF[NFS_CTXBUFSZ];

	if (cp == NULL) {
		return;
	}

	req->r_gss_ctx = NULL;

	lck_mtx_lock(&cp->gss_clnt_mtx);
	if (--cp->gss_clnt_refcnt < 0) {
		panic("Over release of gss context!\n");
	}

	if (cp->gss_clnt_refcnt == 0) {
		/* Last reference: release the kernel crypto context if it's invalid */
		if ((cp->gss_clnt_flags & GSS_CTX_INVAL) &&
		    cp->gss_clnt_ctx_id) {
			gss_krb5_destroy_context(cp->gss_clnt_ctx_id);
			cp->gss_clnt_ctx_id = NULL;
		}
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			destroy = 1;
			if ((cp->gss_clnt_flags & GSS_CTX_USECOUNT) && !nfs_gss_clnt_mnt_rele(nmp)) {
				cp->gss_clnt_flags &= ~GSS_CTX_USECOUNT;
			}
			/* A non-zero nctime means it's counted in nm_ncentries */
			if (cp->gss_clnt_nctime) {
				on_neg_cache = 1;
			}
		}
	}
	/*
	 * Invalid but not marked for destruction: timestamp it so it can be
	 * held on the negative cache (avoids re-driving setup on every call).
	 */
	if (!destroy && cp->gss_clnt_nctime == 0 &&
	    (cp->gss_clnt_flags & GSS_CTX_INVAL)) {
		microuptime(&now);
		cp->gss_clnt_nctime = now.tv_sec;
		neg_cache = 1;
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);
	if (destroy) {
		NFS_GSS_DBG("Destroying context %s\n", NFS_GSS_CTX(req, cp));
		if (nmp) {
			lck_mtx_lock(&nmp->nm_lock);
			if (cp->gss_clnt_entries.tqe_next != NFSNOLIST) {
				TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
			}
			/* Keep the negative-cache count in step with the removal */
			if (on_neg_cache) {
				nmp->nm_ncentries--;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		nfs_gss_clnt_ctx_destroy(cp);
	} else if (neg_cache) {
		NFS_GSS_DBG("Entering context %s into negative cache\n", NFS_GSS_CTX(req, cp));
		if (nmp) {
			lck_mtx_lock(&nmp->nm_lock);
			nmp->nm_ncentries++;
			nfs_gss_clnt_ctx_neg_cache_reap(nmp);
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	NFS_GSS_CLNT_CTX_DUMP(nmp);
}
2326 | ||
2327 | /* | |
 * Try and reap any old, unreferenced entries from the negative
 * cache queue.
2330 | */ | |
2331 | void | |
2332 | nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *nmp) | |
2333 | { | |
2334 | struct nfs_gss_clnt_ctx *cp, *tcp; | |
2335 | struct timeval now; | |
2336 | int reaped = 0; | |
2337 | ||
2338 | /* Try and reap old, unreferenced, expired contexts */ | |
2339 | microuptime(&now); | |
2340 | ||
2341 | NFS_GSS_DBG("Reaping contexts ncentries = %d\n", nmp->nm_ncentries); | |
2342 | ||
2343 | TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) { | |
2344 | int destroy = 0; | |
2345 | ||
2346 | /* Don't reap STICKY contexts */ | |
2347 | if ((cp->gss_clnt_flags & GSS_CTX_STICKY) || | |
2348 | !(cp->gss_clnt_flags & GSS_CTX_INVAL)) { | |
2349 | continue; | |
2350 | } | |
2351 | /* Keep up to GSS_MAX_NEG_CACHE_ENTRIES */ | |
2352 | if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES) { | |
2353 | break; | |
2354 | } | |
2355 | /* Contexts too young */ | |
2356 | if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec) { | |
2357 | continue; | |
2358 | } | |
2359 | /* Not referenced, remove it. */ | |
2360 | lck_mtx_lock(&cp->gss_clnt_mtx); | |
2361 | if (cp->gss_clnt_refcnt == 0) { | |
2362 | cp->gss_clnt_flags |= GSS_CTX_DESTROY; | |
2363 | destroy = 1; | |
2364 | } | |
2365 | lck_mtx_unlock(&cp->gss_clnt_mtx); | |
2366 | if (destroy) { | |
2367 | TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries); | |
2368 | nmp->nm_ncentries++; | |
2369 | reaped++; | |
2370 | nfs_gss_clnt_ctx_destroy(cp); | |
2371 | } | |
2372 | } | |
2373 | NFS_GSS_DBG("Reaped %d contexts ncentries = %d\n", reaped, nmp->nm_ncentries); | |
2374 | } | |
2375 | ||
2376 | /* | |
2377 | * Clean a context to be cached | |
2378 | */ | |
static void
nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *cp)
{
	/* Preserve gss_clnt_mtx */
	assert(cp->gss_clnt_thread == NULL); /* Will be set to this thread */
	/* gss_clnt_entries: we should not be on any list at this point */
	cp->gss_clnt_flags = 0;
	/* gss_clnt_refcnt should be zero */
	assert(cp->gss_clnt_refcnt == 0);
	/*
	 * We are who we are, so preserve the identity fields:
	 *	gss_clnt_cred
	 *	gss_clnt_principal
	 *	gss_clnt_prinlen
	 *	gss_clnt_prinnt
	 *	gss_clnt_display
	 */
	/* gss_clnt_proc will be set in nfs_gss_clnt_ctx_init */
	cp->gss_clnt_seqnum = 0;
	/* Preserve gss_clnt_service, we're not changing flavors */
	/* Server-issued context handle is per-session; discard it */
	if (cp->gss_clnt_handle) {
		FREE(cp->gss_clnt_handle, M_TEMP);
		cp->gss_clnt_handle = NULL;
	}
	cp->gss_clnt_handle_len = 0;
	cp->gss_clnt_nctime = 0;
	/* Sequence window and its bitmap come from the server at setup */
	cp->gss_clnt_seqwin = 0;
	if (cp->gss_clnt_seqbits) {
		FREE(cp->gss_clnt_seqbits, M_TEMP);
		cp->gss_clnt_seqbits = NULL;
	}
	/* Preserve gss_clnt_mport. Still talking to the same gssd */
	if (cp->gss_clnt_verf) {
		FREE(cp->gss_clnt_verf, M_TEMP);
		cp->gss_clnt_verf = NULL;
	}
	/* Service name might change on failover, so reset it */
	if (cp->gss_clnt_svcname) {
		FREE(cp->gss_clnt_svcname, M_TEMP);
		cp->gss_clnt_svcname = NULL;
		cp->gss_clnt_svcnt = 0;
	}
	cp->gss_clnt_svcnamlen = 0;
	/* gssd-side handles are only meaningful during an in-progress setup */
	cp->gss_clnt_cred_handle = 0;
	cp->gss_clnt_context = 0;
	if (cp->gss_clnt_token) {
		FREE(cp->gss_clnt_token, M_TEMP);
		cp->gss_clnt_token = NULL;
	}
	cp->gss_clnt_tokenlen = 0;
	/* XXX gss_clnt_ctx_id ??? */
	/*
	 * Preserve:
	 *	gss_clnt_gssd_flags
	 *	gss_clnt_major
	 *	gss_clnt_minor
	 *	gss_clnt_ptime
	 */
}
2438 | ||
2439 | /* | |
2440 | * Copy a source context to a new context. This is used to create a new context | |
2441 | * with the identity of the old context for renewal. The old context is invalid | |
2442 | * at this point but may have reference still to it, so it is not safe to use that | |
2443 | * context. | |
2444 | */ | |
2445 | static int | |
2446 | nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dcpp) | |
2447 | { | |
2448 | struct nfs_gss_clnt_ctx *dcp; | |
2449 | ||
2450 | *dcpp = (struct nfs_gss_clnt_ctx *)NULL; | |
2451 | MALLOC(dcp, struct nfs_gss_clnt_ctx *, sizeof(struct nfs_gss_clnt_ctx), M_TEMP, M_WAITOK); | |
2452 | if (dcp == NULL) { | |
2453 | return ENOMEM; | |
2454 | } | |
2455 | bzero(dcp, sizeof(struct nfs_gss_clnt_ctx)); | |
2456 | lck_mtx_init(&dcp->gss_clnt_mtx, &nfs_gss_clnt_grp, LCK_ATTR_NULL); | |
2457 | dcp->gss_clnt_cred = scp->gss_clnt_cred; | |
2458 | kauth_cred_ref(dcp->gss_clnt_cred); | |
2459 | dcp->gss_clnt_prinlen = scp->gss_clnt_prinlen; | |
2460 | dcp->gss_clnt_prinnt = scp->gss_clnt_prinnt; | |
2461 | if (scp->gss_clnt_principal) { | |
2462 | MALLOC(dcp->gss_clnt_principal, uint8_t *, dcp->gss_clnt_prinlen, M_TEMP, M_WAITOK | M_ZERO); | |
2463 | if (dcp->gss_clnt_principal == NULL) { | |
2464 | FREE(dcp, M_TEMP); | |
2465 | return ENOMEM; | |
2466 | } | |
2467 | bcopy(scp->gss_clnt_principal, dcp->gss_clnt_principal, dcp->gss_clnt_prinlen); | |
2468 | } | |
2469 | /* Note we don't preserve the display name, that will be set by a successful up call */ | |
2470 | dcp->gss_clnt_service = scp->gss_clnt_service; | |
2471 | dcp->gss_clnt_mport = host_copy_special_port(scp->gss_clnt_mport); | |
2472 | dcp->gss_clnt_ctx_id = NULL; /* Will be set from successful upcall */ | |
2473 | dcp->gss_clnt_gssd_flags = scp->gss_clnt_gssd_flags; | |
2474 | dcp->gss_clnt_major = scp->gss_clnt_major; | |
2475 | dcp->gss_clnt_minor = scp->gss_clnt_minor; | |
2476 | dcp->gss_clnt_ptime = scp->gss_clnt_ptime; | |
2477 | ||
2478 | *dcpp = dcp; | |
2479 | ||
2480 | return 0; | |
2481 | } | |
2482 | ||
2483 | /* | |
2484 | * Remove a context | |
2485 | */ | |
static void
nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp)
{
	NFS_GSS_DBG("Destroying context %d/%d\n",
	    kauth_cred_getasid(cp->gss_clnt_cred),
	    kauth_cred_getauid(cp->gss_clnt_cred));

	/* Drop our send right to the per-user gssd */
	host_release_special_port(cp->gss_clnt_mport);
	cp->gss_clnt_mport = IPC_PORT_NULL;

	lck_mtx_destroy(&cp->gss_clnt_mtx, &nfs_gss_clnt_grp);

	if (IS_VALID_CRED(cp->gss_clnt_cred)) {
		kauth_cred_unref(&cp->gss_clnt_cred);
	}
	/* Mark the entry as off any mount's context list */
	cp->gss_clnt_entries.tqe_next = NFSNOLIST;
	cp->gss_clnt_entries.tqe_prev = NFSNOLIST;
	if (cp->gss_clnt_principal) {
		FREE(cp->gss_clnt_principal, M_TEMP);
		cp->gss_clnt_principal = NULL;
	}
	if (cp->gss_clnt_display) {
		FREE(cp->gss_clnt_display, M_TEMP);
		cp->gss_clnt_display = NULL;
	}
	if (cp->gss_clnt_ctx_id) {
		gss_krb5_destroy_context(cp->gss_clnt_ctx_id);
		cp->gss_clnt_ctx_id = NULL;
	}

	/* Free the remaining per-session allocations (handle, seqbits, verifier, token, svcname) */
	nfs_gss_clnt_ctx_clean(cp);

	FREE(cp, M_TEMP);
}
2520 | ||
/*
 * The context for a user is invalid.
 * Mark the context as invalid, then
 * create a new context.
 *
 * Returns 0 if the context is already being renewed, ENXIO if the mount
 * is gone, EACCES/ENEEDAUTH when renewal should not proceed, otherwise
 * the result of initializing the replacement context.
 */
int
nfs_gss_clnt_ctx_renew(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfs_gss_clnt_ctx *ncp;
	struct nfsmount *nmp;
	int error = 0;
	char CTXBUF[NFS_CTXBUFSZ];      /* scratch buffer used by NFS_GSS_CTX() */

	if (cp == NULL) {
		return 0;
	}

	if (req->r_nmp == NULL) {
		return ENXIO;
	}
	nmp = req->r_nmp;

	lck_mtx_lock(&cp->gss_clnt_mtx);
	if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
		lck_mtx_unlock(&cp->gss_clnt_mtx);
		nfs_gss_clnt_ctx_unref(req);
		return 0;       // already being renewed
	}

	/* Invalidate so no other thread picks this context up */
	cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);

	/* Wake threads waiting on context setup or a sequence-window slot */
	if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
		wakeup(cp);
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);

	if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
		return EACCES;  /* Destroying a context is best effort. Don't renew. */
	}
	/*
	 * If we're setting up a context let nfs_gss_clnt_ctx_init know this is not working
	 * and to try some other etype.
	 */
	if (cp->gss_clnt_proc != RPCSEC_GSS_DATA) {
		return ENEEDAUTH;
	}
	/* Clone the old context's identity; drop our ref on the old one */
	error = nfs_gss_clnt_ctx_copy(cp, &ncp);
	NFS_GSS_DBG("Renewing context %s\n", NFS_GSS_CTX(req, ncp));
	nfs_gss_clnt_ctx_unref(req);
	if (error) {
		return error;
	}

	lck_mtx_lock(&nmp->nm_lock);
	/*
	 * Note we don't bother taking the new context mutex as we're
	 * not findable at the moment.
	 */
	ncp->gss_clnt_thread = current_thread();
	nfs_gss_clnt_ctx_ref(req, ncp);
	TAILQ_INSERT_HEAD(&nmp->nm_gsscl, ncp, gss_clnt_entries);
	lck_mtx_unlock(&nmp->nm_lock);

	error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context
	if (error) {
		nfs_gss_clnt_ctx_unref(req);
	}

	return error;
}
2593 | ||
2594 | ||
/*
 * Destroy all the contexts associated with a mount.
 * The contexts are also destroyed by the server.
 *
 * Drains nmp->nm_gsscl, issuing a best-effort RPCSEC_GSS_DESTROY to the
 * server for each complete context (skipped on forced unmount), then
 * invalidating and unref'ing each context so it is freed when its
 * refcount drops to zero.
 */
void
nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp)
{
	struct nfs_gss_clnt_ctx *cp;
	struct nfsm_chain nmreq, nmrep;
	int error, status;
	struct nfsreq *req;

	if (!nmp) {
		return;
	}

	/* Scratch request used to carry each context through the destroy RPC */
	req = zalloc(nfs_req_zone);
	req->r_nmp = nmp;
	lck_mtx_lock(&nmp->nm_lock);
	while ((cp = TAILQ_FIRST(&nmp->nm_gsscl))) {
		TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
		cp->gss_clnt_entries.tqe_next = NFSNOLIST;
		lck_mtx_lock(&cp->gss_clnt_mtx);
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			/*
			 * Already being torn down; presumably the thread that
			 * set GSS_CTX_DESTROY owns the remaining reference —
			 * TODO(review): confirm no ref is needed here.
			 */
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			continue;
		}
		cp->gss_clnt_refcnt++;
		lck_mtx_unlock(&cp->gss_clnt_mtx);
		req->r_gss_ctx = cp;

		/* Drop nm_lock across the (possibly sleeping) destroy RPC */
		lck_mtx_unlock(&nmp->nm_lock);
		/*
		 * Tell the server to destroy its context.
		 * But don't bother if it's a forced unmount.
		 */
		if (!nfs_mount_gone(nmp) &&
		    (cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY | GSS_CTX_COMPLETE)) == GSS_CTX_COMPLETE) {
			cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;

			error = 0;
			nfsm_chain_null(&nmreq);
			nfsm_chain_null(&nmrep);
			nfsm_chain_build_alloc_init(error, &nmreq, 0);
			nfsm_chain_build_done(error, &nmreq);
			if (!error) {
				nfs_request_gss(nmp->nm_mountp, &nmreq,
				    current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status);
			}
			nfsm_chain_cleanup(&nmreq);
			nfsm_chain_cleanup(&nmrep);
		}

		/*
		 * Mark the context invalid then drop
		 * the reference to remove it if its
		 * refcount is zero.
		 */
		lck_mtx_lock(&cp->gss_clnt_mtx);
		cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
		lck_mtx_unlock(&cp->gss_clnt_mtx);
		nfs_gss_clnt_ctx_unref(req);
		lck_mtx_lock(&nmp->nm_lock);
	}
	lck_mtx_unlock(&nmp->nm_lock);
	assert(TAILQ_EMPTY(&nmp->nm_gsscl));
	NFS_ZFREE(nfs_req_zone, req);
}
2663 | ||
2664 | ||
/*
 * Removes a mount's context for a credential.
 *
 * Finds the first live (not already being destroyed) context on the
 * mount whose credential matches, marks it invalid/destroyed, and drops
 * a reference so it is freed once its refcount reaches zero.
 *
 * Returns 0 if a matching context was removed, ENOENT otherwise.
 */
int
nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred)
{
	struct nfs_gss_clnt_ctx *cp, *tcp;
	struct nfsreq *req;

	/* Scratch request: nfs_gss_clnt_ctx_unref() operates on a request */
	req = zalloc(nfs_req_zone);
	req->r_nmp = nmp;

	NFS_GSS_DBG("Enter\n");
	NFS_GSS_CLNT_CTX_DUMP(nmp);
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
		lck_mtx_lock(&cp->gss_clnt_mtx);
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
			if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
				/* Already on its way out; keep looking */
				NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
				    kauth_cred_getasid(cp->gss_clnt_cred),
				    kauth_cred_getauid(cp->gss_clnt_cred),
				    cp->gss_clnt_refcnt);
				lck_mtx_unlock(&cp->gss_clnt_mtx);
				continue;
			}
			/* Take a ref so the context survives until our unref below */
			cp->gss_clnt_refcnt++;
			cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			req->r_gss_ctx = cp;
			lck_mtx_unlock(&nmp->nm_lock);
			/*
			 * Drop the reference to remove it if its
			 * refcount is zero.
			 */
			NFS_GSS_DBG("Removed context %d/%d refcnt = %d\n",
			    kauth_cred_getasid(cp->gss_clnt_cred),
			    kauth_cred_getuid(cp->gss_clnt_cred),
			    cp->gss_clnt_refcnt);
			nfs_gss_clnt_ctx_unref(req);
			NFS_ZFREE(nfs_req_zone, req);
			return 0;
		}
		lck_mtx_unlock(&cp->gss_clnt_mtx);
	}

	lck_mtx_unlock(&nmp->nm_lock);

	NFS_ZFREE(nfs_req_zone, req);
	NFS_GSS_DBG("Returning ENOENT\n");
	return ENOENT;
}
2717 | ||
2718 | /* | |
2719 | * Sets a mounts principal for a session associated with cred. | |
2720 | */ | |
2721 | int | |
2722 | nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx, | |
2723 | uint8_t *principal, size_t princlen, uint32_t nametype) | |
2724 | { | |
2725 | struct nfsreq *req; | |
2726 | int error; | |
2727 | ||
2728 | NFS_GSS_DBG("Enter:\n"); | |
2729 | ||
2730 | req = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO); | |
2731 | req->r_nmp = nmp; | |
2732 | req->r_auth = nmp->nm_auth; | |
2733 | req->r_thread = vfs_context_thread(ctx); | |
2734 | req->r_cred = vfs_context_ucred(ctx); | |
2735 | ||
2736 | error = nfs_gss_clnt_ctx_find_principal(req, principal, princlen, nametype); | |
2737 | NFS_GSS_DBG("nfs_gss_clnt_ctx_find_principal returned %d\n", error); | |
2738 | /* | |
2739 | * We don't care about auth errors. Those would indicate that the context is in the | |
2740 | * neagative cache and if and when the user has credentials for the principal | |
2741 | * we should be good to go in that we will select those credentials for this principal. | |
2742 | */ | |
2743 | if (error == EACCES || error == EAUTH || error == ENEEDAUTH) { | |
2744 | error = 0; | |
2745 | } | |
2746 | ||
2747 | /* We're done with this request */ | |
2748 | nfs_gss_clnt_ctx_unref(req); | |
2749 | NFS_ZFREE(nfs_req_zone, req); | |
2750 | return error; | |
2751 | } | |
2752 | ||
/*
 * Gets a mount's principal from a session associated with cred.
 *
 * Fills in *p with the principal (or display name) of the first live
 * context on the mount matching cred. On success the principal string
 * is copied into a freshly MALLOC'd buffer whose address is returned in
 * p->principal (caller owns and must free it). If no context matches,
 * returns 0 with NFS_IOC_NO_CRED_FLAG set in p->flags.
 */
int
nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx,
    struct user_nfs_gss_principal *p)
{
	struct nfsreq *req;
	int error = 0;
	struct nfs_gss_clnt_ctx *cp;
	kauth_cred_t cred = vfs_context_ucred(ctx);
	const char *princ = NULL;
	char CTXBUF[NFS_CTXBUFSZ];      /* scratch buffer used by NFS_GSS_CTX() */

	/* Make sure the members of the struct user_nfs_gss_principal are initialized */
	p->nametype = GSSD_STRING_NAME;
	p->principal = USER_ADDR_NULL;
	p->princlen = 0;
	p->flags = 0;

	req = zalloc_flags(nfs_req_zone, Z_WAITOK);
	req->r_nmp = nmp;
	lck_mtx_lock(&nmp->nm_lock);
	/* cp is NULL after the loop iff no live matching context was found */
	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
		lck_mtx_lock(&cp->gss_clnt_mtx);
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
			    NFS_GSS_CTX(req, cp),
			    cp->gss_clnt_refcnt);
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			continue;
		}
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
			/* Hold a ref so the context outlives the nm_lock drop below */
			cp->gss_clnt_refcnt++;
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			goto out;
		}
		lck_mtx_unlock(&cp->gss_clnt_mtx);
	}

out:
	if (cp == NULL) {
		lck_mtx_unlock(&nmp->nm_lock);
		p->flags |= NFS_IOC_NO_CRED_FLAG;       /* No credentials, valid or invalid on this mount */
		NFS_GSS_DBG("No context found for session %d by uid %d\n",
		    kauth_cred_getasid(cred), kauth_cred_getuid(cred));
		NFS_ZFREE(nfs_req_zone, req);
		return 0;
	}

	/* Indicate if the cred is INVALID */
	if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
		p->flags |= NFS_IOC_INVALID_CRED_FLAG;
	}

	/* We have set a principal on the mount */
	if (cp->gss_clnt_principal) {
		princ = (char *)cp->gss_clnt_principal;
		p->princlen = cp->gss_clnt_prinlen;
		p->nametype = cp->gss_clnt_prinnt;
	} else if (cp->gss_clnt_display) {
		/* We have a successful use of the default credential */
		princ = cp->gss_clnt_display;
		p->princlen = strlen(cp->gss_clnt_display);
	}

	/*
	 * If neither of the above is true we have an invalid default credential
	 * So from above p->principal is USER_ADDR_NULL and princ is NULL
	 */

	if (princ) {
		char *pp;

		/* NOTE(review): blocking MALLOC while holding nm_lock — confirm acceptable here */
		MALLOC(pp, char *, p->princlen, M_TEMP, M_WAITOK);
		bcopy(princ, pp, p->princlen);
		p->principal = CAST_USER_ADDR_T(pp);
	}

	lck_mtx_unlock(&nmp->nm_lock);

	/* Hand the ref we took to the scratch request and release it */
	req->r_gss_ctx = cp;
	NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(req, NULL));
	nfs_gss_clnt_ctx_unref(req);
	NFS_ZFREE(nfs_req_zone, req);
	return error;
}
2840 | #endif /* CONFIG_NFS_CLIENT */ | |
2841 | ||
2842 | /************* | |
2843 | * | |
2844 | * Server functions | |
2845 | */ | |
2846 | ||
2847 | #if CONFIG_NFS_SERVER | |
2848 | ||
/*
 * Find a server context based on a handle value received
 * in an RPCSEC_GSS credential.
 *
 * Returns the context with its refcount bumped, or NULL if the handle
 * is unknown, zero, or the context is within GSS_CTX_PEND seconds of
 * expiring (in which case the client must negotiate a new context).
 */
static struct nfs_gss_svc_ctx *
nfs_gss_svc_ctx_find(uint32_t handle)
{
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *cp;
	uint64_t timenow;

	/* Handle 0 is never assigned (see nfs_gss_svc_ctx_insert) */
	if (handle == 0) {
		return NULL;
	}

	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
	/*
	 * Don't return a context that is going to expire in GSS_CTX_PEND seconds
	 */
	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);

	LIST_FOREACH(cp, head, gss_svc_entries) {
		if (cp->gss_svc_handle == handle) {
			if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
				/*
				 * Context has or is about to expire. Don't use.
				 * We'll return null and the client will have to create
				 * a new context.
				 */
				cp->gss_svc_handle = 0;
				/*
				 * Make sure though that we stay around for GSS_CTX_PEND seconds
				 * for other threads that might be using the context.
				 */
				cp->gss_svc_incarnation = timenow;

				cp = NULL;
				break;
			}
			/* Found a live context: take a reference for the caller */
			lck_mtx_lock(&cp->gss_svc_mtx);
			cp->gss_svc_refcnt++;
			lck_mtx_unlock(&cp->gss_svc_mtx);
			break;
		}
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);

	return cp;
}
2901 | ||
/*
 * Insert a new server context into the hash table
 * and start the context reap thread if necessary.
 *
 * Assigns the context a random, non-zero, collision-free handle and
 * stamps its initial incarnation GSS_CTX_PEND seconds in the future.
 */
static void
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *p;

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);

	/*
	 * Give the client a random handle so that if we reboot
	 * it's unlikely the client will get a bad context match.
	 * Make sure it's not zero or already assigned.
	 */
retry:
	cp->gss_svc_handle = random();
	if (cp->gss_svc_handle == 0) {
		goto retry;
	}
	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
	/* Re-roll on collision with any context already in this bucket */
	LIST_FOREACH(p, head, gss_svc_entries)
	if (p->gss_svc_handle == cp->gss_svc_handle) {
		goto retry;
	}

	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
	    &cp->gss_svc_incarnation);
	LIST_INSERT_HEAD(head, cp, gss_svc_entries);
	nfs_gss_ctx_count++;

	/* Arm the reaper timer the first time any context exists */
	if (!nfs_gss_timer_on) {
		nfs_gss_timer_on = 1;

		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
		    min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
}
2944 | ||
/*
 * This function is called via the kernel's callout
 * mechanism. It runs only when there are
 * cached RPCSEC_GSS contexts.
 *
 * Reaps server contexts that are past their TTL (or, for contexts whose
 * handle was zeroed by nfs_gss_svc_ctx_find, past their grace period)
 * and are unreferenced, then re-arms itself while contexts remain.
 */
void
nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
{
	struct nfs_gss_svc_ctx *cp, *next;
	uint64_t timenow;
	int contexts = 0;
	int i;

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
	clock_get_uptime(&timenow);

	NFS_GSS_DBG("is running\n");

	/*
	 * Scan all the hash chains
	 */
	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
		/*
		 * For each hash chain, look for entries
		 * that haven't been used in a while.
		 */
		LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
			contexts++;
			/* handle == 0 means already expired: no extra TTL grace */
			if (timenow > cp->gss_svc_incarnation +
			    (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
			    && cp->gss_svc_refcnt == 0) {
				/*
				 * A stale context - remove it
				 */
				LIST_REMOVE(cp, gss_svc_entries);
				NFS_GSS_DBG("Removing contex for %d\n", cp->gss_svc_uid);
				if (cp->gss_svc_seqbits) {
					FREE(cp->gss_svc_seqbits, M_TEMP);
				}
				lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
				FREE(cp, M_TEMP);
				contexts--;
			}
		}
	}

	/* Refresh the global count from this full scan */
	nfs_gss_ctx_count = contexts;

	/*
	 * If there are still some cached contexts left,
	 * set up another callout to check on them later.
	 */
	nfs_gss_timer_on = nfs_gss_ctx_count > 0;
	if (nfs_gss_timer_on) {
		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
		    min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
}
3005 | ||
3006 | /* | |
3007 | * Here the server receives an RPCSEC_GSS credential in an | |
3008 | * RPC call header. First there's some checking to make sure | |
3009 | * the credential is appropriate - whether the context is still | |
3010 | * being set up, or is complete. Then we use the handle to find | |
3011 | * the server's context and validate the verifier, which contains | |
3012 | * a signed checksum of the RPC header. If the verifier checks | |
3013 | * out, we extract the user's UID and groups from the context | |
3014 | * and use it to set up a UNIX credential for the user's request. | |
3015 | */ | |
3016 | int | |
3017 | nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) | |
3018 | { | |
3019 | uint32_t vers, proc, seqnum, service; | |
3020 | uint32_t handle, handle_len; | |
3021 | uint32_t major; | |
3022 | struct nfs_gss_svc_ctx *cp = NULL; | |
3023 | uint32_t flavor = 0; | |
3024 | int error = 0; | |
3025 | uint32_t arglen; | |
3026 | size_t argsize, start, header_len; | |
3027 | gss_buffer_desc cksum; | |
3028 | struct nfsm_chain nmc_tmp; | |
3029 | mbuf_t reply_mbuf, prev_mbuf, pad_mbuf; | |
3030 | ||
3031 | vers = proc = seqnum = service = handle_len = 0; | |
3032 | arglen = 0; | |
3033 | ||
3034 | nfsm_chain_get_32(error, nmc, vers); | |
3035 | if (vers != RPCSEC_GSS_VERS_1) { | |
3036 | error = NFSERR_AUTHERR | AUTH_REJECTCRED; | |
3037 | goto nfsmout; | |
3038 | } | |
3039 | ||
3040 | nfsm_chain_get_32(error, nmc, proc); | |
3041 | nfsm_chain_get_32(error, nmc, seqnum); | |
3042 | nfsm_chain_get_32(error, nmc, service); | |
3043 | nfsm_chain_get_32(error, nmc, handle_len); | |
3044 | if (error) { | |
3045 | goto nfsmout; | |
3046 | } | |
3047 | ||
3048 | /* | |
3049 | * Make sure context setup/destroy is being done with a nullproc | |
3050 | */ | |
3051 | if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) { | |
3052 | error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM; | |
3053 | goto nfsmout; | |
3054 | } | |
3055 | ||
3056 | /* | |
3057 | * If the sequence number is greater than the max | |
3058 | * allowable, reject and have the client init a | |
3059 | * new context. | |
3060 | */ | |
3061 | if (seqnum > GSS_MAXSEQ) { | |
3062 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; | |
3063 | goto nfsmout; | |
3064 | } | |
3065 | ||
3066 | nd->nd_sec = | |
3067 | service == RPCSEC_GSS_SVC_NONE ? RPCAUTH_KRB5 : | |
3068 | service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I : | |
3069 | service == RPCSEC_GSS_SVC_PRIVACY ? RPCAUTH_KRB5P : 0; | |
3070 | ||
3071 | if (proc == RPCSEC_GSS_INIT) { | |
3072 | /* | |
3073 | * Limit the total number of contexts | |
3074 | */ | |
3075 | if (nfs_gss_ctx_count > nfs_gss_ctx_max) { | |
3076 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; | |
3077 | goto nfsmout; | |
3078 | } | |
3079 | ||
3080 | /* | |
3081 | * Set up a new context | |
3082 | */ | |
3083 | MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK | M_ZERO); | |
3084 | if (cp == NULL) { | |
3085 | error = ENOMEM; | |
3086 | goto nfsmout; | |
3087 | } | |
3088 | lck_mtx_init(&cp->gss_svc_mtx, &nfs_gss_svc_grp, LCK_ATTR_NULL); | |
3089 | cp->gss_svc_refcnt = 1; | |
3090 | } else { | |
3091 | /* | |
3092 | * Use the handle to find the context | |
3093 | */ | |
3094 | if (handle_len != sizeof(handle)) { | |
3095 | error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM; | |
3096 | goto nfsmout; | |
3097 | } | |
3098 | nfsm_chain_get_32(error, nmc, handle); | |
3099 | if (error) { | |
3100 | goto nfsmout; | |
3101 | } | |
3102 | cp = nfs_gss_svc_ctx_find(handle); | |
3103 | if (cp == NULL) { | |
3104 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; | |
3105 | goto nfsmout; | |
3106 | } | |
3107 | } | |
3108 | ||
3109 | cp->gss_svc_proc = proc; | |
3110 | ||
3111 | if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) { | |
3112 | struct posix_cred temp_pcred; | |
3113 | ||
3114 | if (cp->gss_svc_seqwin == 0) { | |
3115 | /* | |
3116 | * Context isn't complete | |
3117 | */ | |
3118 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; | |
3119 | goto nfsmout; | |
3120 | } | |
3121 | ||
3122 | if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) { | |
3123 | /* | |
3124 | * Sequence number is bad | |
3125 | */ | |
3126 | error = EINVAL; // drop the request | |
3127 | goto nfsmout; | |
3128 | } | |
3129 | ||
3130 | /* | |
3131 | * Validate the verifier. | |
3132 | * The verifier contains an encrypted checksum | |
3133 | * of the call header from the XID up to and | |
3134 | * including the credential. We compute the | |
3135 | * checksum and compare it with what came in | |
3136 | * the verifier. | |
3137 | */ | |
3138 | header_len = nfsm_chain_offset(nmc); | |
3139 | nfsm_chain_get_32(error, nmc, flavor); | |
3140 | nfsm_chain_get_32(error, nmc, cksum.length); | |
3141 | if (error) { | |
3142 | goto nfsmout; | |
3143 | } | |
3144 | if (flavor != RPCSEC_GSS || cksum.length > KRB5_MAX_MIC_SIZE) { | |
3145 | error = NFSERR_AUTHERR | AUTH_BADVERF; | |
3146 | } else { | |
3147 | MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK); | |
3148 | nfsm_chain_get_opaque(error, nmc, cksum.length, cksum.value); | |
3149 | } | |
3150 | if (error) { | |
3151 | goto nfsmout; | |
3152 | } | |
3153 | ||
3154 | /* Now verify the client's call header checksum */ | |
3155 | major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, nmc->nmc_mhead, 0, header_len, &cksum, NULL); | |
3156 | (void)gss_release_buffer(NULL, &cksum); | |
3157 | if (major != GSS_S_COMPLETE) { | |
3158 | printf("Server header: gss_krb5_verify_mic_mbuf failed %d\n", error); | |
3159 | error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; | |
3160 | goto nfsmout; | |
3161 | } | |
3162 | ||
3163 | nd->nd_gss_seqnum = seqnum; | |
3164 | ||
3165 | /* | |
3166 | * Set up the user's cred | |
3167 | */ | |
3168 | bzero(&temp_pcred, sizeof(temp_pcred)); | |
3169 | temp_pcred.cr_uid = cp->gss_svc_uid; | |
3170 | bcopy(cp->gss_svc_gids, temp_pcred.cr_groups, | |
3171 | sizeof(gid_t) * cp->gss_svc_ngroups); | |
3172 | temp_pcred.cr_ngroups = (short)cp->gss_svc_ngroups; | |
3173 | ||
3174 | nd->nd_cr = posix_cred_create(&temp_pcred); | |
3175 | if (nd->nd_cr == NULL) { | |
3176 | error = ENOMEM; | |
3177 | goto nfsmout; | |
3178 | } | |
3179 | clock_get_uptime(&cp->gss_svc_incarnation); | |
3180 | ||
3181 | /* | |
3182 | * If the call arguments are integrity or privacy protected | |
3183 | * then we need to check them here. | |
3184 | */ | |
3185 | switch (service) { | |
3186 | case RPCSEC_GSS_SVC_NONE: | |
3187 | /* nothing to do */ | |
3188 | break; | |
3189 | case RPCSEC_GSS_SVC_INTEGRITY: | |
3190 | /* | |
3191 | * Here's what we expect in the integrity call args: | |
3192 | * | |
3193 | * - length of seq num + call args (4 bytes) | |
3194 | * - sequence number (4 bytes) | |
3195 | * - call args (variable bytes) | |
3196 | * - length of checksum token | |
3197 | * - checksum of seqnum + call args | |
3198 | */ | |
3199 | nfsm_chain_get_32(error, nmc, arglen); // length of args | |
3200 | if (arglen > NFS_MAXPACKET) { | |
3201 | error = EBADRPC; | |
3202 | goto nfsmout; | |
3203 | } | |
3204 | ||
3205 | nmc_tmp = *nmc; | |
3206 | nfsm_chain_adv(error, &nmc_tmp, arglen); | |
3207 | nfsm_chain_get_32(error, &nmc_tmp, cksum.length); | |
3208 | cksum.value = NULL; | |
3209 | if (cksum.length > 0 && cksum.length < GSS_MAX_MIC_LEN) { | |
3210 | MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK); | |
3211 | } | |
3212 | ||
3213 | if (cksum.value == NULL) { | |
3214 | error = EBADRPC; | |
3215 | goto nfsmout; | |
3216 | } | |
3217 | nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value); | |
3218 | ||
3219 | /* Verify the checksum over the call args */ | |
3220 | start = nfsm_chain_offset(nmc); | |
3221 | ||
3222 | major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, | |
3223 | nmc->nmc_mhead, start, arglen, &cksum, NULL); | |
3224 | FREE(cksum.value, M_TEMP); | |
3225 | if (major != GSS_S_COMPLETE) { | |
3226 | printf("Server args: gss_krb5_verify_mic_mbuf failed %d\n", error); | |
3227 | error = EBADRPC; | |
3228 | goto nfsmout; | |
3229 | } | |
3230 | ||
3231 | /* | |
3232 | * Get the sequence number prepended to the args | |
3233 | * and compare it against the one sent in the | |
3234 | * call credential. | |
3235 | */ | |
3236 | nfsm_chain_get_32(error, nmc, seqnum); | |
3237 | if (seqnum != nd->nd_gss_seqnum) { | |
3238 | error = EBADRPC; // returns as GARBAGEARGS | |
3239 | goto nfsmout; | |
3240 | } | |
3241 | break; | |
3242 | case RPCSEC_GSS_SVC_PRIVACY: | |
3243 | /* | |
3244 | * Here's what we expect in the privacy call args: | |
3245 | * | |
3246 | * - length of wrap token | |
3247 | * - wrap token (37-40 bytes) | |
3248 | */ | |
3249 | prev_mbuf = nmc->nmc_mcur; | |
3250 | nfsm_chain_get_32(error, nmc, arglen); // length of args | |
3251 | if (arglen > NFS_MAXPACKET) { | |
3252 | error = EBADRPC; | |
3253 | goto nfsmout; | |
3254 | } | |
3255 | ||
3256 | /* Get the wrap token (current mbuf in the chain starting at the current offset) */ | |
3257 | start = nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur); | |
3258 | ||
3259 | /* split out the wrap token */ | |
3260 | argsize = arglen; | |
3261 | error = gss_normalize_mbuf(nmc->nmc_mcur, start, &argsize, &reply_mbuf, &pad_mbuf, 0); | |
3262 | if (error) { | |
3263 | goto nfsmout; | |
3264 | } | |
3265 | ||
3266 | assert(argsize == arglen); | |
3267 | if (pad_mbuf) { | |
3268 | assert(nfsm_pad(arglen) == mbuf_len(pad_mbuf)); | |
3269 | mbuf_free(pad_mbuf); | |
3270 | } else { | |
3271 | assert(nfsm_pad(arglen) == 0); | |
3272 | } | |
3273 | ||
3274 | major = gss_krb5_unwrap_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, &reply_mbuf, 0, arglen, NULL, NULL); | |
3275 | if (major != GSS_S_COMPLETE) { | |
3276 | printf("%s: gss_krb5_unwrap_mbuf failes %d\n", __func__, error); | |
3277 | goto nfsmout; | |
3278 | } | |
3279 | ||
3280 | /* Now replace the wrapped arguments with the unwrapped ones */ | |
3281 | mbuf_setnext(prev_mbuf, reply_mbuf); | |
3282 | nmc->nmc_mcur = reply_mbuf; | |
3283 | nmc->nmc_ptr = mbuf_data(reply_mbuf); | |
3284 | nmc->nmc_left = mbuf_len(reply_mbuf); | |
3285 | ||
3286 | /* | |
3287 | * - sequence number (4 bytes) | |
3288 | * - call args | |
3289 | */ | |
3290 | ||
3291 | // nfsm_chain_reverse(nmc, nfsm_pad(toklen)); | |
3292 | ||
3293 | /* | |
3294 | * Get the sequence number prepended to the args | |
3295 | * and compare it against the one sent in the | |
3296 | * call credential. | |
3297 | */ | |
3298 | nfsm_chain_get_32(error, nmc, seqnum); | |
3299 | if (seqnum != nd->nd_gss_seqnum) { | |
3300 | printf("%s: Sequence number mismatch seqnum = %d nd->nd_gss_seqnum = %d\n", | |
3301 | __func__, seqnum, nd->nd_gss_seqnum); | |
3302 | printmbuf("reply_mbuf", nmc->nmc_mhead, 0, 0); | |
3303 | printf("reply_mbuf %p nmc_head %p\n", reply_mbuf, nmc->nmc_mhead); | |
3304 | error = EBADRPC; // returns as GARBAGEARGS | |
3305 | goto nfsmout; | |
3306 | } | |
3307 | break; | |
3308 | } | |
3309 | } else { | |
3310 | uint32_t verflen; | |
3311 | /* | |
3312 | * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT | |
3313 | * then we expect a null verifier. | |
3314 | */ | |
3315 | nfsm_chain_get_32(error, nmc, flavor); | |
3316 | nfsm_chain_get_32(error, nmc, verflen); | |
3317 | if (error || flavor != RPCAUTH_NULL || verflen > 0) { | |
3318 | error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM; | |
3319 | } | |
3320 | if (error) { | |
3321 | if (proc == RPCSEC_GSS_INIT) { | |
3322 | lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp); | |
3323 | FREE(cp, M_TEMP); | |
3324 | cp = NULL; | |
3325 | } | |
3326 | goto nfsmout; | |
3327 | } | |
3328 | } | |
3329 | ||
3330 | nd->nd_gss_context = cp; | |
3331 | return 0; | |
3332 | nfsmout: | |
3333 | if (cp) { | |
3334 | nfs_gss_svc_ctx_deref(cp); | |
3335 | } | |
3336 | return error; | |
3337 | } | |
3338 | ||
3339 | /* | |
3340 | * Insert the server's verifier into the RPC reply header. | |
3341 | * It contains a signed checksum of the sequence number that | |
3342 | * was received in the RPC call. | |
3343 | * Then go on to add integrity or privacy if necessary. | |
3344 | */ | |
3345 | int | |
3346 | nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc) | |
3347 | { | |
3348 | struct nfs_gss_svc_ctx *cp; | |
3349 | int error = 0; | |
3350 | gss_buffer_desc cksum, seqbuf; | |
3351 | uint32_t network_seqnum; | |
3352 | cp = nd->nd_gss_context; | |
3353 | uint32_t major; | |
3354 | ||
3355 | if (cp->gss_svc_major != GSS_S_COMPLETE) { | |
3356 | /* | |
3357 | * If the context isn't yet complete | |
3358 | * then return a null verifier. | |
3359 | */ | |
3360 | nfsm_chain_add_32(error, nmc, RPCAUTH_NULL); | |
3361 | nfsm_chain_add_32(error, nmc, 0); | |
3362 | return error; | |
3363 | } | |
3364 | ||
3365 | /* | |
3366 | * Compute checksum of the request seq number | |
3367 | * If it's the final reply of context setup | |
3368 | * then return the checksum of the context | |
3369 | * window size. | |
3370 | */ | |
3371 | seqbuf.length = NFSX_UNSIGNED; | |
3372 | if (cp->gss_svc_proc == RPCSEC_GSS_INIT || | |
3373 | cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) { | |
3374 | network_seqnum = htonl(cp->gss_svc_seqwin); | |
3375 | } else { | |
3376 | network_seqnum = htonl(nd->nd_gss_seqnum); | |
3377 | } | |
3378 | seqbuf.value = &network_seqnum; | |
3379 | ||
3380 | major = gss_krb5_get_mic((uint32_t *)&error, cp->gss_svc_ctx_id, 0, &seqbuf, &cksum); | |
3381 | if (major != GSS_S_COMPLETE) { | |
3382 | return error; | |
3383 | } | |
3384 | ||
3385 | /* | |
3386 | * Now wrap it in a token and add | |
3387 | * the verifier to the reply. | |
3388 | */ | |
3389 | nfsm_chain_add_32(error, nmc, RPCSEC_GSS); | |
3390 | nfsm_chain_add_32(error, nmc, cksum.length); | |
3391 | nfsm_chain_add_opaque(error, nmc, cksum.value, cksum.length); | |
3392 | gss_release_buffer(NULL, &cksum); | |
3393 | ||
3394 | return error; | |
3395 | } | |
3396 | ||
3397 | /* | |
3398 | * The results aren't available yet, but if they need to be | |
3399 | * checksummed for integrity protection or encrypted, then | |
3400 | * we can record the start offset here, insert a place-holder | |
3401 | * for the results length, as well as the sequence number. | |
3402 | * The rest of the work is done later by nfs_gss_svc_protect_reply() | |
3403 | * when the results are available. | |
3404 | */ | |
3405 | int | |
3406 | nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc) | |
3407 | { | |
3408 | struct nfs_gss_svc_ctx *cp = nd->nd_gss_context; | |
3409 | int error = 0; | |
3410 | ||
3411 | if (cp->gss_svc_proc == RPCSEC_GSS_INIT || | |
3412 | cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) { | |
3413 | return 0; | |
3414 | } | |
3415 | ||
3416 | switch (nd->nd_sec) { | |
3417 | case RPCAUTH_KRB5: | |
3418 | /* Nothing to do */ | |
3419 | break; | |
3420 | case RPCAUTH_KRB5I: | |
3421 | case RPCAUTH_KRB5P: | |
3422 | nd->nd_gss_mb = nmc->nmc_mcur; // record current mbuf | |
3423 | nfsm_chain_finish_mbuf(error, nmc); // split the chain here | |
3424 | break; | |
3425 | } | |
3426 | ||
3427 | return error; | |
3428 | } | |
3429 | ||
3430 | /* | |
3431 | * The results are checksummed or encrypted for return to the client | |
3432 | */ | |
int
nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep __unused)
{
	struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
	struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
	mbuf_t mb, results;
	uint32_t reslen;
	int error = 0;

	/* XXX
	 * Using a reference to the mbuf where we previously split the reply
	 * mbuf chain, we split the mbuf chain argument into two mbuf chains,
	 * one that allows us to prepend a length field or token, (nmc_pre)
	 * and the second which holds just the results that we're going to
	 * checksum and/or encrypt.  When we're done, we join the chains back
	 * together.
	 */

	mb = nd->nd_gss_mb;             // the mbuf where we split (set in nfs_gss_svc_prepare_reply)
	results = mbuf_next(mb);        // first mbuf in the results
	error = mbuf_setnext(mb, NULL); // disconnect the chains
	if (error) {
		return error;
	}
	nfs_gss_nfsm_chain(nmc_res, mb); // set up the prepend chain
	nfsm_chain_build_done(error, nmc_res);
	if (error) {
		return error;
	}

	/* Wrap the results per the auth flavor: MIC for krb5i, seal for krb5p */
	if (nd->nd_sec == RPCAUTH_KRB5I) {
		error = rpc_gss_integ_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
	} else {
		/* RPCAUTH_KRB5P */
		error = rpc_gss_priv_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
	}
	/*
	 * NOTE(review): 'results' is re-attached below even when the create
	 * call above failed -- presumably those calls leave 'results' valid
	 * on error; confirm against rpc_gss_*_data_create's contract.
	 */
	nfs_gss_append_chain(nmc_res, results); // Append the results mbufs
	nfsm_chain_build_done(error, nmc_res);

	return error;
}
3474 | ||
3475 | /* | |
3476 | * This function handles the context setup calls from the client. | |
3477 | * Essentially, it implements the NFS null procedure calls when | |
3478 | * an RPCSEC_GSS credential is used. | |
3479 | * This is the context maintenance function. It creates and | |
3480 | * destroys server contexts at the whim of the client. | |
3481 | * During context creation, it receives GSS-API tokens from the | |
3482 | * client, passes them up to gssd, and returns a received token | |
3483 | * back to the client in the null procedure reply. | |
3484 | */ | |
int
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
{
	struct nfs_gss_svc_ctx *cp = NULL;
	int error = 0;
	int autherr = 0;
	struct nfsm_chain *nmreq, nmrep;
	int sz;

	nmreq = &nd->nd_nmreq;
	nfsm_chain_null(&nmrep);
	*mrepp = NULL;
	cp = nd->nd_gss_context;
	nd->nd_repstat = 0;

	switch (cp->gss_svc_proc) {
	case RPCSEC_GSS_INIT:
		/* Brand-new context: make it findable in the server's table, then
		 * continue with the common token-exchange path below. */
		nfs_gss_svc_ctx_insert(cp);
		OS_FALLTHROUGH;

	case RPCSEC_GSS_CONTINUE_INIT:
		/* Get the token from the request */
		nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
		cp->gss_svc_token = NULL;
		/* tokenlen is client-controlled: reject zero or oversized values */
		if (cp->gss_svc_tokenlen > 0 && cp->gss_svc_tokenlen < GSS_MAX_TOKEN_LEN) {
			MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
		}
		if (cp->gss_svc_token == NULL) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			break;
		}
		nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);

		/* Use the token in a gss_accept_sec_context upcall */
		error = nfs_gss_svc_gssd_upcall(cp);
		if (error) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			/* auth failures are reported via autherr, not as RPC errors */
			if (error == NFSERR_EAUTH) {
				error = 0;
			}
			break;
		}

		/*
		 * If the context isn't complete, pass the new token
		 * back to the client for another round.
		 */
		if (cp->gss_svc_major != GSS_S_COMPLETE) {
			break;
		}

		/*
		 * Now the server context is complete.
		 * Finish setup.
		 */
		clock_get_uptime(&cp->gss_svc_incarnation);

		/* Allocate the replay window bitmap: one bit per sequence number */
		cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
		MALLOC(cp->gss_svc_seqbits, uint32_t *,
		    nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK | M_ZERO);
		if (cp->gss_svc_seqbits == NULL) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			break;
		}
		break;

	case RPCSEC_GSS_DATA:
		/* Just a nullproc ping - do nothing */
		break;

	case RPCSEC_GSS_DESTROY:
		/*
		 * Don't destroy the context immediately because
		 * other active requests might still be using it.
		 * Instead, schedule it for destruction after
		 * GSS_CTX_PEND time has elapsed.
		 */
		cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
		if (cp != NULL) {
			cp->gss_svc_handle = 0; // so it can't be found
			lck_mtx_lock(&cp->gss_svc_mtx);
			clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
			    &cp->gss_svc_incarnation);
			lck_mtx_unlock(&cp->gss_svc_mtx);
		}
		break;
	default:
		autherr = RPCSEC_GSS_CREDPROBLEM;
		break;
	}

	/* Now build the reply */

	if (nd->nd_repstat == 0) {
		nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
	}
	sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results
	error = nfsrv_rephead(nd, slp, &nmrep, sz);
	*mrepp = nmrep.nmc_mhead;
	if (error || autherr) {
		goto nfsmout;
	}

	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
		/* Setup reply body: handle, GSS major/minor status, seq window,
		 * and the (possibly empty) output token from gssd. */
		nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);

		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);

		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
		if (cp->gss_svc_token != NULL) {
			nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
			FREE(cp->gss_svc_token, M_TEMP);
			cp->gss_svc_token = NULL;
		}
	}

nfsmout:
	if (autherr != 0) {
		/* Authentication failed: unlink and free the context entirely */
		nd->nd_gss_context = NULL;
		LIST_REMOVE(cp, gss_svc_entries);
		if (cp->gss_svc_seqbits != NULL) {
			FREE(cp->gss_svc_seqbits, M_TEMP);
		}
		if (cp->gss_svc_token != NULL) {
			FREE(cp->gss_svc_token, M_TEMP);
		}
		lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
		FREE(cp, M_TEMP);
	}

	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		nfsm_chain_cleanup(&nmrep);
		*mrepp = NULL;
	}
	return error;
}
3626 | ||
3627 | /* | |
3628 | * This is almost a mirror-image of the client side upcall. | |
3629 | * It passes and receives a token, but invokes gss_accept_sec_context. | |
3630 | * If it's the final call of the context setup, then gssd also returns | |
3631 | * the session key and the user's UID. | |
3632 | */ | |
3633 | static int | |
3634 | nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp) | |
3635 | { | |
3636 | kern_return_t kr; | |
3637 | mach_port_t mp; | |
3638 | int retry_cnt = 0; | |
3639 | gssd_byte_buffer octx = NULL; | |
3640 | uint32_t lucidlen = 0; | |
3641 | void *lucid_ctx_buffer; | |
3642 | uint32_t ret_flags; | |
3643 | vm_map_copy_t itoken = NULL; | |
3644 | gssd_byte_buffer otoken = NULL; | |
3645 | mach_msg_type_number_t otokenlen; | |
3646 | int error = 0; | |
3647 | char svcname[] = "nfs"; | |
3648 | ||
3649 | kr = host_get_gssd_port(host_priv_self(), &mp); | |
3650 | if (kr != KERN_SUCCESS) { | |
3651 | printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr); | |
3652 | goto out; | |
3653 | } | |
3654 | if (!IPC_PORT_VALID(mp)) { | |
3655 | printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n"); | |
3656 | goto out; | |
3657 | } | |
3658 | ||
3659 | if (cp->gss_svc_tokenlen > 0) { | |
3660 | nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken); | |
3661 | } | |
3662 | ||
3663 | retry: | |
3664 | printf("Calling mach_gss_accept_sec_context\n"); | |
3665 | kr = mach_gss_accept_sec_context( | |
3666 | mp, | |
3667 | (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen, | |
3668 | svcname, | |
3669 | 0, | |
3670 | &cp->gss_svc_context, | |
3671 | &cp->gss_svc_cred_handle, | |
3672 | &ret_flags, | |
3673 | &cp->gss_svc_uid, | |
3674 | cp->gss_svc_gids, | |
3675 | &cp->gss_svc_ngroups, | |
3676 | &octx, (mach_msg_type_number_t *) &lucidlen, | |
3677 | &otoken, &otokenlen, | |
3678 | &cp->gss_svc_major, | |
3679 | &cp->gss_svc_minor); | |
3680 | ||
3681 | printf("mach_gss_accept_sec_context returned %d\n", kr); | |
3682 | if (kr != KERN_SUCCESS) { | |
3683 | printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr); | |
3684 | if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 && | |
3685 | retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) { | |
3686 | if (cp->gss_svc_tokenlen > 0) { | |
3687 | nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken); | |
3688 | } | |
3689 | goto retry; | |
3690 | } | |
3691 | host_release_special_port(mp); | |
3692 | goto out; | |
3693 | } | |
3694 | ||
3695 | host_release_special_port(mp); | |
3696 | ||
3697 | if (lucidlen > 0) { | |
3698 | if (lucidlen > MAX_LUCIDLEN) { | |
3699 | printf("nfs_gss_svc_gssd_upcall: bad context length (%d)\n", lucidlen); | |
3700 | vm_map_copy_discard((vm_map_copy_t) octx); | |
3701 | vm_map_copy_discard((vm_map_copy_t) otoken); | |
3702 | goto out; | |
3703 | } | |
3704 | MALLOC(lucid_ctx_buffer, void *, lucidlen, M_TEMP, M_WAITOK | M_ZERO); | |
3705 | error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer); | |
3706 | if (error) { | |
3707 | vm_map_copy_discard((vm_map_copy_t) otoken); | |
3708 | FREE(lucid_ctx_buffer, M_TEMP); | |
3709 | goto out; | |
3710 | } | |
3711 | if (cp->gss_svc_ctx_id) { | |
3712 | gss_krb5_destroy_context(cp->gss_svc_ctx_id); | |
3713 | } | |
3714 | cp->gss_svc_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen); | |
3715 | if (cp->gss_svc_ctx_id == NULL) { | |
3716 | printf("Failed to make context from lucid_ctx_buffer\n"); | |
3717 | goto out; | |
3718 | } | |
3719 | } | |
3720 | ||
3721 | /* Free context token used as input */ | |
3722 | if (cp->gss_svc_token) { | |
3723 | FREE(cp->gss_svc_token, M_TEMP); | |
3724 | } | |
3725 | cp->gss_svc_token = NULL; | |
3726 | cp->gss_svc_tokenlen = 0; | |
3727 | ||
3728 | if (otokenlen > 0) { | |
3729 | /* Set context token to gss output token */ | |
3730 | MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK); | |
3731 | if (cp->gss_svc_token == NULL) { | |
3732 | printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen); | |
3733 | vm_map_copy_discard((vm_map_copy_t) otoken); | |
3734 | return ENOMEM; | |
3735 | } | |
3736 | error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token); | |
3737 | if (error) { | |
3738 | FREE(cp->gss_svc_token, M_TEMP); | |
3739 | cp->gss_svc_token = NULL; | |
3740 | return NFSERR_EAUTH; | |
3741 | } | |
3742 | cp->gss_svc_tokenlen = otokenlen; | |
3743 | } | |
3744 | ||
3745 | return 0; | |
3746 | ||
3747 | out: | |
3748 | FREE(cp->gss_svc_token, M_TEMP); | |
3749 | cp->gss_svc_tokenlen = 0; | |
3750 | cp->gss_svc_token = NULL; | |
3751 | ||
3752 | return NFSERR_EAUTH; | |
3753 | } | |
3754 | ||
3755 | /* | |
3756 | * Validate the sequence number in the credential as described | |
3757 | * in RFC 2203 Section 5.3.3.1 | |
3758 | * | |
3759 | * Here the window of valid sequence numbers is represented by | |
3760 | * a bitmap. As each sequence number is received, its bit is | |
3761 | * set in the bitmap. An invalid sequence number lies below | |
3762 | * the lower bound of the window, or is within the window but | |
3763 | * has its bit already set. | |
3764 | */ | |
static int
nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
{
	uint32_t *bits = cp->gss_svc_seqbits;   /* bitmap of seqnums seen within the window */
	uint32_t win = cp->gss_svc_seqwin;      /* window width, in sequence numbers */
	uint32_t i;

	lck_mtx_lock(&cp->gss_svc_mtx);

	/*
	 * If greater than the window upper bound,
	 * move the window up, and set the bit.
	 */
	if (seq > cp->gss_svc_seqmax) {
		if (seq - cp->gss_svc_seqmax > win) {
			/* Jumped past the whole window: no old bits can survive */
			bzero(bits, nfsm_rndup((win + 7) / 8));
		} else {
			/* Clear bits for the seqnums the window slides past */
			for (i = cp->gss_svc_seqmax + 1; i < seq; i++) {
				win_resetbit(bits, i % win);
			}
		}
		win_setbit(bits, seq % win);
		cp->gss_svc_seqmax = seq;
		lck_mtx_unlock(&cp->gss_svc_mtx);
		return 1;
	}

	/*
	 * Invalid if below the lower bound of the window
	 *
	 * NOTE(review): 'cp->gss_svc_seqmax - win' is unsigned and wraps when
	 * seqmax < win (early in a context's life); verify the intended
	 * behavior for young contexts against RFC 2203 section 5.3.3.1.
	 */
	if (seq <= cp->gss_svc_seqmax - win) {
		lck_mtx_unlock(&cp->gss_svc_mtx);
		return 0;
	}

	/*
	 * In the window, invalid if the bit is already set
	 */
	if (win_getbit(bits, seq % win)) {
		lck_mtx_unlock(&cp->gss_svc_mtx);
		return 0;
	}
	/* First sighting of this seqnum within the window: record and accept */
	win_setbit(bits, seq % win);
	lck_mtx_unlock(&cp->gss_svc_mtx);
	return 1;
}
3811 | ||
3812 | /* | |
3813 | * Drop a reference to a context | |
3814 | * | |
3815 | * Note that it's OK for the context to exist | |
3816 | * with a refcount of zero. The refcount isn't | |
3817 | * checked until we're about to reap an expired one. | |
3818 | */ | |
3819 | void | |
3820 | nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp) | |
3821 | { | |
3822 | lck_mtx_lock(&cp->gss_svc_mtx); | |
3823 | if (cp->gss_svc_refcnt > 0) { | |
3824 | cp->gss_svc_refcnt--; | |
3825 | } else { | |
3826 | printf("nfs_gss_ctx_deref: zero refcount\n"); | |
3827 | } | |
3828 | lck_mtx_unlock(&cp->gss_svc_mtx); | |
3829 | } | |
3830 | ||
3831 | /* | |
3832 | * Called at NFS server shutdown - destroy all contexts | |
3833 | */ | |
3834 | void | |
3835 | nfs_gss_svc_cleanup(void) | |
3836 | { | |
3837 | struct nfs_gss_svc_ctx_hashhead *head; | |
3838 | struct nfs_gss_svc_ctx *cp, *ncp; | |
3839 | int i; | |
3840 | ||
3841 | lck_mtx_lock(&nfs_gss_svc_ctx_mutex); | |
3842 | ||
3843 | /* | |
3844 | * Run through all the buckets | |
3845 | */ | |
3846 | for (i = 0; i < SVC_CTX_HASHSZ; i++) { | |
3847 | /* | |
3848 | * Remove and free all entries in the bucket | |
3849 | */ | |
3850 | head = &nfs_gss_svc_ctx_hashtbl[i]; | |
3851 | LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) { | |
3852 | LIST_REMOVE(cp, gss_svc_entries); | |
3853 | if (cp->gss_svc_seqbits) { | |
3854 | FREE(cp->gss_svc_seqbits, M_TEMP); | |
3855 | } | |
3856 | lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp); | |
3857 | FREE(cp, M_TEMP); | |
3858 | } | |
3859 | } | |
3860 | ||
3861 | lck_mtx_unlock(&nfs_gss_svc_ctx_mutex); | |
3862 | } | |
3863 | ||
3864 | #endif /* CONFIG_NFS_SERVER */ | |
3865 | ||
3866 | ||
3867 | /************* | |
3868 | * The following functions are used by both client and server. | |
3869 | */ | |
3870 | ||
3871 | /* | |
3872 | * Release a host special port that was obtained by host_get_special_port | |
3873 | * or one of its macros (host_get_gssd_port in this case). | |
3874 | * This really should be in a public kpi. | |
3875 | */ | |
3876 | ||
3877 | /* This should be in a public header if this routine is not */ | |
3878 | extern void ipc_port_release_send(ipc_port_t); | |
3879 | extern ipc_port_t ipc_port_copy_send(ipc_port_t); | |
3880 | ||
3881 | static void | |
3882 | host_release_special_port(mach_port_t mp) | |
3883 | { | |
3884 | if (IPC_PORT_VALID(mp)) { | |
3885 | ipc_port_release_send(mp); | |
3886 | } | |
3887 | } | |
3888 | ||
/*
 * Duplicate the send right on a host special port so the
 * caller owns its own reference to release later.
 */
static mach_port_t
host_copy_special_port(mach_port_t mp)
{
	return ipc_port_copy_send(mp);
}
3894 | ||
3895 | /* | |
3896 | * The token that is sent and received in the gssd upcall | |
3897 | * has unbounded variable length. Mach RPC does not pass | |
3898 | * the token in-line. Instead it uses page mapping to handle | |
3899 | * these parameters. This function allocates a VM buffer | |
3900 | * to hold the token for an upcall and copies the token | |
3901 | * (received from the client) into it. The VM buffer is | |
3902 | * marked with a src_destroy flag so that the upcall will | |
3903 | * automatically de-allocate the buffer when the upcall is | |
3904 | * complete. | |
3905 | */ | |
3906 | static void | |
3907 | nfs_gss_mach_alloc_buffer(u_char *buf, size_t buflen, vm_map_copy_t *addr) | |
3908 | { | |
3909 | kern_return_t kr; | |
3910 | vm_offset_t kmem_buf; | |
3911 | vm_size_t tbuflen; | |
3912 | ||
3913 | *addr = NULL; | |
3914 | if (buf == NULL || buflen == 0) { | |
3915 | return; | |
3916 | } | |
3917 | ||
3918 | tbuflen = vm_map_round_page(buflen, | |
3919 | vm_map_page_mask(ipc_kernel_map)); | |
3920 | ||
3921 | if (tbuflen < buflen) { | |
3922 | printf("nfs_gss_mach_alloc_buffer: vm_map_round_page failed\n"); | |
3923 | return; | |
3924 | } | |
3925 | ||
3926 | kr = vm_allocate_kernel(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_FILE); | |
3927 | if (kr != 0) { | |
3928 | printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n"); | |
3929 | return; | |
3930 | } | |
3931 | ||
3932 | kr = vm_map_wire_kernel(ipc_kernel_map, | |
3933 | vm_map_trunc_page(kmem_buf, | |
3934 | vm_map_page_mask(ipc_kernel_map)), | |
3935 | vm_map_round_page(kmem_buf + tbuflen, | |
3936 | vm_map_page_mask(ipc_kernel_map)), | |
3937 | VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_FILE, FALSE); | |
3938 | if (kr != 0) { | |
3939 | printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n"); | |
3940 | return; | |
3941 | } | |
3942 | ||
3943 | bcopy(buf, (void *) kmem_buf, buflen); | |
3944 | // Shouldn't need to bzero below since vm_allocate returns zeroed pages | |
3945 | // bzero(kmem_buf + buflen, tbuflen - buflen); | |
3946 | ||
3947 | kr = vm_map_unwire(ipc_kernel_map, | |
3948 | vm_map_trunc_page(kmem_buf, | |
3949 | vm_map_page_mask(ipc_kernel_map)), | |
3950 | vm_map_round_page(kmem_buf + tbuflen, | |
3951 | vm_map_page_mask(ipc_kernel_map)), | |
3952 | FALSE); | |
3953 | if (kr != 0) { | |
3954 | printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n"); | |
3955 | return; | |
3956 | } | |
3957 | ||
3958 | kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf, | |
3959 | (vm_map_size_t) buflen, TRUE, addr); | |
3960 | if (kr != 0) { | |
3961 | printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n"); | |
3962 | return; | |
3963 | } | |
3964 | } | |
3965 | ||
3966 | /* | |
3967 | * Here we handle a token received from the gssd via an upcall. | |
3968 | * The received token resides in an allocate VM buffer. | |
3969 | * We copy the token out of this buffer to a chunk of malloc'ed | |
3970 | * memory of the right size, then de-allocate the VM buffer. | |
3971 | */ | |
3972 | static int | |
3973 | nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out) | |
3974 | { | |
3975 | vm_map_offset_t map_data; | |
3976 | vm_offset_t data; | |
3977 | int error; | |
3978 | ||
3979 | error = vm_map_copyout(ipc_kernel_map, &map_data, in); | |
3980 | if (error) { | |
3981 | return error; | |
3982 | } | |
3983 | ||
3984 | data = CAST_DOWN(vm_offset_t, map_data); | |
3985 | bcopy((void *) data, out, len); | |
3986 | vm_deallocate(ipc_kernel_map, data, len); | |
3987 | ||
3988 | return 0; | |
3989 | } | |
3990 | ||
3991 | /* | |
3992 | * Return the number of bytes in an mbuf chain. | |
3993 | */ | |
3994 | static int | |
3995 | nfs_gss_mchain_length(mbuf_t mhead) | |
3996 | { | |
3997 | mbuf_t mb; | |
3998 | int len = 0; | |
3999 | ||
4000 | for (mb = mhead; mb; mb = mbuf_next(mb)) { | |
4001 | len += mbuf_len(mb); | |
4002 | } | |
4003 | ||
4004 | return len; | |
4005 | } | |
4006 | ||
4007 | /* | |
4008 | * Append an args or results mbuf chain to the header chain | |
4009 | */ | |
4010 | static int | |
4011 | nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc) | |
4012 | { | |
4013 | int error = 0; | |
4014 | mbuf_t mb, tail; | |
4015 | ||
4016 | /* Connect the mbuf chains */ | |
4017 | error = mbuf_setnext(nmc->nmc_mcur, mc); | |
4018 | if (error) { | |
4019 | return error; | |
4020 | } | |
4021 | ||
4022 | /* Find the last mbuf in the chain */ | |
4023 | tail = NULL; | |
4024 | for (mb = mc; mb; mb = mbuf_next(mb)) { | |
4025 | tail = mb; | |
4026 | } | |
4027 | ||
4028 | nmc->nmc_mcur = tail; | |
4029 | nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail); | |
4030 | nmc->nmc_left = mbuf_trailingspace(tail); | |
4031 | ||
4032 | return 0; | |
4033 | } | |
4034 | ||
4035 | #if CONFIG_NFS_SERVER /* Only used by CONFIG_NFS_SERVER */ | |
4036 | /* | |
4037 | * Convert an mbuf chain to an NFS mbuf chain | |
4038 | */ | |
4039 | static void | |
4040 | nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc) | |
4041 | { | |
4042 | mbuf_t mb, tail; | |
4043 | ||
4044 | /* Find the last mbuf in the chain */ | |
4045 | tail = NULL; | |
4046 | for (mb = mc; mb; mb = mbuf_next(mb)) { | |
4047 | tail = mb; | |
4048 | } | |
4049 | ||
4050 | nmc->nmc_mhead = mc; | |
4051 | nmc->nmc_mcur = tail; | |
4052 | nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail); | |
4053 | nmc->nmc_left = mbuf_trailingspace(tail); | |
4054 | nmc->nmc_flags = 0; | |
4055 | } | |
4056 | #endif /* CONFIG_NFS_SERVER */ | |
4057 | ||
4058 | ||
4059 | #if 0 | |
4060 | #define DISPLAYLEN 16 | |
4061 | #define MAXDISPLAYLEN 256 | |
4062 | ||
/*
 * Debug helper: print up to MAXDISPLAYLEN bytes of 'data' as hex,
 * DISPLAYLEN bytes per output line.  Compiled out by the #if 0 above.
 */
static void
hexdump(const char *msg, void *data, size_t len)
{
	size_t i, j;
	u_char *d = data;
	char *p, disbuf[3 * DISPLAYLEN + 1];    /* "xx " per byte + NUL */

	printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
	if (len > MAXDISPLAYLEN) {
		len = MAXDISPLAYLEN;
	}

	for (i = 0; i < len; i += DISPLAYLEN) {
		/* Format one row of up to DISPLAYLEN bytes into disbuf */
		for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) {
			snprintf(p, 4, "%02x ", d[i + j]);
		}
		printf("\t%s\n", disbuf);
	}
}
4082 | #endif | |
4083 | ||
4084 | #endif /* CONFIG_NFS */ |