[apple/xnu.git] / bsd / nfs / nfs_gss.c (xnu-1504.9.17)
1 /*
2 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*************
30 * These functions implement RPCSEC_GSS security for the NFS client and server.
31 * The code is specific to Kerberos v5 and to DES MAC MD5
32 * protection as described in RFCs 2203 and 2623.
33 *
34 * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
35 * It requires the client and server negotiate a secure connection as part of a
36 * security context. The context state is maintained in client and server structures.
37 * On the client side, each user of an NFS mount is assigned their own context,
38 * identified by UID, on their first use of the mount, and it persists until the
39 * unmount or until the context is renewed. Each user context has a corresponding
40 * server context which the server maintains until the client destroys it, or
41 * until the context expires.
42 *
43 * The client and server contexts are set up dynamically. When a user attempts
44 * to send an NFS request, if there is no context for the user, then one is
45 * set up via an exchange of NFS null procedure calls as described in RFC 2203.
46 * During this exchange, the client and server pass a security token that is
47 * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
48 * the user to the server (and vice-versa). The client and server also receive
49 * a unique session key that can be used to digitally sign the credentials and
50 * verifier or optionally to provide data integrity and/or privacy.
51 *
52 * Once the context is complete, the client and server enter a normal data
53 * exchange phase - beginning with the NFS request that prompted the context
54 * creation. During this phase, the client's RPC header contains an RPCSEC_GSS
55 * credential and verifier, and the server returns a verifier as well.
56 * For simple authentication, the verifier contains a signed checksum of the
57 * RPC header, including the credential. The server's verifier has a signed
58 * checksum of the current sequence number.
59 *
60 * Each client call contains a sequence number that nominally increases by one
61 * on each request. The sequence number is intended to prevent replay attacks.
62 * Since the protocol can be used over UDP, there is some allowance for
63 * out-of-sequence requests, so the server checks whether the sequence numbers
64 * are within a sequence "window". If a sequence number falls below the lower
65 * bound of the window, the server silently drops the request. This has some
66 * implications for retransmission. If a request needs to be retransmitted, the
67 * client must bump the sequence number even if the request XID is unchanged.
68 *
69 * When the NFS mount is unmounted, the client sends a "destroy" credential
70 * to delete the server's context for each user of the mount. Since it's
71 * possible for the client to crash or disconnect without sending the destroy
72 * message, the server has a thread that reaps contexts that have been idle
73 * too long.
74 */
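/*
 * For reference, a sketch of the RPCSEC_GSS credential that
 * nfs_gss_clnt_cred_put() below marshals into each request
 * (see RFC 2203); the field names here are descriptive only:
 *
 *	uint32	flavor		RPCSEC_GSS
 *	uint32	length		5 * NFSX_UNSIGNED + handle length
 *	uint32	version		RPCSEC_GSS_VERS_1
 *	uint32	gss_proc	INIT, CONTINUE_INIT, DATA or DESTROY
 *	uint32	seq_num		per-request sequence number
 *	uint32	service		none, integrity or privacy
 *	opaque	handle		context handle returned by the server
 *
 * The verifier that follows the credential is an opaque MIC token
 * computed over the RPC header up to and including the credential.
 */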
75
76 #include <stdint.h>
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/proc.h>
80 #include <sys/kauth.h>
81 #include <sys/kernel.h>
82 #include <sys/mount_internal.h>
83 #include <sys/vnode.h>
84 #include <sys/ubc.h>
85 #include <sys/malloc.h>
86 #include <sys/kpi_mbuf.h>
87
88 #include <kern/host.h>
89 #include <libkern/libkern.h>
90
91 #include <mach/task.h>
92 #include <mach/task_special_ports.h>
93 #include <mach/host_priv.h>
94 #include <mach/thread_act.h>
95 #include <mach/mig_errors.h>
96 #include <mach/vm_map.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_kern.h>
99 #include <gssd/gssd_mach.h>
100
101 #include <nfs/rpcv2.h>
102 #include <nfs/nfsproto.h>
103 #include <nfs/nfs.h>
104 #include <nfs/nfsnode.h>
105 #include <nfs/nfs_gss.h>
106 #include <nfs/nfsmount.h>
107 #include <nfs/xdr_subs.h>
108 #include <nfs/nfsm_subs.h>
109 #include <nfs/nfs_gss.h>
110
111 #include "nfs_gss_crypto.h"
112
113 #define NFS_GSS_MACH_MAX_RETRIES 3
114
115 typedef struct {
116 int type;
117 union {
118 MD5_DESCBC_CTX m_ctx;
119 HMAC_SHA1_DES3KD_CTX h_ctx;
120 };
121 } GSS_DIGEST_CTX;
122
123 #define MAX_DIGEST SHA_DIGEST_LENGTH
124 #ifdef NFS_KERNEL_DEBUG
125 #define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
126 (panic("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
127 #else
128 #define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
129 (printf("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
130 #endif
131
132 #if NFSSERVER
133 u_long nfs_gss_svc_ctx_hash;
134 struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
135 lck_mtx_t *nfs_gss_svc_ctx_mutex;
136 lck_grp_t *nfs_gss_svc_grp;
137 uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
138 #define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
139 #endif /* NFSSERVER */
140
141 #if NFSCLIENT
142 lck_grp_t *nfs_gss_clnt_grp;
143 int nfs_single_des;
144 #endif /* NFSCLIENT */
145
146 /*
147 * These octet strings are used to encode/decode ASN.1 tokens
148 * in the RPCSEC_GSS verifiers.
149 */
150 static u_char krb5_tokhead[] = { 0x60, 0x23 };
151 static u_char krb5_mech[] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
152 static u_char krb5_mic[] = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
153 static u_char krb5_mic3[] = { 0x01, 0x01, 0x04, 0x00, 0xff, 0xff, 0xff, 0xff };
154 static u_char krb5_wrap[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
155 static u_char krb5_wrap3[] = { 0x02, 0x01, 0x04, 0x00, 0x02, 0x00, 0xff, 0xff };
156 static u_char iv0[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; // DES MAC Initialization Vector
157
158 #define ALG_MIC(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_mic : krb5_mic3)
159 #define ALG_WRAP(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_wrap : krb5_wrap3)
160
161 /*
162 * The size of the Kerberos v5 ASN.1 token
163 * in the verifier.
164 *
165 * Note that the second octet of the krb5_tokhead (0x23) is a
166 * DER-encoded size field that has variable length. If the size
167 * is 128 bytes or greater, then it uses two bytes; three bytes
168 * if 256 or greater, four bytes if 65536 or greater, and so on.
169 * Since the MIC tokens are separate from the data, the size is always the same: 35 bytes (0x23).
170 * However, the wrap token is different. Its size field includes the
171 * size of the token + the encrypted data that follows. So the size
172 * field may be two, three or four bytes.
173 */
174 #define KRB5_SZ_TOKHEAD sizeof(krb5_tokhead)
175 #define KRB5_SZ_MECH sizeof(krb5_mech)
176 #define KRB5_SZ_ALG sizeof(krb5_mic) // 8 - same as krb5_wrap
177 #define KRB5_SZ_SEQ 8
178 #define KRB5_SZ_EXTRA 3 // a wrap token may be longer by up to this many octets
179 #define KRB5_SZ_TOKEN_NOSUM (KRB5_SZ_TOKHEAD + KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ)
180 #define KRB5_SZ_TOKEN(cksumlen) ((cksumlen) + KRB5_SZ_TOKEN_NOSUM)
181 #define KRB5_SZ_TOKMAX(cksumlen) (KRB5_SZ_TOKEN(cksumlen) + KRB5_SZ_EXTRA)
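/*
 * Worked example of the sizes above, assuming the single-DES MIC
 * checksum is 8 octets (DES MAC MD5) and the 3DES checksum is 20
 * octets (HMAC-SHA1):
 *
 *	KRB5_SZ_TOKEN_NOSUM	= 2 + 11 + 8 + 8	= 29 octets
 *	KRB5_SZ_TOKEN(8)	= 29 + 8		= 37 octets	(1DES MIC token)
 *	KRB5_SZ_TOKEN(20)	= 29 + 20		= 49 octets	(3DES MIC token)
 *	KRB5_SZ_TOKMAX(20)	= 49 + 3		= 52 octets	(largest wrap token)
 *
 * The 37-octet value is the checksum-token length that the integrity
 * reply parsing below expects.
 */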
182
183 #if NFSCLIENT
184 static int nfs_gss_clnt_ctx_find(struct nfsreq *);
185 static int nfs_gss_clnt_ctx_failover(struct nfsreq *);
186 static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
187 static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
188 static char *nfs_gss_clnt_svcname(struct nfsmount *);
189 static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *);
190 static void nfs_gss_clnt_ctx_remove(struct nfsmount *, struct nfs_gss_clnt_ctx *);
191 static int nfs_gss_clnt_ctx_delay(struct nfsreq *, int *);
192 #endif /* NFSCLIENT */
193
194 #if NFSSERVER
195 static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
196 static void nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
197 static void nfs_gss_svc_ctx_timer(void *, void *);
198 static int nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
199 static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
200 #endif /* NFSSERVER */
201
202 static void task_release_special_port(mach_port_t);
203 static mach_port_t task_copy_special_port(mach_port_t);
204 static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
205 static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
206 static int nfs_gss_token_get(gss_key_info *ki, u_char *, u_char *, int, uint32_t *, u_char *);
207 static int nfs_gss_token_put(gss_key_info *ki, u_char *, u_char *, int, int, u_char *);
208 static int nfs_gss_der_length_size(int);
209 static void nfs_gss_der_length_put(u_char **, int);
210 static int nfs_gss_der_length_get(u_char **);
211 static int nfs_gss_mchain_length(mbuf_t);
212 static int nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
213 static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
214 static void nfs_gss_cksum_mchain(gss_key_info *, mbuf_t, u_char *, int, int, u_char *);
215 static void nfs_gss_cksum_chain(gss_key_info *, struct nfsm_chain *, u_char *, int, int, u_char *);
216 static void nfs_gss_cksum_rep(gss_key_info *, uint32_t, u_char *);
217 static void nfs_gss_encrypt_mchain(gss_key_info *, mbuf_t, int, int, int);
218 static void nfs_gss_encrypt_chain(gss_key_info *, struct nfsm_chain *, int, int, int);
219
220 static void gss_digest_Init(GSS_DIGEST_CTX *, gss_key_info *);
221 static void gss_digest_Update(GSS_DIGEST_CTX *, void *, size_t);
222 static void gss_digest_Final(GSS_DIGEST_CTX *, void *);
223 static void gss_des_crypt(gss_key_info *, des_cblock *, des_cblock *,
224 int32_t, des_cblock *, des_cblock *, int, int);
225 static int gss_key_init(gss_key_info *, uint32_t);
226
227 #if NFSSERVER
228 thread_call_t nfs_gss_svc_ctx_timer_call;
229 int nfs_gss_timer_on = 0;
230 uint32_t nfs_gss_ctx_count = 0;
231 const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
232 #endif /* NFSSERVER */
233
234 /*
235 * Initialization when NFS starts
236 */
237 void
238 nfs_gss_init(void)
239 {
240 #if NFSCLIENT
241 nfs_gss_clnt_grp = lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL);
242 #endif /* NFSCLIENT */
243
244 #if NFSSERVER
245 nfs_gss_svc_grp = lck_grp_alloc_init("rpcsec_gss_svc", LCK_GRP_ATTR_NULL);
246
247 nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);
248 nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
249
250 nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
251 #endif /* NFSSERVER */
252 }
253
254 #if NFSCLIENT
255
256 /*
257 * Find the context for a particular user.
258 *
259 * If the context doesn't already exist
260 * then create a new context for this user.
261 *
262 * Note that the code allows superuser (uid == 0)
263 * to adopt the context of another user.
264 */
265 static int
266 nfs_gss_clnt_ctx_find(struct nfsreq *req)
267 {
268 struct nfsmount *nmp = req->r_nmp;
269 struct nfs_gss_clnt_ctx *cp;
270 uid_t uid = kauth_cred_getuid(req->r_cred);
271 int error = 0;
272 int retrycnt = 0;
273
274 lck_mtx_lock(&nmp->nm_lock);
275 TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
276 if (cp->gss_clnt_uid == uid) {
277 if (cp->gss_clnt_flags & GSS_CTX_INVAL)
278 continue;
279 lck_mtx_unlock(&nmp->nm_lock);
280 nfs_gss_clnt_ctx_ref(req, cp);
281 return (0);
282 }
283 }
284
285 if (uid == 0) {
286 /*
287 * If superuser is trying to get access, then co-opt
288 * the first valid context in the list.
289 * XXX Ultimately, we need to allow superuser to
290 * go ahead and attempt to set up its own context
291 * in case one is set up for it.
292 */
293 TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
294 if (!(cp->gss_clnt_flags & GSS_CTX_INVAL)) {
295 lck_mtx_unlock(&nmp->nm_lock);
296 nfs_gss_clnt_ctx_ref(req, cp);
297 return (0);
298 }
299 }
300 }
301
302 /*
303 * Not found - create a new context
304 */
305
306 /*
307 * If the thread is async, then it cannot get
308 * kerberos creds and set up a proper context.
309 * If no sec= mount option is given, attempt
310 * to failover to sec=sys.
311 */
312 if (req->r_thread == NULL) {
313 if (nmp->nm_flag & NFSMNT_SECSYSOK) {
314 error = nfs_gss_clnt_ctx_failover(req);
315 } else {
316 printf("nfs_gss_clnt_ctx_find: no context for async\n");
317 error = NFSERR_EAUTH;
318 }
319
320 lck_mtx_unlock(&nmp->nm_lock);
321 return (error);
322 }
323
324 MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
325 if (cp == NULL) {
326 lck_mtx_unlock(&nmp->nm_lock);
327 return (ENOMEM);
328 }
329
330 cp->gss_clnt_uid = uid;
331 cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
332 cp->gss_clnt_thread = current_thread();
333 nfs_gss_clnt_ctx_ref(req, cp);
334 TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
335 lck_mtx_unlock(&nmp->nm_lock);
336
337 retry:
338 error = nfs_gss_clnt_ctx_init(req, cp);
339 if (error == ENEEDAUTH) {
340 error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
341 if (!error)
342 goto retry;
343
344 /* Giving up on this context */
345 cp->gss_clnt_flags |= GSS_CTX_INVAL;
346
347 /*
348 * Wake any threads waiting to use the context
349 */
350 lck_mtx_lock(cp->gss_clnt_mtx);
351 cp->gss_clnt_thread = NULL;
352 if (cp->gss_clnt_flags & GSS_NEEDCTX) {
353 cp->gss_clnt_flags &= ~GSS_NEEDCTX;
354 wakeup(cp);
355 }
356 lck_mtx_unlock(cp->gss_clnt_mtx);
357
358 }
359
360 if (error)
361 nfs_gss_clnt_ctx_unref(req);
362
363 /*
364 * If we failed to set up a Kerberos context for this
365 * user and no sec= mount option was given, but the
366 * server indicated that it could support AUTH_SYS, then set
367 * up a dummy context that allows this user to attempt
368 * sec=sys calls.
369 */
370 if (error && (nmp->nm_flag & NFSMNT_SECSYSOK) &&
371 (error != ENXIO) && (error != ETIMEDOUT)) {
372 lck_mtx_lock(&nmp->nm_lock);
373 error = nfs_gss_clnt_ctx_failover(req);
374 lck_mtx_unlock(&nmp->nm_lock);
375 }
376
377 return (error);
378 }
379
380 /*
381 * Set up a dummy context to allow the use of sec=sys
382 * for this user, if the server allows sec=sys.
383 * The context is valid for GSS_CLNT_SYS_VALID seconds,
384 * so that the user will periodically attempt to fail back
385 * and get a real credential.
386 *
387 * Assumes context list (nm_lock) is locked
388 */
389 static int
390 nfs_gss_clnt_ctx_failover(struct nfsreq *req)
391 {
392 struct nfsmount *nmp = req->r_nmp;
393 struct nfs_gss_clnt_ctx *cp;
394 uid_t uid = kauth_cred_getuid(req->r_cred);
395 struct timeval now;
396
397 MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
398 if (cp == NULL)
399 return (ENOMEM);
400
401 cp->gss_clnt_service = RPCSEC_GSS_SVC_SYS;
402 cp->gss_clnt_uid = uid;
403 cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
404 microuptime(&now);
405 cp->gss_clnt_ctime = now.tv_sec; // time stamp
406 nfs_gss_clnt_ctx_ref(req, cp);
407 TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
408
409 return (0);
410 }
411
412 /*
413 * Inserts an RPCSEC_GSS credential into an RPC header.
414 * After the credential is inserted, the code continues
415 * to build the verifier which contains a signed checksum
416 * of the RPC header.
417 */
418 int
419 nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args)
420 {
421 struct nfs_gss_clnt_ctx *cp;
422 uint32_t seqnum = 0;
423 int error = 0;
424 int slpflag, recordmark = 0;
425 int start, len, offset = 0;
426 int pad, toklen;
427 struct nfsm_chain nmc_tmp;
428 struct gss_seq *gsp;
429 u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
430 u_char cksum[MAX_DIGEST];
431 struct timeval now;
432 gss_key_info *ki;
433
434 slpflag = (PZERO-1);
435 if (req->r_nmp) {
436 slpflag |= ((req->r_nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0;
437 recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);
438 }
439 retry:
440 if (req->r_gss_ctx == NULL) {
441 /*
442 * Find the context for this user.
443 * If no context is found, one will
444 * be created.
445 */
446 error = nfs_gss_clnt_ctx_find(req);
447 if (error)
448 return (error);
449 }
450 cp = req->r_gss_ctx;
451
452 /*
453 * If it's a dummy context for a user that's using
454 * a fallback to sec=sys, then just return an error
455 * so rpchead can encode an RPCAUTH_UNIX cred.
456 */
457 if (cp->gss_clnt_service == RPCSEC_GSS_SVC_SYS) {
458 /*
459 * The dummy context is valid for just
460 * GSS_CLNT_SYS_VALID seconds. If the context
461 * is older than this, mark it invalid and try
462 * again to get a real one.
463 */
464 lck_mtx_lock(cp->gss_clnt_mtx);
465 microuptime(&now);
466 if (now.tv_sec > cp->gss_clnt_ctime + GSS_CLNT_SYS_VALID) {
467 cp->gss_clnt_flags |= GSS_CTX_INVAL;
468 lck_mtx_unlock(cp->gss_clnt_mtx);
469 nfs_gss_clnt_ctx_unref(req);
470 goto retry;
471 }
472 lck_mtx_unlock(cp->gss_clnt_mtx);
473 return (ENEEDAUTH);
474 }
475
476 /*
477 * If the context thread isn't null, then the context isn't
478 * yet complete and is for the exclusive use of the thread
479 * doing the context setup. Wait until the context thread
480 * is null.
481 */
482 lck_mtx_lock(cp->gss_clnt_mtx);
483 if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
484 cp->gss_clnt_flags |= GSS_NEEDCTX;
485 msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
486 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
487 return (error);
488 nfs_gss_clnt_ctx_unref(req);
489 goto retry;
490 }
491 lck_mtx_unlock(cp->gss_clnt_mtx);
492
493 ki = &cp->gss_clnt_kinfo;
494 if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
495 /*
496 * Get a sequence number for this request.
497 * Check whether the oldest request in the window is complete.
498 * If it's still pending, then wait until it's done before
499 * we allocate a new sequence number and allow this request
500 * to proceed.
501 */
502 lck_mtx_lock(cp->gss_clnt_mtx);
503 while (win_getbit(cp->gss_clnt_seqbits,
504 ((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
505 cp->gss_clnt_flags |= GSS_NEEDSEQ;
506 msleep(cp, cp->gss_clnt_mtx, slpflag, "seqwin", NULL);
507 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
508 lck_mtx_unlock(cp->gss_clnt_mtx);
509 return (error);
510 }
511 if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
512 /* Renewed while we were waiting */
513 lck_mtx_unlock(cp->gss_clnt_mtx);
514 nfs_gss_clnt_ctx_unref(req);
515 goto retry;
516 }
517 }
518 seqnum = ++cp->gss_clnt_seqnum;
519 win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin);
520 lck_mtx_unlock(cp->gss_clnt_mtx);
521
522 MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK|M_ZERO);
523 if (gsp == NULL)
524 return (ENOMEM);
525 gsp->gss_seqnum = seqnum;
526 SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext);
527 }
528
529 /* Insert the credential */
530 nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
531 nfsm_chain_add_32(error, nmc, 5 * NFSX_UNSIGNED + cp->gss_clnt_handle_len);
532 nfsm_chain_add_32(error, nmc, RPCSEC_GSS_VERS_1);
533 nfsm_chain_add_32(error, nmc, cp->gss_clnt_proc);
534 nfsm_chain_add_32(error, nmc, seqnum);
535 nfsm_chain_add_32(error, nmc, cp->gss_clnt_service);
536 nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len);
537 if (cp->gss_clnt_handle_len > 0) {
538 if (cp->gss_clnt_handle == NULL)
539 return (EBADRPC);
540 nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);
541 }
542 if (error)
543 return(error);
544 /*
545 * Now add the verifier
546 */
547 if (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
548 cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) {
549 /*
550 * If the context is still being created
551 * then use a null verifier.
552 */
553 nfsm_chain_add_32(error, nmc, RPCAUTH_NULL); // flavor
554 nfsm_chain_add_32(error, nmc, 0); // length
555 nfsm_chain_build_done(error, nmc);
556 if (!error)
557 nfs_gss_append_chain(nmc, args);
558 return (error);
559 }
560
561 offset = recordmark ? NFSX_UNSIGNED : 0; // record mark
562 nfsm_chain_build_done(error, nmc);
563 nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), offset, 0, cksum);
564
565 toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
566 nfsm_chain_add_32(error, nmc, RPCSEC_GSS); // flavor
567 nfsm_chain_add_32(error, nmc, toklen); // length
568 nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
569 nfsm_chain_build_done(error, nmc);
570 if (error)
571 return (error);
572
573 /*
574 * Now we may have to compute integrity or encrypt the call args
575 * per RFC 2203 Section 5.3.2
576 */
577 switch (cp->gss_clnt_service) {
578 case RPCSEC_GSS_SVC_NONE:
579 nfs_gss_append_chain(nmc, args);
580 break;
581 case RPCSEC_GSS_SVC_INTEGRITY:
582 len = nfs_gss_mchain_length(args); // Find args length
583 req->r_gss_arglen = len; // Stash the args len
584 len += NFSX_UNSIGNED; // Add seqnum length
585 nfsm_chain_add_32(error, nmc, len); // and insert it
586 start = nfsm_chain_offset(nmc);
587 nfsm_chain_add_32(error, nmc, seqnum); // Insert seqnum
588 req->r_gss_argoff = nfsm_chain_offset(nmc); // Offset to args
589 nfsm_chain_build_done(error, nmc);
590 if (error)
591 return (error);
592 nfs_gss_append_chain(nmc, args); // Append the args mbufs
593
594 /* Now compute a checksum over the seqnum + args */
595 nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, len, cksum);
596
597 /* Insert it into a token and append to the request */
598 toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
599 nfsm_chain_finish_mbuf(error, nmc); // force checksum into new mbuf
600 nfsm_chain_add_32(error, nmc, toklen);
601 nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
602 nfsm_chain_build_done(error, nmc);
603 break;
604 case RPCSEC_GSS_SVC_PRIVACY:
605 /* Prepend a new mbuf with the confounder & sequence number */
606 nfsm_chain_build_alloc_init(error, &nmc_tmp, 3 * NFSX_UNSIGNED);
607 nfsm_chain_add_32(error, &nmc_tmp, random()); // confounder bytes 1-4
608 nfsm_chain_add_32(error, &nmc_tmp, random()); // confounder bytes 5-8
609 nfsm_chain_add_32(error, &nmc_tmp, seqnum);
610 nfsm_chain_build_done(error, &nmc_tmp);
611 if (error)
612 return (error);
613 nfs_gss_append_chain(&nmc_tmp, args); // Append the args mbufs
614
615 len = nfs_gss_mchain_length(args); // Find args length
616 len += 3 * NFSX_UNSIGNED; // add confounder & seqnum
617 req->r_gss_arglen = len; // Stash length
618
619 /*
620 * Append a pad trailer - per RFC 1964 section 1.2.2.3
621 * Since XDR data is always 32-bit aligned, it
622 * needs to be padded either by 4 bytes or 8 bytes.
623 */
624 nfsm_chain_finish_mbuf(error, &nmc_tmp); // force padding into new mbuf
625 if (len % 8 > 0) {
626 nfsm_chain_add_32(error, &nmc_tmp, 0x04040404);
627 len += NFSX_UNSIGNED;
628 } else {
629 nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
630 nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
631 len += 2 * NFSX_UNSIGNED;
632 }
633 nfsm_chain_build_done(error, &nmc_tmp);
634
635 /* Now compute a checksum over the confounder + seqnum + args */
636 nfs_gss_cksum_chain(ki, &nmc_tmp, ALG_WRAP(ki), 0, len, cksum);
637
638 /* Insert it into a token */
639 toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 1, len, cksum);
640 nfsm_chain_add_32(error, nmc, toklen + len); // token + args length
641 nfsm_chain_add_opaque_nopad(error, nmc, tokbuf, toklen);
642 req->r_gss_argoff = nfsm_chain_offset(nmc); // Stash offset
643 nfsm_chain_build_done(error, nmc);
644 if (error)
645 return (error);
646 nfs_gss_append_chain(nmc, nmc_tmp.nmc_mhead); // Append the args mbufs
647
648 /* Finally, encrypt the args */
649 nfs_gss_encrypt_chain(ki, &nmc_tmp, 0, len, DES_ENCRYPT);
650
651 /* Add null XDR pad if the ASN.1 token misaligned the data */
652 pad = nfsm_pad(toklen + len);
653 if (pad > 0) {
654 nfsm_chain_add_opaque_nopad(error, nmc, iv0, pad);
655 nfsm_chain_build_done(error, nmc);
656 }
657 break;
658 }
659
660 return (error);
661 }
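/*
 * Illustrative sketch (not compiled): how the sequence window check in
 * nfs_gss_clnt_cred_put() above works.  Each in-flight request owns the
 * bitmap slot (seqnum % seqwin); before a new sequence number is taken,
 * the slot that the oldest possible outstanding request would occupy
 * must already be clear.  win_getbit() is assumed to be the bit-array
 * helper declared in nfs_gss.h.
 */
#if 0
static int
gss_seq_window_is_full(uint32_t *seqbits, uint32_t seqnum, uint32_t seqwin)
{
	/* slot that the oldest outstanding request would occupy */
	uint32_t oldest = ((seqnum - seqwin) + 1) % seqwin;

	return (win_getbit(seqbits, oldest));	/* nonzero => caller must wait */
}
#endif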
662
663 /*
664 * When receiving a reply, the client checks the verifier
665 * returned by the server. Check that the verifier is the
666 * correct type, then extract the sequence number checksum
667 * from the token in the credential and compare it with a
668 * computed checksum of the sequence number in the request
669 * that was sent.
670 */
671 int
672 nfs_gss_clnt_verf_get(
673 struct nfsreq *req,
674 struct nfsm_chain *nmc,
675 uint32_t verftype,
676 uint32_t verflen,
677 uint32_t *accepted_statusp)
678 {
679 u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
680 u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
681 uint32_t seqnum = 0;
682 struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
683 struct nfsm_chain nmc_tmp;
684 struct gss_seq *gsp;
685 uint32_t reslen, start, cksumlen, toklen;
686 int error = 0;
687 gss_key_info *ki = &cp->gss_clnt_kinfo;
688
689 reslen = cksumlen = 0;
690 *accepted_statusp = 0;
691
692 if (cp == NULL)
693 return (NFSERR_EAUTH);
694 /*
695 * If it's not an RPCSEC_GSS verifier, then it has to
696 * be a null verifier that resulted from either
697 * a CONTINUE_NEEDED reply during context setup or
698 * from the reply to an AUTH_UNIX call from a dummy
699 * context that resulted from a fallback to sec=sys.
700 */
701 if (verftype != RPCSEC_GSS) {
702 if (verftype != RPCAUTH_NULL)
703 return (NFSERR_EAUTH);
704 if (cp->gss_clnt_flags & GSS_CTX_COMPLETE &&
705 cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS)
706 return (NFSERR_EAUTH);
707 if (verflen > 0)
708 nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
709 nfsm_chain_get_32(error, nmc, *accepted_statusp);
710 return (error);
711 }
712
713 /*
714 * If we received an RPCSEC_GSS verifier but the
715 * context isn't yet complete, then it must be
716 * the context complete message from the server.
717 * The verifier will contain an encrypted checksum
718 * of the window but we don't have the session key
719 * yet so we can't decrypt it. Stash the verifier
720 * and check it later in nfs_gss_clnt_ctx_init() when
721 * the context is complete.
722 */
723 if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
724 MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK|M_ZERO);
725 if (cp->gss_clnt_verf == NULL)
726 return (ENOMEM);
727 nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf);
728 nfsm_chain_get_32(error, nmc, *accepted_statusp);
729 return (error);
730 }
731
732 if (verflen != KRB5_SZ_TOKEN(ki->hash_len))
733 return (NFSERR_EAUTH);
734
735 /*
736 * Get the 8 octet sequence number
737 * checksum out of the verifier token.
738 */
739 nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
740 if (error)
741 goto nfsmout;
742 error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum1);
743 if (error)
744 goto nfsmout;
745
746 /*
747 * Search the request sequence numbers for this reply, starting
748 * with the most recent, looking for a checksum that matches
749 * the one in the verifier returned by the server.
750 */
751 SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
752 nfs_gss_cksum_rep(ki, gsp->gss_seqnum, cksum2);
753 if (bcmp(cksum1, cksum2, HASHLEN(ki)) == 0)
754 break;
755 }
756 if (gsp == NULL)
757 return (NFSERR_EAUTH);
758
759 /*
760 * Get the RPC accepted status
761 */
762 nfsm_chain_get_32(error, nmc, *accepted_statusp);
763 if (*accepted_statusp != RPC_SUCCESS)
764 return (0);
765
766 /*
767 * Now we may have to check integrity or decrypt the results
768 * per RFC 2203 Section 5.3.2
769 */
770 switch (cp->gss_clnt_service) {
771 case RPCSEC_GSS_SVC_NONE:
772 /* nothing to do */
773 break;
774 case RPCSEC_GSS_SVC_INTEGRITY:
775 /*
776 * Here's what we expect in the integrity results:
777 *
778 * - length of seq num + results (4 bytes)
779 * - sequence number (4 bytes)
780 * - results (variable bytes)
781 * - length of checksum token (37)
782 * - checksum of seqnum + results (37 bytes)
783 */
784 nfsm_chain_get_32(error, nmc, reslen); // length of results
785 if (reslen > NFS_MAXPACKET) {
786 error = EBADRPC;
787 goto nfsmout;
788 }
789
790 /* Compute a checksum over the sequence number + results */
791 start = nfsm_chain_offset(nmc);
792 nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, reslen, cksum1);
793
794 /*
795 * Get the sequence number prepended to the results
796 * and compare it against the list in the request.
797 */
798 nfsm_chain_get_32(error, nmc, seqnum);
799 SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
800 if (seqnum == gsp->gss_seqnum)
801 break;
802 }
803 if (gsp == NULL) {
804 error = EBADRPC;
805 goto nfsmout;
806 }
807
808 /*
809 * Advance to the end of the results and
810 * fetch the checksum computed by the server.
811 */
812 nmc_tmp = *nmc;
813 reslen -= NFSX_UNSIGNED; // already skipped seqnum
814 nfsm_chain_adv(error, &nmc_tmp, reslen); // skip over the results
815 nfsm_chain_get_32(error, &nmc_tmp, cksumlen); // length of checksum
816 if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
817 error = EBADRPC;
818 goto nfsmout;
819 }
820 nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
821 if (error)
822 goto nfsmout;
823 error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum2);
824 if (error)
825 goto nfsmout;
826
827 /* Verify that the checksums are the same */
828 if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
829 error = EBADRPC;
830 goto nfsmout;
831 }
832 break;
833 case RPCSEC_GSS_SVC_PRIVACY:
834 /*
835 * Here's what we expect in the privacy results:
836 *
837 * - length of confounder + seq num + token + results
838 * - wrap token (37-40 bytes)
839 * - confounder (8 bytes)
840 * - sequence number (4 bytes)
841 * - results (encrypted)
842 */
843 nfsm_chain_get_32(error, nmc, reslen); // length of results
844 if (reslen > NFS_MAXPACKET) {
845 error = EBADRPC;
846 goto nfsmout;
847 }
848
849 /* Get the token that prepends the encrypted results */
850 nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
851 if (error)
852 goto nfsmout;
853 error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 0,
854 &toklen, cksum1);
855 if (error)
856 goto nfsmout;
857 nfsm_chain_reverse(nmc, nfsm_pad(toklen));
858 reslen -= toklen; // size of confounder + seqnum + results
859
860 /* decrypt the confounder + sequence number + results */
861 start = nfsm_chain_offset(nmc);
862 nfs_gss_encrypt_chain(ki, nmc, start, reslen, DES_DECRYPT);
863
864 /* Compute a checksum over the confounder + sequence number + results */
865 nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, reslen, cksum2);
866
867 /* Verify that the checksums are the same */
868 if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
869 error = EBADRPC;
870 goto nfsmout;
871 }
872
873 nfsm_chain_adv(error, nmc, 8); // skip over the confounder
874
875 /*
876 * Get the sequence number prepended to the results
877 * and compare it against the list in the request.
878 */
879 nfsm_chain_get_32(error, nmc, seqnum);
880 SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
881 if (seqnum == gsp->gss_seqnum)
882 break;
883 }
884 if (gsp == NULL) {
885 error = EBADRPC;
886 goto nfsmout;
887 }
888
889 break;
890 }
891 nfsmout:
892 return (error);
893 }
894
895 /*
896 * An RPCSEC_GSS request with no integrity or privacy consists
897 * of just the header mbufs followed by the arg mbufs.
898 *
899 * However, integrity and privacy both append trailer mbufs to the args,
900 * which means we have to do some work to restore the arg mbuf
901 * chain to its previous state in case we need to retransmit.
902 *
903 * The location and length of the args is marked by two fields
904 * in the request structure: r_gss_argoff and r_gss_arglen,
905 * which are stashed when the NFS request is built.
906 */
907 int
908 nfs_gss_clnt_args_restore(struct nfsreq *req)
909 {
910 struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
911 struct nfsm_chain mchain, *nmc = &mchain;
912 int len, error = 0;
913
914 if (cp == NULL)
915 return (NFSERR_EAUTH);
916
917 if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0)
918 return (ENEEDAUTH);
919
920 nfsm_chain_dissect_init(error, nmc, req->r_mhead); // start at RPC header
921 nfsm_chain_adv(error, nmc, req->r_gss_argoff); // advance to args
922 if (error)
923 return (error);
924
925 switch (cp->gss_clnt_service) {
926 case RPCSEC_GSS_SVC_NONE:
927 /* nothing to do */
928 break;
929 case RPCSEC_GSS_SVC_INTEGRITY:
930 /*
931 * All we have to do here is remove the appended checksum mbufs.
932 * We know that the checksum starts in a new mbuf beyond the end
933 * of the args.
934 */
935 nfsm_chain_adv(error, nmc, req->r_gss_arglen); // adv to last args mbuf
936 if (error)
937 return (error);
938
939 mbuf_freem(mbuf_next(nmc->nmc_mcur)); // free the cksum mbuf
940 error = mbuf_setnext(nmc->nmc_mcur, NULL);
941 break;
942 case RPCSEC_GSS_SVC_PRIVACY:
943 /*
944 * The args are encrypted along with a prepended confounder and seqnum.
945 * First we decrypt the confounder, seqnum and args, then skip to the
946 * final mbuf of the args.
947 * The arglen includes 8 bytes of confounder and 4 bytes of seqnum.
948 * Finally, we remove between 4 and 8 bytes of encryption padding
949 * as well as any alignment padding in the trailing mbuf.
950 */
951 len = req->r_gss_arglen;
952 len += len % 8 > 0 ? 4 : 8; // add DES padding length
953 nfs_gss_encrypt_chain(&cp->gss_clnt_kinfo, nmc,
954 req->r_gss_argoff, len, DES_DECRYPT);
955 nfsm_chain_adv(error, nmc, req->r_gss_arglen);
956 if (error)
957 return (error);
958 mbuf_freem(mbuf_next(nmc->nmc_mcur)); // free the pad mbuf
959 error = mbuf_setnext(nmc->nmc_mcur, NULL);
960 break;
961 }
962
963 return (error);
964 }
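/*
 * Worked example of the RFC 1964 padding that the privacy path adds in
 * nfs_gss_clnt_cred_put() and strips again above.  XDR keeps the
 * plaintext 4-byte aligned, so the pad that brings the confounder +
 * seqnum + args up to a DES block boundary is either 4 or 8 octets,
 * each pad octet holding the pad length itself (the 0x04... or 0x08...
 * values seen above).
 */
#if 0
static int
gss_wrap_pad_len(int len)		/* len = confounder + seqnum + args */
{
	/* len % 8 is either 4 (pad with 0x04040404) or 0 (pad with eight 0x08 octets) */
	return ((len % 8) > 0 ? 4 : 8);
}
#endif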
965
966 /*
967 * This function sets up a new context on the client.
968 * Context setup alternates upcalls to the gssd with NFS nullproc calls
969 * to the server. Each of these calls exchanges an opaque token, obtained
970 * via the gssd's calls into the GSS-API on either the client or the server.
971 * This cycle of calls ends when the client's upcall to the gssd and the
972 * server's response both return GSS_S_COMPLETE. At this point, the client
973 * should have its session key and a handle that it can use to refer to its
974 * new context on the server.
975 */
976 static int
977 nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
978 {
979 struct nfsmount *nmp = req->r_nmp;
980 int client_complete = 0;
981 int server_complete = 0;
982 u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
983 int error = 0;
984 struct timeval now;
985 gss_key_info *ki = &cp->gss_clnt_kinfo;
986
987 /* Initialize a new client context */
988
989 cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp);
990 if (cp->gss_clnt_svcname == NULL) {
991 error = NFSERR_EAUTH;
992 goto nfsmout;
993 }
994
995 cp->gss_clnt_proc = RPCSEC_GSS_INIT;
996
997 cp->gss_clnt_service =
998 nmp->nm_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE :
999 nmp->nm_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
1000 nmp->nm_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;
1001
1002 cp->gss_clnt_gssd_flags = (nfs_single_des ? GSSD_NFS_1DES : 0);
1003 /*
1004 * Now loop around alternating gss_init_sec_context and
1005 * gss_accept_sec_context upcalls to the gssd on the client
1006 * and server side until the context is complete - or fails.
1007 */
1008 for (;;) {
1009
1010 retry:
1011 /* Upcall to the gss_init_sec_context in the gssd */
1012 error = nfs_gss_clnt_gssd_upcall(req, cp);
1013 if (error)
1014 goto nfsmout;
1015
1016 if (cp->gss_clnt_major == GSS_S_COMPLETE) {
1017 client_complete = 1;
1018 if (server_complete)
1019 break;
1020 } else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
1021 error = NFSERR_EAUTH;
1022 goto nfsmout;
1023 }
1024
1025 /*
1026 * Pass the token to the server.
1027 */
1028 error = nfs_gss_clnt_ctx_callserver(req, cp);
1029 if (error) {
1030 if (cp->gss_clnt_proc == RPCSEC_GSS_INIT &&
1031 (cp->gss_clnt_gssd_flags & (GSSD_RESTART | GSSD_NFS_1DES)) == 0) {
1032 cp->gss_clnt_gssd_flags = (GSSD_RESTART | GSSD_NFS_1DES);
1033 if (cp->gss_clnt_token)
1034 FREE(cp->gss_clnt_token, M_TEMP);
1035 cp->gss_clnt_token = NULL;
1036 cp->gss_clnt_tokenlen = 0;
1037 goto retry;
1038 }
1039 // Reset flags, if error = ENEEDAUTH we will try 3des again
1040 cp->gss_clnt_gssd_flags = 0;
1041 goto nfsmout;
1042 }
1043 if (cp->gss_clnt_major == GSS_S_COMPLETE) {
1044 server_complete = 1;
1045 if (client_complete)
1046 break;
1047 } else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
1048 error = NFSERR_EAUTH;
1049 goto nfsmout;
1050 }
1051
1052 cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;
1053 }
1054
1055 /*
1056 * The context is apparently established successfully
1057 */
1058 cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
1059 cp->gss_clnt_proc = RPCSEC_GSS_DATA;
1060 microuptime(&now);
1061 cp->gss_clnt_ctime = now.tv_sec; // time stamp
1062
1063
1064 /*
1065 * Compute checksum of the server's window
1066 */
1067 nfs_gss_cksum_rep(ki, cp->gss_clnt_seqwin, cksum1);
1068
1069 /*
1070 * and see if it matches the one in the
1071 * verifier the server returned.
1072 */
1073 error = nfs_gss_token_get(ki, ALG_MIC(ki), cp->gss_clnt_verf, 0,
1074 NULL, cksum2);
1075 FREE(cp->gss_clnt_verf, M_TEMP);
1076 cp->gss_clnt_verf = NULL;
1077
1078 if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
1079 error = NFSERR_EAUTH;
1080 goto nfsmout;
1081 }
1082
1083 /*
1084 * Set an initial sequence number somewhat randomized.
1085 * Start small so we don't overflow GSS_MAXSEQ too quickly.
1086 * Add the size of the sequence window so seqbits arithmetic
1087 * doesn't go negative.
1088 */
1089 cp->gss_clnt_seqnum = (random() & 0xffff) + cp->gss_clnt_seqwin;
1090
1091 /*
1092 * Allocate a bitmap to keep track of which requests
1093 * are pending within the sequence number window.
1094 */
1095 MALLOC(cp->gss_clnt_seqbits, uint32_t *,
1096 nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
1097 if (cp->gss_clnt_seqbits == NULL)
1098 error = NFSERR_EAUTH;
1099 nfsmout:
1100 /*
1101 * If the error is ENEEDAUTH we're not done, so no need
1102 * to wake up other threads again. This thread will retry in
1103 * the find or renew routines.
1104 */
1105 if (error == ENEEDAUTH)
1106 return (error);
1107
1108 /*
1109 * If there's an error, just mark it as invalid.
1110 * It will be removed when the reference count
1111 * drops to zero.
1112 */
1113 if (error)
1114 cp->gss_clnt_flags |= GSS_CTX_INVAL;
1115
1116 /*
1117 * Wake any threads waiting to use the context
1118 */
1119 lck_mtx_lock(cp->gss_clnt_mtx);
1120 cp->gss_clnt_thread = NULL;
1121 if (cp->gss_clnt_flags & GSS_NEEDCTX) {
1122 cp->gss_clnt_flags &= ~GSS_NEEDCTX;
1123 wakeup(cp);
1124 }
1125 lck_mtx_unlock(cp->gss_clnt_mtx);
1126
1127 return (error);
1128 }
1129
1130 /*
1131 * Call the NFS server using a null procedure for context setup.
1132 * Even though it's a null procedure and nominally has no arguments,
1133 * RFC 2203 requires that the GSS-API token be passed as an argument
1134 * and received as a reply.
1135 */
1136 static int
1137 nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
1138 {
1139 struct nfsm_chain nmreq, nmrep;
1140 int error = 0, status;
1141 int sz;
1142
1143 if (!req->r_nmp)
1144 return (ENXIO);
1145 nfsm_chain_null(&nmreq);
1146 nfsm_chain_null(&nmrep);
1147 sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen);
1148 nfsm_chain_build_alloc_init(error, &nmreq, sz);
1149 nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen);
1150 if (cp->gss_clnt_tokenlen > 0)
1151 nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
1152 nfsm_chain_build_done(error, &nmreq);
1153 if (error)
1154 goto nfsmout;
1155
1156 /* Call the server */
1157 error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred,
1158 (req->r_flags & R_OPTMASK), cp, &nmrep, &status);
1159 if (cp->gss_clnt_token != NULL) {
1160 FREE(cp->gss_clnt_token, M_TEMP);
1161 cp->gss_clnt_token = NULL;
1162 }
1163 if (!error)
1164 error = status;
1165 if (error)
1166 goto nfsmout;
1167
1168 /* Get the server's reply */
1169
1170 nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len);
1171 if (cp->gss_clnt_handle != NULL) {
1172 FREE(cp->gss_clnt_handle, M_TEMP);
1173 cp->gss_clnt_handle = NULL;
1174 }
1175 if (cp->gss_clnt_handle_len > 0) {
1176 MALLOC(cp->gss_clnt_handle, u_char *, cp->gss_clnt_handle_len, M_TEMP, M_WAITOK);
1177 if (cp->gss_clnt_handle == NULL) {
1178 error = ENOMEM;
1179 goto nfsmout;
1180 }
1181 nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_handle_len, cp->gss_clnt_handle);
1182 }
1183 nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_major);
1184 nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor);
1185 nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin);
1186 nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen);
1187 if (error)
1188 goto nfsmout;
1189 if (cp->gss_clnt_tokenlen > 0) {
1190 MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK);
1191 if (cp->gss_clnt_token == NULL) {
1192 error = ENOMEM;
1193 goto nfsmout;
1194 }
1195 nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_tokenlen, cp->gss_clnt_token);
1196 }
1197
1198 /*
1199 * Make sure any unusual errors are expanded and logged by gssd
1200 */
1201 if (cp->gss_clnt_major != GSS_S_COMPLETE &&
1202 cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
1203 char who[] = "server";
1204 char unknown[] = "<unknown>";
1205
1206 (void) mach_gss_log_error(
1207 cp->gss_clnt_mport,
1208 !req->r_nmp ? unknown :
1209 vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname,
1210 cp->gss_clnt_uid,
1211 who,
1212 cp->gss_clnt_major,
1213 cp->gss_clnt_minor);
1214 }
1215
1216 nfsmout:
1217 nfsm_chain_cleanup(&nmreq);
1218 nfsm_chain_cleanup(&nmrep);
1219
1220 return (error);
1221 }
1222
1223 /*
1224 * Ugly hack to get the service principal from the f_mntfromname field in
1225 * the statfs struct. We assume a format of server:path. We don't currently
1226 * support URLs or other bizarre formats like path@server. A better solution
1227 * here might be to allow passing the service principal down in the mount args.
1228 * For Kerberos we just use the default realm.
1229 */
1230 static char *
1231 nfs_gss_clnt_svcname(struct nfsmount *nmp)
1232 {
1233 char *svcname, *d, *mntfromhere;
1234 int len;
1235
1236 if (!nmp)
1237 return (NULL);
1238 mntfromhere = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
1239 len = strlen(mntfromhere) + 5; /* "nfs/" plus null */
1240 MALLOC(svcname, char *, len, M_TEMP, M_NOWAIT);
1241 if (svcname == NULL)
1242 return (NULL);
1243 strlcpy(svcname, "nfs/", len);
1244 strlcat(svcname, mntfromhere, len);
1245 d = strchr(svcname, ':');
1246 if (d)
1247 *d = '\0';
1248
1249 return (svcname);
1250 }
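/*
 * Example: for a mount whose f_mntfromname is "server.example.com:/export"
 * (a hypothetical host name), the function above produces the service
 * principal "nfs/server.example.com"; the realm is left to the Kerberos
 * default.
 */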
1251
1252 /*
1253 * Make an upcall to the gssd using Mach RPC
1254 * The upcall is made using a task special port.
1255 * This allows launchd to fire up the gssd in the
1256 * user's session. This is important, since gssd
1257 * must have access to the user's credential cache.
1258 */
1259 static int
1260 nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
1261 {
1262 kern_return_t kr;
1263 byte_buffer okey = NULL;
1264 uint32_t skeylen = 0;
1265 int retry_cnt = 0;
1266 vm_map_copy_t itoken = NULL;
1267 byte_buffer otoken = NULL;
1268 mach_msg_type_number_t otokenlen;
1269 int error = 0;
1270 char uprinc[1];
1271 uint32_t ret_flags;
1272
1273 /*
1274 * NFS currently only supports default principals or
1275 * principals based on the uid of the caller.
1276 *
1277 * N.B. We define a one-character array for the principal
1278 * so that we can hold an empty string required by mach, since
1279 * the kernel is being compiled with -Wwrite-strings.
1280 */
1281 uprinc[0] = '\0';
1282 if (cp->gss_clnt_mport == NULL) {
1283 kr = task_get_gssd_port(get_threadtask(req->r_thread), &cp->gss_clnt_mport);
1284 if (kr != KERN_SUCCESS) {
1285 printf("nfs_gss_clnt_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
1286 goto out;
1287 }
1288 if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
1289 printf("nfs_gss_clnt_gssd_upcall: gssd port not valid\n");
1290 cp->gss_clnt_mport = NULL;
1291 goto out;
1292 }
1293 }
1294
1295 if (cp->gss_clnt_tokenlen > 0)
1296 nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
1297
1298 retry:
1299 kr = mach_gss_init_sec_context(
1300 cp->gss_clnt_mport,
1301 KRB5_MECH,
1302 (byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
1303 cp->gss_clnt_uid,
1304 uprinc,
1305 cp->gss_clnt_svcname,
1306 GSSD_MUTUAL_FLAG,
1307 cp->gss_clnt_gssd_flags,
1308 &cp->gss_clnt_context,
1309 &cp->gss_clnt_cred_handle,
1310 &ret_flags,
1311 &okey, (mach_msg_type_number_t *) &skeylen,
1312 &otoken, &otokenlen,
1313 &cp->gss_clnt_major,
1314 &cp->gss_clnt_minor);
1315
1316 cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;
1317
1318 if (kr != KERN_SUCCESS) {
1319 printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
1320 if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
1321 retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
1322 if (cp->gss_clnt_tokenlen > 0)
1323 nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
1324 goto retry;
1325 }
1326 task_release_special_port(cp->gss_clnt_mport);
1327 cp->gss_clnt_mport = NULL;
1328 goto out;
1329 }
1330
1331 /*
1332 * Make sure any unusual errors are expanded and logged by gssd
1333 */
1334 if (cp->gss_clnt_major != GSS_S_COMPLETE &&
1335 cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
1336 char who[] = "client";
1337 char unknown[] = "<unknown>";
1338
1339 (void) mach_gss_log_error(
1340 cp->gss_clnt_mport,
1341 !req->r_nmp ? unknown :
1342 vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname,
1343 cp->gss_clnt_uid,
1344 who,
1345 cp->gss_clnt_major,
1346 cp->gss_clnt_minor);
1347 }
1348
1349 if (skeylen > 0) {
1350 if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
1351 printf("nfs_gss_clnt_gssd_upcall: bad key length (%d)\n", skeylen);
1352 vm_map_copy_discard((vm_map_copy_t) okey);
1353 vm_map_copy_discard((vm_map_copy_t) otoken);
1354 goto out;
1355 }
1356 error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen,
1357 cp->gss_clnt_kinfo.skey);
1358 if (error) {
1359 vm_map_copy_discard((vm_map_copy_t) otoken);
1360 goto out;
1361 }
1362
1363 error = gss_key_init(&cp->gss_clnt_kinfo, skeylen);
1364 if (error)
1365 goto out;
1366 }
1367
1368 /* Free context token used as input */
1369 if (cp->gss_clnt_token)
1370 FREE(cp->gss_clnt_token, M_TEMP);
1371 cp->gss_clnt_token = NULL;
1372 cp->gss_clnt_tokenlen = 0;
1373
1374 if (otokenlen > 0) {
1375 /* Set context token to gss output token */
1376 MALLOC(cp->gss_clnt_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
1377 if (cp->gss_clnt_token == NULL) {
1378 printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen);
1379 vm_map_copy_discard((vm_map_copy_t) otoken);
1380 return (ENOMEM);
1381 }
1382 error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token);
1383 if (error) {
1384 FREE(cp->gss_clnt_token, M_TEMP);
1385 cp->gss_clnt_token = NULL;
1386 return (NFSERR_EAUTH);
1387 }
1388 cp->gss_clnt_tokenlen = otokenlen;
1389 }
1390
1391 return (0);
1392
1393 out:
1394 if (cp->gss_clnt_token)
1395 FREE(cp->gss_clnt_token, M_TEMP);
1396 cp->gss_clnt_token = NULL;
1397 cp->gss_clnt_tokenlen = 0;
1398
1399 return (NFSERR_EAUTH);
1400 }
1401
1402 /*
1403 * Invoked at the completion of an RPC call that uses an RPCSEC_GSS
1404 * credential. The sequence number window that the server returns
1405 * at context setup indicates the maximum number of client calls that
1406 * can be outstanding on a context. The client maintains a bitmap that
1407 * represents the server's window. Each pending request has a bit set
1408 * in the window bitmap. When a reply comes in or times out, we reset
1409 * the bit in the bitmap and if there are any other threads waiting for
1410 * a context slot we notify the waiting thread(s).
1411 *
1412 * Note that if a request is retransmitted, it will have a single XID
1413 * but it may be associated with multiple sequence numbers. So we
1414 * may have to reset multiple sequence number bits in the window bitmap.
1415 */
1416 void
1417 nfs_gss_clnt_rpcdone(struct nfsreq *req)
1418 {
1419 struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
1420 struct gss_seq *gsp, *ngsp;
1421 int i = 0;
1422
1423 if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE))
1424 return; // no context - don't bother
1425 /*
1426 * Reset the bit for this request in the
1427 * sequence number window to indicate it's done.
1428 * We do this even if the request timed out.
1429 */
1430 lck_mtx_lock(cp->gss_clnt_mtx);
1431 gsp = SLIST_FIRST(&req->r_gss_seqlist);
1432 if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin))
1433 win_resetbit(cp->gss_clnt_seqbits,
1434 gsp->gss_seqnum % cp->gss_clnt_seqwin);
1435
1436 /*
1437 * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries
1438 */
1439 SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) {
1440 if (++i > GSS_CLNT_SEQLISTMAX) {
1441 SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext);
1442 FREE(gsp, M_TEMP);
1443 }
1444 }
1445
1446 /*
1447 * If there's a thread waiting for
1448 * the window to advance, wake it up.
1449 */
1450 if (cp->gss_clnt_flags & GSS_NEEDSEQ) {
1451 cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
1452 wakeup(cp);
1453 }
1454 lck_mtx_unlock(cp->gss_clnt_mtx);
1455 }
1456
1457 /*
1458 * Create a reference to a context from a request
1459 * and bump the reference count
1460 */
1461 void
1462 nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
1463 {
1464 req->r_gss_ctx = cp;
1465
1466 lck_mtx_lock(cp->gss_clnt_mtx);
1467 cp->gss_clnt_refcnt++;
1468 lck_mtx_unlock(cp->gss_clnt_mtx);
1469 }
1470
1471 /*
1472 * Remove a context reference from a request
1473 * If the reference count drops to zero, and the
1474 * context is invalid, destroy the context
1475 */
1476 void
1477 nfs_gss_clnt_ctx_unref(struct nfsreq *req)
1478 {
1479 struct nfsmount *nmp = req->r_nmp;
1480 struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
1481
1482 if (cp == NULL)
1483 return;
1484
1485 req->r_gss_ctx = NULL;
1486
1487 lck_mtx_lock(cp->gss_clnt_mtx);
1488 if (--cp->gss_clnt_refcnt == 0
1489 && cp->gss_clnt_flags & GSS_CTX_INVAL) {
1490 lck_mtx_unlock(cp->gss_clnt_mtx);
1491
1492 if (nmp)
1493 lck_mtx_lock(&nmp->nm_lock);
1494 nfs_gss_clnt_ctx_remove(nmp, cp);
1495 if (nmp)
1496 lck_mtx_unlock(&nmp->nm_lock);
1497
1498 return;
1499 }
1500 lck_mtx_unlock(cp->gss_clnt_mtx);
1501 }
1502
1503 /*
1504 * Remove a context
1505 */
1506 static void
1507 nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp)
1508 {
1509 /*
1510 * If dequeueing, assume nmp->nm_lock is held
1511 */
1512 if (nmp != NULL)
1513 TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
1514
1515 if (cp->gss_clnt_mport)
1516 task_release_special_port(cp->gss_clnt_mport);
1517 if (cp->gss_clnt_mtx)
1518 lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
1519 if (cp->gss_clnt_handle)
1520 FREE(cp->gss_clnt_handle, M_TEMP);
1521 if (cp->gss_clnt_seqbits)
1522 FREE(cp->gss_clnt_seqbits, M_TEMP);
1523 if (cp->gss_clnt_token)
1524 FREE(cp->gss_clnt_token, M_TEMP);
1525 if (cp->gss_clnt_svcname)
1526 FREE(cp->gss_clnt_svcname, M_TEMP);
1527 FREE(cp, M_TEMP);
1528 }
1529
1530 /*
1531 * The context for a user is invalid.
1532 * Mark the context as invalid, then
1533 * create a new context.
1534 */
1535 int
1536 nfs_gss_clnt_ctx_renew(struct nfsreq *req)
1537 {
1538 struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
1539 struct nfsmount *nmp = req->r_nmp;
1540 struct nfs_gss_clnt_ctx *ncp;
1541 int error = 0;
1542 uid_t saved_uid;
1543 mach_port_t saved_mport;
1544 int retrycnt = 0;
1545
1546 if (cp == NULL)
1547 return (0);
1548
1549 lck_mtx_lock(cp->gss_clnt_mtx);
1550 if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
1551 lck_mtx_unlock(cp->gss_clnt_mtx);
1552 nfs_gss_clnt_ctx_unref(req);
1553 return (0); // already being renewed
1554 }
1555 saved_uid = cp->gss_clnt_uid;
1556 saved_mport = task_copy_special_port(cp->gss_clnt_mport);
1557
1558 /* Remove the old context */
1559 cp->gss_clnt_flags |= GSS_CTX_INVAL;
1560
1561 /*
1562 * If there's a thread waiting
1563 * in the old context, wake it up.
1564 */
1565 if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
1566 cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
1567 wakeup(cp);
1568 }
1569 lck_mtx_unlock(cp->gss_clnt_mtx);
1570
1571 /*
1572 * Create a new context
1573 */
1574 MALLOC(ncp, struct nfs_gss_clnt_ctx *, sizeof(*ncp),
1575 M_TEMP, M_WAITOK|M_ZERO);
1576 if (ncp == NULL) {
1577 error = ENOMEM;
1578 goto out;
1579 }
1580
1581 ncp->gss_clnt_uid = saved_uid;
1582 ncp->gss_clnt_mport = task_copy_special_port(saved_mport); // re-use the gssd port
1583 ncp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
1584 ncp->gss_clnt_thread = current_thread();
1585 lck_mtx_lock(&nmp->nm_lock);
1586 TAILQ_INSERT_TAIL(&nmp->nm_gsscl, ncp, gss_clnt_entries);
1587 lck_mtx_unlock(&nmp->nm_lock);
1588
1589 /* Adjust reference counts to new and old context */
1590 nfs_gss_clnt_ctx_unref(req);
1591 nfs_gss_clnt_ctx_ref(req, ncp);
1592
1593 retry:
1594 error = nfs_gss_clnt_ctx_init(req, ncp); // Initialize new context
1595 if (error == ENEEDAUTH) {
1596 error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
1597 if (!error)
1598 goto retry;
1599 }
1600 out:
1601 task_release_special_port(saved_mport);
1602 if (error)
1603 nfs_gss_clnt_ctx_unref(req);
1604
1605 return (error);
1606 }
1607
1608 /*
1609 * Destroy all the contexts associated with a mount.
1610 * The contexts are also destroyed by the server.
1611 */
1612 void
1613 nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp, int mntflags)
1614 {
1615 struct nfs_gss_clnt_ctx *cp;
1616 struct ucred temp_cred;
1617 kauth_cred_t cred;
1618 struct nfsm_chain nmreq, nmrep;
1619 int error, status;
1620 struct nfsreq req;
1621
1622 bzero((caddr_t) &temp_cred, sizeof(temp_cred));
1623 temp_cred.cr_ngroups = 1;
1624 req.r_nmp = nmp;
1625
1626 for (;;) {
1627 lck_mtx_lock(&nmp->nm_lock);
1628 cp = TAILQ_FIRST(&nmp->nm_gsscl);
1629 lck_mtx_unlock(&nmp->nm_lock);
1630 if (cp == NULL)
1631 break;
1632
1633 nfs_gss_clnt_ctx_ref(&req, cp);
1634
1635 /*
1636 * Tell the server to destroy its context.
1637 * But don't bother if it's a forced unmount
1638 * or if it's a dummy sec=sys context.
1639 */
1640 if (!(mntflags & MNT_FORCE) && cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS) {
1641 temp_cred.cr_uid = cp->gss_clnt_uid;
1642 cred = kauth_cred_create(&temp_cred);
1643 cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;
1644
1645 error = 0;
1646 nfsm_chain_null(&nmreq);
1647 nfsm_chain_null(&nmrep);
1648 nfsm_chain_build_alloc_init(error, &nmreq, 0);
1649 nfsm_chain_build_done(error, &nmreq);
1650 if (!error)
1651 nfs_request_gss(nmp->nm_mountp, &nmreq,
1652 current_thread(), cred, 0, cp, &nmrep, &status);
1653 nfsm_chain_cleanup(&nmreq);
1654 nfsm_chain_cleanup(&nmrep);
1655 kauth_cred_unref(&cred);
1656 }
1657
1658 /*
1659 * Mark the context invalid then drop
1660 * the reference to remove it if its
1661 * refcount is zero.
1662 */
1663 cp->gss_clnt_flags |= GSS_CTX_INVAL;
1664 nfs_gss_clnt_ctx_unref(&req);
1665 }
1666 }
1667
1668 /*
1669 * If we get a failure in trying to establish a context we need to wait a
1670 * little while to see if the server is feeling better. In our case this is
1671 * probably a failure in directory services not coming up in a timely fashion.
1672 * This routine sort of mimics receiving a jukebox error.
1673 */
1674 static int
1675 nfs_gss_clnt_ctx_delay(struct nfsreq *req, int *retry)
1676 {
1677 int timeo = (1 << *retry) * NFS_TRYLATERDEL;
1678 int error = 0;
1679 struct nfsmount *nmp = req->r_nmp;
1680 struct timeval now;
1681 time_t waituntil;
1682
1683 if (!nmp)
1684 return (ENXIO);
1685 if ((nmp->nm_flag & NFSMNT_SOFT) && *retry > nmp->nm_retry)
1686 return (ETIMEDOUT);
1687 if (timeo > 60)
1688 timeo = 60;
1689
1690 microuptime(&now);
1691 waituntil = now.tv_sec + timeo;
1692 while (now.tv_sec < waituntil) {
1693 tsleep(&lbolt, PSOCK, "nfs_gss_clnt_ctx_delay", 0);
1694 error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
1695 if (error)
1696 break;
1697 microuptime(&now);
1698 }
1699 *retry += 1;
1700
1701 return (error);
1702 }
1703
1704
1705 #endif /* NFSCLIENT */
1706
1707 /*************
1708 *
1709 * Server functions
1710 */
1711
1712 #if NFSSERVER
1713
1714 /*
1715 * Find a server context based on a handle value received
1716 * in an RPCSEC_GSS credential.
1717 */
1718 static struct nfs_gss_svc_ctx *
1719 nfs_gss_svc_ctx_find(uint32_t handle)
1720 {
1721 struct nfs_gss_svc_ctx_hashhead *head;
1722 struct nfs_gss_svc_ctx *cp;
1723 uint64_t timenow;
1724
1725 if (handle == 0)
1726 return (NULL);
1727
1728 head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
1729 /*
1730 * Don't return a context that is going to expire in GSS_CTX_PEND seconds
1731 */
1732 clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);
1733
1734 lck_mtx_lock(nfs_gss_svc_ctx_mutex);
1735
1736 LIST_FOREACH(cp, head, gss_svc_entries)
1737 if (cp->gss_svc_handle == handle) {
1738 if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
1739 /*
1740 * Context has or is about to expire. Don't use.
1741 * We'll return null and the client will have to create
1742 * a new context.
1743 */
1744 cp->gss_svc_handle = 0;
1745 /*
1746 * Make sure though that we stay around for GSS_CTX_PEND seconds
1747 * for other threads that might be using the context.
1748 */
1749 cp->gss_svc_incarnation = timenow;
1750 cp = NULL;
1751 }
1752 break;
1753 }
1754
1755 lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
1756
1757 return (cp);
1758 }
1759
1760 /*
1761 * Insert a new server context into the hash table
1762 * and start the context reap thread if necessary.
1763 */
1764 static void
1765 nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
1766 {
1767 struct nfs_gss_svc_ctx_hashhead *head;
1768
1769 head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
1770
1771 lck_mtx_lock(nfs_gss_svc_ctx_mutex);
1772 LIST_INSERT_HEAD(head, cp, gss_svc_entries);
1773 nfs_gss_ctx_count++;
1774
1775 if (!nfs_gss_timer_on) {
1776 nfs_gss_timer_on = 1;
1777
1778 nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
1779 min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, GSS_SVC_CTX_TTL)) * MSECS_PER_SEC);
1780 }
1781
1782 lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
1783 }
1784
1785 /*
1786 * This function is called via the kernel's callout
1787 * mechanism. It runs only when there are
1788 * cached RPCSEC_GSS contexts.
1789 */
1790 void
1791 nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
1792 {
1793 struct nfs_gss_svc_ctx_hashhead *head;
1794 struct nfs_gss_svc_ctx *cp, *next;
1795 uint64_t timenow;
1796 int contexts = 0;
1797 int i;
1798
1799 lck_mtx_lock(nfs_gss_svc_ctx_mutex);
1800 clock_get_uptime(&timenow);
1801
1802 /*
1803 * Scan all the hash chains
1804 * Assume nfs_gss_svc_ctx_mutex is held
1805 */
1806 for (i = 0; i < SVC_CTX_HASHSZ; i++) {
1807 /*
1808 * For each hash chain, look for entries
1809 * that haven't been used in a while.
1810 */
1811 head = &nfs_gss_svc_ctx_hashtbl[i];
1812 for (cp = LIST_FIRST(head); cp; cp = next) {
1813 contexts++;
1814 next = LIST_NEXT(cp, gss_svc_entries);
1815 if (timenow > cp->gss_svc_incarnation +
1816 (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)) {
1817 /*
1818 * A stale context - remove it
1819 */
1820 LIST_REMOVE(cp, gss_svc_entries);
1821 if (cp->gss_svc_seqbits)
1822 FREE(cp->gss_svc_seqbits, M_TEMP);
1823 lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
1824 FREE(cp, M_TEMP);
1825 contexts--;
1826 }
1827 }
1828 }
1829
1830 nfs_gss_ctx_count = contexts;
1831
1832 /*
1833 * If there are still some cached contexts left,
1834 * set up another callout to check on them later.
1835 */
1836 nfs_gss_timer_on = nfs_gss_ctx_count > 0;
1837 if (nfs_gss_timer_on)
1838 nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
1839 min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, GSS_SVC_CTX_TTL)) * MSECS_PER_SEC);
1840
1841 lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
1842 }
1843
1844 /*
1845 * Here the server receives an RPCSEC_GSS credential in an
1846 * RPC call header. First there's some checking to make sure
1847 * the credential is appropriate - whether the context is still
1848 * being set up, or is complete. Then we use the handle to find
1849 * the server's context and validate the verifier, which contains
1850 * a signed checksum of the RPC header. If the verifier checks
1851 * out, we extract the user's UID and groups from the context
1852 * and use them to set up a UNIX credential for the user's request.
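 *
 * The credential body parsed below is laid out as in RFC 2203,
 * section 5.3.1: version (always 1), gss_proc, sequence number,
 * service, and an opaque context handle (length + handle bytes).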
1853 */
1854 int
1855 nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
1856 {
1857 uint32_t vers, proc, seqnum, service;
1858 uint32_t handle, handle_len;
1859 struct nfs_gss_svc_ctx *cp = NULL;
1860 uint32_t flavor = 0, verflen = 0;
1861 int error = 0;
1862 uint32_t arglen, start, toklen, cksumlen;
1863 u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
1864 u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
1865 struct nfsm_chain nmc_tmp;
1866 gss_key_info *ki;
1867
1868 vers = proc = seqnum = service = handle_len = 0;
1869 arglen = cksumlen = 0;
1870
1871 nfsm_chain_get_32(error, nmc, vers);
1872 if (vers != RPCSEC_GSS_VERS_1) {
1873 error = NFSERR_AUTHERR | AUTH_REJECTCRED;
1874 goto nfsmout;
1875 }
1876
1877 nfsm_chain_get_32(error, nmc, proc);
1878 nfsm_chain_get_32(error, nmc, seqnum);
1879 nfsm_chain_get_32(error, nmc, service);
1880 nfsm_chain_get_32(error, nmc, handle_len);
1881 if (error)
1882 goto nfsmout;
1883
1884 /*
1885 * Make sure context setup/destroy is being done with a nullproc
1886 */
1887 if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
1888 error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
1889 goto nfsmout;
1890 }
1891
1892 /*
1893 * If the sequence number is greater than the max
1894 * allowable, reject and have the client init a
1895 * new context.
1896 */
1897 if (seqnum > GSS_MAXSEQ) {
1898 error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
1899 goto nfsmout;
1900 }
1901
1902 nd->nd_sec =
1903 service == RPCSEC_GSS_SVC_NONE ? RPCAUTH_KRB5 :
1904 service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
1905 service == RPCSEC_GSS_SVC_PRIVACY ? RPCAUTH_KRB5P : 0;
1906
1907 if (proc == RPCSEC_GSS_INIT) {
1908 /*
1909 * Limit the total number of contexts
1910 */
1911 if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
1912 error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
1913 goto nfsmout;
1914 }
1915
1916 /*
1917 * Set up a new context
1918 */
1919 MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
1920 if (cp == NULL) {
1921 error = ENOMEM;
1922 goto nfsmout;
1923 }
1924 } else {
1925
1926 /*
1927 * Use the handle to find the context
1928 */
1929 if (handle_len != sizeof(handle)) {
1930 error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
1931 goto nfsmout;
1932 }
1933 nfsm_chain_get_32(error, nmc, handle);
1934 if (error)
1935 goto nfsmout;
1936 cp = nfs_gss_svc_ctx_find(handle);
1937 if (cp == NULL) {
1938 error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
1939 goto nfsmout;
1940 }
1941 }
1942
1943 cp->gss_svc_proc = proc;
1944 ki = &cp->gss_svc_kinfo;
1945
1946 if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
1947 struct ucred temp_cred;
1948
1949 if (cp->gss_svc_seqwin == 0) {
1950 /*
1951 * Context isn't complete
1952 */
1953 error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
1954 goto nfsmout;
1955 }
1956
1957 if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
1958 /*
1959 * Sequence number is bad
1960 */
1961 error = EINVAL; // drop the request
1962 goto nfsmout;
1963 }
1964
1965 /* Now compute the client's call header checksum */
1966 nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), 0, 0, cksum1);
1967
1968 /*
1969 * Validate the verifier.
1970 * The verifier contains an encrypted checksum
1971 * of the call header from the XID up to and
1972 * including the credential. We compute the
1973 * checksum and compare it with what came in
1974 * the verifier.
1975 */
1976 nfsm_chain_get_32(error, nmc, flavor);
1977 nfsm_chain_get_32(error, nmc, verflen);
1978 if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN(ki->hash_len))
1979 error = NFSERR_AUTHERR | AUTH_BADVERF;
1980 nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
1981 if (error)
1982 goto nfsmout;
1983
1984 /* Get the checksum from the token inside the verifier */
1985 error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
1986 NULL, cksum2);
1987 if (error)
1988 goto nfsmout;
1989
1990 if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
1991 error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
1992 goto nfsmout;
1993 }
1994
1995 nd->nd_gss_seqnum = seqnum;
1996
1997 /*
1998 * Set up the user's cred
1999 */
2000 bzero(&temp_cred, sizeof(temp_cred));
2001 temp_cred.cr_uid = cp->gss_svc_uid;
2002 bcopy(cp->gss_svc_gids, temp_cred.cr_groups,
2003 sizeof(gid_t) * cp->gss_svc_ngroups);
2004 temp_cred.cr_ngroups = cp->gss_svc_ngroups;
2005
2006 nd->nd_cr = kauth_cred_create(&temp_cred);
2007 if (nd->nd_cr == NULL) {
2008 error = ENOMEM;
2009 goto nfsmout;
2010 }
2011 clock_get_uptime(&cp->gss_svc_incarnation);
2012
2013 /*
2014 * If the call arguments are integrity or privacy protected
2015 * then we need to check them here.
2016 */
2017 switch (service) {
2018 case RPCSEC_GSS_SVC_NONE:
2019 /* nothing to do */
2020 break;
2021 case RPCSEC_GSS_SVC_INTEGRITY:
2022 /*
2023 * Here's what we expect in the integrity call args:
2024 *
2025 * - length of seq num + call args (4 bytes)
2026 * - sequence number (4 bytes)
2027 * - call args (variable bytes)
2028 * - length of checksum token (37)
2029 * - checksum of seqnum + call args (37 bytes)
2030 */
2031 nfsm_chain_get_32(error, nmc, arglen); // length of args
2032 if (arglen > NFS_MAXPACKET) {
2033 error = EBADRPC;
2034 goto nfsmout;
2035 }
2036
2037 /* Compute the checksum over the call args */
2038 start = nfsm_chain_offset(nmc);
2039 nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, arglen, cksum1);
2040
2041 /*
2042 * Get the sequence number prepended to the args
2043 * and compare it against the one sent in the
2044 * call credential.
2045 */
2046 nfsm_chain_get_32(error, nmc, seqnum);
2047 if (seqnum != nd->nd_gss_seqnum) {
2048 error = EBADRPC; // returns as GARBAGEARGS
2049 goto nfsmout;
2050 }
2051
2052 /*
2053 * Advance to the end of the args and
2054 * fetch the checksum computed by the client.
2055 */
2056 nmc_tmp = *nmc;
2057 arglen -= NFSX_UNSIGNED; // skipped seqnum
2058 nfsm_chain_adv(error, &nmc_tmp, arglen); // skip args
2059 nfsm_chain_get_32(error, &nmc_tmp, cksumlen); // length of checksum
2060 if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
2061 error = EBADRPC;
2062 goto nfsmout;
2063 }
2064 nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
2065 if (error)
2066 goto nfsmout;
2067 error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
2068 NULL, cksum2);
2069
2070 /* Verify that the checksums are the same */
2071 if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
2072 error = EBADRPC;
2073 goto nfsmout;
2074 }
2075 break;
2076 case RPCSEC_GSS_SVC_PRIVACY:
2077 /*
2078 * Here's what we expect in the privacy call args:
2079 *
2080 * - length of confounder + seq num + token + call args
2081 * - wrap token (37-40 bytes)
2082 * - confounder (8 bytes)
2083 * - sequence number (4 bytes)
2084 * - call args (encrypted)
2085 */
2086 nfsm_chain_get_32(error, nmc, arglen); // length of args
2087 if (arglen > NFS_MAXPACKET) {
2088 error = EBADRPC;
2089 goto nfsmout;
2090 }
2091
2092 /* Get the token that prepends the encrypted args */
2093 nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
2094 if (error)
2095 goto nfsmout;
2096 error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 1,
2097 &toklen, cksum1);
2098 if (error)
2099 goto nfsmout;
2100 nfsm_chain_reverse(nmc, nfsm_pad(toklen));
2101
2102 /* decrypt the 8 byte confounder + seqnum + args */
2103 start = nfsm_chain_offset(nmc);
2104 arglen -= toklen;
2105 nfs_gss_encrypt_chain(ki, nmc, start, arglen, DES_DECRYPT);
2106
2107 /* Compute a checksum over the confounder + seqnum + args */
2108 nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, arglen, cksum2);
2109
2110 /* Verify that the checksums are the same */
2111 if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
2112 error = EBADRPC;
2113 goto nfsmout;
2114 }
2115
2116 /*
2117 * Get the sequence number prepended to the args
2118 * and compare it against the one sent in the
2119 * call credential.
2120 */
2121 nfsm_chain_adv(error, nmc, 8); // skip over the confounder
2122 nfsm_chain_get_32(error, nmc, seqnum);
2123 if (seqnum != nd->nd_gss_seqnum) {
2124 error = EBADRPC; // returns as GARBAGEARGS
2125 goto nfsmout;
2126 }
2127 break;
2128 }
2129 } else {
2130 /*
2131 * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
2132 * then we expect a null verifier.
2133 */
2134 nfsm_chain_get_32(error, nmc, flavor);
2135 nfsm_chain_get_32(error, nmc, verflen);
2136 if (error || flavor != RPCAUTH_NULL || verflen > 0)
2137 error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
2138 if (error)
2139 goto nfsmout;
2140 }
2141
2142 nd->nd_gss_context = cp;
2143 nfsmout:
2144 return (error);
2145 }
2146
2147 /*
2148 * Insert the server's verifier into the RPC reply header.
2149 * It contains a signed checksum of the sequence number that
2150 * was received in the RPC call.
2151 * Then go on to add integrity or privacy if necessary.
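 *
 * Concretely, the verifier built below is the RPCSEC_GSS flavor, a
 * length, and a MIC token over the request sequence number - or over
 * the sequence window size in the final reply of context setup.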
2152 */
2153 int
2154 nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
2155 {
2156 struct nfs_gss_svc_ctx *cp;
2157 int error = 0;
2158 u_char tokbuf[KRB5_SZ_TOKEN(MAX_DIGEST)];
2159 int toklen;
2160 u_char cksum[MAX_DIGEST];
2161 gss_key_info *ki;
2162
2163 cp = nd->nd_gss_context;
2164 ki = &cp->gss_svc_kinfo;
2165
2166 if (cp->gss_svc_major != GSS_S_COMPLETE) {
2167 /*
2168 * If the context isn't yet complete
2169 * then return a null verifier.
2170 */
2171 nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
2172 nfsm_chain_add_32(error, nmc, 0);
2173 return (error);
2174 }
2175
2176 /*
2177 * Compute checksum of the request seq number
2178 * If it's the final reply of context setup
2179 * then return the checksum of the context
2180 * window size.
2181 */
2182 if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
2183 cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
2184 nfs_gss_cksum_rep(ki, cp->gss_svc_seqwin, cksum);
2185 else
2186 nfs_gss_cksum_rep(ki, nd->nd_gss_seqnum, cksum);
2187 /*
2188 * Now wrap it in a token and add
2189 * the verifier to the reply.
2190 */
2191 toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
2192 nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
2193 nfsm_chain_add_32(error, nmc, toklen);
2194 nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
2195
2196 return (error);
2197 }
2198
2199 /*
2200 * The results aren't available yet, but if they need to be
2201 * checksummed for integrity protection or encrypted, then
2202 * we can record the start offset here, split the mbuf chain,
2203 * and insert the sequence number (and confounder for privacy).
2204 * The rest of the work is done later by nfs_gss_svc_protect_reply()
2205 * when the results are available.
2206 */
2207 int
2208 nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
2209 {
2210 struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
2211 int error = 0;
2212
2213 if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
2214 cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
2215 return (0);
2216
2217 switch (nd->nd_sec) {
2218 case RPCAUTH_KRB5:
2219 /* Nothing to do */
2220 break;
2221 case RPCAUTH_KRB5I:
2222 nd->nd_gss_mb = nmc->nmc_mcur; // record current mbuf
2223 nfsm_chain_finish_mbuf(error, nmc); // split the chain here
2224 nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum); // req sequence number
2225 break;
2226 case RPCAUTH_KRB5P:
2227 nd->nd_gss_mb = nmc->nmc_mcur; // record current mbuf
2228 nfsm_chain_finish_mbuf(error, nmc); // split the chain here
2229 nfsm_chain_add_32(error, nmc, random()); // confounder bytes 1-4
2230 nfsm_chain_add_32(error, nmc, random()); // confounder bytes 5-8
2231 nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum); // req sequence number
2232 break;
2233 }
2234
2235 return (error);
2236 }
2237
2238 /*
2239 * The results are checksummed or encrypted for return to the client
2240 */
2241 int
2242 nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep)
2243 {
2244 struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
2245 struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
2246 struct nfsm_chain nmrep_pre, *nmc_pre = &nmrep_pre;
2247 mbuf_t mb, results;
2248 uint32_t reslen;
2249 u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
2250 int pad, toklen;
2251 u_char cksum[MAX_DIGEST];
2252 int error = 0;
2253 gss_key_info *ki = &cp->gss_svc_kinfo;
2254
2255 /*
2256 * Using a reference to the mbuf where we previously split the reply
2257 * mbuf chain, we split the mbuf chain argument into two mbuf chains,
2258 * one that allows us to prepend a length field or token (nmc_pre),
2259 * and a second that holds just the results that we're going to
2260 * checksum and/or encrypt. When we're done, we join the chains back
2261 * together.
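 *
 * Roughly:
 *
 *	nmc_pre:  [ RPC reply header ... nd_gss_mb ]
 *	results:  [ (confounder + seqnum) NFS results ... ]
 *
 * For krb5i a length is prepended and a MIC token appended around the
 * results; for krb5p a wrap token is prepended and the results are
 * encrypted in place before the chains are rejoined.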
2262 */
2263 nfs_gss_nfsm_chain(nmc_res, mrep); // set up the results chain
2264 mb = nd->nd_gss_mb; // the mbuf where we split
2265 results = mbuf_next(mb); // first mbuf in the results
2266 reslen = nfs_gss_mchain_length(results); // length of results
2267 error = mbuf_setnext(mb, NULL); // disconnect the chains
2268 if (error)
2269 return (error);
2270 nfs_gss_nfsm_chain(nmc_pre, mb); // set up the prepend chain
2271
2272 if (nd->nd_sec == RPCAUTH_KRB5I) {
2273 nfsm_chain_add_32(error, nmc_pre, reslen);
2274 nfsm_chain_build_done(error, nmc_pre);
2275 if (error)
2276 return (error);
2277 nfs_gss_append_chain(nmc_pre, results); // Append the results mbufs
2278
2279 /* Now compute the checksum over the results data */
2280 nfs_gss_cksum_mchain(ki, results, ALG_MIC(ki), 0, reslen, cksum);
2281
2282 /* Put it into a token and append to the request */
2283 toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
2284 nfsm_chain_add_32(error, nmc_res, toklen);
2285 nfsm_chain_add_opaque(error, nmc_res, tokbuf, toklen);
2286 nfsm_chain_build_done(error, nmc_res);
2287 } else {
2288 /* RPCAUTH_KRB5P */
2289 /*
2290 * Append a pad trailer - per RFC 1964 section 1.2.2.3
2291 * Since XDR data is always 32-bit aligned, it
2292 * needs to be padded either by 4 bytes or 8 bytes.
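 * For example, results of 20 bytes get one 0x04040404 word
 * (20 % 8 == 4), while results of 24 bytes get two 0x08080808
 * words, so each pad octet holds the pad length as the RFC requires.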
2293 */
2294 if (reslen % 8 > 0) {
2295 nfsm_chain_add_32(error, nmc_res, 0x04040404);
2296 reslen += NFSX_UNSIGNED;
2297 } else {
2298 nfsm_chain_add_32(error, nmc_res, 0x08080808);
2299 nfsm_chain_add_32(error, nmc_res, 0x08080808);
2300 reslen += 2 * NFSX_UNSIGNED;
2301 }
2302 nfsm_chain_build_done(error, nmc_res);
2303
2304 /* Now compute the checksum over the results data */
2305 nfs_gss_cksum_mchain(ki, results, ALG_WRAP(ki), 0, reslen, cksum);
2306
2307 /* Put it into a token and insert in the reply */
2308 toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 0, reslen, cksum);
2309 nfsm_chain_add_32(error, nmc_pre, toklen + reslen);
2310 nfsm_chain_add_opaque_nopad(error, nmc_pre, tokbuf, toklen);
2311 nfsm_chain_build_done(error, nmc_pre);
2312 if (error)
2313 return (error);
2314 nfs_gss_append_chain(nmc_pre, results); // Append the results mbufs
2315
2316 /* Encrypt the confounder + seqnum + results */
2317 nfs_gss_encrypt_mchain(ki, results, 0, reslen, DES_ENCRYPT);
2318
2319 /* Add null XDR pad if the ASN.1 token misaligned the data */
2320 pad = nfsm_pad(toklen + reslen);
2321 if (pad > 0) {
2322 nfsm_chain_add_opaque_nopad(error, nmc_pre, iv0, pad);
2323 nfsm_chain_build_done(error, nmc_pre);
2324 }
2325 }
2326
2327 return (error);
2328 }
2329
2330 /*
2331 * This function handles the context setup calls from the client.
2332 * Essentially, it implements the NFS null procedure calls when
2333 * an RPCSEC_GSS credential is used.
2334 * This is the context maintenance function. It creates and
2335 * destroys server contexts at the whim of the client.
2336 * During context creation, it receives GSS-API tokens from the
2337 * client, passes them up to gssd, and returns the token received
2338 * from gssd back to the client in the null procedure reply.
2339 */
2340 int
2341 nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
2342 {
2343 struct nfs_gss_svc_ctx *cp = NULL;
2344 uint32_t handle = 0;
2345 int error = 0;
2346 int autherr = 0;
2347 struct nfsm_chain *nmreq, nmrep;
2348 int sz;
2349
2350 nmreq = &nd->nd_nmreq;
2351 nfsm_chain_null(&nmrep);
2352 *mrepp = NULL;
2353 cp = nd->nd_gss_context;
2354 nd->nd_repstat = 0;
2355
2356 switch (cp->gss_svc_proc) {
2357 case RPCSEC_GSS_INIT:
2358 /*
2359 * Give the client a random handle so that
2360 * if we reboot it's unlikely the client
2361 * will get a bad context match.
2362 * Make sure it's not zero, or already assigned.
2363 */
2364 do {
2365 handle = random();
2366 } while (nfs_gss_svc_ctx_find(handle) != NULL || handle == 0);
2367 cp->gss_svc_handle = handle;
2368 cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
2369 clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
2370 &cp->gss_svc_incarnation);
2371
2372 nfs_gss_svc_ctx_insert(cp);
2373
2374 /* FALLTHRU */
2375
2376 case RPCSEC_GSS_CONTINUE_INIT:
2377 /* Get the token from the request */
2378 nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
2379 if (cp->gss_svc_tokenlen == 0) {
2380 autherr = RPCSEC_GSS_CREDPROBLEM;
2381 break;
2382 }
2383 MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
2384 if (cp->gss_svc_token == NULL) {
2385 autherr = RPCSEC_GSS_CREDPROBLEM;
2386 break;
2387 }
2388 nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);
2389
2390 /* Use the token in a gss_accept_sec_context upcall */
2391 error = nfs_gss_svc_gssd_upcall(cp);
2392 if (error) {
2393 autherr = RPCSEC_GSS_CREDPROBLEM;
2394 if (error == NFSERR_EAUTH)
2395 error = 0;
2396 break;
2397 }
2398
2399 /*
2400 * If the context isn't complete, pass the new token
2401 * back to the client for another round.
2402 */
2403 if (cp->gss_svc_major != GSS_S_COMPLETE)
2404 break;
2405
2406 /*
2407 * Now the server context is complete.
2408 * Finish setup.
2409 */
2410 clock_get_uptime(&cp->gss_svc_incarnation);
2411
2412 cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
2413 MALLOC(cp->gss_svc_seqbits, uint32_t *,
2414 nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
2415 if (cp->gss_svc_seqbits == NULL) {
2416 autherr = RPCSEC_GSS_CREDPROBLEM;
2417 break;
2418 }
2419 break;
2420
2421 case RPCSEC_GSS_DATA:
2422 /* Just a nullproc ping - do nothing */
2423 break;
2424
2425 case RPCSEC_GSS_DESTROY:
2426 /*
2427 * Don't destroy the context immediately because
2428 * other active requests might still be using it.
2429 * Instead, schedule it for destruction after
2430 * GSS_CTX_PEND time has elapsed.
2431 */
2432 cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
2433 if (cp != NULL) {
2434 cp->gss_svc_handle = 0; // so it can't be found
2435 lck_mtx_lock(cp->gss_svc_mtx);
2436 clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
2437 &cp->gss_svc_incarnation);
2438 lck_mtx_unlock(cp->gss_svc_mtx);
2439 }
2440 break;
2441 default:
2442 autherr = RPCSEC_GSS_CREDPROBLEM;
2443 break;
2444 }
2445
2446 /* Now build the reply */
2447
2448 if (nd->nd_repstat == 0)
2449 nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
2450 sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results
2451 error = nfsrv_rephead(nd, slp, &nmrep, sz);
2452 *mrepp = nmrep.nmc_mhead;
2453 if (error || autherr)
2454 goto nfsmout;
2455
2456 if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
2457 cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
2458 nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
2459 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);
2460
2461 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
2462 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
2463 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);
2464
2465 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
2466 if (cp->gss_svc_token != NULL) {
2467 nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
2468 FREE(cp->gss_svc_token, M_TEMP);
2469 cp->gss_svc_token = NULL;
2470 }
2471 }
2472
2473 nfsmout:
2474 if (autherr != 0) {
2475 nd->nd_gss_context = NULL;
2476 LIST_REMOVE(cp, gss_svc_entries);
2477 if (cp->gss_svc_seqbits != NULL)
2478 FREE(cp->gss_svc_seqbits, M_TEMP);
2479 if (cp->gss_svc_token != NULL)
2480 FREE(cp->gss_svc_token, M_TEMP);
2481 lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
2482 FREE(cp, M_TEMP);
2483 }
2484
2485 nfsm_chain_build_done(error, &nmrep);
2486 if (error) {
2487 nfsm_chain_cleanup(&nmrep);
2488 *mrepp = NULL;
2489 }
2490 return (error);
2491 }
2492
2493 /*
2494 * This is almost a mirror-image of the client side upcall.
2495 * It passes and receives a token, but invokes gss_accept_sec_context.
2496 * If it's the final call of the context setup, then gssd also returns
2497 * the session key and the user's UID.
2498 */
2499 static int
2500 nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
2501 {
2502 kern_return_t kr;
2503 mach_port_t mp;
2504 int retry_cnt = 0;
2505 byte_buffer okey = NULL;
2506 uint32_t skeylen = 0;
2507 uint32_t ret_flags;
2508 vm_map_copy_t itoken = NULL;
2509 byte_buffer otoken = NULL;
2510 mach_msg_type_number_t otokenlen;
2511 int error = 0;
2512 char svcname[] = "nfs";
2513
2514 kr = task_get_gssd_port(get_threadtask(current_thread()), &mp);
2515 if (kr != KERN_SUCCESS) {
2516 printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
2517 goto out;
2518 }
2519 if (!IPC_PORT_VALID(mp)) {
2520 printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
2521 goto out;
2522 }
2523
2524 if (cp->gss_svc_tokenlen > 0)
2525 nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
2526
2527 retry:
2528 kr = mach_gss_accept_sec_context(
2529 mp,
2530 (byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
2531 svcname,
2532 0,
2533 &cp->gss_svc_context,
2534 &cp->gss_svc_cred_handle,
2535 &ret_flags,
2536 &cp->gss_svc_uid,
2537 cp->gss_svc_gids,
2538 &cp->gss_svc_ngroups,
2539 &okey, (mach_msg_type_number_t *) &skeylen,
2540 &otoken, &otokenlen,
2541 &cp->gss_svc_major,
2542 &cp->gss_svc_minor);
2543
2544 if (kr != KERN_SUCCESS) {
2545 printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
2546 if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
2547 retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
2548 if (cp->gss_svc_tokenlen > 0)
2549 nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
2550 goto retry;
2551 }
2552 task_release_special_port(mp);
2553 goto out;
2554 }
2555
2556 task_release_special_port(mp);
2557
2558 if (skeylen > 0) {
2559 if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
2560 printf("nfs_gss_svc_gssd_upcall: bad key length (%d)\n", skeylen);
2561 vm_map_copy_discard((vm_map_copy_t) okey);
2562 vm_map_copy_discard((vm_map_copy_t) otoken);
2563 goto out;
2564 }
2565 error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_svc_kinfo.skey);
2566 if (error) {
2567 vm_map_copy_discard((vm_map_copy_t) otoken);
2568 goto out;
2569 }
2570 error = gss_key_init(&cp->gss_svc_kinfo, skeylen);
2571 if (error)
2572 goto out;
2573
2574 }
2575
2576 /* Free context token used as input */
2577 if (cp->gss_svc_token)
2578 FREE(cp->gss_svc_token, M_TEMP);
2579 cp->gss_svc_token = NULL;
2580 cp->gss_svc_tokenlen = 0;
2581
2582 if (otokenlen > 0) {
2583 /* Set context token to gss output token */
2584 MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
2585 if (cp->gss_svc_token == NULL) {
2586 printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
2587 vm_map_copy_discard((vm_map_copy_t) otoken);
2588 return (ENOMEM);
2589 }
2590 error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
2591 if (error) {
2592 FREE(cp->gss_svc_token, M_TEMP);
2593 cp->gss_svc_token = NULL;
2594 return (NFSERR_EAUTH);
2595 }
2596 cp->gss_svc_tokenlen = otokenlen;
2597 }
2598
2599 return (0);
2600
2601 out:
2602 FREE(cp->gss_svc_token, M_TEMP);
2603 cp->gss_svc_tokenlen = 0;
2604 cp->gss_svc_token = NULL;
2605
2606 return (NFSERR_EAUTH);
2607 }
2608
2609 /*
2610 * Validate the sequence number in the credential as described
2611 * in RFC 2203 Section 5.3.3.1
2612 *
2613 * Here the window of valid sequence numbers is represented by
2614 * a bitmap. As each sequence number is received, its bit is
2615 * set in the bitmap. An invalid sequence number lies below
2616 * the lower bound of the window, or is within the window but
2617 * has its bit already set.
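 *
 * For example, with a window of 64 and gss_svc_seqmax at 200, sequence
 * numbers 137-200 lie in the window: 201 advances the window and sets
 * its bit, anything at or below 136 is rejected as too old, and a
 * replayed 180 (already seen once) is rejected because its bit is set.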
2618 */
2619 static int
2620 nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
2621 {
2622 uint32_t *bits = cp->gss_svc_seqbits;
2623 uint32_t win = cp->gss_svc_seqwin;
2624 uint32_t i;
2625
2626 lck_mtx_lock(cp->gss_svc_mtx);
2627
2628 /*
2629 * If greater than the window upper bound,
2630 * move the window up, and set the bit.
2631 */
2632 if (seq > cp->gss_svc_seqmax) {
2633 if (seq - cp->gss_svc_seqmax > win)
2634 bzero(bits, nfsm_rndup((win + 7) / 8));
2635 else
2636 for (i = cp->gss_svc_seqmax + 1; i < seq; i++)
2637 win_resetbit(bits, i % win);
2638 win_setbit(bits, seq % win);
2639 cp->gss_svc_seqmax = seq;
2640 lck_mtx_unlock(cp->gss_svc_mtx);
2641 return (1);
2642 }
2643
2644 /*
2645 * Invalid if below the lower bound of the window
2646 */
2647 if (seq <= cp->gss_svc_seqmax - win) {
2648 lck_mtx_unlock(cp->gss_svc_mtx);
2649 return (0);
2650 }
2651
2652 /*
2653 * In the window, invalid if the bit is already set
2654 */
2655 if (win_getbit(bits, seq % win)) {
2656 lck_mtx_unlock(cp->gss_svc_mtx);
2657 return (0);
2658 }
2659 win_setbit(bits, seq % win);
2660 lck_mtx_unlock(cp->gss_svc_mtx);
2661 return (1);
2662 }
2663
2664 /*
2665 * Called at NFS server shutdown - destroy all contexts
2666 */
2667 void
2668 nfs_gss_svc_cleanup(void)
2669 {
2670 struct nfs_gss_svc_ctx_hashhead *head;
2671 struct nfs_gss_svc_ctx *cp, *ncp;
2672 int i;
2673
2674 lck_mtx_lock(nfs_gss_svc_ctx_mutex);
2675
2676 /*
2677 * Run through all the buckets
2678 */
2679 for (i = 0; i < SVC_CTX_HASHSZ; i++) {
2680 /*
2681 * Remove and free all entries in the bucket
2682 */
2683 head = &nfs_gss_svc_ctx_hashtbl[i];
2684 LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
2685 LIST_REMOVE(cp, gss_svc_entries);
2686 if (cp->gss_svc_seqbits)
2687 FREE(cp->gss_svc_seqbits, M_TEMP);
2688 lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
2689 FREE(cp, M_TEMP);
2690 }
2691 }
2692
2693 lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
2694 }
2695
2696 #endif /* NFSSERVER */
2697
2698
2699 /*************
2700 * The following functions are used by both client and server.
2701 */
2702
2703 /*
2704 * Release a task special port that was obtained by task_get_special_port
2705 * or one of its macros (task_get_gssd_port in this case).
2706 * This really should be in a public kpi.
2707 */
2708
2709 /* This should be in a public header if this routine is not */
2710 extern void ipc_port_release_send(ipc_port_t);
2711 extern ipc_port_t ipc_port_copy_send(ipc_port_t);
2712
2713 static void
2714 task_release_special_port(mach_port_t mp)
2715 {
2716
2717 ipc_port_release_send(mp);
2718 }
2719
2720 static mach_port_t
2721 task_copy_special_port(mach_port_t mp)
2722 {
2723 return ipc_port_copy_send(mp);
2724 }
2725
2726 /*
2727 * The token that is sent and received in the gssd upcall
2728 * has unbounded variable length. Mach RPC does not pass
2729 * the token in-line. Instead it uses page mapping to handle
2730 * these parameters. This function allocates a VM buffer
2731 * to hold the token for an upcall and copies the token
2732 * (received from the client) into it. The VM buffer is
2733 * marked with a src_destroy flag so that the upcall will
2734 * automatically de-allocate the buffer when the upcall is
2735 * complete.
2736 */
2737 static void
2738 nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr)
2739 {
2740 kern_return_t kr;
2741 vm_offset_t kmem_buf;
2742 vm_size_t tbuflen;
2743
2744 *addr = NULL;
2745 if (buf == NULL || buflen == 0)
2746 return;
2747
2748 tbuflen = round_page(buflen);
2749 kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE);
2750 if (kr != 0) {
2751 printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
2752 return;
2753 }
2754
2755 kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
2756 vm_map_round_page(kmem_buf + tbuflen),
2757 VM_PROT_READ|VM_PROT_WRITE, FALSE);
2758 if (kr != 0) {
2759 printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
2760 return;
2761 }
2762
2763 bcopy(buf, (void *) kmem_buf, buflen);
2764 // Shouldn't need to bzero below since vm_allocate returns zeroed pages
2765 // bzero(kmem_buf + buflen, tbuflen - buflen);
2766
2767 kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
2768 vm_map_round_page(kmem_buf + tbuflen), FALSE);
2769 if (kr != 0) {
2770 printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
2771 return;
2772 }
2773
2774 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
2775 (vm_map_size_t) buflen, TRUE, addr);
2776 if (kr != 0) {
2777 printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
2778 return;
2779 }
2780 }
2781
2782 /*
2783 * Here we handle a token received from the gssd via an upcall.
2784 * The received token resides in an allocated VM buffer.
2785 * We copy the token out of this buffer to a chunk of malloc'ed
2786 * memory of the right size, then de-allocate the VM buffer.
2787 */
2788 static int
2789 nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
2790 {
2791 vm_map_offset_t map_data;
2792 vm_offset_t data;
2793 int error;
2794
2795 error = vm_map_copyout(ipc_kernel_map, &map_data, in);
2796 if (error)
2797 return (error);
2798
2799 data = CAST_DOWN(vm_offset_t, map_data);
2800 bcopy((void *) data, out, len);
2801 vm_deallocate(ipc_kernel_map, data, len);
2802
2803 return (0);
2804 }
2805
2806 /*
2807 * Encode an ASN.1 token to be wrapped in an RPCSEC_GSS verifier.
2808 * Returns the size of the token, since it contains a variable
2809 * length DER encoded size field.
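 *
 * For a single DES key the MIC token built here is 37 octets: a 2 octet
 * ASN.1 header (0x60 plus a 1 octet length of 35), the 11 octet DER
 * encoded krb5 mech OID, 8 octets of token ID/algorithm/filler, 8 octets
 * of encrypted sequence number, and an 8 octet checksum.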
2810 */
2811 static int
2812 nfs_gss_token_put(
2813 gss_key_info *ki,
2814 u_char *alg,
2815 u_char *p,
2816 int initiator,
2817 int datalen,
2818 u_char *cksum)
2819 {
2820 static uint32_t seqnum = 0;
2821 u_char *psave = p;
2822 u_char plain[8];
2823 int toklen, i;
2824
2825 /*
2826 * Fill in the token header: 2 octets.
2827 * This is 0x60 - the ASN.1 tag for [APPLICATION 0] SEQUENCE -
2828 * followed by the DER encoded length of the token: 35 + 0 octets
2829 * for a MIC token, or 35 + encrypted octets for a wrap token.
2830 */
2831 *p++ = 0x60;
2832 toklen = KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + HASHLEN(ki);
2833 nfs_gss_der_length_put(&p, toklen + datalen);
2834
2835 /*
2836 * Fill in the DER encoded mech OID for Kerberos v5.
2837 * This represents the Kerberos OID 1.2.840.113554.1.2.2
2838 * described in RFC 2623, section 4.2
2839 */
2840 bcopy(krb5_mech, p, sizeof(krb5_mech));
2841 p += sizeof(krb5_mech);
2842
2843 /*
2844 * Now at the token described in RFC 1964, section 1.2.1
2845 * Fill in the token ID, the integrity algorithm indicator
2846 * (e.g. DES MAC MD5), and four filler octets.
2847 * The alg string encodes the bytes to represent either
2848 * a MIC token or a WRAP token for Kerberos.
2849 */
2850 bcopy(alg, p, KRB5_SZ_ALG);
2851 p += KRB5_SZ_ALG;
2852
2853 /*
2854 * Now encode the sequence number according to
2855 * RFC 1964, section 1.2.1.2 which dictates 4 octets
2856 * of sequence number followed by 4 bytes of direction
2857 * indicator: 0x00 for initiator or 0xff for acceptor.
2858 * We DES CBC encrypt the sequence number using the first
2859 * 8 octets of the checksum field as an initialization
2860 * vector.
2861 * Note that this sequence number is not at all related
2862 * to the RPCSEC_GSS protocol sequence number. This
2863 * number is private to the ASN.1 token. The only
2864 * requirement is that it not be repeated in case the
2865 * server has replay detection on, which normally should
2866 * not be the case, since RFC 2203 section 5.2.3 says that
2867 * replay detection and sequence checking must be turned off.
2868 */
2869 seqnum++;
2870 for (i = 0; i < 4; i++)
2871 plain[i] = (u_char) ((seqnum >> (i * 8)) & 0xff);
2872 for (i = 4; i < 8; i++)
2873 plain[i] = initiator ? 0x00 : 0xff;
2874 gss_des_crypt(ki, (des_cblock *) plain, (des_cblock *) p, 8,
2875 (des_cblock *) cksum, NULL, DES_ENCRYPT, KG_USAGE_SEQ);
2876 p += 8;
2877
2878 /*
2879 * Finally, append the octets of the
2880 * checksum of the alg + plaintext data.
2881 * The plaintext could be an RPC call header,
2882 * the window value, or a sequence number.
2883 */
2884 bcopy(cksum, p, HASHLEN(ki));
2885 p += HASHLEN(ki);
2886
2887 return (p - psave);
2888 }
2889
2890 /*
2891 * Determine size of ASN.1 DER length
2892 */
2893 static int
2894 nfs_gss_der_length_size(int len)
2895 {
2896 return
2897 len < (1 << 7) ? 1 :
2898 len < (1 << 8) ? 2 :
2899 len < (1 << 16) ? 3 :
2900 len < (1 << 24) ? 4 : 5;
2901 }
2902
2903 /*
2904 * Encode an ASN.1 DER length field
2905 */
2906 static void
2907 nfs_gss_der_length_put(u_char **pp, int len)
2908 {
2909 int sz = nfs_gss_der_length_size(len);
2910 u_char *p = *pp;
2911
2912 if (sz == 1) {
2913 *p++ = (u_char) len;
2914 } else {
2915 *p++ = (u_char) ((sz-1) | 0x80);
2916 sz -= 1;
2917 while (sz--)
2918 *p++ = (u_char) ((len >> (sz * 8)) & 0xff);
2919 }
2920
2921 *pp = p;
2922 }
2923
2924 /*
2925 * Decode an ASN.1 DER length field
2926 */
2927 static int
2928 nfs_gss_der_length_get(u_char **pp)
2929 {
2930 u_char *p = *pp;
2931 uint32_t flen, len = 0;
2932
2933 flen = *p & 0x7f;
2934
2935 if ((*p++ & 0x80) == 0)
2936 len = flen;
2937 else {
2938 if (flen > sizeof(uint32_t))
2939 return (-1);
2940 while (flen--)
2941 len = (len << 8) + *p++;
2942 }
2943 *pp = p;
2944 return (len);
2945 }
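
#if 0
/*
 * Illustrative only (not compiled): a minimal sketch showing how the
 * DER length helpers above round-trip a value. Lengths below 128 use
 * a single octet; e.g. 300 encodes as the three octets 0x82 0x01 0x2c.
 */
static void
nfs_gss_der_length_example(void)
{
	u_char buf[5], *p = buf;
	int len;

	nfs_gss_der_length_put(&p, 300);	// writes 0x82 0x01 0x2c
	p = buf;
	len = nfs_gss_der_length_get(&p);	// returns 300, advances p by 3
	printf("len=%d encoded in %ld octets\n", len, (long)(p - buf));
}
#endif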
2946
2947 /*
2948 * Decode an ASN.1 token from an RPCSEC_GSS verifier.
2949 */
2950 static int
2951 nfs_gss_token_get(
2952 gss_key_info *ki,
2953 u_char *alg,
2954 u_char *p,
2955 int initiator,
2956 uint32_t *len,
2957 u_char *cksum)
2958 {
2959 u_char d, plain[8];
2960 u_char *psave = p;
2961 int seqnum, i;
2962
2963 /*
2964 * Check that we have a valid token header
2965 */
2966 if (*p++ != 0x60)
2967 return (AUTH_BADCRED);
2968 (void) nfs_gss_der_length_get(&p); // ignore the size
2969
2970 /*
2971 * Check that we have the DER encoded Kerberos v5 mech OID
2972 */
2973 if (bcmp(p, krb5_mech, sizeof(krb5_mech)) != 0)
2974 return (AUTH_BADCRED);
2975 p += sizeof(krb5_mech);
2976
2977 /*
2978 * Now check the token ID, DES MAC MD5 algorithm
2979 * indicator, and filler octets.
2980 */
2981 if (bcmp(p, alg, KRB5_SZ_ALG) != 0)
2982 return (AUTH_BADCRED);
2983 p += KRB5_SZ_ALG;
2984
2985 /*
2986 * Now decrypt the sequence number.
2987 * Note that the gss decryption uses the first 8 octets
2988 * of the checksum field as an initialization vector (p + 8).
2989 * Per RFC 2203 section 5.2.2 we don't check the sequence number
2990 * in the ASN.1 token because the RPCSEC_GSS protocol has its
2991 * own sequence number described in section 5.3.3.1
2992 */
2993 seqnum = 0;
2994 gss_des_crypt(ki, (des_cblock *)p, (des_cblock *) plain, 8,
2995 (des_cblock *) (p + 8), NULL, DES_DECRYPT, KG_USAGE_SEQ);
2996 p += 8;
2997 for (i = 0; i < 4; i++)
2998 seqnum |= plain[i] << (i * 8);
2999
3000 /*
3001 * Make sure the direction
3002 * indicator octets are correct.
3003 */
3004 d = initiator ? 0x00 : 0xff;
3005 for (i = 4; i < 8; i++)
3006 if (plain[i] != d)
3007 return (AUTH_BADCRED);
3008
3009 /*
3010 * Finally, get the checksum
3011 */
3012 bcopy(p, cksum, HASHLEN(ki));
3013 p += HASHLEN(ki);
3014
3015 if (len != NULL)
3016 *len = p - psave;
3017
3018 return (0);
3019 }
3020
3021 /*
3022 * Return the number of bytes in an mbuf chain.
3023 */
3024 static int
3025 nfs_gss_mchain_length(mbuf_t mhead)
3026 {
3027 mbuf_t mb;
3028 int len = 0;
3029
3030 for (mb = mhead; mb; mb = mbuf_next(mb))
3031 len += mbuf_len(mb);
3032
3033 return (len);
3034 }
3035
3036 /*
3037 * Append an args or results mbuf chain to the header chain
3038 */
3039 static int
3040 nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc)
3041 {
3042 int error = 0;
3043 mbuf_t mb, tail;
3044
3045 /* Connect the mbuf chains */
3046 error = mbuf_setnext(nmc->nmc_mcur, mc);
3047 if (error)
3048 return (error);
3049
3050 /* Find the last mbuf in the chain */
3051 tail = NULL;
3052 for (mb = mc; mb; mb = mbuf_next(mb))
3053 tail = mb;
3054
3055 nmc->nmc_mcur = tail;
3056 nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
3057 nmc->nmc_left = mbuf_trailingspace(tail);
3058
3059 return (0);
3060 }
3061
3062 /*
3063 * Convert an mbuf chain to an NFS mbuf chain
3064 */
3065 static void
3066 nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
3067 {
3068 mbuf_t mb, tail;
3069
3070 /* Find the last mbuf in the chain */
3071 tail = NULL;
3072 for (mb = mc; mb; mb = mbuf_next(mb))
3073 tail = mb;
3074
3075 nmc->nmc_mhead = mc;
3076 nmc->nmc_mcur = tail;
3077 nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
3078 nmc->nmc_left = mbuf_trailingspace(tail);
3079 nmc->nmc_flags = 0;
3080 }
3081
3082
3083 /*
3084 * Compute a checksum over an mbuf chain.
3085 * Start building a digest at the given offset and keep going
3086 * until len bytes have been processed, crossing mbuf boundaries
3087 * as needed. For single DES keys the 16 byte MD5 digest is then
3088 * converted to an 8 byte DES CBC checksum; 3DES keys use HMAC-SHA1.
3089 */
3090 static void
3091 nfs_gss_cksum_mchain(
3092 gss_key_info *ki,
3093 mbuf_t mhead,
3094 u_char *alg,
3095 int offset,
3096 int len,
3097 u_char *digest)
3098 {
3099 mbuf_t mb;
3100 u_char *ptr;
3101 int left, bytes;
3102 GSS_DIGEST_CTX context;
3103
3104 gss_digest_Init(&context, ki);
3105
3106 /*
3107 * Logically prepend the first 8 bytes of the algorithm
3108 * field as required by RFC 1964, section 1.2.1.1
3109 */
3110 gss_digest_Update(&context, alg, KRB5_SZ_ALG);
3111
3112 /*
3113 * Move down the mbuf chain until we reach the given
3114 * byte offset, then start MD5 on the mbuf data until
3115 * we've done len bytes.
3116 */
3117
3118 for (mb = mhead; mb && len > 0; mb = mbuf_next(mb)) {
3119 ptr = mbuf_data(mb);
3120 left = mbuf_len(mb);
3121 if (offset >= left) {
3122 /* Offset not yet reached */
3123 offset -= left;
3124 continue;
3125 }
3126 /* At or beyond offset - checksum data */
3127 ptr += offset;
3128 left -= offset;
3129 offset = 0;
3130
3131 bytes = left < len ? left : len;
3132 if (bytes > 0)
3133 gss_digest_Update(&context, ptr, bytes);
3134 len -= bytes;
3135 }
3136
3137 gss_digest_Final(&context, digest);
3138 }
3139
3140 /*
3141 * Compute a checksum over an NFS mbuf chain.
3142 * Start building a digest at the given offset and keep going for
3143 * len bytes (or up to the chain's current encode/decode offset when
3144 * len is zero). For single DES keys the 16 byte MD5 digest is then
3145 * converted to an 8 byte DES CBC checksum; 3DES keys use HMAC-SHA1.
3146 */
3147 static void
3148 nfs_gss_cksum_chain(
3149 gss_key_info *ki,
3150 struct nfsm_chain *nmc,
3151 u_char *alg,
3152 int offset,
3153 int len,
3154 u_char *cksum)
3155 {
3156 /*
3157 * If the length parameter is zero, then we need
3158 * to use the length from the offset to the current
3159 * encode/decode offset.
3160 */
3161 if (len == 0)
3162 len = nfsm_chain_offset(nmc) - offset;
3163
3164 nfs_gss_cksum_mchain(ki, nmc->nmc_mhead, alg, offset, len, cksum);
3165 }
3166
3167 /*
3168 * Compute a checksum of the sequence number (or sequence window)
3169 * of an RPCSEC_GSS reply.
3170 */
3171 static void
3172 nfs_gss_cksum_rep(gss_key_info *ki, uint32_t seqnum, u_char *cksum)
3173 {
3174 GSS_DIGEST_CTX context;
3175 uint32_t val = htonl(seqnum);
3176
3177 gss_digest_Init(&context, ki);
3178
3179 /*
3180 * Logically prepend the first 8 bytes of the MIC
3181 * token as required by RFC 1964, section 1.2.1.1
3182 */
3183 gss_digest_Update(&context, ALG_MIC(ki), KRB5_SZ_ALG);
3184
3185 /*
3186 * Compute the digest of the seqnum in network order
3187 */
3188 gss_digest_Update(&context, &val, 4);
3189 gss_digest_Final(&context, cksum);
3190 }
3191
3192 /*
3193 * Encrypt or decrypt data in an mbuf chain with des-cbc.
3194 */
3195 static void
3196 nfs_gss_encrypt_mchain(
3197 gss_key_info *ki,
3198 mbuf_t mhead,
3199 int offset,
3200 int len,
3201 int encrypt)
3202 {
3203 mbuf_t mb, mbn;
3204 u_char *ptr, *nptr;
3205 u_char tmp[8], ivec[8];
3206 int left, left8, remain;
3207
3208
3209 bzero(ivec, 8);
3210
3211 /*
3212 * Move down the mbuf chain until we reach the given
3213 * byte offset, then start encrypting the mbuf data until
3214 * we've done len bytes.
3215 */
3216
3217 for (mb = mhead; mb && len > 0; mb = mbn) {
3218 mbn = mbuf_next(mb);
3219 ptr = mbuf_data(mb);
3220 left = mbuf_len(mb);
3221 if (offset >= left) {
3222 /* Offset not yet reached */
3223 offset -= left;
3224 continue;
3225 }
3226 /* At or beyond offset - encrypt data */
3227 ptr += offset;
3228 left -= offset;
3229 offset = 0;
3230
3231 /*
3232 * DES or DES3 CBC has to encrypt 8 bytes at a time.
3233 * If the number of bytes to be encrypted in this
3234 * mbuf isn't some multiple of 8 bytes, encrypt all
3235 * the 8 byte blocks, then combine the remaining
3236 * bytes with enough from the next mbuf to make up
3237 * an 8 byte block and encrypt that block separately,
3238 * i.e. that block is split across two mbufs.
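 * For example, if 13 bytes remain in this mbuf, the first 8 are
 * encrypted in place, and the trailing 5 are combined in tmp[] with
 * 3 bytes from the next mbuf, encrypted as one block, and copied
 * back out to the two mbufs.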
3239 */
3240 remain = left % 8;
3241 left8 = left - remain;
3242 left = left8 < len ? left8 : len;
3243 if (left > 0) {
3244 gss_des_crypt(ki, (des_cblock *) ptr, (des_cblock *) ptr,
3245 left, &ivec, &ivec, encrypt, KG_USAGE_SEAL);
3246 len -= left;
3247 }
3248
3249 if (mbn && remain > 0) {
3250 nptr = mbuf_data(mbn);
3251 offset = 8 - remain;
3252 bcopy(ptr + left, tmp, remain); // grab from this mbuf
3253 bcopy(nptr, tmp + remain, offset); // grab from next mbuf
3254 gss_des_crypt(ki, (des_cblock *) tmp, (des_cblock *) tmp, 8,
3255 &ivec, &ivec, encrypt, KG_USAGE_SEAL);
3256 bcopy(tmp, ptr + left, remain); // return to this mbuf
3257 bcopy(tmp + remain, nptr, offset); // return to next mbuf
3258 len -= 8;
3259 }
3260 }
3261 }
3262
3263 /*
3264 * Encrypt or decrypt data in an NFS mbuf chain with des-cbc.
3265 */
3266 static void
3267 nfs_gss_encrypt_chain(
3268 gss_key_info *ki,
3269 struct nfsm_chain *nmc,
3270 int offset,
3271 int len,
3272 int encrypt)
3273 {
3274 /*
3275 * If the length parameter is zero, then we need
3276 * to use the length from the offset to the current
3277 * encode/decode offset.
3278 */
3279 if (len == 0)
3280 len = nfsm_chain_offset(nmc) - offset;
3281
3282 nfs_gss_encrypt_mchain(ki, nmc->nmc_mhead, offset, len, encrypt);
3283 }
3284
3285 /*
3286 * The routines that follow provide abstractions for doing digests and crypto.
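 *
 * The checksum helpers above all follow the same three step pattern:
 *
 *	gss_digest_Init(&ctx, ki);
 *	gss_digest_Update(&ctx, data, len);	// repeated as needed
 *	gss_digest_Final(&ctx, digest);
 *
 * where the key info selects MD5/DES-CBC for single DES keys or
 * HMAC-SHA1 (DES3-KD) for 3DES keys.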
3287 */
3288
3289 static void
3290 gss_digest_Init(GSS_DIGEST_CTX *ctx, gss_key_info *ki)
3291 {
3292 ctx->type = ki->type;
3293 switch (ki->type) {
3294 case NFS_GSS_1DES: MD5_DESCBC_Init(&ctx->m_ctx, &ki->ks_u.des.gss_sched);
3295 break;
3296 case NFS_GSS_3DES: HMAC_SHA1_DES3KD_Init(&ctx->h_ctx, ki->ks_u.des3.ckey, 0);
3297 break;
3298 default:
3299 printf("gss_digest_Init: Unknown key info type %d\n", ki->type);
3300 }
3301 }
3302
3303 static void
3304 gss_digest_Update(GSS_DIGEST_CTX *ctx, void *data, size_t len)
3305 {
3306 switch (ctx->type) {
3307 case NFS_GSS_1DES: MD5_DESCBC_Update(&ctx->m_ctx, data, len);
3308 break;
3309 case NFS_GSS_3DES: HMAC_SHA1_DES3KD_Update(&ctx->h_ctx, data, len);
3310 break;
3311 }
3312 }
3313
3314 static void
3315 gss_digest_Final(GSS_DIGEST_CTX *ctx, void *digest)
3316 {
3317 switch (ctx->type) {
3318 case NFS_GSS_1DES: MD5_DESCBC_Final(digest, &ctx->m_ctx);
3319 break;
3320 case NFS_GSS_3DES: HMAC_SHA1_DES3KD_Final(digest, &ctx->h_ctx);
3321 break;
3322 }
3323 }
3324
3325 static void
3326 gss_des_crypt(gss_key_info *ki, des_cblock *in, des_cblock *out,
3327 int32_t len, des_cblock *iv, des_cblock *retiv, int encrypt, int usage)
3328 {
3329 switch (ki->type) {
3330 case NFS_GSS_1DES:
3331 {
3332 des_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
3333 &ki->ks_u.des.gss_sched_Ke :
3334 &ki->ks_u.des.gss_sched);
3335 des_cbc_encrypt(in, out, len, *sched, iv, retiv, encrypt);
3336 }
3337 break;
3338 case NFS_GSS_3DES:
3339
3340 des3_cbc_encrypt(in, out, len, ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
3341 break;
3342 }
3343 }
3344
3345 static int
3346 gss_key_init(gss_key_info *ki, uint32_t skeylen)
3347 {
3348 size_t i;
3349 int rc;
3350 des_cblock k[3];
3351
3352 ki->keybytes = skeylen;
3353 switch (skeylen) {
3354 case sizeof(des_cblock):
3355 ki->type = NFS_GSS_1DES;
3356 ki->hash_len = MD5_DESCBC_DIGEST_LENGTH;
3357 ki->ks_u.des.key = (des_cblock *)ki->skey;
3358 rc = des_key_sched(ki->ks_u.des.key, ki->ks_u.des.gss_sched);
3359 if (rc)
3360 return (rc);
3361 for (i = 0; i < ki->keybytes; i++)
3362 k[0][i] = 0xf0 ^ (*ki->ks_u.des.key)[i];
3363 rc = des_key_sched(&k[0], ki->ks_u.des.gss_sched_Ke);
3364 break;
3365 case 3*sizeof(des_cblock):
3366 ki->type = NFS_GSS_3DES;
3367 ki->hash_len = SHA_DIGEST_LENGTH;
3368 ki->ks_u.des3.key = (des_cblock (*)[3])ki->skey;
3369 des3_derive_key(*ki->ks_u.des3.key, ki->ks_u.des3.ckey,
3370 KEY_USAGE_DES3_SIGN, KEY_USAGE_LEN);
3371 rc = des3_key_sched(*ki->ks_u.des3.key, ki->ks_u.des3.gss_sched);
3372 if (rc)
3373 return (rc);
3374 break;
3375 default:
3376 printf("gss_key_init: Invalid key length %d\n", skeylen);
3377 rc = EINVAL;
3378 break;
3379 }
3380
3381 return (rc);
3382 }
3383
3384 #if 0
3385 #define DISPLAYLEN 16
3386 #define MAXDISPLAYLEN 256
3387
3388 static void
3389 hexdump(const char *msg, void *data, size_t len)
3390 {
3391 size_t i, j;
3392 u_char *d = data;
3393 char *p, disbuf[3*DISPLAYLEN+1];
3394
3395 printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
3396 if (len > MAXDISPLAYLEN)
3397 len = MAXDISPLAYLEN;
3398
3399 for (i = 0; i < len; i += DISPLAYLEN) {
3400 for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3)
3401 snprintf(p, 4, "%02x ", d[i + j]);
3402 printf("\t%s\n", disbuf);
3403 }
3404 }
3405 #endif