/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * These functions implement RPCSEC_GSS security for the NFS client and server.
 * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5
 * protection as described in Internet RFC 2203 and 2623.
 *
 * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
 * It requires that the client and server negotiate a secure connection as part
 * of a security context. The context state is maintained in client and server
 * structures. On the client side, each user of an NFS mount is assigned their
 * own context, identified by UID, on their first use of the mount, and it
 * persists until the unmount or until the context is renewed.  Each user
 * context has a corresponding server context which the server maintains until
 * the client destroys it, or until the context expires.
 *
 * The client and server contexts are set up dynamically.  When a user attempts
 * to send an NFS request, if there is no context for the user, then one is
 * set up via an exchange of NFS null procedure calls as described in RFC 2203.
 * During this exchange, the client and server pass a security token that is
 * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
 * the user to the server (and vice-versa). The client and server also receive
 * a unique session key that can be used to digitally sign the credentials and
 * verifier or optionally to provide data integrity and/or privacy.
 *
 * Once the context is complete, the client and server enter a normal data
 * exchange phase - beginning with the NFS request that prompted the context
 * creation.  During this phase, the client's RPC header contains an RPCSEC_GSS
 * credential and verifier, and the server returns a verifier as well.
 * For simple authentication, the verifier contains a signed checksum of the
 * RPC header, including the credential.  The server's verifier has a signed
 * checksum of the current sequence number.
 *
 * Each client call contains a sequence number that nominally increases by one
 * on each request.  The sequence number is intended to prevent replay attacks.
 * Since the protocol can be used over UDP, there is some allowance for
 * out-of-sequence requests, so the server checks whether the sequence numbers
 * are within a sequence "window".  If a sequence number is outside the lower
 * bound of the window, the server silently drops the request.  This has some
 * implications for retransmission.  If a request needs to be retransmitted, the
 * client must bump the sequence number even if the request XID is unchanged.
 *
 * When the NFS mount is unmounted, the client sends a "destroy" credential
 * to delete the server's context for each user of the mount. Since it's
 * possible for the client to crash or disconnect without sending the destroy
 * message, the server has a thread that reaps contexts that have been idle
 * too long.
 */
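/*
 * A minimal standalone sketch of the sequence-window check described above
 * (RFC 2203).  The type and helper names here are illustrative only; the real
 * code keeps the window in the context's seqbits bitmap and uses the
 * win_getbit/win_setbit/win_resetbit macros.  A full implementation would also
 * clear the bits of slots the window slides past and reject duplicates.
 */
#if 0   /* illustrative sketch, not compiled */
#include <stdint.h>

typedef struct {
    uint32_t top;        /* highest sequence number accepted so far */
    uint32_t win;        /* window size returned at context setup */
    uint8_t  bits[128];  /* one bit per window slot (supports win <= 1024) */
} seq_window_sketch_t;

/* Return 1 to accept the request, 0 to silently drop it. */
static int
seq_window_check_sketch(seq_window_sketch_t *w, uint32_t seq)
{
    if (w->top >= w->win && seq <= w->top - w->win)
        return 0;                       /* below the lower bound: drop */
    if (seq > w->top)
        w->top = seq;                   /* window slides forward */
    w->bits[(seq % w->win) >> 3] |= (uint8_t)(1 << ((seq % w->win) & 7));
    return 1;
}
#endif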
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/mount_internal.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/ucred.h>

#include <kern/host.h>
#include <kern/task.h>
#include <libkern/libkern.h>

#include <mach/task.h>
#include <mach/host_special_ports.h>
#include <mach/host_priv.h>
#include <mach/thread_act.h>
#include <mach/mig_errors.h>
#include <mach/vm_map.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <gssd/gssd_mach.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>
#include "nfs_gss_crypto.h"
#include <mach_assert.h>
#include <kern/assert.h>

#define ASSERT(EX) assert(EX)

#define NFS_GSS_MACH_MAX_RETRIES 3

#define NFS_GSS_DBG(...) NFS_DBG(NFS_FAC_GSS, 7, ## __VA_ARGS__)
#define NFS_GSS_ISDBG (NFS_DEBUG_FACILITY & NFS_FAC_GSS)
typedef struct {
    int type;
    union {
        MD5_DESCBC_CTX m_ctx;
        HMAC_SHA1_DES3KD_CTX h_ctx;
    };
} GSS_DIGEST_CTX;

#define MAX_DIGEST SHA_DIGEST_LENGTH
#ifdef NFS_KERNEL_DEBUG
#define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
        (panic("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
#else
#define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
        (printf("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
#endif

#if NFSSERVER
u_long nfs_gss_svc_ctx_hash;
struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
lck_mtx_t *nfs_gss_svc_ctx_mutex;
lck_grp_t *nfs_gss_svc_grp;
uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
#define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
#endif /* NFSSERVER */

#if NFSCLIENT
lck_grp_t *nfs_gss_clnt_grp;
#endif /* NFSCLIENT */
/*
 * These octet strings are used to encode/decode ASN.1 tokens
 * in the RPCSEC_GSS verifiers.
 */
static u_char krb5_tokhead[] __attribute__((unused)) = { 0x60, 0x23 };
       u_char krb5_mech[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
static u_char krb5_mic[]   = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_mic3[]  = { 0x01, 0x01, 0x04, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_wrap[]  = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
static u_char krb5_wrap3[] = { 0x02, 0x01, 0x04, 0x00, 0x02, 0x00, 0xff, 0xff };
static u_char iv0[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; // DES MAC Initialization Vector

#define ALG_MIC(ki)  (((ki)->type == NFS_GSS_1DES) ? krb5_mic : krb5_mic3)
#define ALG_WRAP(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_wrap : krb5_wrap3)

/*
 * The size of the Kerberos v5 ASN.1 token.
 *
 * Note that the second octet of the krb5_tokhead (0x23) is a
 * DER-encoded size field that has variable length.  If the size
 * is 128 bytes or greater, then it uses two bytes, three bytes
 * if 65536 or greater, and so on.  Since the MIC tokens are
 * separate from the data, the size is always the same: 35 bytes (0x23).
 * However, the wrap token is different. Its size field includes the
 * size of the token + the encrypted data that follows. So the size
 * field may be two, three or four bytes.
 */
#define KRB5_SZ_TOKHEAD sizeof(krb5_tokhead)
#define KRB5_SZ_MECH    sizeof(krb5_mech)
#define KRB5_SZ_ALG     sizeof(krb5_mic) // 8 - same as krb5_wrap
#define KRB5_SZ_SEQ     8
#define KRB5_SZ_EXTRA   3  // a wrap token may be longer by up to this many octets
#define KRB5_SZ_TOKEN_NOSUM      (KRB5_SZ_TOKHEAD + KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ)
#define KRB5_SZ_TOKEN(cksumlen)  ((cksumlen) + KRB5_SZ_TOKEN_NOSUM)
#define KRB5_SZ_TOKMAX(cksumlen) (KRB5_SZ_TOKEN(cksumlen) + KRB5_SZ_EXTRA)
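/*
 * A simplified standalone sketch of the variable-length DER size field
 * described above.  The helper name is illustrative; the file's own
 * nfs_gss_der_length_size/_put/_get routines (declared below) are what the
 * token code actually uses.
 */
#if 0   /* illustrative sketch, not compiled */
#include <stdint.h>

/*
 * Lengths below 128 are encoded in a single byte.  Larger lengths use a
 * prefix byte of (0x80 | n) followed by n big-endian length octets, which is
 * why a wrap token header can grow by up to KRB5_SZ_EXTRA octets.
 * Returns the number of bytes written.
 */
static int
der_length_put_sketch(uint8_t *p, int len)
{
    if (len < 128) {
        *p = (uint8_t)len;
        return 1;
    } else {
        int n = (len < 0x100) ? 1 : (len < 0x10000) ? 2 : 3;
        int i;

        *p++ = (uint8_t)(0x80 | n);
        for (i = n - 1; i >= 0; i--)
            *p++ = (uint8_t)(len >> (8 * i));
        return n + 1;
    }
}
#endif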
#if NFSCLIENT
static int  nfs_gss_clnt_ctx_find(struct nfsreq *);
static int  nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int  nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int  nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *);
static int  nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *);
void        nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *);
static void nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *);
static int  nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *, struct nfs_gss_clnt_ctx **, gss_key_info *);
static void nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *);
static void nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t);
#endif /* NFSCLIENT */
#if NFSSERVER
static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
static void nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
static void nfs_gss_svc_ctx_timer(void *, void *);
static int  nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
static int  nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
#endif /* NFSSERVER */
static void host_release_special_port(mach_port_t);
static mach_port_t host_copy_special_port(mach_port_t);
static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
static int  nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
static int  nfs_gss_token_get(gss_key_info *ki, u_char *, u_char *, int, uint32_t *, u_char *);
static int  nfs_gss_token_put(gss_key_info *ki, u_char *, u_char *, int, int, u_char *);
static int  nfs_gss_der_length_size(int);
static void nfs_gss_der_length_put(u_char **, int);
static int  nfs_gss_der_length_get(u_char **);
static int  nfs_gss_mchain_length(mbuf_t);
static int  nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
static void nfs_gss_cksum_mchain(gss_key_info *, mbuf_t, u_char *, int, int, u_char *);
static void nfs_gss_cksum_chain(gss_key_info *, struct nfsm_chain *, u_char *, int, int, u_char *);
static void nfs_gss_cksum_rep(gss_key_info *, uint32_t, u_char *);
static void nfs_gss_encrypt_mchain(gss_key_info *, mbuf_t, int, int, int);
static void nfs_gss_encrypt_chain(gss_key_info *, struct nfsm_chain *, int, int, int);

static void gss_digest_Init(GSS_DIGEST_CTX *, gss_key_info *);
static void gss_digest_Update(GSS_DIGEST_CTX *, void *, size_t);
static void gss_digest_Final(GSS_DIGEST_CTX *, void *);
static void gss_des_crypt(gss_key_info *, des_cblock *, des_cblock *,
        int32_t, des_cblock *, des_cblock *, int, int);
static int  gss_key_init(gss_key_info *, uint32_t);
#if NFSSERVER
thread_call_t nfs_gss_svc_ctx_timer_call;
int nfs_gss_timer_on = 0;
uint32_t nfs_gss_ctx_count = 0;
const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
#endif /* NFSSERVER */
/*
 * Initialization when NFS starts
 */
void
nfs_gss_init(void)
{
#if NFSCLIENT
    nfs_gss_clnt_grp = lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL);
#endif /* NFSCLIENT */

#if NFSSERVER
    nfs_gss_svc_grp = lck_grp_alloc_init("rpcsec_gss_svc", LCK_GRP_ATTR_NULL);

    nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);
    nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);

    nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
#endif /* NFSSERVER */
}
/*
 * Find the context for a particular user.
 *
 * If the context doesn't already exist
 * then create a new context for this user.
 *
 * Note that the code allows superuser (uid == 0)
 * to adopt the context of another user.
 *
 * We'll match on the audit session ids, since those
 * processes will have access to the same credential cache.
 */

#define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
#define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)

#define SAFE_CAST_INTTYPE( type, intval ) \
    ( (type)(intval)/(sizeof(type) < sizeof(intval) ? 0 : 1) )
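/*
 * What SAFE_CAST_INTTYPE buys us, sketched with hypothetical code: the divide
 * selects between /1 (a no-op) and /0, so a cast that would narrow the value
 * turns into a loud divide-by-zero diagnostic instead of silent truncation.
 * This assumes the usual 32-bit au_asid_t and uid_t widths.
 */
#if 0   /* illustrative sketch, not compiled */
static uid_t
asid_to_uid_sketch(au_asid_t asid)
{
    /* Compiles to a plain cast because uid_t is not narrower than au_asid_t. */
    return SAFE_CAST_INTTYPE(uid_t, asid);
}
#endif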
uid_t
nfs_cred_getasid2uid(kauth_cred_t cred)
{
    uid_t result = SAFE_CAST_INTTYPE(uid_t, kauth_cred_getasid(cred));

    return (result);
}
void
nfs_gss_clnt_ctx_dump(struct nfsmount *nmp)
{
    struct nfs_gss_clnt_ctx *cp;

    lck_mtx_lock(&nmp->nm_lock);
    NFS_GSS_DBG("Enter\n");
    TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
        lck_mtx_lock(cp->gss_clnt_mtx);
        printf("context %d/%d: refcnt = %d, flags = %x\n",
            kauth_cred_getasid(cp->gss_clnt_cred),
            kauth_cred_getauid(cp->gss_clnt_cred),
            cp->gss_clnt_refcnt, cp->gss_clnt_flags);
        lck_mtx_unlock(cp->gss_clnt_mtx);
    }
    NFS_GSS_DBG("Exit\n");
    lck_mtx_unlock(&nmp->nm_lock);
}
static char *
nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *buf, int len)
{
    char *np;
    int nlen;
    const char *server = "";

    if (nmp && nmp->nm_mountp)
        server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;

    if (cp == NULL) {
        snprintf(buf, len, "[%s] NULL context", server);
        return (buf);
    }

    if (cp->gss_clnt_principal && !cp->gss_clnt_display) {
        np = (char *)cp->gss_clnt_principal;
        nlen = cp->gss_clnt_prinlen;
    } else {
        np = cp->gss_clnt_display;
        nlen = np ? strlen(cp->gss_clnt_display) : 0;
    }
    if (nlen)
        snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen, np,
            kauth_cred_getasid(cp->gss_clnt_cred),
            kauth_cred_getuid(cp->gss_clnt_cred),
            cp->gss_clnt_principal ? "" : "[from default cred] ");
    else
        snprintf(buf, len, "[%s] using default %d/%d ", server,
            kauth_cred_getasid(cp->gss_clnt_cred),
            kauth_cred_getuid(cp->gss_clnt_cred));

    return (buf);
}
#define NFS_CTXBUFSZ 80
#define NFS_GSS_CTX(req, cp) nfs_gss_clnt_ctx_name((req)->r_nmp, cp ? cp : (req)->r_gss_ctx, CTXBUF, sizeof(CTXBUF))

#define NFS_GSS_CLNT_CTX_DUMP(nmp)                      \
    do {                                                \
        if (NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x2))   \
            nfs_gss_clnt_ctx_dump((nmp));               \
    } while (0)
static int
nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2)
{
    if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2))
        return (1);
    return (0);
}
/*
 * Busy the mount for each principal set on the mount
 * so that the automounter will not unmount the file
 * system underneath us. Without this, if an unmount
 * occurs the principal that is set for an audit session
 * will be lost and we may end up with a different identity.
 *
 * Note that setting principals on the mount is a bad idea. This
 * really should be handled by KIM (Kerberos Identity Management)
 * so that defaults can be set by service identities.
 */
static void
nfs_gss_clnt_mnt_ref(struct nfsmount *nmp)
{
    int error;
    vnode_t rvp;

    if (nmp == NULL ||
        !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED))
        return;

    error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL);
    if (!error) {
        vnode_ref(rvp);
        vnode_put(rvp);
    }
}

/*
 * Unbusy the mount. See the comment above.
 */
static void
nfs_gss_clnt_mnt_rele(struct nfsmount *nmp)
{
    int error;
    vnode_t rvp;

    if (nmp == NULL ||
        !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED))
        return;

    error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL);
    if (!error) {
        vnode_rele(rvp);
        vnode_put(rvp);
    }
}

int nfs_root_steals_ctx = 1;
static int
nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t plen, uint32_t nt)
{
    struct nfsmount *nmp = req->r_nmp;
    struct nfs_gss_clnt_ctx *cp;
    struct nfsreq treq;
    int error = 0;
    struct timeval now;
    gss_key_info *ki;
    char CTXBUF[NFS_CTXBUFSZ];

    bzero(&treq, sizeof (struct nfsreq));
    treq.r_nmp = nmp;

    microuptime(&now);
    lck_mtx_lock(&nmp->nm_lock);
    TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
        lck_mtx_lock(cp->gss_clnt_mtx);
        if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
            NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
                NFS_GSS_CTX(req, cp),
                cp->gss_clnt_refcnt);
            lck_mtx_unlock(cp->gss_clnt_mtx);
            continue;
        }
        if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) {
            if (nmp->nm_gsscl.tqh_first != cp) {
                TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
                TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
            }
            if (principal) {
                /*
                 * If we have a principal, but it does not match the current cred
                 * mark it for removal
                 */
                if (cp->gss_clnt_prinlen != plen || cp->gss_clnt_prinnt != nt ||
                    bcmp(cp->gss_clnt_principal, principal, plen) != 0) {
                    cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
                    cp->gss_clnt_refcnt++;
                    lck_mtx_unlock(cp->gss_clnt_mtx);
                    NFS_GSS_DBG("Marking %s for deletion because %s does not match\n",
                        NFS_GSS_CTX(req, cp), principal);
                    NFS_GSS_DBG("len = (%d,%d), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen,
                        cp->gss_clnt_prinnt, nt);
                    treq.r_gss_ctx = cp;
                    cp = NULL;
                    break;
                }
            }
            if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
                /*
                 * If we're still being used and we're not expired
                 * just return and don't bother gssd again. Note if
                 * gss_clnt_nctime is zero it is about to be set to now.
                 */
                if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec || cp->gss_clnt_nctime == 0) {
                    NFS_GSS_DBG("Context %s (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n",
                        NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec);
                    lck_mtx_unlock(cp->gss_clnt_mtx);
                    lck_mtx_unlock(&nmp->nm_lock);
                    return (NFSERR_EAUTH);
                }
                if (cp->gss_clnt_refcnt) {
                    struct nfs_gss_clnt_ctx *ncp;
                    /*
                     * If this context has references, we can't use it so we mark it for
                     * destruction and create a new context based on this one in the
                     * same manner as renewing one.
                     */
                    cp->gss_clnt_flags |= GSS_CTX_DESTROY;
                    NFS_GSS_DBG("Context %s has expired but we still have %d references\n",
                        NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt);
                    error = nfs_gss_clnt_ctx_copy(cp, &ncp, NULL);
                    lck_mtx_unlock(cp->gss_clnt_mtx);
                    if (error) {
                        lck_mtx_unlock(&nmp->nm_lock);
                        return (error);
                    }
                    cp = ncp;
                    break;
                } else {
                    /* cp->gss_clnt_kinfo should be NULL here */
                    if (cp->gss_clnt_kinfo) {
                        FREE(cp->gss_clnt_kinfo, M_TEMP);
                        cp->gss_clnt_kinfo = NULL;
                    }
                    if (cp->gss_clnt_nctime)
                        nmp->nm_ncentries--;
                    lck_mtx_unlock(cp->gss_clnt_mtx);
                    TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
                    break;
                }
            }
            /* Found a valid context to return */
            cp->gss_clnt_refcnt++;
            req->r_gss_ctx = cp;
            lck_mtx_unlock(cp->gss_clnt_mtx);
            lck_mtx_unlock(&nmp->nm_lock);
            return (0);
        }
        lck_mtx_unlock(cp->gss_clnt_mtx);
    }

    if (!cp && nfs_root_steals_ctx && principal == NULL && kauth_cred_getuid(req->r_cred) == 0) {
        /*
         * If superuser is trying to get access, then co-opt
         * the first valid context in the list.
         * XXX Ultimately, we need to allow superuser to
         * go ahead and attempt to set up its own context
         * in case one is set up for it.
         */
        TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
            if (!(cp->gss_clnt_flags & (GSS_CTX_INVAL|GSS_CTX_DESTROY))) {
                nfs_gss_clnt_ctx_ref(req, cp);
                lck_mtx_unlock(&nmp->nm_lock);
                NFS_GSS_DBG("Root stole context %s\n", NFS_GSS_CTX(req, NULL));
                return (0);
            }
        }
    }

    MALLOC(ki, gss_key_info *, sizeof (gss_key_info), M_TEMP, M_WAITOK|M_ZERO);
    if (ki == NULL) {
        lck_mtx_unlock(&nmp->nm_lock);
        return (ENOMEM);
    }

    NFS_GSS_DBG("Context %s%sfound in Neg Cache @ %ld\n",
        NFS_GSS_CTX(req, cp),
        cp == NULL ? " not " : "",
        cp == NULL ? 0L : cp->gss_clnt_nctime);

    /*
     * Not found - create a new context
     */
    if (cp == NULL) {
        MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
        if (cp == NULL) {
            lck_mtx_unlock(&nmp->nm_lock);
            return (ENOMEM);
        }
        cp->gss_clnt_kinfo = ki;
        cp->gss_clnt_cred = req->r_cred;
        kauth_cred_ref(cp->gss_clnt_cred);
        cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
        cp->gss_clnt_ptime = now.tv_sec - GSS_PRINT_DELAY;
        if (principal) {
            MALLOC(cp->gss_clnt_principal, uint8_t *, plen+1, M_TEMP, M_WAITOK|M_ZERO);
            memcpy(cp->gss_clnt_principal, principal, plen);
            cp->gss_clnt_prinlen = plen;
            cp->gss_clnt_prinnt = nt;
            cp->gss_clnt_flags |= GSS_CTX_STICKY;
            nfs_gss_clnt_mnt_ref(nmp);
        }
    } else {
        cp->gss_clnt_kinfo = ki;
        nfs_gss_clnt_ctx_clean(cp);
        if (principal) {
            /*
             * If we have a principal and we found a matching audit
             * session, then to get here the principal had to match.
             * In walking the context list, if a context has a
             * mismatched principal or no principal set, we mark it
             * for destruction, set cp to NULL, and fall into the
             * allocation case above.  If the context still has
             * references we copy the context, which preserves the
             * principal, and we end up here with the correct
             * principal set.  If we don't have references, the
             * principal must have matched and we fall through here.
             */
            cp->gss_clnt_flags |= GSS_CTX_STICKY;
        }
    }

    cp->gss_clnt_thread = current_thread();
    nfs_gss_clnt_ctx_ref(req, cp);
    TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
    lck_mtx_unlock(&nmp->nm_lock);

    error = nfs_gss_clnt_ctx_init_retry(req, cp); // Initialize new context
    if (error) {
        NFS_GSS_DBG("nfs_gss_clnt_ctx_init_retry returned %d for %s\n", error, NFS_GSS_CTX(req, cp));
        nfs_gss_clnt_ctx_unref(req);
    }

    /* Remove any old matching context that had a different principal */
    nfs_gss_clnt_ctx_unref(&treq);

    return (error);
}
static int
nfs_gss_clnt_ctx_find(struct nfsreq *req)
{
    return (nfs_gss_clnt_ctx_find_principal(req, NULL, 0, 0));
}
/*
 * Inserts an RPCSEC_GSS credential into an RPC header.
 * After the credential is inserted, the code continues
 * to build the verifier, which contains a signed checksum
 * of the RPC header.
 */
int
nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args)
{
    struct nfs_gss_clnt_ctx *cp;
    uint32_t seqnum = 0;
    int error = 0;
    int slpflag, recordmark = 0;
    int start, len, offset = 0;
    int pad, toklen;
    struct nfsm_chain nmc_tmp;
    struct gss_seq *gsp;
    u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
    u_char cksum[MAX_DIGEST];
    gss_key_info *ki;

    slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
    recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);

retry:
    if (req->r_gss_ctx == NULL) {
        /*
         * Find the context for this user.
         * If no context is found, one will be created.
         */
        error = nfs_gss_clnt_ctx_find(req);
        if (error)
            return (error);
    }
    cp = req->r_gss_ctx;

    /*
     * If the context thread isn't null, then the context isn't
     * yet complete and is for the exclusive use of the thread
     * doing the context setup. Wait until the context thread
     * is null.
     */
    lck_mtx_lock(cp->gss_clnt_mtx);
    if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
        cp->gss_clnt_flags |= GSS_NEEDCTX;
        msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
        slpflag &= ~PCATCH;
        if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
            return (error);
        nfs_gss_clnt_ctx_unref(req);
        goto retry;
    }
    lck_mtx_unlock(cp->gss_clnt_mtx);

    ki = cp->gss_clnt_kinfo;
    if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
        /*
         * Get a sequence number for this request.
         * Check whether the oldest request in the window is complete.
         * If it's still pending, then wait until it's done before
         * we allocate a new sequence number and allow this request
         * to proceed.
         */
        lck_mtx_lock(cp->gss_clnt_mtx);
        while (win_getbit(cp->gss_clnt_seqbits,
            ((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
            cp->gss_clnt_flags |= GSS_NEEDSEQ;
            msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
            slpflag &= ~PCATCH;
            if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
                return (error);
            }
            lck_mtx_lock(cp->gss_clnt_mtx);
            if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
                /* Renewed while we were waiting */
                lck_mtx_unlock(cp->gss_clnt_mtx);
                nfs_gss_clnt_ctx_unref(req);
                goto retry;
            }
        }
        seqnum = ++cp->gss_clnt_seqnum;
        win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin);
        lck_mtx_unlock(cp->gss_clnt_mtx);

        MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK|M_ZERO);
        if (gsp == NULL)
            return (ENOMEM);
        gsp->gss_seqnum = seqnum;
        SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext);
    }

    /* Insert the credential */
    nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
    nfsm_chain_add_32(error, nmc, 5 * NFSX_UNSIGNED + cp->gss_clnt_handle_len);
    nfsm_chain_add_32(error, nmc, RPCSEC_GSS_VERS_1);
    nfsm_chain_add_32(error, nmc, cp->gss_clnt_proc);
    nfsm_chain_add_32(error, nmc, seqnum);
    nfsm_chain_add_32(error, nmc, cp->gss_clnt_service);
    nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len);
    if (cp->gss_clnt_handle_len > 0) {
        if (cp->gss_clnt_handle == NULL)
            return (EBADRPC);
        nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);
    }
    if (error)
        return (error);

    /*
     * Now add the verifier
     */
    if (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
        cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) {
        /*
         * If the context is still being created
         * then use a null verifier.
         */
        nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);    // flavor
        nfsm_chain_add_32(error, nmc, 0);               // length
        nfsm_chain_build_done(error, nmc);
        if (!error)
            nfs_gss_append_chain(nmc, args);
        return (error);
    }

    offset = recordmark ? NFSX_UNSIGNED : 0; // record mark
    nfsm_chain_build_done(error, nmc);
    nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), offset, 0, cksum);

    toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
    nfsm_chain_add_32(error, nmc, RPCSEC_GSS);          // flavor
    nfsm_chain_add_32(error, nmc, toklen);              // length
    nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
    nfsm_chain_build_done(error, nmc);
    if (error)
        return (error);

    /*
     * Now we may have to compute integrity or encrypt the call args
     * per RFC 2203 Section 5.3.2
     */
    switch (cp->gss_clnt_service) {
    case RPCSEC_GSS_SVC_NONE:
        nfs_gss_append_chain(nmc, args);
        break;
    case RPCSEC_GSS_SVC_INTEGRITY:
        len = nfs_gss_mchain_length(args);              // Find args length
        req->r_gss_arglen = len;                        // Stash the args len
        len += NFSX_UNSIGNED;                           // Add seqnum length
        nfsm_chain_add_32(error, nmc, len);             // and insert it
        start = nfsm_chain_offset(nmc);
        nfsm_chain_add_32(error, nmc, seqnum);          // Insert seqnum
        req->r_gss_argoff = nfsm_chain_offset(nmc);     // Offset to args
        nfsm_chain_build_done(error, nmc);
        if (error)
            return (error);
        nfs_gss_append_chain(nmc, args);                // Append the args mbufs

        /* Now compute a checksum over the seqnum + args */
        nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, len, cksum);

        /* Insert it into a token and append to the request */
        toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
        nfsm_chain_finish_mbuf(error, nmc);             // force checksum into new mbuf
        nfsm_chain_add_32(error, nmc, toklen);
        nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
        nfsm_chain_build_done(error, nmc);
        break;
    case RPCSEC_GSS_SVC_PRIVACY:
        /* Prepend a new mbuf with the confounder & sequence number */
        nfsm_chain_build_alloc_init(error, &nmc_tmp, 3 * NFSX_UNSIGNED);
        nfsm_chain_add_32(error, &nmc_tmp, random());   // confounder bytes 1-4
        nfsm_chain_add_32(error, &nmc_tmp, random());   // confounder bytes 4-8
        nfsm_chain_add_32(error, &nmc_tmp, seqnum);
        nfsm_chain_build_done(error, &nmc_tmp);
        if (error)
            return (error);
        nfs_gss_append_chain(&nmc_tmp, args);           // Append the args mbufs

        len = nfs_gss_mchain_length(args);              // Find args length
        len += 3 * NFSX_UNSIGNED;                       // add confounder & seqnum
        req->r_gss_arglen = len;                        // Stash length

        /*
         * Append a pad trailer - per RFC 1964 section 1.2.2.3
         * Since XDR data is always 32-bit aligned, it
         * needs to be padded either by 4 bytes or 8 bytes.
         */
        nfsm_chain_finish_mbuf(error, &nmc_tmp);        // force padding into new mbuf
        if (len % 8 > 0) {
            nfsm_chain_add_32(error, &nmc_tmp, 0x04040404);
            len += NFSX_UNSIGNED;
        } else {
            nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
            nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
            len += 2 * NFSX_UNSIGNED;
        }
        nfsm_chain_build_done(error, &nmc_tmp);

        /* Now compute a checksum over the confounder + seqnum + args */
        nfs_gss_cksum_chain(ki, &nmc_tmp, ALG_WRAP(ki), 0, len, cksum);

        /* Insert it into a token */
        toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 1, len, cksum);
        nfsm_chain_add_32(error, nmc, toklen + len);    // token + args length
        nfsm_chain_add_opaque_nopad(error, nmc, tokbuf, toklen);
        req->r_gss_argoff = nfsm_chain_offset(nmc);     // Stash offset
        nfsm_chain_build_done(error, nmc);
        if (error)
            return (error);
        nfs_gss_append_chain(nmc, nmc_tmp.nmc_mhead);   // Append the args mbufs

        /* Finally, encrypt the args */
        nfs_gss_encrypt_chain(ki, &nmc_tmp, 0, len, DES_ENCRYPT);

        /* Add null XDR pad if the ASN.1 token misaligned the data */
        pad = nfsm_pad(toklen + len);
        if (pad) {
            nfsm_chain_add_opaque_nopad(error, nmc, iv0, pad);
            nfsm_chain_build_done(error, nmc);
        }
        break;
    }

    return (error);
}
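/*
 * For reference, a sketch of the RPCSEC_GSS credential body marshaled above
 * (RFC 2203 section 5).  The struct and field names are descriptive only;
 * the code emits the fields directly with nfsm_chain_add_32/add_opaque.
 */
#if 0   /* illustrative sketch, not compiled */
struct rpcsec_gss_cred_sketch {
    uint32_t version;      /* RPCSEC_GSS_VERS_1 */
    uint32_t gss_proc;     /* INIT, CONTINUE_INIT, DATA or DESTROY */
    uint32_t seq_num;      /* per-request sequence number */
    uint32_t service;      /* none, integrity or privacy */
    uint32_t handle_len;   /* opaque handle<> follows, XDR padded */
    /* u_char handle[handle_len]; */
};
#endif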
/*
 * When receiving a reply, the client checks the verifier
 * returned by the server. Check that the verifier is the
 * correct type, then extract the sequence number checksum
 * from the token in the credential and compare it with a
 * computed checksum of the sequence number in the request
 * that was sent.
 */
int
nfs_gss_clnt_verf_get(
    struct nfsreq *req,
    struct nfsm_chain *nmc,
    uint32_t verftype,
    uint32_t verflen,
    uint32_t *accepted_statusp)
{
    u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
    u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
    uint32_t seqnum = 0;
    struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
    struct nfsm_chain nmc_tmp;
    struct gss_seq *gsp;
    uint32_t reslen, start, cksumlen, toklen;
    int error = 0;
    gss_key_info *ki = cp->gss_clnt_kinfo;

    reslen = cksumlen = 0;
    *accepted_statusp = 0;

    if (cp == NULL)
        return (NFSERR_EAUTH);
    /*
     * If it's not an RPCSEC_GSS verifier, then it has to
     * be a null verifier that resulted from either
     * a CONTINUE_NEEDED reply during context setup or
     * from the reply to an AUTH_UNIX call from a dummy
     * context that resulted from a fallback to sec=sys.
     */
    if (verftype != RPCSEC_GSS) {
        if (verftype != RPCAUTH_NULL)
            return (NFSERR_EAUTH);
        if (cp->gss_clnt_flags & GSS_CTX_COMPLETE)
            return (NFSERR_EAUTH);
        if (verflen > 0)
            nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
        nfsm_chain_get_32(error, nmc, *accepted_statusp);
        return (error);
    }

    /*
     * If we received an RPCSEC_GSS verifier but the
     * context isn't yet complete, then it must be
     * the context complete message from the server.
     * The verifier will contain an encrypted checksum
     * of the window but we don't have the session key
     * yet so we can't decrypt it. Stash the verifier
     * and check it later in nfs_gss_clnt_ctx_init() when
     * the context is complete.
     */
    if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
        MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK|M_ZERO);
        if (cp->gss_clnt_verf == NULL)
            return (ENOMEM);
        nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf);
        nfsm_chain_get_32(error, nmc, *accepted_statusp);
        return (error);
    }

    if (verflen != KRB5_SZ_TOKEN(ki->hash_len))
        return (NFSERR_EAUTH);

    /*
     * Get the 8 octet sequence number
     * checksum out of the verifier token.
     */
    nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
    if (error)
        goto nfsmout;
    error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum1);
    if (error)
        goto nfsmout;

    /*
     * Search the request sequence numbers for this reply, starting
     * with the most recent, looking for a checksum that matches
     * the one in the verifier returned by the server.
     */
    SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
        nfs_gss_cksum_rep(ki, gsp->gss_seqnum, cksum2);
        if (bcmp(cksum1, cksum2, HASHLEN(ki)) == 0)
            break;
    }
    if (gsp == NULL)
        return (NFSERR_EAUTH);

    /*
     * Get the RPC accepted status
     */
    nfsm_chain_get_32(error, nmc, *accepted_statusp);
    if (*accepted_statusp != RPC_SUCCESS)
        return (0);

    /*
     * Now we may have to check integrity or decrypt the results
     * per RFC 2203 Section 5.3.2
     */
    switch (cp->gss_clnt_service) {
    case RPCSEC_GSS_SVC_NONE:
        break;
    case RPCSEC_GSS_SVC_INTEGRITY:
        /*
         * Here's what we expect in the integrity results:
         *
         * - length of seq num + results (4 bytes)
         * - sequence number (4 bytes)
         * - results (variable bytes)
         * - length of checksum token (37)
         * - checksum of seqnum + results (37 bytes)
         */
        nfsm_chain_get_32(error, nmc, reslen);          // length of results
        if (reslen > NFS_MAXPACKET) {
            error = EBADRPC;
            goto nfsmout;
        }

        /* Compute a checksum over the sequence number + results */
        start = nfsm_chain_offset(nmc);
        nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, reslen, cksum1);

        /*
         * Get the sequence number prepended to the results
         * and compare it against the list in the request.
         */
        nfsm_chain_get_32(error, nmc, seqnum);
        SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
            if (seqnum == gsp->gss_seqnum)
                break;
        }
        if (gsp == NULL) {
            error = EBADRPC;
            goto nfsmout;
        }

        /*
         * Advance to the end of the results and
         * fetch the checksum computed by the server.
         */
        nmc_tmp = *nmc;
        reslen -= NFSX_UNSIGNED;                        // already skipped seqnum
        nfsm_chain_adv(error, &nmc_tmp, reslen);        // skip over the results
        nfsm_chain_get_32(error, &nmc_tmp, cksumlen);   // length of checksum
        if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
            error = EBADRPC;
            goto nfsmout;
        }
        nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
        if (error)
            goto nfsmout;
        error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum2);
        if (error)
            goto nfsmout;

        /* Verify that the checksums are the same */
        if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
            error = EBADRPC;
            goto nfsmout;
        }
        break;
    case RPCSEC_GSS_SVC_PRIVACY:
        /*
         * Here's what we expect in the privacy results:
         *
         * - length of confounder + seq num + token + results
         * - wrap token (37-40 bytes)
         * - confounder (8 bytes)
         * - sequence number (4 bytes)
         * - results (encrypted)
         */
        nfsm_chain_get_32(error, nmc, reslen);          // length of results
        if (reslen > NFS_MAXPACKET) {
            error = EBADRPC;
            goto nfsmout;
        }

        /* Get the token that prepends the encrypted results */
        nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
        if (error)
            goto nfsmout;
        error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 0,
            &toklen, cksum1);
        if (error)
            goto nfsmout;
        nfsm_chain_reverse(nmc, nfsm_pad(toklen));
        reslen -= toklen;                               // size of confounder + seqnum + results

        /* decrypt the confounder + sequence number + results */
        start = nfsm_chain_offset(nmc);
        nfs_gss_encrypt_chain(ki, nmc, start, reslen, DES_DECRYPT);

        /* Compute a checksum over the confounder + sequence number + results */
        nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, reslen, cksum2);

        /* Verify that the checksums are the same */
        if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
            error = EBADRPC;
            goto nfsmout;
        }

        nfsm_chain_adv(error, nmc, 8);                  // skip over the confounder

        /*
         * Get the sequence number prepended to the results
         * and compare it against the list in the request.
         */
        nfsm_chain_get_32(error, nmc, seqnum);
        SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
            if (seqnum == gsp->gss_seqnum)
                break;
        }
        break;
    }

nfsmout:
    return (error);
}
/*
 * An RPCSEC_GSS request with no integrity or privacy consists
 * of just the header mbufs followed by the arg mbufs.
 *
 * However, integrity and privacy both append trailer mbufs to the args,
 * which means we have to do some work to restore the arg mbuf
 * chain to its previous state in case we need to retransmit.
 *
 * The location and length of the args is marked by two fields
 * in the request structure: r_gss_argoff and r_gss_arglen,
 * which are stashed when the NFS request is built.
 */
int
nfs_gss_clnt_args_restore(struct nfsreq *req)
{
    struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
    struct nfsm_chain mchain, *nmc = &mchain;
    int len, error = 0;

    if (cp == NULL)
        return (NFSERR_EAUTH);

    if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0)
        return (0);

    nfsm_chain_dissect_init(error, nmc, req->r_mhead);  // start at RPC header
    nfsm_chain_adv(error, nmc, req->r_gss_argoff);      // advance to args
    if (error)
        return (error);

    switch (cp->gss_clnt_service) {
    case RPCSEC_GSS_SVC_NONE:
        break;
    case RPCSEC_GSS_SVC_INTEGRITY:
        /*
         * All we have to do here is remove the appended checksum mbufs.
         * We know that the checksum starts in a new mbuf beyond the end
         * of the args.
         */
        nfsm_chain_adv(error, nmc, req->r_gss_arglen);  // adv to last args mbuf
        if (error)
            return (error);

        mbuf_freem(mbuf_next(nmc->nmc_mcur));           // free the cksum mbuf
        error = mbuf_setnext(nmc->nmc_mcur, NULL);
        break;
    case RPCSEC_GSS_SVC_PRIVACY:
        /*
         * The args are encrypted along with prepended confounders and seqnum.
         * First we decrypt the confounder, seqnum, and args, then skip to the
         * final mbuf of the args.
         * The arglen includes 8 bytes of confounder and 4 bytes of seqnum.
         * Finally, we remove between 4 and 8 bytes of encryption padding
         * as well as any alignment padding in the trailing mbuf.
         */
        len = req->r_gss_arglen;
        len += len % 8 > 0 ? 4 : 8;                     // add DES padding length
        nfs_gss_encrypt_chain(cp->gss_clnt_kinfo, nmc,
            req->r_gss_argoff, len, DES_DECRYPT);
        nfsm_chain_adv(error, nmc, req->r_gss_arglen);
        if (error)
            return (error);
        mbuf_freem(mbuf_next(nmc->nmc_mcur));           // free the pad mbuf
        error = mbuf_setnext(nmc->nmc_mcur, NULL);
        break;
    }

    return (error);
}
/*
 * This function sets up a new context on the client.
 * Context setup alternates upcalls to the gssd with NFS nullproc calls
 * to the server.  Each of these calls exchanges an opaque token, obtained
 * via the gssd's calls into the GSS-API on either the client or the server.
 * This cycle of calls ends when the client's upcall to the gssd and the
 * server's response both return GSS_S_COMPLETE.  At this point, the client
 * should have its session key and a handle that it can use to refer to its
 * new context on the server.
 */
static int
nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
    struct nfsmount *nmp = req->r_nmp;
    int client_complete = 0;
    int server_complete = 0;
    u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
    int error = 0;
    gss_key_info *ki = cp->gss_clnt_kinfo;

    /* Initialize a new client context */

    if (cp->gss_clnt_svcname == NULL) {
        cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp, &cp->gss_clnt_svcnt, &cp->gss_clnt_svcnamlen);
        if (cp->gss_clnt_svcname == NULL) {
            error = NFSERR_EAUTH;
            goto nfsmout;
        }
    }

    cp->gss_clnt_proc = RPCSEC_GSS_INIT;

    cp->gss_clnt_service =
        req->r_auth == RPCAUTH_KRB5  ? RPCSEC_GSS_SVC_NONE :
        req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
        req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;

    cp->gss_clnt_gssd_flags = (nfs_single_des ? GSSD_NFS_1DES : 0);
    /*
     * Now loop around alternating gss_init_sec_context and
     * gss_accept_sec_context upcalls to the gssd on the client
     * and server side until the context is complete - or fails.
     */
    for (;;) {

retry:
        /* Upcall to the gss_init_sec_context in the gssd */
        error = nfs_gss_clnt_gssd_upcall(req, cp);
        if (error)
            goto nfsmout;

        if (cp->gss_clnt_major == GSS_S_COMPLETE) {
            client_complete = 1;
            if (server_complete)
                break;
        } else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
            error = NFSERR_EAUTH;
            goto nfsmout;
        }

        /*
         * Pass the token to the server.
         */
        error = nfs_gss_clnt_ctx_callserver(req, cp);
        if (error) {
            if (error == ENEEDAUTH && cp->gss_clnt_proc == RPCSEC_GSS_INIT &&
                (cp->gss_clnt_gssd_flags & (GSSD_RESTART | GSSD_NFS_1DES)) == 0) {
                NFS_GSS_DBG("Retrying with single DES for req %p\n", req);
                cp->gss_clnt_gssd_flags = (GSSD_RESTART | GSSD_NFS_1DES);
                if (cp->gss_clnt_token)
                    FREE(cp->gss_clnt_token, M_TEMP);
                cp->gss_clnt_token = NULL;
                cp->gss_clnt_tokenlen = 0;
                goto retry;
            }
            // Reset flags, if error = ENEEDAUTH we will try 3des again
            cp->gss_clnt_gssd_flags = 0;
            goto nfsmout;
        }
        if (cp->gss_clnt_major == GSS_S_COMPLETE) {
            server_complete = 1;
            if (client_complete)
                break;
        }
        cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;
    }

    /*
     * The context is apparently established successfully
     */
    lck_mtx_lock(cp->gss_clnt_mtx);
    cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
    lck_mtx_unlock(cp->gss_clnt_mtx);
    cp->gss_clnt_proc = RPCSEC_GSS_DATA;

    /*
     * Compute checksum of the server's window
     */
    nfs_gss_cksum_rep(ki, cp->gss_clnt_seqwin, cksum1);

    /*
     * and see if it matches the one in the
     * verifier the server returned.
     */
    error = nfs_gss_token_get(ki, ALG_MIC(ki), cp->gss_clnt_verf, 0,
        NULL, cksum2);
    FREE(cp->gss_clnt_verf, M_TEMP);
    cp->gss_clnt_verf = NULL;

    if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
        error = NFSERR_EAUTH;
        goto nfsmout;
    }

    /*
     * Set an initial sequence number somewhat randomized.
     * Start small so we don't overflow GSS_MAXSEQ too quickly.
     * Add the size of the sequence window so seqbits arithmetic
     * doesn't go negative.
     */
    cp->gss_clnt_seqnum = (random() & 0xffff) + cp->gss_clnt_seqwin;

    /*
     * Allocate a bitmap to keep track of which requests
     * are pending within the sequence number window.
     */
    MALLOC(cp->gss_clnt_seqbits, uint32_t *,
        nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
    if (cp->gss_clnt_seqbits == NULL)
        error = NFSERR_EAUTH;

nfsmout:
    /*
     * If the error is ENEEDAUTH we're not done, so no need
     * to wake up other threads again. This thread will retry in
     * the find or renew routines.
     */
    if (error == ENEEDAUTH)
        return (error);

    /*
     * If there's an error, just mark it as invalid.
     * It will be removed when the reference count
     * drops to zero.
     */
    lck_mtx_lock(cp->gss_clnt_mtx);
    if (error)
        cp->gss_clnt_flags |= GSS_CTX_INVAL;

    /*
     * Wake any threads waiting to use the context
     */
    cp->gss_clnt_thread = NULL;
    if (cp->gss_clnt_flags & GSS_NEEDCTX) {
        cp->gss_clnt_flags &= ~GSS_NEEDCTX;
        wakeup(cp);
    }
    lck_mtx_unlock(cp->gss_clnt_mtx);

    return (error);
}
/*
 * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
 * But if there's a failure in trying to establish the context it keeps
 * retrying at progressively longer intervals in case the failure is
 * due to some transient condition.  For instance, the server might be
 * failing the context setup because directory services is not coming
 * up in a timely fashion.
 */
static int
nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
    struct nfsmount *nmp = req->r_nmp;
    struct timeval now;
    time_t waituntil;
    int error, slpflag;
    int retries = 0;
    int timeo = NFS_TRYLATERDEL;

    if (nfs_mount_gone(nmp)) {
        error = ENXIO;
        goto bad;
    }

    /* For an "intr" mount allow a signal to interrupt the retries */
    slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;

    while ((error = nfs_gss_clnt_ctx_init(req, cp)) == ENEEDAUTH) {
        microuptime(&now);
        waituntil = now.tv_sec + timeo;
        while (now.tv_sec < waituntil) {
            tsleep(NULL, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", hz);
            slpflag = 0;
            error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
            if (error)
                goto bad;
            microuptime(&now);
        }

        retries++;
        /* If it's a soft mount just give up after a while */
        if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (retries > nmp->nm_retry)) {
            error = ETIMEDOUT;
            goto bad;
        }
    }

    if (error == 0)
        return 0; // success
bad:
    /*
     * Give up on this context
     */
    lck_mtx_lock(cp->gss_clnt_mtx);
    cp->gss_clnt_flags |= GSS_CTX_INVAL;

    /*
     * Wake any threads waiting to use the context
     */
    cp->gss_clnt_thread = NULL;
    if (cp->gss_clnt_flags & GSS_NEEDCTX) {
        cp->gss_clnt_flags &= ~GSS_NEEDCTX;
        wakeup(cp);
    }
    lck_mtx_unlock(cp->gss_clnt_mtx);

    return (error);
}
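/*
 * The retry pacing above, reduced to a hedged standalone sketch: keep calling
 * the init routine while it returns ENEEDAUTH and sleep between attempts with
 * a growing, capped delay.  The function name and the delay constants are
 * illustrative only; they are not taken from the original backoff code.
 */
#if 0   /* illustrative sketch, not compiled */
static int
gss_init_backoff_sketch(int (*try_init)(void *), void *arg)
{
    int delay = 1;      /* seconds; illustrative values only */
    int error;

    while ((error = try_init(arg)) == ENEEDAUTH) {
        tsleep(NULL, PSOCK, "gss_init_backoff", delay * hz);
        delay = (delay * 2 > 60) ? 60 : delay * 2;   /* grow, but cap */
    }
    return error;
}
#endif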
/*
 * Call the NFS server using a null procedure for context setup.
 * Even though it's a null procedure and nominally has no arguments,
 * RFC 2203 requires that the GSS-API token be passed as an argument
 * and received as a reply.
 */
static int
nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
    struct nfsm_chain nmreq, nmrep;
    int error = 0, status;
    uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
    int sz;

    if (nfs_mount_gone(req->r_nmp))
        return (ENXIO);
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);
    sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen);
    nfsm_chain_build_alloc_init(error, &nmreq, sz);
    nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen);
    if (cp->gss_clnt_tokenlen > 0)
        nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
    nfsm_chain_build_done(error, &nmreq);
    if (error)
        goto nfsmout;

    /* Call the server */
    error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred,
        (req->r_flags & R_OPTMASK), cp, &nmrep, &status);
    if (cp->gss_clnt_token != NULL) {
        FREE(cp->gss_clnt_token, M_TEMP);
        cp->gss_clnt_token = NULL;
    }
    if (!error)
        error = status;
    if (error)
        goto nfsmout;

    /* Get the server's reply */

    nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len);
    if (cp->gss_clnt_handle != NULL) {
        FREE(cp->gss_clnt_handle, M_TEMP);
        cp->gss_clnt_handle = NULL;
    }
    if (cp->gss_clnt_handle_len > 0) {
        MALLOC(cp->gss_clnt_handle, u_char *, cp->gss_clnt_handle_len, M_TEMP, M_WAITOK);
        if (cp->gss_clnt_handle == NULL) {
            error = ENOMEM;
            goto nfsmout;
        }
        nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_handle_len, cp->gss_clnt_handle);
    }
    nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_major);
    nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor);
    nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin);
    nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen);
    if (error)
        goto nfsmout;
    if (cp->gss_clnt_tokenlen > 0) {
        MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK);
        if (cp->gss_clnt_token == NULL) {
            error = ENOMEM;
            goto nfsmout;
        }
        nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_tokenlen, cp->gss_clnt_token);
    }

    /*
     * Make sure any unusual errors are expanded and logged by gssd
     */
    if (cp->gss_clnt_major != GSS_S_COMPLETE &&
        cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {

        printf("nfs_gss_clnt_ctx_callserver: gss_clnt_major = %d\n", cp->gss_clnt_major);
        nfs_gss_clnt_log_error(req, cp, major, minor);
    }

nfsmout:
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);

    return (error);
}
/*
 * We construct the service principal as a gss hostbased service principal of
 * the form nfs@<server>, unless the server's principal was passed down in the
 * mount arguments. If the arguments don't specify the service principal, the
 * server name is extracted from the location passed in the mount argument if
 * available.  Otherwise assume a format of <server>:<path> in the
 * mntfromname.  We don't currently support url's or other bizarre formats like
 * path@server.  Mount_url will convert the nfs url into <server>:<path> when
 * calling mount, so this works out well in practice.
 */
static uint8_t *
nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len)
{
    char *svcname, *d, *server;
    int lindx, sindx;

    if (nfs_mount_gone(nmp))
        return (NULL);

    if (nmp->nm_sprinc) {
        *len = strlen(nmp->nm_sprinc) + 1;
        MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
        *nt = GSSD_HOSTBASED;
        if (svcname == NULL)
            return (NULL);
        strlcpy(svcname, nmp->nm_sprinc, *len);

        return ((uint8_t *)svcname);
    }

    *nt = GSSD_HOSTBASED;
    if (nmp->nm_locations.nl_numlocs && !(NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x1))) {
        lindx = nmp->nm_locations.nl_current.nli_loc;
        sindx = nmp->nm_locations.nl_current.nli_serv;
        server = nmp->nm_locations.nl_locations[lindx]->nl_servers[sindx]->ns_name;
        *len = (uint32_t)strlen(server);
    } else {
        /* Older binaries using older mount args end up here */
        server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;
        NFS_GSS_DBG("nfs getting gss svcname from %s\n", server);
        d = strchr(server, ':');
        *len = (uint32_t)(d ? (d - server) : strlen(server));
    }

    *len += 5; /* "nfs@" plus null */
    MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK);
    strlcpy(svcname, "nfs", *len);
    strlcat(svcname, "@", *len);
    strlcat(svcname, server, *len);
    NFS_GSS_DBG("nfs svcname = %s\n", svcname);

    return ((uint8_t *)svcname);
}
/*
 * Get a mach port to talk to gssd.
 * gssd lives in the root bootstrap, so we call gssd's lookup routine
 * to get a send right to talk to a new gssd instance that launchd has launched
 * based on the cred's uid and audit session id.
 */
static mach_port_t
nfs_gss_clnt_get_upcall_port(kauth_cred_t credp)
{
    mach_port_t gssd_host_port, uc_port = IPC_PORT_NULL;
    kern_return_t kr;
    au_asid_t asid;
    uid_t uid;

    kr = host_get_gssd_port(host_priv_self(), &gssd_host_port);
    if (kr != KERN_SUCCESS) {
        printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr, kr);
        return (IPC_PORT_NULL);
    }
    if (!IPC_PORT_VALID(gssd_host_port)) {
        printf("nfs_gss_get_upcall_port: gssd port not valid\n");
        return (IPC_PORT_NULL);
    }

    asid = kauth_cred_getasid(credp);
    uid = kauth_cred_getauid(credp);
    if (uid == AU_DEFAUDITID)
        uid = kauth_cred_getuid(credp);
    kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port);
    if (kr != KERN_SUCCESS)
        printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n", kr, kr);
    host_release_special_port(gssd_host_port);

    return (uc_port);
}
static void
nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t major, uint32_t minor)
{
#define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
    struct nfsmount *nmp = req->r_nmp;
    char who[] = "client";
    uint32_t gss_error = GETMAJERROR(cp->gss_clnt_major);
    const char *procn = "unknown";
    proc_t proc;
    pid_t pid = -1;
    struct timeval now;

    if (req->r_thread) {
        proc = (proc_t)get_bsdthreadtask_info(req->r_thread);
        if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK)))
            proc = NULL;
        if (proc) {
            if (*proc->p_comm)
                procn = proc->p_comm;
            pid = proc->p_pid;
        }
    } else {
        procn = "kernproc";
        pid = 0;
    }

    microuptime(&now);
    if ((cp->gss_clnt_major != major || cp->gss_clnt_minor != minor ||
         cp->gss_clnt_ptime + GSS_PRINT_DELAY < now.tv_sec) &&
        (nmp->nm_state & NFSSTA_MOUNTED)) {
        /*
         * Will let gssd do some logging in hopes that it can translate
         * the minor code.
         */
        if (cp->gss_clnt_minor && cp->gss_clnt_minor != minor) {
            (void) mach_gss_log_error(
                cp->gss_clnt_mport,
                vfs_statfs(nmp->nm_mountp)->f_mntfromname,
                kauth_cred_getuid(cp->gss_clnt_cred),
                who,
                cp->gss_clnt_major,
                cp->gss_clnt_minor);
        }
        gss_error = gss_error ? gss_error : cp->gss_clnt_major;

        /*
         *%%% It would be really nice to get the terminal from the proc or auditinfo_addr struct and print that here.
         */
        printf("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
            cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
            procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
        cp->gss_clnt_ptime = now.tv_sec;
        switch (gss_error) {
        case 7: printf("NFS: gssd does not have credentials for session %d/%d, (kinit)?\n",
                kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
            break;
        case 11: printf("NFS: gssd has expired credentials for session %d/%d, (kinit)?\n",
                kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
            break;
        }
    } else {
        NFS_GSS_DBG("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
            cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
            procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
    }
}
/*
 * Make an upcall to the gssd using Mach RPC
 * The upcall is made using a host special port.
 * This allows launchd to fire up the gssd in the
 * user's session.  This is important, since gssd
 * must have access to the user's credential cache.
 */
static int
nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
    kern_return_t kr;
    gssd_byte_buffer okey = NULL;
    uint32_t skeylen = 0;
    int retry_cnt = 0;
    vm_map_copy_t itoken = NULL;
    gssd_byte_buffer otoken = NULL;
    mach_msg_type_number_t otokenlen;
    int error = 0;
    uint8_t *principal = NULL;
    uint32_t plen = 0;
    int32_t nt = GSSD_STRING_NAME;
    vm_map_copy_t pname = NULL;
    vm_map_copy_t svcname = NULL;
    char display_name[MAX_DISPLAY_STR] = "";
    uint32_t ret_flags;
    uint32_t nfs_1des = (cp->gss_clnt_gssd_flags & GSSD_NFS_1DES);
    struct nfsmount *nmp;
    uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;

    /*
     * NFS currently only supports default principals or
     * principals based on the uid of the caller, unless
     * the principal to use for the mounting cred was specified
     * in the mount arguments. If the realm to use was specified
     * then we will send that up as the principal, and since the
     * realm is preceded by an "@", gssd will try to select the
     * default principal for that realm.
     */

    nmp = req->r_nmp;
    if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)))
        return (ENXIO);

    if (cp->gss_clnt_principal && cp->gss_clnt_prinlen) {
        principal = cp->gss_clnt_principal;
        plen = cp->gss_clnt_prinlen;
        nt = cp->gss_clnt_prinnt;
    } else if (nmp->nm_principal && IS_VALID_CRED(nmp->nm_mcred) && req->r_cred == nmp->nm_mcred) {
        plen = (uint32_t)strlen(nmp->nm_principal);
        MALLOC(principal, uint8_t *, plen, M_TEMP, M_WAITOK | M_ZERO);
        if (principal == NULL)
            return (ENOMEM);
        bcopy(nmp->nm_principal, principal, plen);
        cp->gss_clnt_prinnt = nt = GSSD_USER;
    }
    else if (nmp->nm_realm) {
        plen = (uint32_t)strlen(nmp->nm_realm);
        principal = (uint8_t *)nmp->nm_realm;
        nt = GSSD_USER;
    }

    if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
        cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred);
        if (cp->gss_clnt_mport == IPC_PORT_NULL)
            goto out;
    }

    if (plen)
        nfs_gss_mach_alloc_buffer(principal, plen, &pname);
    if (cp->gss_clnt_svcnamlen)
        nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
    if (cp->gss_clnt_tokenlen)
        nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);

retry:
    kr = mach_gss_init_sec_context_v2(
        cp->gss_clnt_mport,
        GSSD_KRB5_MECH,
        (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
        kauth_cred_getuid(cp->gss_clnt_cred),
        nt,
        (gssd_byte_buffer)pname, (mach_msg_type_number_t) plen,
        cp->gss_clnt_svcnt,
        (gssd_byte_buffer)svcname, (mach_msg_type_number_t) cp->gss_clnt_svcnamlen,
        GSSD_MUTUAL_FLAG,
        &cp->gss_clnt_gssd_flags,
        &cp->gss_clnt_context,
        &cp->gss_clnt_cred_handle,
        &ret_flags,
        &okey, (mach_msg_type_number_t *) &skeylen,
        &otoken, &otokenlen,
        cp->gss_clnt_display ? NULL : display_name,
        &cp->gss_clnt_major,
        &cp->gss_clnt_minor);

    /* Should be cleared and set in gssd ? */
    cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;
    cp->gss_clnt_gssd_flags |= nfs_1des;

    if (kr != KERN_SUCCESS) {
        printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
        if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
            retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES &&
            !vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) {
            if (plen)
                nfs_gss_mach_alloc_buffer(principal, plen, &pname);
            if (cp->gss_clnt_svcnamlen)
                nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
            if (cp->gss_clnt_tokenlen > 0)
                nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
            goto retry;
        }

        host_release_special_port(cp->gss_clnt_mport);
        cp->gss_clnt_mport = IPC_PORT_NULL;
        goto out;
    }

    if (cp->gss_clnt_display == NULL && *display_name != '\0') {
        int dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1; /* Add extra byte to include '\0' */

        if (dlen < MAX_DISPLAY_STR) {
            MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK);
            if (cp->gss_clnt_display == NULL)
                goto skip;
            bcopy(display_name, cp->gss_clnt_display, dlen);
        } else {
            goto skip;
        }
    }
skip:
    /*
     * Make sure any unusual errors are expanded and logged by gssd
     *
     * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes.
     */
    if (cp->gss_clnt_major != GSS_S_COMPLETE &&
        cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
        nfs_gss_clnt_log_error(req, cp, major, minor);
    }

    if (skeylen > 0) {
        if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
            printf("nfs_gss_clnt_gssd_upcall: bad key length (%d)\n", skeylen);
            vm_map_copy_discard((vm_map_copy_t) okey);
            vm_map_copy_discard((vm_map_copy_t) otoken);
            goto out;
        }
        error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen,
            cp->gss_clnt_kinfo->skey);
        if (error) {
            vm_map_copy_discard((vm_map_copy_t) otoken);
            goto out;
        }

        error = gss_key_init(cp->gss_clnt_kinfo, skeylen);
        if (error)
            goto out;
    }

    /* Free context token used as input */
    if (cp->gss_clnt_token)
        FREE(cp->gss_clnt_token, M_TEMP);
    cp->gss_clnt_token = NULL;
    cp->gss_clnt_tokenlen = 0;

    if (otokenlen > 0) {
        /* Set context token to gss output token */
        MALLOC(cp->gss_clnt_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
        if (cp->gss_clnt_token == NULL) {
            printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen);
            vm_map_copy_discard((vm_map_copy_t) otoken);
            return (ENOMEM);
        }
        error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token);
        if (error) {
            FREE(cp->gss_clnt_token, M_TEMP);
            cp->gss_clnt_token = NULL;
            return (NFSERR_EAUTH);
        }
        cp->gss_clnt_tokenlen = otokenlen;
    }

    return (0);

out:
    if (cp->gss_clnt_token)
        FREE(cp->gss_clnt_token, M_TEMP);
    cp->gss_clnt_token = NULL;
    cp->gss_clnt_tokenlen = 0;

    return (NFSERR_EAUTH);
}
/*
 * Invoked at the completion of an RPC call that uses an RPCSEC_GSS
 * credential. The sequence number window that the server returns
 * at context setup indicates the maximum number of client calls that
 * can be outstanding on a context. The client maintains a bitmap that
 * represents the server's window. Each pending request has a bit set
 * in the window bitmap. When a reply comes in or times out, we reset
 * the bit in the bitmap and if there are any other threads waiting for
 * a context slot we notify the waiting thread(s).
 *
 * Note that if a request is retransmitted, it will have a single XID
 * but it may be associated with multiple sequence numbers. So we
 * may have to reset multiple sequence number bits in the window bitmap.
 */
void
nfs_gss_clnt_rpcdone(struct nfsreq *req)
{
    struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
    struct gss_seq *gsp, *ngsp;
    int i = 0;

    if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE))
        return; // no context - don't bother
    /*
     * Reset the bit for this request in the
     * sequence number window to indicate it's done.
     * We do this even if the request timed out.
     */
    lck_mtx_lock(cp->gss_clnt_mtx);
    gsp = SLIST_FIRST(&req->r_gss_seqlist);
    if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin))
        win_resetbit(cp->gss_clnt_seqbits,
            gsp->gss_seqnum % cp->gss_clnt_seqwin);

    /*
     * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries
     */
    SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) {
        if (++i > GSS_CLNT_SEQLISTMAX) {
            SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext);
            FREE(gsp, M_TEMP);
        }
    }

    /*
     * If there's a thread waiting for
     * the window to advance, wake it up.
     */
    if (cp->gss_clnt_flags & GSS_NEEDSEQ) {
        cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
        wakeup(cp);
    }
    lck_mtx_unlock(cp->gss_clnt_mtx);
}
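/*
 * Illustrative sketch (not part of the original source): the window-bitmap
 * indexing used above. A sequence number inside the server's window maps to
 * bit (seqnum % window); numbers at or below (current - window) have already
 * fallen out of the window, so their bits are simply reused. All names below
 * are local to this sketch.
 */
#if 0 /* sketch only - never compiled */
#include <stdint.h>

#define SKETCH_WINDOW 128u

static uint32_t sketch_bits[SKETCH_WINDOW / 32];

static void
sketch_setbit(uint32_t seq)
{
    uint32_t idx = seq % SKETCH_WINDOW;

    sketch_bits[idx / 32] |= 1u << (idx % 32);
}

static void
sketch_rpcdone(uint32_t seq, uint32_t cur_seqnum)
{
    uint32_t idx = seq % SKETCH_WINDOW;

    /* Only clear a bit that is still inside the window, as rpcdone() does */
    if (seq > cur_seqnum - SKETCH_WINDOW)
        sketch_bits[idx / 32] &= ~(1u << (idx % 32));
}
#endif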
/*
 * Create a reference to a context from a request
 * and bump the reference count
 */
static void
nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
    req->r_gss_ctx = cp;

    lck_mtx_lock(cp->gss_clnt_mtx);
    cp->gss_clnt_refcnt++;
    lck_mtx_unlock(cp->gss_clnt_mtx);
}
/*
 * Remove a context reference from a request
 * If the reference count drops to zero, and the
 * context is invalid, destroy the context
 */
void
nfs_gss_clnt_ctx_unref(struct nfsreq *req)
{
    struct nfsmount *nmp = req->r_nmp;
    struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
    int on_neg_cache = 0;
    int neg_cache = 0;
    int destroy = 0;
    struct timeval now;
    char CTXBUF[NFS_CTXBUFSZ];

    if (cp == NULL)
        return;

    req->r_gss_ctx = NULL;

    lck_mtx_lock(cp->gss_clnt_mtx);
    if (--cp->gss_clnt_refcnt < 0)
        panic("Over release of gss context!\n");

    if (cp->gss_clnt_refcnt == 0) {
        if ((cp->gss_clnt_flags & GSS_CTX_INVAL) &&
            cp->gss_clnt_kinfo) {
            FREE(cp->gss_clnt_kinfo, M_TEMP);
            cp->gss_clnt_kinfo = NULL;
        }
        if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
            destroy = 1;
            if (cp->gss_clnt_flags & GSS_CTX_STICKY)
                nfs_gss_clnt_mnt_rele(nmp);
            if (cp->gss_clnt_nctime)
                on_neg_cache = 1;
        }
    }
    if (!destroy && cp->gss_clnt_nctime == 0 &&
        (cp->gss_clnt_flags & GSS_CTX_INVAL)) {
        microuptime(&now);
        cp->gss_clnt_nctime = now.tv_sec;
        neg_cache = 1;
    }
    lck_mtx_unlock(cp->gss_clnt_mtx);
    if (destroy) {
        NFS_GSS_DBG("Destroying context %s\n", NFS_GSS_CTX(req, cp));
        if (nmp) {
            lck_mtx_lock(&nmp->nm_lock);
            if (cp->gss_clnt_entries.tqe_next != NFSNOLIST) {
                TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
            }
            if (on_neg_cache)
                nmp->nm_ncentries--;
            lck_mtx_unlock(&nmp->nm_lock);
        }
        nfs_gss_clnt_ctx_destroy(cp);
    } else if (neg_cache) {
        NFS_GSS_DBG("Entering context %s into negative cache\n", NFS_GSS_CTX(req, cp));
        if (nmp) {
            lck_mtx_lock(&nmp->nm_lock);
            nmp->nm_ncentries++;
            nfs_gss_clnt_ctx_neg_cache_reap(nmp);
            lck_mtx_unlock(&nmp->nm_lock);
        }
    }
    NFS_GSS_CLNT_CTX_DUMP(nmp);
}
/*
 * Try and reap any old negative cache entries.
 */
static void
nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *nmp)
{
    struct nfs_gss_clnt_ctx *cp, *tcp;
    struct timeval now;
    int reaped = 0;

    NFS_GSS_DBG("Reaping contexts ncentries = %d\n", nmp->nm_ncentries);
    /* Try and reap old, unreferenced, expired contexts */
    microuptime(&now);

    TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
        int destroy = 0;

        /* Don't reap STICKY contexts */
        if ((cp->gss_clnt_flags & GSS_CTX_STICKY) ||
            !(cp->gss_clnt_flags & GSS_CTX_INVAL))
            continue;
        /* Keep up to GSS_MAX_NEG_CACHE_ENTRIES */
        if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES)
            break;
        /* Contexts too young */
        if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec)
            continue;
        /* Not referenced, remove it. */
        lck_mtx_lock(cp->gss_clnt_mtx);
        if (cp->gss_clnt_refcnt == 0) {
            cp->gss_clnt_flags |= GSS_CTX_DESTROY;
            destroy = 1;
        }
        lck_mtx_unlock(cp->gss_clnt_mtx);
        if (destroy) {
            TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
            nmp->nm_ncentries++;
            reaped++;
            nfs_gss_clnt_ctx_destroy(cp);
        }
    }
    NFS_GSS_DBG("Reaped %d contexts ncentries = %d\n", reaped, nmp->nm_ncentries);
}
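/*
 * Illustrative sketch (not part of the original source): the reap decision
 * above reduced to a single predicate -- an entry is reaped only if it is
 * invalid, not sticky, past the negative-cache timeout, unreferenced, and
 * the cache is over its limit. (The real loop uses break/continue instead of
 * a flat predicate.) The field and constant names below are stand-ins.
 */
#if 0 /* sketch only - never compiled */
struct sketch_ctx {
    unsigned flags;     /* SK_INVAL / SK_STICKY */
    int refcnt;
    long nctime;        /* time entered into the negative cache */
};
#define SK_INVAL   0x1
#define SK_STICKY  0x2
#define SK_NEG_TO  20   /* stand-in for GSS_NEG_CACHE_TO */
#define SK_MAX_NEG 10   /* stand-in for GSS_MAX_NEG_CACHE_ENTRIES */

static int
sketch_should_reap(const struct sketch_ctx *c, int ncentries, long now)
{
    if ((c->flags & SK_STICKY) || !(c->flags & SK_INVAL))
        return 0;               /* keep sticky and still-valid contexts */
    if (ncentries <= SK_MAX_NEG)
        return 0;               /* cache not over its limit */
    if (c->nctime + SK_NEG_TO >= now)
        return 0;               /* entry too young */
    return c->refcnt == 0;      /* reap only unreferenced entries */
}
#endif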
/*
 * Clean a context to be cached
 */
static void
nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *cp)
{
    /* Preserve gss_clnt_mtx */
    assert(cp->gss_clnt_thread == NULL); /* Will be set to this thread */
    /* gss_clnt_entries: we should not be on any list at this point */
    cp->gss_clnt_flags = 0;
    /* gss_clnt_refcnt should be zero */
    assert(cp->gss_clnt_refcnt == 0);
    /*
     * We are who we are, so preserve:
     * gss_clnt_cred
     * gss_clnt_principal
     * gss_clnt_prinlen
     * gss_clnt_prinnt
     * gss_clnt_display
     */
    /* gss_clnt_proc will be set in nfs_gss_clnt_ctx_init */
    cp->gss_clnt_seqnum = 0;
    /* Preserve gss_clnt_service, we're not changing flavors */
    if (cp->gss_clnt_handle) {
        FREE(cp->gss_clnt_handle, M_TEMP);
        cp->gss_clnt_handle = NULL;
    }
    cp->gss_clnt_handle_len = 0;
    cp->gss_clnt_nctime = 0;
    cp->gss_clnt_seqwin = 0;
    if (cp->gss_clnt_seqbits) {
        FREE(cp->gss_clnt_seqbits, M_TEMP);
        cp->gss_clnt_seqbits = NULL;
    }
    /* Preserve gss_clnt_mport. Still talking to the same gssd */
    if (cp->gss_clnt_verf) {
        FREE(cp->gss_clnt_verf, M_TEMP);
        cp->gss_clnt_verf = NULL;
    }
    /* Service name might change on failover, so reset it */
    if (cp->gss_clnt_svcname) {
        FREE(cp->gss_clnt_svcname, M_TEMP);
        cp->gss_clnt_svcname = NULL;
        cp->gss_clnt_svcnt = 0;
    }
    cp->gss_clnt_svcnamlen = 0;
    cp->gss_clnt_cred_handle = 0;
    cp->gss_clnt_context = 0;
    if (cp->gss_clnt_token) {
        FREE(cp->gss_clnt_token, M_TEMP);
        cp->gss_clnt_token = NULL;
    }
    cp->gss_clnt_tokenlen = 0;
    if (cp->gss_clnt_kinfo)
        bzero(cp->gss_clnt_kinfo, sizeof(gss_key_info));
    /*
     * Preserve:
     * gss_clnt_gssd_flags
     * gss_clnt_major
     * gss_clnt_minor
     * gss_clnt_ptime
     */
}
/*
 * Copy a source context to a new context. This is used to create a new
 * context with the identity of the old context for renewal. The old context
 * is invalid at this point, but it may still have references to it, so it is
 * not safe to use that context.
 */
static int
nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dcpp, gss_key_info *ki)
{
    struct nfs_gss_clnt_ctx *dcp;

    *dcpp = (struct nfs_gss_clnt_ctx *)NULL;
    MALLOC(dcp, struct nfs_gss_clnt_ctx *, sizeof (struct nfs_gss_clnt_ctx), M_TEMP, M_WAITOK);
    if (dcp == NULL)
        return (ENOMEM);
    bzero(dcp, sizeof (struct nfs_gss_clnt_ctx));
    if (ki == NULL) {
        MALLOC(dcp->gss_clnt_kinfo, gss_key_info *, sizeof (gss_key_info), M_TEMP, M_WAITOK);
        if (dcp->gss_clnt_kinfo == NULL) {
            FREE(dcp, M_TEMP);
            return (ENOMEM);
        }
    } else {
        dcp->gss_clnt_kinfo = ki;
    }
    bzero(dcp->gss_clnt_kinfo, sizeof (gss_key_info));
    dcp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
    dcp->gss_clnt_cred = scp->gss_clnt_cred;
    kauth_cred_ref(dcp->gss_clnt_cred);
    dcp->gss_clnt_prinlen = scp->gss_clnt_prinlen;
    dcp->gss_clnt_prinnt = scp->gss_clnt_prinnt;
    if (scp->gss_clnt_principal) {
        MALLOC(dcp->gss_clnt_principal, uint8_t *, dcp->gss_clnt_prinlen, M_TEMP, M_WAITOK | M_ZERO);
        if (dcp->gss_clnt_principal == NULL) {
            FREE(dcp->gss_clnt_kinfo, M_TEMP);
            FREE(dcp, M_TEMP);
            return (ENOMEM);
        }
        bcopy(scp->gss_clnt_principal, dcp->gss_clnt_principal, dcp->gss_clnt_prinlen);
    }
    /* Note we don't preserve the display name, that will be set by a successful up call */
    dcp->gss_clnt_service = scp->gss_clnt_service;
    dcp->gss_clnt_mport = host_copy_special_port(scp->gss_clnt_mport);
    /* gss_clnt_kinfo allocated above */
    dcp->gss_clnt_gssd_flags = scp->gss_clnt_gssd_flags;
    dcp->gss_clnt_major = scp->gss_clnt_major;
    dcp->gss_clnt_minor = scp->gss_clnt_minor;
    dcp->gss_clnt_ptime = scp->gss_clnt_ptime;

    *dcpp = dcp;

    return (0);
}
static void
nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp)
{
    NFS_GSS_DBG("Destroying context %d/%d\n",
        kauth_cred_getasid(cp->gss_clnt_cred),
        kauth_cred_getauid(cp->gss_clnt_cred));

    host_release_special_port(cp->gss_clnt_mport);
    cp->gss_clnt_mport = IPC_PORT_NULL;

    if (cp->gss_clnt_mtx) {
        lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
        cp->gss_clnt_mtx = (lck_mtx_t *)NULL;
    }
    if (IS_VALID_CRED(cp->gss_clnt_cred))
        kauth_cred_unref(&cp->gss_clnt_cred);
    cp->gss_clnt_entries.tqe_next = NFSNOLIST;
    cp->gss_clnt_entries.tqe_prev = NFSNOLIST;
    if (cp->gss_clnt_principal) {
        FREE(cp->gss_clnt_principal, M_TEMP);
        cp->gss_clnt_principal = NULL;
    }
    if (cp->gss_clnt_display) {
        FREE(cp->gss_clnt_display, M_TEMP);
        cp->gss_clnt_display = NULL;
    }
    if (cp->gss_clnt_kinfo) {
        FREE(cp->gss_clnt_kinfo, M_TEMP);
        cp->gss_clnt_kinfo = NULL;
    }

    nfs_gss_clnt_ctx_clean(cp);

    FREE(cp, M_TEMP);
}
/*
 * The context for a user is invalid.
 * Mark the context as invalid, then
 * create a new context.
 */
int
nfs_gss_clnt_ctx_renew(struct nfsreq *req)
{
    struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
    struct nfs_gss_clnt_ctx *ncp;
    struct nfsmount *nmp;
    int error = 0;
    char CTXBUF[NFS_CTXBUFSZ];

    if (cp == NULL)
        return (0);

    if (req->r_nmp == NULL)
        return (ENXIO);
    nmp = req->r_nmp;

    lck_mtx_lock(cp->gss_clnt_mtx);
    if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
        lck_mtx_unlock(cp->gss_clnt_mtx);
        nfs_gss_clnt_ctx_unref(req);
        return (0); // already being renewed
    }

    cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);

    if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
        cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
        wakeup(cp);
    }
    lck_mtx_unlock(cp->gss_clnt_mtx);

    error = nfs_gss_clnt_ctx_copy(cp, &ncp, NULL);
    NFS_GSS_DBG("Renewing context %s\n", NFS_GSS_CTX(req, ncp));
    nfs_gss_clnt_ctx_unref(req);
    if (error)
        return (error);

    lck_mtx_lock(&nmp->nm_lock);
    /*
     * Note we don't bother taking the new context mutex as we're
     * not findable at the moment.
     */
    ncp->gss_clnt_thread = current_thread();
    nfs_gss_clnt_ctx_ref(req, ncp);
    TAILQ_INSERT_HEAD(&nmp->nm_gsscl, ncp, gss_clnt_entries);
    lck_mtx_unlock(&nmp->nm_lock);

    error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context
    if (error)
        nfs_gss_clnt_ctx_unref(req);

    return (error);
}
/*
 * Destroy all the contexts associated with a mount.
 * The contexts are also destroyed by the server.
 */
void
nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp)
{
    struct nfs_gss_clnt_ctx *cp;
    struct nfsm_chain nmreq, nmrep;
    int error, status;
    struct nfsreq req;

    req.r_nmp = nmp;

    if (!nmp)
        return;

    lck_mtx_lock(&nmp->nm_lock);
    while((cp = TAILQ_FIRST(&nmp->nm_gsscl))) {
        TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
        cp->gss_clnt_entries.tqe_next = NFSNOLIST;
        lck_mtx_lock(cp->gss_clnt_mtx);
        if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
            lck_mtx_unlock(cp->gss_clnt_mtx);
            continue;
        }
        cp->gss_clnt_refcnt++;
        lck_mtx_unlock(cp->gss_clnt_mtx);
        req.r_gss_ctx = cp;

        lck_mtx_unlock(&nmp->nm_lock);
        /*
         * Tell the server to destroy its context.
         * But don't bother if it's a forced unmount.
         */
        if (!nfs_mount_gone(nmp) &&
            (cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY | GSS_CTX_COMPLETE)) == GSS_CTX_COMPLETE) {
            cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;

            error = 0;
            nfsm_chain_null(&nmreq);
            nfsm_chain_null(&nmrep);
            nfsm_chain_build_alloc_init(error, &nmreq, 0);
            nfsm_chain_build_done(error, &nmreq);
            if (!error)
                nfs_request_gss(nmp->nm_mountp, &nmreq,
                    current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status);
            nfsm_chain_cleanup(&nmreq);
            nfsm_chain_cleanup(&nmrep);
        }

        /*
         * Mark the context invalid then drop
         * the reference to remove it if its
         * the last one.
         */
        lck_mtx_lock(cp->gss_clnt_mtx);
        cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
        lck_mtx_unlock(cp->gss_clnt_mtx);
        nfs_gss_clnt_ctx_unref(&req);
        lck_mtx_lock(&nmp->nm_lock);
    }
    lck_mtx_unlock(&nmp->nm_lock);
    assert(TAILQ_EMPTY(&nmp->nm_gsscl));
}
/*
 * Removes a mount's context for a credential
 */
int
nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred)
{
    struct nfs_gss_clnt_ctx *cp;
    struct nfsreq req;

    req.r_nmp = nmp;

    NFS_GSS_DBG("Enter\n");
    NFS_GSS_CLNT_CTX_DUMP(nmp);
    lck_mtx_lock(&nmp->nm_lock);
    TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
        lck_mtx_lock(cp->gss_clnt_mtx);
        if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
            if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
                NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
                    kauth_cred_getasid(cp->gss_clnt_cred),
                    kauth_cred_getauid(cp->gss_clnt_cred),
                    cp->gss_clnt_refcnt);
                lck_mtx_unlock(cp->gss_clnt_mtx);
                continue;
            }
            cp->gss_clnt_refcnt++;
            cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
            lck_mtx_unlock(cp->gss_clnt_mtx);
            req.r_gss_ctx = cp;
            lck_mtx_unlock(&nmp->nm_lock);
            /*
             * Drop the reference to remove it if its
             * the last one.
             */
            NFS_GSS_DBG("Removed context %d/%d refcnt = %d\n",
                kauth_cred_getasid(cp->gss_clnt_cred),
                kauth_cred_getuid(cp->gss_clnt_cred),
                cp->gss_clnt_refcnt);
            nfs_gss_clnt_ctx_unref(&req);
            return (0);
        }
        lck_mtx_unlock(cp->gss_clnt_mtx);
    }

    lck_mtx_unlock(&nmp->nm_lock);

    NFS_GSS_DBG("Returning ENOENT\n");
    return (ENOENT);
}
/*
 * Sets a mount's principal for a session associated with cred.
 */
int
nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx,
    uint8_t *principal, uint32_t princlen, uint32_t nametype)
{
    struct nfsreq req;
    int error;

    NFS_GSS_DBG("Enter:\n");

    bzero(&req, sizeof(struct nfsreq));
    req.r_nmp = nmp;
    req.r_gss_ctx = NULL;
    req.r_auth = nmp->nm_auth;
    req.r_thread = vfs_context_thread(ctx);
    req.r_cred = vfs_context_ucred(ctx);

    error = nfs_gss_clnt_ctx_find_principal(&req, principal, princlen, nametype);
    NFS_GSS_DBG("nfs_gss_clnt_ctx_find_principal returned %d\n", error);
    /*
     * We don't care about auth errors. Those would indicate that the context is in the
     * negative cache and if and when the user has credentials for the principal
     * we should be good to go in that we will select those credentials for this principal.
     */
    if (error == EACCES || error == EAUTH || error == ENEEDAUTH)
        error = 0;

    /* We're done with this request */
    nfs_gss_clnt_ctx_unref(&req);

    return (error);
}
/*
 * Gets a mount's principal from a session associated with cred
 */
int
nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx,
    struct user_nfs_gss_principal *p)
{
    struct nfsreq req;
    int error = 0;
    struct nfs_gss_clnt_ctx *cp;
    kauth_cred_t cred = vfs_context_ucred(ctx);
    char *princ = NULL;
    char CTXBUF[NFS_CTXBUFSZ];

    req.r_nmp = nmp;
    lck_mtx_lock(&nmp->nm_lock);
    TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
        lck_mtx_lock(cp->gss_clnt_mtx);
        if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
            NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
                NFS_GSS_CTX(&req, cp),
                cp->gss_clnt_refcnt);
            lck_mtx_unlock(cp->gss_clnt_mtx);
            continue;
        }
        if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
            cp->gss_clnt_refcnt++;
            lck_mtx_unlock(cp->gss_clnt_mtx);
            goto out;
        }
        lck_mtx_unlock(cp->gss_clnt_mtx);
    }

out:
    if (cp == NULL) {
        lck_mtx_unlock(&nmp->nm_lock);
        p->princlen = 0;
        p->principal = USER_ADDR_NULL;
        p->nametype = GSSD_STRING_NAME;
        p->flags |= NFS_IOC_NO_CRED_FLAG;
        NFS_GSS_DBG("No context found for session %d by uid %d\n",
            kauth_cred_getasid(cred), kauth_cred_getuid(cred));
        return (0);
    }

    princ = cp->gss_clnt_principal ? (char *)cp->gss_clnt_principal : cp->gss_clnt_display;
    p->princlen = cp->gss_clnt_principal ? cp->gss_clnt_prinlen :
        (cp->gss_clnt_display ? strlen(cp->gss_clnt_display) : 0);
    p->nametype = cp->gss_clnt_prinnt;
    if (princ) {
        char *pp;

        MALLOC(pp, char *, p->princlen, M_TEMP, M_WAITOK);
        if (pp) {
            bcopy(princ, pp, p->princlen);
            p->principal = CAST_USER_ADDR_T(pp);
        } else {
            error = ENOMEM;
        }
    }
    lck_mtx_unlock(&nmp->nm_lock);

    req.r_gss_ctx = cp;
    NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(&req, NULL));
    nfs_gss_clnt_ctx_unref(&req);
    return (error);
}
#endif /* NFSCLIENT */
/*
 * Find a server context based on a handle value received
 * in an RPCSEC_GSS credential.
 */
static struct nfs_gss_svc_ctx *
nfs_gss_svc_ctx_find(uint32_t handle)
{
    struct nfs_gss_svc_ctx_hashhead *head;
    struct nfs_gss_svc_ctx *cp;
    uint64_t timenow;

    if (handle == 0)
        return (NULL);

    head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
    /*
     * Don't return a context that is going to expire in GSS_CTX_PEND seconds
     */
    clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);

    lck_mtx_lock(nfs_gss_svc_ctx_mutex);

    LIST_FOREACH(cp, head, gss_svc_entries) {
        if (cp->gss_svc_handle == handle) {
            if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
                /*
                 * Context has or is about to expire. Don't use.
                 * We'll return null and the client will have to create
                 * a new context.
                 */
                cp->gss_svc_handle = 0;
                /*
                 * Make sure though that we stay around for GSS_CTX_PEND seconds
                 * for other threads that might be using the context.
                 */
                cp->gss_svc_incarnation = timenow;

                cp = NULL;
                break;
            }
            lck_mtx_lock(cp->gss_svc_mtx);
            cp->gss_svc_refcnt++;
            lck_mtx_unlock(cp->gss_svc_mtx);
            break;
        }
    }

    lck_mtx_unlock(nfs_gss_svc_ctx_mutex);

    return (cp);
}
/*
 * Insert a new server context into the hash table
 * and start the context reap thread if necessary.
 */
static void
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
    struct nfs_gss_svc_ctx_hashhead *head;
    struct nfs_gss_svc_ctx *p;

    lck_mtx_lock(nfs_gss_svc_ctx_mutex);

    /*
     * Give the client a random handle so that if we reboot
     * it's unlikely the client will get a bad context match.
     * Make sure it's not zero or already assigned.
     */
retry:
    cp->gss_svc_handle = random();
    if (cp->gss_svc_handle == 0)
        goto retry;
    head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
    LIST_FOREACH(p, head, gss_svc_entries)
        if (p->gss_svc_handle == cp->gss_svc_handle)
            goto retry;

    clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
        &cp->gss_svc_incarnation);
    LIST_INSERT_HEAD(head, cp, gss_svc_entries);
    nfs_gss_ctx_count++;

    if (!nfs_gss_timer_on) {
        nfs_gss_timer_on = 1;

        nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
            min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
    }

    lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
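/*
 * Illustrative sketch (not part of the original source): picking a random,
 * non-zero handle that is not already present in its hash bucket, as the
 * insert routine above does before linking the context in. The hash and
 * list types here are stand-ins.
 */
#if 0 /* sketch only - never compiled */
#include <stdint.h>
#include <stdlib.h>

#define SKETCH_HASHSZ 64u
#define SKETCH_HASH(h) ((h) % SKETCH_HASHSZ)

struct sketch_node { uint32_t handle; struct sketch_node *next; };
static struct sketch_node *sketch_tbl[SKETCH_HASHSZ];

static uint32_t
sketch_pick_handle(void)
{
    uint32_t h;
    struct sketch_node *p;
retry:
    h = (uint32_t)random();
    if (h == 0)
        goto retry;                 /* zero means "no context" on the wire */
    for (p = sketch_tbl[SKETCH_HASH(h)]; p; p = p->next)
        if (p->handle == h)
            goto retry;             /* already assigned - try again */
    return h;
}
#endif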
/*
 * This function is called via the kernel's callout
 * mechanism. It runs only when there are
 * cached RPCSEC_GSS contexts.
 */
void
nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
{
    struct nfs_gss_svc_ctx *cp, *next;
    uint64_t timenow;
    int contexts = 0;
    int i;

    lck_mtx_lock(nfs_gss_svc_ctx_mutex);
    clock_get_uptime(&timenow);

    NFS_GSS_DBG("is running\n");

    /*
     * Scan all the hash chains
     */
    for (i = 0; i < SVC_CTX_HASHSZ; i++) {
        /*
         * For each hash chain, look for entries
         * that haven't been used in a while.
         */
        LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
            contexts++;
            if (timenow > cp->gss_svc_incarnation +
                (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
                && cp->gss_svc_refcnt == 0) {
                /*
                 * A stale context - remove it
                 */
                LIST_REMOVE(cp, gss_svc_entries);
                NFS_GSS_DBG("Removing context for %d\n", cp->gss_svc_uid);
                if (cp->gss_svc_seqbits)
                    FREE(cp->gss_svc_seqbits, M_TEMP);
                lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
                FREE(cp, M_TEMP);
                contexts--;
            }
        }
    }

    nfs_gss_ctx_count = contexts;

    /*
     * If there are still some cached contexts left,
     * set up another callout to check on them later.
     */
    nfs_gss_timer_on = nfs_gss_ctx_count > 0;
    if (nfs_gss_timer_on)
        nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
            min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);

    lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
/*
 * Here the server receives an RPCSEC_GSS credential in an
 * RPC call header. First there's some checking to make sure
 * the credential is appropriate - whether the context is still
 * being set up, or is complete. Then we use the handle to find
 * the server's context and validate the verifier, which contains
 * a signed checksum of the RPC header. If the verifier checks
 * out, we extract the user's UID and groups from the context
 * and use it to set up a UNIX credential for the user's request.
 */
int
nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
    uint32_t vers, proc, seqnum, service;
    uint32_t handle, handle_len;
    struct nfs_gss_svc_ctx *cp = NULL;
    uint32_t flavor = 0, verflen = 0;
    int error = 0;
    uint32_t arglen, start, toklen, cksumlen;
    u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
    u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
    struct nfsm_chain nmc_tmp;
    gss_key_info *ki;

    vers = proc = seqnum = service = handle_len = 0;
    arglen = cksumlen = 0;

    nfsm_chain_get_32(error, nmc, vers);
    if (vers != RPCSEC_GSS_VERS_1) {
        error = NFSERR_AUTHERR | AUTH_REJECTCRED;
        goto nfsmout;
    }

    nfsm_chain_get_32(error, nmc, proc);
    nfsm_chain_get_32(error, nmc, seqnum);
    nfsm_chain_get_32(error, nmc, service);
    nfsm_chain_get_32(error, nmc, handle_len);
    if (error)
        goto nfsmout;

    /*
     * Make sure context setup/destroy is being done with a nullproc
     */
    if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
        error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
        goto nfsmout;
    }

    /*
     * If the sequence number is greater than the max
     * allowable, reject and have the client init a
     * new context.
     */
    if (seqnum > GSS_MAXSEQ) {
        error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
        goto nfsmout;
    }

    nd->nd_sec =
        service == RPCSEC_GSS_SVC_NONE ?      RPCAUTH_KRB5 :
        service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
        service == RPCSEC_GSS_SVC_PRIVACY ?   RPCAUTH_KRB5P : 0;

    if (proc == RPCSEC_GSS_INIT) {
        /*
         * Limit the total number of contexts
         */
        if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
            error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
            goto nfsmout;
        }

        /*
         * Set up a new context
         */
        MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
        if (cp == NULL) {
            error = ENOMEM;
            goto nfsmout;
        }
        cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
        cp->gss_svc_refcnt = 1;
    } else {
        /*
         * Use the handle to find the context
         */
        if (handle_len != sizeof(handle)) {
            error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
            goto nfsmout;
        }
        nfsm_chain_get_32(error, nmc, handle);
        if (error)
            goto nfsmout;
        cp = nfs_gss_svc_ctx_find(handle);
        if (cp == NULL) {
            error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
            goto nfsmout;
        }
    }

    cp->gss_svc_proc = proc;
    ki = &cp->gss_svc_kinfo;

    if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
        struct posix_cred temp_pcred;

        if (cp->gss_svc_seqwin == 0) {
            /*
             * Context isn't complete
             */
            error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
            goto nfsmout;
        }

        if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
            /*
             * Sequence number is bad
             */
            error = EINVAL; // drop the request
            goto nfsmout;
        }

        /* Now compute the client's call header checksum */
        nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), 0, 0, cksum1);

        /*
         * Validate the verifier.
         * The verifier contains an encrypted checksum
         * of the call header from the XID up to and
         * including the credential. We compute the
         * checksum and compare it with what came in
         * the verifier.
         */
        nfsm_chain_get_32(error, nmc, flavor);
        nfsm_chain_get_32(error, nmc, verflen);
        if (error)
            goto nfsmout;
        if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN(ki->hash_len))
            error = NFSERR_AUTHERR | AUTH_BADVERF;
        nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
        if (error)
            goto nfsmout;

        /* Get the checksum from the token inside the verifier */
        error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
            NULL, cksum2);
        if (error)
            goto nfsmout;

        if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
            error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
            goto nfsmout;
        }

        nd->nd_gss_seqnum = seqnum;

        /*
         * Set up the user's cred
         */
        bzero(&temp_pcred, sizeof(temp_pcred));
        temp_pcred.cr_uid = cp->gss_svc_uid;
        bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
            sizeof(gid_t) * cp->gss_svc_ngroups);
        temp_pcred.cr_ngroups = cp->gss_svc_ngroups;

        nd->nd_cr = posix_cred_create(&temp_pcred);
        if (nd->nd_cr == NULL) {
            error = ENOMEM;
            goto nfsmout;
        }
        clock_get_uptime(&cp->gss_svc_incarnation);

        /*
         * If the call arguments are integrity or privacy protected
         * then we need to check them here.
         */
        switch (service) {
        case RPCSEC_GSS_SVC_NONE:
            /* nothing to do */
            break;
        case RPCSEC_GSS_SVC_INTEGRITY:
            /*
             * Here's what we expect in the integrity call args:
             *
             * - length of seq num + call args (4 bytes)
             * - sequence number (4 bytes)
             * - call args (variable bytes)
             * - length of checksum token (37)
             * - checksum of seqnum + call args (37 bytes)
             */
            nfsm_chain_get_32(error, nmc, arglen);          // length of args
            if (arglen > NFS_MAXPACKET) {
                error = EBADRPC;
                goto nfsmout;
            }

            /* Compute the checksum over the call args */
            start = nfsm_chain_offset(nmc);
            nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, arglen, cksum1);

            /*
             * Get the sequence number prepended to the args
             * and compare it against the one sent in the
             * call credential.
             */
            nfsm_chain_get_32(error, nmc, seqnum);
            if (seqnum != nd->nd_gss_seqnum) {
                error = EBADRPC;                            // returns as GARBAGEARGS
                goto nfsmout;
            }

            /*
             * Advance to the end of the args and
             * fetch the checksum computed by the client.
             */
            nmc_tmp = *nmc;
            arglen -= NFSX_UNSIGNED;                        // skipped seqnum
            nfsm_chain_adv(error, &nmc_tmp, arglen);        // skip args
            nfsm_chain_get_32(error, &nmc_tmp, cksumlen);   // length of checksum
            if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
                error = EBADRPC;
                goto nfsmout;
            }
            nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
            if (error)
                goto nfsmout;
            error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,
                NULL, cksum2);

            /* Verify that the checksums are the same */
            if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
                error = EBADRPC;
                goto nfsmout;
            }
            break;
        case RPCSEC_GSS_SVC_PRIVACY:
            /*
             * Here's what we expect in the privacy call args:
             *
             * - length of confounder + seq num + token + call args
             * - wrap token (37-40 bytes)
             * - confounder (8 bytes)
             * - sequence number (4 bytes)
             * - call args (encrypted)
             */
            nfsm_chain_get_32(error, nmc, arglen);          // length of args
            if (arglen > NFS_MAXPACKET) {
                error = EBADRPC;
                goto nfsmout;
            }

            /* Get the token that prepends the encrypted args */
            nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
            if (error)
                goto nfsmout;
            error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 1,
                &toklen, cksum1);
            if (error)
                goto nfsmout;
            nfsm_chain_reverse(nmc, nfsm_pad(toklen));

            /* decrypt the 8 byte confounder + seqnum + args */
            start = nfsm_chain_offset(nmc);
            arglen -= toklen;
            nfs_gss_encrypt_chain(ki, nmc, start, arglen, DES_DECRYPT);

            /* Compute a checksum over the sequence number + results */
            nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, arglen, cksum2);

            /* Verify that the checksums are the same */
            if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
                error = EBADRPC;
                goto nfsmout;
            }

            /*
             * Get the sequence number prepended to the args
             * and compare it against the one sent in the
             * call credential.
             */
            nfsm_chain_adv(error, nmc, 8);  // skip over the confounder
            nfsm_chain_get_32(error, nmc, seqnum);
            if (seqnum != nd->nd_gss_seqnum) {
                error = EBADRPC;            // returns as GARBAGEARGS
                goto nfsmout;
            }
            break;
        }
    } else {
        /*
         * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
         * then we expect a null verifier.
         */
        nfsm_chain_get_32(error, nmc, flavor);
        nfsm_chain_get_32(error, nmc, verflen);
        if (error || flavor != RPCAUTH_NULL || verflen > 0)
            error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
        if (error) {
            if (proc == RPCSEC_GSS_INIT) {
                lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
                FREE(cp, M_TEMP);
                cp = NULL;
            }
            goto nfsmout;
        }
    }

    nd->nd_gss_context = cp;
    return (0);
nfsmout:
    if (cp)
        nfs_gss_svc_ctx_deref(cp);
    return (error);
}
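/*
 * Illustrative sketch (not part of the original source): parsing the
 * integrity-protected body described in the RPCAUTH_KRB5I case above from a
 * flat buffer laid out as [len][seqnum][args...][mic_len][mic...]. XDR
 * byte-order handling and bounds checks are omitted for brevity; the helper
 * name and signature are hypothetical.
 */
#if 0 /* sketch only - never compiled */
#include <stdint.h>
#include <string.h>

static int
sketch_parse_krb5i(const uint8_t *p, uint32_t expect_seq,
    const uint8_t **args, uint32_t *argslen)
{
    uint32_t len, seq;

    memcpy(&len, p, 4);  p += 4;    /* length of seqnum + args */
    memcpy(&seq, p, 4);  p += 4;    /* sequence number prepended to the args */
    if (seq != expect_seq)
        return -1;                  /* mismatch is returned as GARBAGEARGS */
    *args = p;                      /* args follow; the MIC token trails them */
    *argslen = len - 4;
    return 0;
}
#endif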
/*
 * Insert the server's verifier into the RPC reply header.
 * It contains a signed checksum of the sequence number that
 * was received in the RPC call.
 * Then go on to add integrity or privacy if necessary.
 */
int
nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
    struct nfs_gss_svc_ctx *cp;
    int error = 0;
    u_char tokbuf[KRB5_SZ_TOKEN(MAX_DIGEST)];
    int toklen;
    u_char cksum[MAX_DIGEST];
    gss_key_info *ki;

    cp = nd->nd_gss_context;
    ki = &cp->gss_svc_kinfo;

    if (cp->gss_svc_major != GSS_S_COMPLETE) {
        /*
         * If the context isn't yet complete
         * then return a null verifier.
         */
        nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
        nfsm_chain_add_32(error, nmc, 0);
        return (error);
    }

    /*
     * Compute checksum of the request seq number
     * If it's the final reply of context setup
     * then return the checksum of the context
     * window size.
     */
    if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
        cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
        nfs_gss_cksum_rep(ki, cp->gss_svc_seqwin, cksum);
    else
        nfs_gss_cksum_rep(ki, nd->nd_gss_seqnum, cksum);
    /*
     * Now wrap it in a token and add
     * the verifier to the reply.
     */
    toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
    nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
    nfsm_chain_add_32(error, nmc, toklen);
    nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);

    return (error);
}
/*
 * The results aren't available yet, but if they need to be
 * checksummed for integrity protection or encrypted, then
 * we can record the start offset here, insert a place-holder
 * for the results length, as well as the sequence number.
 * The rest of the work is done later by nfs_gss_svc_protect_reply()
 * when the results are available.
 */
int
nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
    struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
    int error = 0;

    if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
        cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
        return (0);

    switch (nd->nd_sec) {
    case RPCAUTH_KRB5:
        /* Nothing to do */
        break;
    case RPCAUTH_KRB5I:
        nd->nd_gss_mb = nmc->nmc_mcur;                      // record current mbuf
        nfsm_chain_finish_mbuf(error, nmc);                 // split the chain here
        nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum);   // req sequence number
        break;
    case RPCAUTH_KRB5P:
        nd->nd_gss_mb = nmc->nmc_mcur;                      // record current mbuf
        nfsm_chain_finish_mbuf(error, nmc);                 // split the chain here
        nfsm_chain_add_32(error, nmc, random());            // confounder bytes 1-4
        nfsm_chain_add_32(error, nmc, random());            // confounder bytes 5-8
        nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum);   // req sequence number
        break;
    }

    return (error);
}
/*
 * The results are checksummed or encrypted for return to the client
 */
int
nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep)
{
    struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
    struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
    struct nfsm_chain nmrep_pre, *nmc_pre = &nmrep_pre;
    mbuf_t mb, results;
    uint32_t reslen;
    u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
    int pad, toklen;
    u_char cksum[MAX_DIGEST];
    int error = 0;
    gss_key_info *ki = &cp->gss_svc_kinfo;

    /*
     * Using a reference to the mbuf where we previously split the reply
     * mbuf chain, we split the mbuf chain argument into two mbuf chains,
     * one that allows us to prepend a length field or token, (nmc_pre)
     * and the second which holds just the results that we're going to
     * checksum and/or encrypt. When we're done, we join the chains back
     * together.
     */
    nfs_gss_nfsm_chain(nmc_res, mrep);          // set up the results chain
    mb = nd->nd_gss_mb;                         // the mbuf where we split
    results = mbuf_next(mb);                    // first mbuf in the results
    reslen = nfs_gss_mchain_length(results);    // length of results
    error = mbuf_setnext(mb, NULL);             // disconnect the chains
    if (error)
        return (error);
    nfs_gss_nfsm_chain(nmc_pre, mb);            // set up the prepend chain

    if (nd->nd_sec == RPCAUTH_KRB5I) {
        nfsm_chain_add_32(error, nmc_pre, reslen);
        nfsm_chain_build_done(error, nmc_pre);
        if (error)
            return (error);
        nfs_gss_append_chain(nmc_pre, results); // Append the results mbufs

        /* Now compute the checksum over the results data */
        nfs_gss_cksum_mchain(ki, results, ALG_MIC(ki), 0, reslen, cksum);

        /* Put it into a token and append to the request */
        toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
        nfsm_chain_add_32(error, nmc_res, toklen);
        nfsm_chain_add_opaque(error, nmc_res, tokbuf, toklen);
        nfsm_chain_build_done(error, nmc_res);
    } else {
        /*
         * Append a pad trailer - per RFC 1964 section 1.2.2.3
         * Since XDR data is always 32-bit aligned, it
         * needs to be padded either by 4 bytes or 8 bytes.
         */
        if (reslen % 8 > 0) {
            nfsm_chain_add_32(error, nmc_res, 0x04040404);
            reslen += NFSX_UNSIGNED;
        } else {
            nfsm_chain_add_32(error, nmc_res, 0x08080808);
            nfsm_chain_add_32(error, nmc_res, 0x08080808);
            reslen += 2 * NFSX_UNSIGNED;
        }
        nfsm_chain_build_done(error, nmc_res);

        /* Now compute the checksum over the results data */
        nfs_gss_cksum_mchain(ki, results, ALG_WRAP(ki), 0, reslen, cksum);

        /* Put it into a token and insert in the reply */
        toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 0, reslen, cksum);
        nfsm_chain_add_32(error, nmc_pre, toklen + reslen);
        nfsm_chain_add_opaque_nopad(error, nmc_pre, tokbuf, toklen);
        nfsm_chain_build_done(error, nmc_pre);
        if (error)
            return (error);
        nfs_gss_append_chain(nmc_pre, results); // Append the results mbufs

        /* Encrypt the confounder + seqnum + results */
        nfs_gss_encrypt_mchain(ki, results, 0, reslen, DES_ENCRYPT);

        /* Add null XDR pad if the ASN.1 token misaligned the data */
        pad = nfsm_pad(toklen + reslen);
        if (pad > 0) {
            nfsm_chain_add_opaque_nopad(error, nmc_pre, iv0, pad);
            nfsm_chain_build_done(error, nmc_pre);
        }
    }

    return (error);
}
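/*
 * Illustrative sketch (not part of the original source): the pad-trailer rule
 * used in the privacy branch above. RFC 1964 section 1.2.2.3 pads the
 * plaintext to an 8-byte boundary with bytes whose value equals the pad
 * length; because XDR data is already 4-byte aligned, the trailer is either
 * four 0x04 bytes or eight 0x08 bytes.
 */
#if 0 /* sketch only - never compiled */
#include <stdint.h>

static unsigned
sketch_wrap_pad(uint32_t reslen, uint8_t pad[8])
{
    unsigned i, n = (reslen % 8) ? 4 : 8;   /* 4-byte aligned input: pad by 4 or 8 */

    for (i = 0; i < n; i++)
        pad[i] = (uint8_t)n;                /* pad bytes carry the pad length */
    return n;
}
#endif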
/*
 * This function handles the context setup calls from the client.
 * Essentially, it implements the NFS null procedure calls when
 * an RPCSEC_GSS credential is used.
 * This is the context maintenance function. It creates and
 * destroys server contexts at the whim of the client.
 * During context creation, it receives GSS-API tokens from the
 * client, passes them up to gssd, and returns a received token
 * back to the client in the null procedure reply.
 */
int
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
{
    struct nfs_gss_svc_ctx *cp = NULL;
    int error = 0;
    int autherr = 0;
    struct nfsm_chain *nmreq, nmrep;
    int sz;

    nmreq = &nd->nd_nmreq;
    nfsm_chain_null(&nmrep);
    *mrepp = NULL;
    cp = nd->nd_gss_context;
    nd->nd_repstat = 0;

    switch (cp->gss_svc_proc) {
    case RPCSEC_GSS_INIT:
        nfs_gss_svc_ctx_insert(cp);
        /* FALLTHRU */

    case RPCSEC_GSS_CONTINUE_INIT:
        /* Get the token from the request */
        nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
        if (cp->gss_svc_tokenlen == 0) {
            autherr = RPCSEC_GSS_CREDPROBLEM;
            break;
        }
        MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
        if (cp->gss_svc_token == NULL) {
            autherr = RPCSEC_GSS_CREDPROBLEM;
            break;
        }
        nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);

        /* Use the token in a gss_accept_sec_context upcall */
        error = nfs_gss_svc_gssd_upcall(cp);
        if (error) {
            autherr = RPCSEC_GSS_CREDPROBLEM;
            if (error == NFSERR_EAUTH)
                error = 0;
            break;
        }

        /*
         * If the context isn't complete, pass the new token
         * back to the client for another round.
         */
        if (cp->gss_svc_major != GSS_S_COMPLETE)
            break;

        /*
         * Now the server context is complete.
         * Finish setup.
         */
        clock_get_uptime(&cp->gss_svc_incarnation);

        cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
        MALLOC(cp->gss_svc_seqbits, uint32_t *,
            nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
        if (cp->gss_svc_seqbits == NULL) {
            autherr = RPCSEC_GSS_CREDPROBLEM;
            break;
        }
        break;

    case RPCSEC_GSS_DATA:
        /* Just a nullproc ping - do nothing */
        break;

    case RPCSEC_GSS_DESTROY:
        /*
         * Don't destroy the context immediately because
         * other active requests might still be using it.
         * Instead, schedule it for destruction after
         * GSS_CTX_PEND time has elapsed.
         */
        cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
        if (cp != NULL) {
            cp->gss_svc_handle = 0; // so it can't be found
            lck_mtx_lock(cp->gss_svc_mtx);
            clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
                &cp->gss_svc_incarnation);
            lck_mtx_unlock(cp->gss_svc_mtx);
        }
        break;

    default:
        autherr = RPCSEC_GSS_CREDPROBLEM;
        break;
    }

    /* Now build the reply */

    if (nd->nd_repstat == 0)
        nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
    sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results
    error = nfsrv_rephead(nd, slp, &nmrep, sz);
    *mrepp = nmrep.nmc_mhead;
    if (error || autherr)
        goto nfsmout;

    if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
        cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
        nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);

        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);

        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
        if (cp->gss_svc_token != NULL) {
            nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
            FREE(cp->gss_svc_token, M_TEMP);
            cp->gss_svc_token = NULL;
        }
    }

nfsmout:
    if (autherr != 0) {
        nd->nd_gss_context = NULL;
        LIST_REMOVE(cp, gss_svc_entries);
        if (cp->gss_svc_seqbits != NULL)
            FREE(cp->gss_svc_seqbits, M_TEMP);
        if (cp->gss_svc_token != NULL)
            FREE(cp->gss_svc_token, M_TEMP);
        lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
        FREE(cp, M_TEMP);
    }

    nfsm_chain_build_done(error, &nmrep);
    if (error) {
        nfsm_chain_cleanup(&nmrep);
        *mrepp = NULL;
    }
    return (error);
}
/*
 * This is almost a mirror-image of the client side upcall.
 * It passes and receives a token, but invokes gss_accept_sec_context.
 * If it's the final call of the context setup, then gssd also returns
 * the session key and the user's UID.
 */
static int
nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
{
    kern_return_t kr;
    mach_port_t mp;
    int retry_cnt = 0;
    gssd_byte_buffer okey = NULL;
    uint32_t skeylen = 0;
    uint32_t ret_flags;
    vm_map_copy_t itoken = NULL;
    gssd_byte_buffer otoken = NULL;
    mach_msg_type_number_t otokenlen;
    int error = 0;
    char svcname[] = "nfs";

    kr = host_get_gssd_port(host_priv_self(), &mp);
    if (kr != KERN_SUCCESS) {
        printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
        goto out;
    }
    if (!IPC_PORT_VALID(mp)) {
        printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
        goto out;
    }

    if (cp->gss_svc_tokenlen > 0)
        nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);

retry:
    kr = mach_gss_accept_sec_context(
        mp,
        (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
        svcname,
        0,
        &cp->gss_svc_context,
        &cp->gss_svc_cred_handle,
        &ret_flags,
        &cp->gss_svc_uid,
        cp->gss_svc_gids,
        &cp->gss_svc_ngroups,
        &okey, (mach_msg_type_number_t *) &skeylen,
        &otoken, &otokenlen,
        &cp->gss_svc_major,
        &cp->gss_svc_minor);

    if (kr != KERN_SUCCESS) {
        printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
        if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
            retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
            if (cp->gss_svc_tokenlen > 0)
                nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
            goto retry;
        }
        host_release_special_port(mp);
        goto out;
    }

    host_release_special_port(mp);

    if (skeylen > 0) {
        if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
            printf("nfs_gss_svc_gssd_upcall: bad key length (%d)\n", skeylen);
            vm_map_copy_discard((vm_map_copy_t) okey);
            vm_map_copy_discard((vm_map_copy_t) otoken);
            goto out;
        }
        error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_svc_kinfo.skey);
        if (error) {
            vm_map_copy_discard((vm_map_copy_t) otoken);
            goto out;
        }
        error = gss_key_init(&cp->gss_svc_kinfo, skeylen);
        if (error)
            goto out;
    }

    /* Free context token used as input */
    if (cp->gss_svc_token)
        FREE(cp->gss_svc_token, M_TEMP);
    cp->gss_svc_token = NULL;
    cp->gss_svc_tokenlen = 0;

    if (otokenlen > 0) {
        /* Set context token to gss output token */
        MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
        if (cp->gss_svc_token == NULL) {
            printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
            vm_map_copy_discard((vm_map_copy_t) otoken);
            return (ENOMEM);
        }
        error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
        if (error) {
            FREE(cp->gss_svc_token, M_TEMP);
            cp->gss_svc_token = NULL;
            return (NFSERR_EAUTH);
        }
        cp->gss_svc_tokenlen = otokenlen;
    }

    return (0);

out:
    FREE(cp->gss_svc_token, M_TEMP);
    cp->gss_svc_tokenlen = 0;
    cp->gss_svc_token = NULL;

    return (NFSERR_EAUTH);
}
/*
 * Validate the sequence number in the credential as described
 * in RFC 2203 Section 5.3.3.1
 *
 * Here the window of valid sequence numbers is represented by
 * a bitmap. As each sequence number is received, its bit is
 * set in the bitmap. An invalid sequence number lies below
 * the lower bound of the window, or is within the window but
 * has its bit already set.
 */
static int
nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
{
    uint32_t *bits = cp->gss_svc_seqbits;
    uint32_t win = cp->gss_svc_seqwin;
    uint32_t i;

    lck_mtx_lock(cp->gss_svc_mtx);

    /*
     * If greater than the window upper bound,
     * move the window up, and set the bit.
     */
    if (seq > cp->gss_svc_seqmax) {
        if (seq - cp->gss_svc_seqmax > win)
            bzero(bits, nfsm_rndup((win + 7) / 8));
        else
            for (i = cp->gss_svc_seqmax + 1; i < seq; i++)
                win_resetbit(bits, i % win);
        win_setbit(bits, seq % win);
        cp->gss_svc_seqmax = seq;
        lck_mtx_unlock(cp->gss_svc_mtx);
        return (1);
    }

    /*
     * Invalid if below the lower bound of the window
     */
    if (seq <= cp->gss_svc_seqmax - win) {
        lck_mtx_unlock(cp->gss_svc_mtx);
        return (0);
    }

    /*
     * In the window, invalid if the bit is already set
     */
    if (win_getbit(bits, seq % win)) {
        lck_mtx_unlock(cp->gss_svc_mtx);
        return (0);
    }
    win_setbit(bits, seq % win);
    lck_mtx_unlock(cp->gss_svc_mtx);
    return (1);
}
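/*
 * Illustrative sketch (not part of the original source): the RFC 2203
 * 5.3.3.1 window check above, reduced to a self-contained form. A sequence
 * number above the current maximum slides the window forward; one at or
 * below (max - window) is rejected; one inside the window is accepted only
 * if its bit is not already set (i.e. it is not a replay).
 */
#if 0 /* sketch only - never compiled */
#include <stdint.h>
#include <string.h>

#define SK_WIN 128u

struct sk_window {
    uint32_t seqmax;
    uint8_t  bits[SK_WIN / 8];
};

static int  sk_get(struct sk_window *w, uint32_t s)
{ return (w->bits[(s % SK_WIN) / 8] >> (s % 8)) & 1; }
static void sk_set(struct sk_window *w, uint32_t s)
{ w->bits[(s % SK_WIN) / 8] |= (uint8_t)(1u << (s % 8)); }
static void sk_clr(struct sk_window *w, uint32_t s)
{ w->bits[(s % SK_WIN) / 8] &= (uint8_t)~(1u << (s % 8)); }

static int
sk_seq_valid(struct sk_window *w, uint32_t seq)
{
    uint32_t i;

    if (seq > w->seqmax) {                  /* window moves up */
        if (seq - w->seqmax > SK_WIN)
            memset(w->bits, 0, sizeof(w->bits));
        else
            for (i = w->seqmax + 1; i < seq; i++)
                sk_clr(w, i);               /* skipped seqnums become "unseen" */
        sk_set(w, seq);
        w->seqmax = seq;
        return 1;
    }
    if (seq <= w->seqmax - SK_WIN)
        return 0;                           /* below the window */
    if (sk_get(w, seq))
        return 0;                           /* replay inside the window */
    sk_set(w, seq);
    return 1;
}
#endif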
/*
 * Drop a reference to a context
 *
 * Note that it's OK for the context to exist
 * with a refcount of zero. The refcount isn't
 * checked until we're about to reap an expired one.
 */
void
nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
{
    lck_mtx_lock(cp->gss_svc_mtx);
    if (cp->gss_svc_refcnt > 0)
        cp->gss_svc_refcnt--;
    else
        printf("nfs_gss_ctx_deref: zero refcount\n");
    lck_mtx_unlock(cp->gss_svc_mtx);
}
/*
 * Called at NFS server shutdown - destroy all contexts
 */
void
nfs_gss_svc_cleanup(void)
{
    struct nfs_gss_svc_ctx_hashhead *head;
    struct nfs_gss_svc_ctx *cp, *ncp;
    int i;

    lck_mtx_lock(nfs_gss_svc_ctx_mutex);

    /*
     * Run through all the buckets
     */
    for (i = 0; i < SVC_CTX_HASHSZ; i++) {
        /*
         * Remove and free all entries in the bucket
         */
        head = &nfs_gss_svc_ctx_hashtbl[i];
        LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
            LIST_REMOVE(cp, gss_svc_entries);
            if (cp->gss_svc_seqbits)
                FREE(cp->gss_svc_seqbits, M_TEMP);
            lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
            FREE(cp, M_TEMP);
        }
    }

    lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}

#endif /* NFSSERVER */
/*
 * The following functions are used by both client and server.
 */

/*
 * Release a host special port that was obtained by host_get_special_port
 * or one of its macros (host_get_gssd_port in this case).
 * This really should be in a public kpi.
 */

/* This should be in a public header if this routine is not */
extern void ipc_port_release_send(ipc_port_t);
extern ipc_port_t ipc_port_copy_send(ipc_port_t);

static void
host_release_special_port(mach_port_t mp)
{
    if (IPC_PORT_VALID(mp))
        ipc_port_release_send(mp);
}

static mach_port_t
host_copy_special_port(mach_port_t mp)
{
    return (ipc_port_copy_send(mp));
}
/*
 * The token that is sent and received in the gssd upcall
 * has unbounded variable length. Mach RPC does not pass
 * the token in-line. Instead it uses page mapping to handle
 * these parameters. This function allocates a VM buffer
 * to hold the token for an upcall and copies the token
 * (received from the client) into it. The VM buffer is
 * marked with a src_destroy flag so that the upcall will
 * automatically de-allocate the buffer when the upcall is
 * complete.
 */
static void
nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr)
{
    kern_return_t kr;
    vm_offset_t kmem_buf;
    vm_size_t tbuflen;

    if (buf == NULL || buflen == 0)
        return;

    tbuflen = vm_map_round_page(buflen,
        vm_map_page_mask(ipc_kernel_map));
    kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_FILE));
    if (kr != 0) {
        printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
        return;
    }

    kr = vm_map_wire(ipc_kernel_map,
        vm_map_trunc_page(kmem_buf,
            vm_map_page_mask(ipc_kernel_map)),
        vm_map_round_page(kmem_buf + tbuflen,
            vm_map_page_mask(ipc_kernel_map)),
        VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE), FALSE);
    if (kr != 0) {
        printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
        return;
    }

    bcopy(buf, (void *) kmem_buf, buflen);
    // Shouldn't need to bzero below since vm_allocate returns zeroed pages
    // bzero(kmem_buf + buflen, tbuflen - buflen);

    kr = vm_map_unwire(ipc_kernel_map,
        vm_map_trunc_page(kmem_buf,
            vm_map_page_mask(ipc_kernel_map)),
        vm_map_round_page(kmem_buf + tbuflen,
            vm_map_page_mask(ipc_kernel_map)),
        FALSE);
    if (kr != 0) {
        printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
        return;
    }

    kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
        (vm_map_size_t) buflen, TRUE, addr);
    if (kr != 0) {
        printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
        return;
    }
}

/*
 * Here we handle a token received from the gssd via an upcall.
 * The received token resides in an allocated VM buffer.
 * We copy the token out of this buffer to a chunk of malloc'ed
 * memory of the right size, then de-allocate the VM buffer.
 */
static int
nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
{
    vm_map_offset_t map_data;
    vm_offset_t data;
    int error;

    error = vm_map_copyout(ipc_kernel_map, &map_data, in);
    if (error)
        return (error);

    data = CAST_DOWN(vm_offset_t, map_data);
    bcopy((void *) data, out, len);
    vm_deallocate(ipc_kernel_map, data, len);

    return (0);
}
/*
 * Encode an ASN.1 token to be wrapped in an RPCSEC_GSS verifier.
 * Returns the size of the token, since it contains a variable
 * length DER encoded size field.
 */
static int
nfs_gss_token_put(
    gss_key_info *ki,
    u_char *alg,
    u_char *p,
    int initiator,
    int datalen,
    u_char *cksum)
{
    static uint32_t seqnum = 0;
    u_char *psave = p;
    u_char plain[8];
    int toklen, i;

    /*
     * Fill in the token header: 2 octets.
     * This is 0x06 - an ASN.1 tag for APPLICATION, 0, SEQUENCE
     * followed by the length of the token: 35 + 0 octets for a
     * MIC token, or 35 + encrypted octets for a wrap token;
     */
    *p++ = 0x060;
    toklen = KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + HASHLEN(ki);
    nfs_gss_der_length_put(&p, toklen + datalen);

    /*
     * Fill in the DER encoded mech OID for Kerberos v5.
     * This represents the Kerberos OID 1.2.840.113554.1.2.2
     * described in RFC 2623, section 4.2
     */
    bcopy(krb5_mech, p, sizeof(krb5_mech));
    p += sizeof(krb5_mech);

    /*
     * Now at the token described in RFC 1964, section 1.2.1
     * Fill in the token ID, integrity algorithm indicator,
     * for DES MAC MD5, and four filler octets.
     * The alg string encodes the bytes to represent either
     * a MIC token or a WRAP token for Kerberos.
     */
    bcopy(alg, p, KRB5_SZ_ALG);
    p += KRB5_SZ_ALG;

    /*
     * Now encode the sequence number according to
     * RFC 1964, section 1.2.1.2 which dictates 4 octets
     * of sequence number followed by 4 bytes of direction
     * indicator: 0x00 for initiator or 0xff for acceptor.
     * We DES CBC encrypt the sequence number using the first
     * 8 octets of the checksum field as an initialization
     * vector.
     * Note that this sequence number is not at all related
     * to the RPCSEC_GSS protocol sequence number. This
     * number is private to the ASN.1 token. The only
     * requirement is that it not be repeated in case the
     * server has replay detection on, which normally should
     * not be the case, since RFC 2203 section 5.2.3 says that
     * replay detection and sequence checking must be turned off.
     */
    seqnum++;
    for (i = 0; i < 4; i++)
        plain[i] = (u_char) ((seqnum >> (i * 8)) & 0xff);
    for (i = 4; i < 8; i++)
        plain[i] = initiator ? 0x00 : 0xff;
    gss_des_crypt(ki, (des_cblock *) plain, (des_cblock *) p, 8,
        (des_cblock *) cksum, NULL, DES_ENCRYPT, KG_USAGE_SEQ);
    p += 8;

    /*
     * Finally, append the octets of the
     * checksum of the alg + plaintext data.
     * The plaintext could be an RPC call header,
     * the window value, or a sequence number.
     */
    bcopy(cksum, p, HASHLEN(ki));
    p += HASHLEN(ki);

    return (p - psave);
}
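/*
 * Illustrative sketch (not part of the original source): the size of the MIC
 * token the routine above emits for an 8-octet checksum, i.e. the DES MAC MD5
 * case. It also explains the "37" quoted in the integrity-args comments
 * elsewhere in this file.
 */
#if 0 /* sketch only - never compiled */
#include <stddef.h>

/* 2 (0x60 + 1-byte DER length) + 11 (mech OID) + 8 (alg + filler)
 * + 8 (encrypted SND_SEQ) + 8 (checksum) = 37 octets */
enum { SK_HDR = 2, SK_MECH = 11, SK_ALG = 8, SK_SEQ = 8, SK_CKSUM = 8 };
static const size_t sk_mic_token_len = SK_HDR + SK_MECH + SK_ALG + SK_SEQ + SK_CKSUM;
#endif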
/*
 * Determine size of ASN.1 DER length
 */
static int
nfs_gss_der_length_size(int len)
{
    return
        len < (1 <<  7) ? 1 :
        len < (1 <<  8) ? 2 :
        len < (1 << 16) ? 3 :
        len < (1 << 24) ? 4 : 5;
}

/*
 * Encode an ASN.1 DER length field
 */
static void
nfs_gss_der_length_put(u_char **pp, int len)
{
    int sz = nfs_gss_der_length_size(len);
    u_char *p = *pp;

    if (sz == 1) {
        *p++ = (u_char) len;
    } else {
        *p++ = (u_char) ((sz-1) | 0x80);
        sz -= 1;
        while (sz--)
            *p++ = (u_char) ((len >> (sz * 8)) & 0xff);
    }

    *pp = p;
}

/*
 * Decode an ASN.1 DER length field
 */
static int
nfs_gss_der_length_get(u_char **pp)
{
    u_char *p = *pp;
    uint32_t flen, len = 0;

    flen = *p & 0x7f;

    if ((*p++ & 0x80) == 0)
        len = flen;
    else {
        if (flen > sizeof(uint32_t))
            return (-1);
        while (flen--)
            len = (len << 8) + *p++;
    }

    *pp = p;
    return (len);
}
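/*
 * Illustrative sketch (not part of the original source): the short-form /
 * long-form DER length rule the two routines above implement. Lengths below
 * 128 take a single octet; anything larger is (0x80 | n) followed by n
 * length octets, most significant first. The helper name is local to this
 * sketch.
 */
#if 0 /* sketch only - never compiled */
#include <stdint.h>

/* Encode, then decode, a DER length into/out of buf; returns the decoded value. */
static int
sketch_der_roundtrip(int len, uint8_t buf[5])
{
    uint8_t *p = buf;
    int sz = len < (1 << 7) ? 1 : len < (1 << 8) ? 2 :
        len < (1 << 16) ? 3 : len < (1 << 24) ? 4 : 5;
    int out = 0, n;

    if (sz == 1) {
        *p++ = (uint8_t)len;                        /* short form */
    } else {
        *p++ = (uint8_t)((sz - 1) | 0x80);          /* long form: count of octets */
        for (n = sz - 2; n >= 0; n--)
            *p++ = (uint8_t)((len >> (n * 8)) & 0xff);
    }

    p = buf;
    if ((*p & 0x80) == 0)
        return *p;                                  /* short form decodes directly */
    n = *p++ & 0x7f;
    while (n--)
        out = (out << 8) + *p++;
    return out;
}
#endif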
/*
 * Decode an ASN.1 token from an RPCSEC_GSS verifier.
 */
static int
nfs_gss_token_get(
    gss_key_info *ki,
    u_char *alg,
    u_char *p,
    int initiator,
    uint32_t *len,
    u_char *cksum)
{
    uint32_t d;
    u_char plain[8];
    u_char *psave = p;
    int seqnum, i;

    /*
     * Check that we have a valid token header
     */
    if (*p++ != 0x60)
        return (AUTH_BADCRED);
    (void) nfs_gss_der_length_get(&p); // ignore the size

    /*
     * Check that we have the DER encoded Kerberos v5 mech OID
     */
    if (bcmp(p, krb5_mech, sizeof(krb5_mech)) != 0)
        return (AUTH_BADCRED);
    p += sizeof(krb5_mech);

    /*
     * Now check the token ID, DES MAC MD5 algorithm
     * indicator, and filler octets.
     */
    if (bcmp(p, alg, KRB5_SZ_ALG) != 0)
        return (AUTH_BADCRED);
    p += KRB5_SZ_ALG;

    /*
     * Now decrypt the sequence number.
     * Note that the gss decryption uses the first 8 octets
     * of the checksum field as an initialization vector (p + 8).
     * Per RFC 2203 section 5.2.2 we don't check the sequence number
     * in the ASN.1 token because the RPCSEC_GSS protocol has its
     * own sequence number described in section 5.3.3.1
     */
    seqnum = 0;
    gss_des_crypt(ki, (des_cblock *)p, (des_cblock *) plain, 8,
        (des_cblock *) (p + 8), NULL, DES_DECRYPT, KG_USAGE_SEQ);
    p += 8;
    for (i = 0; i < 4; i++)
        seqnum |= plain[i] << (i * 8);

    /*
     * Make sure the direction
     * indicator octets are correct.
     */
    d = initiator ? 0x00 : 0xff;
    for (i = 4; i < 8; i++)
        if (plain[i] != d)
            return (AUTH_BADCRED);

    /*
     * Finally, get the checksum
     */
    bcopy(p, cksum, HASHLEN(ki));
    p += HASHLEN(ki);

    if (len != NULL)
        *len = p - psave;

    return (0);
}
/*
 * Return the number of bytes in an mbuf chain.
 */
static int
nfs_gss_mchain_length(mbuf_t mhead)
{
    mbuf_t mb;
    int len = 0;

    for (mb = mhead; mb; mb = mbuf_next(mb))
        len += mbuf_len(mb);

    return (len);
}

/*
 * Append an args or results mbuf chain to the header chain
 */
static int
nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc)
{
    int error = 0;
    mbuf_t mb, tail;

    /* Connect the mbuf chains */
    error = mbuf_setnext(nmc->nmc_mcur, mc);
    if (error)
        return (error);

    /* Find the last mbuf in the chain */
    tail = NULL;
    for (mb = mc; mb; mb = mbuf_next(mb))
        tail = mb;

    nmc->nmc_mcur = tail;
    nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
    nmc->nmc_left = mbuf_trailingspace(tail);

    return (0);
}

/*
 * Convert an mbuf chain to an NFS mbuf chain
 */
static void
nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
{
    mbuf_t mb, tail;

    /* Find the last mbuf in the chain */
    tail = NULL;
    for (mb = mc; mb; mb = mbuf_next(mb))
        tail = mb;

    nmc->nmc_mhead = mc;
    nmc->nmc_mcur = tail;
    nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
    nmc->nmc_left = mbuf_trailingspace(tail);
    nmc->nmc_flags = 0;
}
/*
 * Compute a checksum over an mbuf chain.
 * Start building an MD5 digest at the given offset and keep
 * going until the end of data in the current mbuf is reached.
 * Then convert the 16 byte MD5 digest to an 8 byte DES CBC
 * checksum.
 */
static void
nfs_gss_cksum_mchain(
    gss_key_info *ki,
    mbuf_t mhead,
    u_char *alg,
    int offset,
    int len,
    u_char *digest)
{
    mbuf_t mb;
    u_char *ptr;
    int left, bytes;
    GSS_DIGEST_CTX context;

    gss_digest_Init(&context, ki);

    /*
     * Logically prepend the first 8 bytes of the algorithm
     * field as required by RFC 1964, section 1.2.1.1
     */
    gss_digest_Update(&context, alg, KRB5_SZ_ALG);

    /*
     * Move down the mbuf chain until we reach the given
     * byte offset, then start MD5 on the mbuf data until
     * we've done len bytes.
     */
    for (mb = mhead; mb && len > 0; mb = mbuf_next(mb)) {
        ptr = mbuf_data(mb);
        left = mbuf_len(mb);
        if (offset >= left) {
            /* Offset not yet reached */
            offset -= left;
            continue;
        }
        /* At or beyond offset - checksum data */
        ptr += offset;
        left -= offset;
        offset = 0;

        bytes = left < len ? left : len;
        if (bytes > 0)
            gss_digest_Update(&context, ptr, bytes);
        len -= bytes;
    }

    gss_digest_Final(&context, digest);
}

/*
 * Compute a checksum over an NFS mbuf chain.
 * Start building an MD5 digest at the given offset and keep
 * going until the end of data in the current mbuf is reached.
 * Then convert the 16 byte MD5 digest to an 8 byte DES CBC
 * checksum.
 */
static void
nfs_gss_cksum_chain(
    gss_key_info *ki,
    struct nfsm_chain *nmc,
    u_char *alg,
    int offset,
    int len,
    u_char *cksum)
{
    /*
     * If the length parameter is zero, then we need
     * to use the length from the offset to the current
     * encode/decode offset.
     */
    if (len == 0)
        len = nfsm_chain_offset(nmc) - offset;

    return (nfs_gss_cksum_mchain(ki, nmc->nmc_mhead, alg, offset, len, cksum));
}
/*
 * Compute a checksum of the sequence number (or sequence window)
 * of an RPCSEC_GSS reply.
 */
static void
nfs_gss_cksum_rep(gss_key_info *ki, uint32_t seqnum, u_char *cksum)
{
    GSS_DIGEST_CTX context;
    uint32_t val = htonl(seqnum);

    gss_digest_Init(&context, ki);

    /*
     * Logically prepend the first 8 bytes of the MIC
     * token as required by RFC 1964, section 1.2.1.1
     */
    gss_digest_Update(&context, ALG_MIC(ki), KRB5_SZ_ALG);

    /*
     * Compute the digest of the seqnum in network order
     */
    gss_digest_Update(&context, &val, 4);
    gss_digest_Final(&context, cksum);
}
/*
 * Encrypt or decrypt data in an mbuf chain with des-cbc.
 */
static void
nfs_gss_encrypt_mchain(
    gss_key_info *ki,
    mbuf_t mhead,
    int offset,
    int len,
    int encrypt)
{
    mbuf_t mb, mbn;
    u_char *ptr, *nptr;
    u_char tmp[8], ivec[8];
    int left, left8, remain;

    bzero(ivec, 8);

    /*
     * Move down the mbuf chain until we reach the given
     * byte offset, then start encrypting the mbuf data until
     * we've done len bytes.
     */
    for (mb = mhead; mb && len > 0; mb = mbn) {
        mbn = mbuf_next(mb);
        ptr = mbuf_data(mb);
        left = mbuf_len(mb);
        if (offset >= left) {
            /* Offset not yet reached */
            offset -= left;
            continue;
        }
        /* At or beyond offset - encrypt data */
        ptr += offset;
        left -= offset;
        offset = 0;

        /*
         * DES or DES3 CBC has to encrypt 8 bytes at a time.
         * If the number of bytes to be encrypted in this
         * mbuf isn't some multiple of 8 bytes, encrypt all
         * the 8 byte blocks, then combine the remaining
         * bytes with enough from the next mbuf to make up
         * an 8 byte block and encrypt that block separately,
         * i.e. that block is split across two mbufs.
         */
        remain = left % 8;
        left8 = left - remain;
        left = left8 < len ? left8 : len;
        if (left > 0) {
            gss_des_crypt(ki, (des_cblock *) ptr, (des_cblock *) ptr,
                left, &ivec, &ivec, encrypt, KG_USAGE_SEAL);
            len -= left;
        }

        if (mbn && remain > 0) {
            nptr = mbuf_data(mbn);
            offset = 8 - remain;
            bcopy(ptr + left, tmp, remain);         // grab from this mbuf
            bcopy(nptr, tmp + remain, offset);      // grab from next mbuf
            gss_des_crypt(ki, (des_cblock *) tmp, (des_cblock *) tmp, 8,
                &ivec, &ivec, encrypt, KG_USAGE_SEAL);
            bcopy(tmp, ptr + left, remain);         // return to this mbuf
            bcopy(tmp + remain, nptr, offset);      // return to next mbuf
            len -= 8;
        }
    }
}

/*
 * Encrypt or decrypt data in an NFS mbuf chain with des-cbc.
 */
static void
nfs_gss_encrypt_chain(
    gss_key_info *ki,
    struct nfsm_chain *nmc,
    int offset,
    int len,
    int encrypt)
{
    /*
     * If the length parameter is zero, then we need
     * to use the length from the offset to the current
     * encode/decode offset.
     */
    if (len == 0)
        len = nfsm_chain_offset(nmc) - offset;

    return (nfs_gss_encrypt_mchain(ki, nmc->nmc_mhead, offset, len, encrypt));
}
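/*
 * Illustrative sketch (not part of the original source): handling a cipher
 * block that straddles two buffers, as the mbuf loop above does when an
 * mbuf's byte count is not a multiple of 8. The trailing bytes of the first
 * buffer and the leading bytes of the next are gathered into a temporary
 * block, transformed, and scattered back. sketch_cipher8() stands in for the
 * real gss_des_crypt() call.
 */
#if 0 /* sketch only - never compiled */
#include <stdint.h>
#include <string.h>

static void sketch_cipher8(uint8_t block[8]);

static void
sketch_split_block(uint8_t *this_buf, unsigned remain, uint8_t *next_buf)
{
    uint8_t tmp[8];
    unsigned lead = 8 - remain;

    memcpy(tmp, this_buf, remain);          /* grab from this buffer */
    memcpy(tmp + remain, next_buf, lead);   /* grab from the next buffer */
    sketch_cipher8(tmp);
    memcpy(this_buf, tmp, remain);          /* return to this buffer */
    memcpy(next_buf, tmp + remain, lead);   /* return to the next buffer */
}
#endif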
/*
 * The routines that follow provide abstractions for doing digests and crypto.
 */

static void
gss_digest_Init(GSS_DIGEST_CTX *ctx, gss_key_info *ki)
{
    ctx->type = ki->type;
    switch (ki->type) {
    case NFS_GSS_1DES:
        MD5_DESCBC_Init(&ctx->m_ctx, &ki->ks_u.des.gss_sched);
        break;
    case NFS_GSS_3DES:
        HMAC_SHA1_DES3KD_Init(&ctx->h_ctx, ki->ks_u.des3.ckey, 0);
        break;
    default:
        printf("gss_digest_Init: Unknown key info type %d\n", ki->type);
    }
}

static void
gss_digest_Update(GSS_DIGEST_CTX *ctx, void *data, size_t len)
{
    switch (ctx->type) {
    case NFS_GSS_1DES:
        MD5_DESCBC_Update(&ctx->m_ctx, data, len);
        break;
    case NFS_GSS_3DES:
        HMAC_SHA1_DES3KD_Update(&ctx->h_ctx, data, len);
        break;
    }
}

static void
gss_digest_Final(GSS_DIGEST_CTX *ctx, void *digest)
{
    switch (ctx->type) {
    case NFS_GSS_1DES:
        MD5_DESCBC_Final(digest, &ctx->m_ctx);
        break;
    case NFS_GSS_3DES:
        HMAC_SHA1_DES3KD_Final(digest, &ctx->h_ctx);
        break;
    }
}

static void
gss_des_crypt(gss_key_info *ki, des_cblock *in, des_cblock *out,
    int32_t len, des_cblock *iv, des_cblock *retiv, int encrypt, int usage)
{
    switch (ki->type) {
    case NFS_GSS_1DES:
        {
            des_cbc_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
                &ki->ks_u.des.gss_sched_Ke :
                &ki->ks_u.des.gss_sched);
            des_cbc_encrypt(in, out, len, sched, iv, retiv, encrypt);
        }
        break;
    case NFS_GSS_3DES:
        des3_cbc_encrypt(in, out, len, &ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
        break;
    }
}
static int
gss_key_init(gss_key_info *ki, uint32_t skeylen)
{
    size_t i;
    int rc;
    des_cblock k[3];

    ki->keybytes = skeylen;
    switch (skeylen) {
    case sizeof(des_cblock):
        ki->type = NFS_GSS_1DES;
        ki->hash_len = MD5_DESCBC_DIGEST_LENGTH;
        ki->ks_u.des.key = (des_cblock *)ki->skey;
        rc = des_cbc_key_sched(ki->ks_u.des.key, &ki->ks_u.des.gss_sched);
        if (rc)
            return (rc);
        for (i = 0; i < ki->keybytes; i++)
            k[0][i] = 0xf0 ^ (*ki->ks_u.des.key)[i];
        rc = des_cbc_key_sched(&k[0], &ki->ks_u.des.gss_sched_Ke);
        break;
    case 3*sizeof(des_cblock):
        ki->type = NFS_GSS_3DES;
        ki->hash_len = SHA_DIGEST_LENGTH;
        ki->ks_u.des3.key = (des_cblock (*)[3])ki->skey;
        des3_derive_key(*ki->ks_u.des3.key, ki->ks_u.des3.ckey,
            KEY_USAGE_DES3_SIGN, KEY_USAGE_LEN);
        rc = des3_cbc_key_sched(*ki->ks_u.des3.key, &ki->ks_u.des3.gss_sched);
        if (rc)
            return (rc);
        break;
    default:
        printf("gss_key_init: Invalid key length %d\n", skeylen);
        rc = EINVAL;
        break;
    }

    return (rc);
}
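/*
 * Illustrative sketch (not part of the original source): the key-length
 * dispatch above in isolation -- an 8-byte session key selects single DES
 * with MD5/DES-CBC checksums, a 24-byte key selects triple DES with
 * HMAC-SHA1 derived keys; anything else is rejected. Names are local to the
 * sketch.
 */
#if 0 /* sketch only - never compiled */
#include <stdint.h>
#include <errno.h>

enum sketch_kt { SKETCH_1DES, SKETCH_3DES };

static int
sketch_key_type(uint32_t skeylen, enum sketch_kt *type)
{
    switch (skeylen) {
    case 8:                 /* sizeof(des_cblock) */
        *type = SKETCH_1DES;
        return 0;
    case 24:                /* 3 * sizeof(des_cblock) */
        *type = SKETCH_3DES;
        return 0;
    default:
        return EINVAL;
    }
}
#endif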
#define DISPLAYLEN 16
#define MAXDISPLAYLEN 256

static void
hexdump(const char *msg, void *data, size_t len)
{
    size_t i, j;
    u_char *d = data;
    char *p, disbuf[3*DISPLAYLEN+1];

    printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
    if (len > MAXDISPLAYLEN)
        len = MAXDISPLAYLEN;

    for (i = 0; i < len; i += DISPLAYLEN) {
        for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3)
            snprintf(p, 4, "%02x ", d[i + j]);
        printf("\t%s\n", disbuf);
    }
}