/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <nfs/nfs_conf.h>
/*
 * These functions implement RPCSEC_GSS security for the NFS client and server.
 * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5
 * protection as described in Internet RFC 2203 and 2623.
 *
 * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
 * It requires that the client and server negotiate a secure connection as part of a
 * security context. The context state is maintained in client and server structures.
 * On the client side, each user of an NFS mount is assigned their own context,
 * identified by UID, on their first use of the mount, and it persists until the
 * unmount or until the context is renewed. Each user context has a corresponding
 * server context which the server maintains until the client destroys it, or
 * until the context expires.
 *
 * The client and server contexts are set up dynamically. When a user attempts
 * to send an NFS request, if there is no context for the user, then one is
 * set up via an exchange of NFS null procedure calls as described in RFC 2203.
 * During this exchange, the client and server pass a security token that is
 * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
 * the user to the server (and vice-versa). The client and server also receive
 * a unique session key that can be used to digitally sign the credentials and
 * verifier or optionally to provide data integrity and/or privacy.
 *
 * Once the context is complete, the client and server enter a normal data
 * exchange phase - beginning with the NFS request that prompted the context
 * creation. During this phase, the client's RPC header contains an RPCSEC_GSS
 * credential and verifier, and the server returns a verifier as well.
 * For simple authentication, the verifier contains a signed checksum of the
 * RPC header, including the credential. The server's verifier has a signed
 * checksum of the current sequence number.
 *
 * Each client call contains a sequence number that nominally increases by one
 * on each request. The sequence number is intended to prevent replay attacks.
 * Since the protocol can be used over UDP, there is some allowance for
 * out-of-sequence requests, so the server checks whether the sequence numbers
 * are within a sequence "window". If a sequence number is outside the lower
 * bound of the window, the server silently drops the request. This has some
 * implications for retransmission. If a request needs to be retransmitted, the
 * client must bump the sequence number even if the request XID is unchanged.
 *
 * When the NFS mount is unmounted, the client sends a "destroy" credential
 * to delete the server's context for each user of the mount. Since it's
 * possible for the client to crash or disconnect without sending the destroy
 * message, the server has a thread that reaps contexts that have been idle
 * too long.
 */
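/*
 * For reference, RFC 2203 (sec. 5.3.1) defines the RPCSEC_GSS credential body
 * that is marshalled into each request. This is only a sketch of the
 * on-the-wire XDR layout, not code from this file:
 *
 *	struct rpc_gss_cred_vers_1_t {
 *		rpc_gss_proc_t	gss_proc;	// RPCSEC_GSS_INIT, CONTINUE_INIT, DATA, or DESTROY
 *		unsigned int	seq_num;	// request sequence number
 *		rpc_gss_service_t service;	// none, integrity, or privacy
 *		opaque		handle<>;	// server-assigned context handle
 *	};
 *
 * nfs_gss_clnt_cred_put() below writes the version word (RPCSEC_GSS_VERS_1)
 * followed by these fields after the RPCSEC_GSS flavor and credential-length
 * words; the verifier that follows is a GSS MIC computed over the RPC header
 * up to and including this credential.
 */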
#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/mount_internal.h>
#include <sys/vnode.h>

#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/ucred.h>

#include <kern/host.h>
#include <kern/task.h>
#include <libkern/libkern.h>

#include <mach/task.h>
#include <mach/host_special_ports.h>
#include <mach/host_priv.h>
#include <mach/thread_act.h>
#include <mach/mig_errors.h>
#include <mach/vm_map.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <gssd/gssd_mach.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>

#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>
#include <mach_assert.h>
#include <kern/assert.h>
#define ASSERT(EX) assert(EX)

#define NFS_GSS_MACH_MAX_RETRIES 3

#define NFS_GSS_DBG(...) NFS_DBG(NFS_FAC_GSS, 7, ## __VA_ARGS__)
#define NFS_GSS_ISDBG (NFS_DEBUG_FACILITY & NFS_FAC_GSS)

#if CONFIG_NFS_SERVER
u_long nfs_gss_svc_ctx_hash;
struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
lck_mtx_t *nfs_gss_svc_ctx_mutex;
lck_grp_t *nfs_gss_svc_grp;
uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
#define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
#endif /* CONFIG_NFS_SERVER */

#if CONFIG_NFS_CLIENT
lck_grp_t *nfs_gss_clnt_grp;
#endif /* CONFIG_NFS_CLIENT */

#define KRB5_MAX_MIC_SIZE 128
uint8_t krb5_mech_oid[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
static uint8_t xdrpad[] = { 0x00, 0x00, 0x00, 0x00};
#if CONFIG_NFS_CLIENT
static int	nfs_gss_clnt_ctx_find(struct nfsreq *);
static int	nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int	nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int	nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static uint8_t	*nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, size_t *);
static int	nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t);
void	nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *);
static void	nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *);
static int	nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *, struct nfs_gss_clnt_ctx **);
static void	nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *);
static void	nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t);
#endif /* CONFIG_NFS_CLIENT */
#if CONFIG_NFS_SERVER
static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
static void	nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
static void	nfs_gss_svc_ctx_timer(void *, void *);
static int	nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
static int	nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);

/* This is only used by server code */
static void	nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
#endif /* CONFIG_NFS_SERVER */
static void	host_release_special_port(mach_port_t);
static mach_port_t host_copy_special_port(mach_port_t);
static void	nfs_gss_mach_alloc_buffer(u_char *, size_t, vm_map_copy_t *);
static int	nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);

static int	nfs_gss_mchain_length(mbuf_t);
static int	nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
#if CONFIG_NFS_SERVER
thread_call_t nfs_gss_svc_ctx_timer_call;
int nfs_gss_timer_on = 0;
uint32_t nfs_gss_ctx_count = 0;
const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
#endif /* CONFIG_NFS_SERVER */
/*
 * Initialization when NFS starts
 */
#if CONFIG_NFS_CLIENT
	nfs_gss_clnt_grp = lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL);
#endif /* CONFIG_NFS_CLIENT */

#if CONFIG_NFS_SERVER
	nfs_gss_svc_grp = lck_grp_alloc_init("rpcsec_gss_svc", LCK_GRP_ATTR_NULL);

	nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);
	nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);

	nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
#endif /* CONFIG_NFS_SERVER */
/*
 * Common RPCSEC_GSS support routines
 */

rpc_gss_prepend_32(mbuf_t *mb, uint32_t value)
{
	data = mbuf_data(*mb);
	/*
	 * If a wrap token comes back and is not aligned
	 * get a new buffer (which should be aligned) to put the
	 * length in.
	 */
	if ((uintptr_t)data & 0x3) {
		error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &nmb);
		mbuf_setnext(nmb, *mb);
	}

	error = mbuf_prepend(mb, sizeof(uint32_t), MBUF_WAITOK);

	data = mbuf_data(*mb);
	*data = txdr_unsigned(value);
/*
 * Prepend the sequence number to the XDR encoded argument or result.
 * The sequence number is prepended in its own mbuf.
 *
 * On successful return mbp_head will point to the old mbuf chain
 * prepended with a new mbuf that has the sequence number.
 */
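/*
 * A sketch of the resulting chain (illustrative only):
 *
 *	*mbp_head -> [ new mbuf: 4-byte XDR seqnum ] -> [ original arg/result mbufs ... ]
 *
 * The new mbuf's data pointer is pushed toward the end of its buffer so that
 * later prepends (length words, tokens) can be done in place.
 */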
rpc_gss_data_create(mbuf_t *mbp_head, uint32_t seqnum)
{
	struct nfsm_chain nmc;
	struct nfsm_chain *nmcp = &nmc;

	error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &mb);
	data = mbuf_data(mb);
	/* Reserve space for prepending */
	len = mbuf_maxlen(mb);
	len = (len & ~0x3) - NFSX_UNSIGNED;
	printf("%s: data = %p, len = %d\n", __func__, data, (int)len);
	error = mbuf_setdata(mb, data + len, 0);
	if (error || mbuf_trailingspace(mb)) {
		printf("%s: data = %p trailingspace = %d error = %d\n", __func__,
		    mbuf_data(mb), (int)mbuf_trailingspace(mb), error);
	}

	/* Reserve 16 words for prepending */
	error = mbuf_setdata(mb, data + 16 * sizeof(uint32_t), 0);
	nfsm_chain_init(nmcp, mb);
	nfsm_chain_add_32(error, nmcp, seqnum);
	nfsm_chain_build_done(error, nmcp);

	mbuf_setnext(nmcp->nmc_mcur, *mbp_head);
	*mbp_head = nmcp->nmc_mhead;
/*
 * Create an rpc_gss_integ_data_t given an argument or result in mb_head.
 * On successful return mb_head will point to the rpc_gss_integ_data_t of length len.
 * Note mb_head will now point to a 4 byte sequence number. len does not include
 * any extra xdr padding.
 * Returns 0 on success, else an errno_t
 */
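/*
 * Per RFC 2203 (sec. 5.3.2.2) the integrity-protected body looks like this on
 * the wire (XDR sketch for reference, not code from this file):
 *
 *	struct rpc_gss_integ_data {
 *		opaque databody_integ<>;	// XDR seq_num followed by the bare args/results
 *		opaque checksum<>;		// GSS MIC (gss_get_mic) of databody_integ
 *	};
 *
 * So the code below prepends the sequence number, computes a MIC over
 * (seq_num + data), prepends the opaque length, and appends the MIC token.
 */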
rpc_gss_integ_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len)
{
	struct nfsm_chain nmc;

	/* Length of the argument or result */
	length = nfs_gss_mchain_length(*mb_head);

	error = rpc_gss_data_create(mb_head, seqnum);

	/*
	 * length is the length of the rpc_gss_data
	 */
	length += NFSX_UNSIGNED;    /* Add the sequence number to the length */
	major = gss_krb5_get_mic_mbuf(&error, ctx, 0, *mb_head, 0, length, &mic);
	if (major != GSS_S_COMPLETE) {
		printf("gss_krb5_get_mic_mbuf failed %d\n", error);
	}

	error = rpc_gss_prepend_32(mb_head, length);

	nfsm_chain_dissect_init(error, &nmc, *mb_head);
	/* Append GSS mic token by advancing rpc_gss_data_t length + NFSX_UNSIGNED (size of the length field) */
	nfsm_chain_adv(error, &nmc, length + NFSX_UNSIGNED);
	nfsm_chain_finish_mbuf(error, &nmc); // Force the mic into its own sub chain.
	nfsm_chain_add_32(error, &nmc, mic.length);
	nfsm_chain_add_opaque(error, &nmc, mic.value, mic.length);
	nfsm_chain_build_done(error, &nmc);
	gss_release_buffer(NULL, &mic);

	// printmbuf("rpc_gss_integ_data_create done", *mb_head, 0, 0);
	assert(nmc.nmc_mhead == *mb_head);
/*
 * Create an rpc_gss_priv_data_t out of the supplied raw arguments or results in mb_head.
 * On successful return mb_head will point to a wrap token of length len.
 * Note len does not include any xdr padding.
 * Returns 0 on success, else an errno_t
 */
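/*
 * Per RFC 2203 (sec. 5.3.2.3) the privacy-protected body is (XDR sketch, for
 * reference only):
 *
 *	struct rpc_gss_priv_data {
 *		opaque databody_priv<>;	// gss_wrap() token covering XDR seq_num + bare args/results
 *	};
 *
 * i.e. the sequence number and data are wrapped (encrypted) in place and the
 * resulting token is emitted as a single XDR opaque.
 */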
rpc_gss_priv_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len)
{
	struct nfsm_chain nmc;

	error = rpc_gss_data_create(mb_head, seqnum);

	length = nfs_gss_mchain_length(*mb_head);
	major = gss_krb5_wrap_mbuf(&error, ctx, 1, 0, mb_head, 0, length, NULL);
	if (major != GSS_S_COMPLETE) {
	}

	length = nfs_gss_mchain_length(*mb_head);

	pad = nfsm_pad(length);

	/* Prepend the opaque length of the rpc_gss_priv_data */
	error = rpc_gss_prepend_32(mb_head, length);

	nfsm_chain_dissect_init(error, &nmc, *mb_head);
	/* Advance the opaque size of length and length data */
	nfsm_chain_adv(error, &nmc, NFSX_UNSIGNED + length);
	nfsm_chain_finish_mbuf(error, &nmc);
	nfsm_chain_add_opaque_nopad(error, &nmc, xdrpad, pad);
	nfsm_chain_build_done(error, &nmc);
#if CONFIG_NFS_CLIENT

/*
 * Restore the argument or result from an rpc_gss_integ_data mbuf chain.
 * We have a four byte sequence number, len arguments, and an opaque
 * encoded mic, possibly followed by some pad bytes. The mic and possible
 * pad bytes are on their own sub mbuf chains.
 *
 * On successful return mb_head is the chain of the xdr args or results sans
 * the sequence number and mic and return 0. Otherwise return an errno.
 */
rpc_gss_integ_data_restore(gss_ctx_id_t ctx __unused, mbuf_t *mb_head, size_t len)
{
	mbuf_t mb = *mb_head;
	mbuf_t tail = NULL, next;

	/* Chop off the opaque length and seq number */
	mbuf_adj(mb, 2 * NFSX_UNSIGNED);

	/* should only be one, ... but */
	for (; mb; mb = next) {
		next = mbuf_next(mb);
		if (mbuf_len(mb) == 0) {
		}
	}

	for (; mb && len; mb = mbuf_next(mb)) {
		if (mbuf_len(mb) <= len) {
		}
	}

	mbuf_setnext(tail, NULL);
/*
 * Restore the argument or result from an rpc_gss_priv_data mbuf chain.
 * mb_head points to the wrap token of length len.
 *
 * On successful return mb_head is our original xdr arg or result and
 * the return value is 0. Otherwise return an errno.
 */
rpc_gss_priv_data_restore(gss_ctx_id_t ctx, mbuf_t *mb_head, size_t len)
{
	uint32_t major, error;
	mbuf_t mb = *mb_head, next;
	gss_qop_t qop = GSS_C_QOP_REVERSE;

	/* Chop off the opaque length */
	mbuf_adj(mb, NFSX_UNSIGNED);
	/* If we have padding, drop it */
	plen = nfsm_pad(len);

	for (length = 0; length < len && mb; mb = mbuf_next(mb)) {
		length += mbuf_len(mb);
	}
	if ((length != len) || (mb == NULL) || (tail == NULL)) {
	}
	mbuf_setnext(tail, NULL);

	major = gss_krb5_unwrap_mbuf(&error, ctx, mb_head, 0, len, NULL, &qop);
	if (major != GSS_S_COMPLETE) {
		printf("gss_krb5_unwrap_mbuf failed. major = %d minor = %d\n", (int)major, error);
	}

	/* Drop the sequence number */
	mbuf_adj(mb, NFSX_UNSIGNED);
	assert(mbuf_len(mb) == 0);

	/* Chop off any empty mbufs */
	for (mb = *mb_head; mb; mb = next) {
		next = mbuf_next(mb);
		if (mbuf_len(mb) == 0) {
/*
 * Find the context for a particular user.
 *
 * If the context doesn't already exist
 * then create a new context for this user.
 *
 * Note that the code allows superuser (uid == 0)
 * to adopt the context of another user.
 *
 * We'll match on the audit session ids, since those
 * processes will have access to the same credential cache.
 */
#define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
#define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)

#define SAFE_CAST_INTTYPE( type, intval ) \
	( (type)(intval)/(sizeof(type) < sizeof(intval) ? 0 : 1) )
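/*
 * Note: SAFE_CAST_INTTYPE casts intval to type, but if type is narrower than
 * intval's type the divisor becomes 0, so the lossy cast gets flagged (by a
 * compiler warning or a runtime trap) instead of silently truncating.
 * nfs_cred_getasid2uid() below uses it to fold an audit session id into a uid_t.
 */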
nfs_cred_getasid2uid(kauth_cred_t cred)
{
	uid_t result = SAFE_CAST_INTTYPE(uid_t, kauth_cred_getasid(cred));
533 nfs_gss_clnt_ctx_dump(struct nfsmount
*nmp
)
535 struct nfs_gss_clnt_ctx
*cp
;
537 lck_mtx_lock(&nmp
->nm_lock
);
538 NFS_GSS_DBG("Enter\n");
539 TAILQ_FOREACH(cp
, &nmp
->nm_gsscl
, gss_clnt_entries
) {
540 lck_mtx_lock(cp
->gss_clnt_mtx
);
541 printf("context %d/%d: refcnt = %d, flags = %x\n",
542 kauth_cred_getasid(cp
->gss_clnt_cred
),
543 kauth_cred_getauid(cp
->gss_clnt_cred
),
544 cp
->gss_clnt_refcnt
, cp
->gss_clnt_flags
);
545 lck_mtx_unlock(cp
->gss_clnt_mtx
);
547 NFS_GSS_DBG("Exit\n");
548 lck_mtx_unlock(&nmp
->nm_lock
);
552 nfs_gss_clnt_ctx_name(struct nfsmount
*nmp
, struct nfs_gss_clnt_ctx
*cp
, char *buf
, int len
)
556 const char *server
= "";
558 if (nmp
&& nmp
->nm_mountp
) {
559 server
= vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
;
563 snprintf(buf
, len
, "[%s] NULL context", server
);
567 if (cp
->gss_clnt_principal
&& !cp
->gss_clnt_display
) {
568 np
= (char *)cp
->gss_clnt_principal
;
569 nlen
= cp
->gss_clnt_prinlen
;
571 np
= cp
->gss_clnt_display
;
572 nlen
= np
? strlen(cp
->gss_clnt_display
) : 0;
575 snprintf(buf
, len
, "[%s] %.*s %d/%d %s", server
, nlen
> INT_MAX
? INT_MAX
: (int)nlen
, np
,
576 kauth_cred_getasid(cp
->gss_clnt_cred
),
577 kauth_cred_getuid(cp
->gss_clnt_cred
),
578 cp
->gss_clnt_principal
? "" : "[from default cred] ");
580 snprintf(buf
, len
, "[%s] using default %d/%d ", server
,
581 kauth_cred_getasid(cp
->gss_clnt_cred
),
582 kauth_cred_getuid(cp
->gss_clnt_cred
));
#define NFS_CTXBUFSZ 80
#define NFS_GSS_CTX(req, cp) nfs_gss_clnt_ctx_name((req)->r_nmp, cp ? cp : (req)->r_gss_ctx, CTXBUF, sizeof(CTXBUF))

#define NFS_GSS_CLNT_CTX_DUMP(nmp)	\
	do {	\
		if (NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x2))	\
			nfs_gss_clnt_ctx_dump((nmp));	\
	} while (0)
597 nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1
, kauth_cred_t cred2
)
599 if (kauth_cred_getasid(cred1
) == kauth_cred_getasid(cred2
)) {
/*
 * Busy the mount for each principal set on the mount
 * so that the automounter will not unmount the file
 * system underneath us. Without this, if an unmount
 * occurs the principal that is set for an audit session
 * will be lost and we may end up with a different identity.
 *
 * Note setting principals on the mount is a bad idea. This
 * really should be handled by KIM (Kerberos Identity Management)
 * so that defaults can be set by service identities.
 */
618 nfs_gss_clnt_mnt_ref(struct nfsmount
*nmp
)
624 !(vfs_flags(nmp
->nm_mountp
) & MNT_AUTOMOUNTED
)) {
628 error
= VFS_ROOT(nmp
->nm_mountp
, &rvp
, NULL
);
630 error
= vnode_ref(rvp
);
638 * Unbusy the mount. See above comment,
642 nfs_gss_clnt_mnt_rele(struct nfsmount
*nmp
)
648 !(vfs_flags(nmp
->nm_mountp
) & MNT_AUTOMOUNTED
)) {
652 error
= VFS_ROOT(nmp
->nm_mountp
, &rvp
, NULL
);
661 int nfs_root_steals_ctx
= 0;
664 nfs_gss_clnt_ctx_find_principal(struct nfsreq
*req
, uint8_t *principal
, size_t plen
, uint32_t nt
)
666 struct nfsmount
*nmp
= req
->r_nmp
;
667 struct nfs_gss_clnt_ctx
*cp
, *tcp
;
671 char CTXBUF
[NFS_CTXBUFSZ
];
673 treq
= zalloc_flags(nfs_req_zone
, Z_WAITOK
| Z_ZERO
);
677 lck_mtx_lock(&nmp
->nm_lock
);
678 TAILQ_FOREACH_SAFE(cp
, &nmp
->nm_gsscl
, gss_clnt_entries
, tcp
) {
679 lck_mtx_lock(cp
->gss_clnt_mtx
);
680 if (cp
->gss_clnt_flags
& GSS_CTX_DESTROY
) {
681 NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
682 NFS_GSS_CTX(req
, cp
),
683 cp
->gss_clnt_refcnt
);
684 lck_mtx_unlock(cp
->gss_clnt_mtx
);
687 if (nfs_gss_clnt_ctx_cred_match(cp
->gss_clnt_cred
, req
->r_cred
)) {
688 if (nmp
->nm_gsscl
.tqh_first
!= cp
) {
689 TAILQ_REMOVE(&nmp
->nm_gsscl
, cp
, gss_clnt_entries
);
690 TAILQ_INSERT_HEAD(&nmp
->nm_gsscl
, cp
, gss_clnt_entries
);
694 * If we have a principal, but it does not match the current cred
695 * mark it for removal
697 if (cp
->gss_clnt_prinlen
!= plen
|| cp
->gss_clnt_prinnt
!= nt
||
698 bcmp(cp
->gss_clnt_principal
, principal
, plen
) != 0) {
699 cp
->gss_clnt_flags
|= (GSS_CTX_INVAL
| GSS_CTX_DESTROY
);
700 cp
->gss_clnt_refcnt
++;
701 lck_mtx_unlock(cp
->gss_clnt_mtx
);
702 NFS_GSS_DBG("Marking %s for deletion because %s does not match\n",
703 NFS_GSS_CTX(req
, cp
), principal
);
704 NFS_GSS_DBG("len = (%zu,%zu), nt = (%d,%d)\n", cp
->gss_clnt_prinlen
, plen
,
705 cp
->gss_clnt_prinnt
, nt
);
706 treq
->r_gss_ctx
= cp
;
711 if (cp
->gss_clnt_flags
& GSS_CTX_INVAL
) {
713 * If we're still being used and we're not expired
714 * just return and don't bother gssd again. Note if
715 * gss_clnt_nctime is zero it is about to be set to now.
717 if (cp
->gss_clnt_nctime
+ GSS_NEG_CACHE_TO
>= now
.tv_sec
|| cp
->gss_clnt_nctime
== 0) {
718 NFS_GSS_DBG("Context %s (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n",
719 NFS_GSS_CTX(req
, cp
), cp
->gss_clnt_refcnt
, cp
->gss_clnt_nctime
, now
.tv_sec
);
720 lck_mtx_unlock(cp
->gss_clnt_mtx
);
721 lck_mtx_unlock(&nmp
->nm_lock
);
722 NFS_ZFREE(nfs_req_zone
, treq
);
725 if (cp
->gss_clnt_refcnt
) {
726 struct nfs_gss_clnt_ctx
*ncp
;
			/*
			 * If this context has references, we can't use it so we mark it for
			 * destruction and create a new context based on this one in the
			 * same manner as renewing one.
			 */
732 cp
->gss_clnt_flags
|= GSS_CTX_DESTROY
;
733 NFS_GSS_DBG("Context %s has expired but we still have %d references\n",
734 NFS_GSS_CTX(req
, cp
), cp
->gss_clnt_refcnt
);
735 error
= nfs_gss_clnt_ctx_copy(cp
, &ncp
);
736 lck_mtx_unlock(cp
->gss_clnt_mtx
);
738 lck_mtx_unlock(&nmp
->nm_lock
);
739 NFS_ZFREE(nfs_req_zone
, treq
);
745 if (cp
->gss_clnt_nctime
) {
748 lck_mtx_unlock(cp
->gss_clnt_mtx
);
749 TAILQ_REMOVE(&nmp
->nm_gsscl
, cp
, gss_clnt_entries
);
753 /* Found a valid context to return */
754 cp
->gss_clnt_refcnt
++;
756 lck_mtx_unlock(cp
->gss_clnt_mtx
);
757 lck_mtx_unlock(&nmp
->nm_lock
);
758 NFS_ZFREE(nfs_req_zone
, treq
);
761 lck_mtx_unlock(cp
->gss_clnt_mtx
);
764 if (!cp
&& nfs_root_steals_ctx
&& principal
== NULL
&& kauth_cred_getuid(req
->r_cred
) == 0) {
766 * If superuser is trying to get access, then co-opt
767 * the first valid context in the list.
768 * XXX Ultimately, we need to allow superuser to
769 * go ahead and attempt to set up its own context
770 * in case one is set up for it.
772 TAILQ_FOREACH(cp
, &nmp
->nm_gsscl
, gss_clnt_entries
) {
773 if (!(cp
->gss_clnt_flags
& (GSS_CTX_INVAL
| GSS_CTX_DESTROY
))) {
774 nfs_gss_clnt_ctx_ref(req
, cp
);
775 lck_mtx_unlock(&nmp
->nm_lock
);
776 NFS_GSS_DBG("Root stole context %s\n", NFS_GSS_CTX(req
, NULL
));
777 NFS_ZFREE(nfs_req_zone
, treq
);
783 NFS_GSS_DBG("Context %s%sfound in Neg Cache @ %ld\n",
784 NFS_GSS_CTX(req
, cp
),
785 cp
== NULL
? " not " : "",
786 cp
== NULL
? 0L : cp
->gss_clnt_nctime
);
789 * Not found - create a new context
793 MALLOC(cp
, struct nfs_gss_clnt_ctx
*, sizeof(*cp
), M_TEMP
, M_WAITOK
| M_ZERO
);
795 lck_mtx_unlock(&nmp
->nm_lock
);
796 NFS_ZFREE(nfs_req_zone
, treq
);
799 cp
->gss_clnt_cred
= req
->r_cred
;
800 kauth_cred_ref(cp
->gss_clnt_cred
);
801 cp
->gss_clnt_mtx
= lck_mtx_alloc_init(nfs_gss_clnt_grp
, LCK_ATTR_NULL
);
802 cp
->gss_clnt_ptime
= now
.tv_sec
- GSS_PRINT_DELAY
;
804 MALLOC(cp
->gss_clnt_principal
, uint8_t *, plen
+ 1, M_TEMP
, M_WAITOK
| M_ZERO
);
805 memcpy(cp
->gss_clnt_principal
, principal
, plen
);
806 cp
->gss_clnt_prinlen
= plen
;
807 cp
->gss_clnt_prinnt
= nt
;
808 cp
->gss_clnt_flags
|= GSS_CTX_STICKY
;
809 if (!nfs_gss_clnt_mnt_ref(nmp
)) {
810 cp
->gss_clnt_flags
|= GSS_CTX_USECOUNT
;
814 uint32_t oldflags
= cp
->gss_clnt_flags
;
815 nfs_gss_clnt_ctx_clean(cp
);
		/*
		 * If we have a principal and we found a matching audit
		 * session, then to get here, the principal had to match.
		 * When walking the context list, if a context's principal
		 * does not match (or is not set), we mark that context
		 * for destruction, set cp to NULL, and fall into the
		 * if clause above. If the context still has references
		 * we copy the context, which preserves the principal,
		 * and we end up here with the correct principal set.
		 * If we don't have references then the principal must have
		 * matched and we will fall through here.
		 */
829 cp
->gss_clnt_flags
|= GSS_CTX_STICKY
;
		/*
		 * We preserve the old flag if it is set, and we take a ref if it is not set.
		 * Also, because of the short circuit we will not take extra refs here.
		 */
835 if ((oldflags
& GSS_CTX_USECOUNT
) || !nfs_gss_clnt_mnt_ref(nmp
)) {
836 cp
->gss_clnt_flags
|= GSS_CTX_USECOUNT
;
841 cp
->gss_clnt_thread
= current_thread();
842 nfs_gss_clnt_ctx_ref(req
, cp
);
843 TAILQ_INSERT_HEAD(&nmp
->nm_gsscl
, cp
, gss_clnt_entries
);
844 lck_mtx_unlock(&nmp
->nm_lock
);
846 error
= nfs_gss_clnt_ctx_init_retry(req
, cp
); // Initialize new context
848 NFS_GSS_DBG("nfs_gss_clnt_ctx_init_retry returned %d for %s\n", error
, NFS_GSS_CTX(req
, cp
));
849 nfs_gss_clnt_ctx_unref(req
);
	/* Remove any old matching context that had a different principal */
853 nfs_gss_clnt_ctx_unref(treq
);
854 NFS_ZFREE(nfs_req_zone
, treq
);
859 nfs_gss_clnt_ctx_find(struct nfsreq
*req
)
861 return nfs_gss_clnt_ctx_find_principal(req
, NULL
, 0, 0);
865 * Inserts an RPCSEC_GSS credential into an RPC header.
866 * After the credential is inserted, the code continues
867 * to build the verifier which contains a signed checksum
872 nfs_gss_clnt_cred_put(struct nfsreq
*req
, struct nfsm_chain
*nmc
, mbuf_t args
)
874 struct nfs_gss_clnt_ctx
*cp
;
878 int slpflag
, recordmark
= 0, offset
;
882 slpflag
= (PZERO
- 1);
884 slpflag
|= (NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
885 recordmark
= (req
->r_nmp
->nm_sotype
== SOCK_STREAM
);
889 if (req
->r_gss_ctx
== NULL
) {
891 * Find the context for this user.
892 * If no context is found, one will
895 error
= nfs_gss_clnt_ctx_find(req
);
903 * If the context thread isn't null, then the context isn't
904 * yet complete and is for the exclusive use of the thread
905 * doing the context setup. Wait until the context thread
908 lck_mtx_lock(cp
->gss_clnt_mtx
);
909 if (cp
->gss_clnt_thread
&& cp
->gss_clnt_thread
!= current_thread()) {
910 cp
->gss_clnt_flags
|= GSS_NEEDCTX
;
911 msleep(cp
, cp
->gss_clnt_mtx
, slpflag
| PDROP
, "ctxwait", NULL
);
913 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
916 nfs_gss_clnt_ctx_unref(req
);
919 lck_mtx_unlock(cp
->gss_clnt_mtx
);
921 if (cp
->gss_clnt_flags
& GSS_CTX_COMPLETE
) {
923 * Get a sequence number for this request.
924 * Check whether the oldest request in the window is complete.
925 * If it's still pending, then wait until it's done before
926 * we allocate a new sequence number and allow this request
929 lck_mtx_lock(cp
->gss_clnt_mtx
);
930 while (win_getbit(cp
->gss_clnt_seqbits
,
931 ((cp
->gss_clnt_seqnum
- cp
->gss_clnt_seqwin
) + 1) % cp
->gss_clnt_seqwin
)) {
932 cp
->gss_clnt_flags
|= GSS_NEEDSEQ
;
933 msleep(cp
, cp
->gss_clnt_mtx
, slpflag
| PDROP
, "seqwin", NULL
);
935 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
938 lck_mtx_lock(cp
->gss_clnt_mtx
);
939 if (cp
->gss_clnt_flags
& GSS_CTX_INVAL
) {
				/* Renewed while we were waiting */
941 lck_mtx_unlock(cp
->gss_clnt_mtx
);
942 nfs_gss_clnt_ctx_unref(req
);
946 seqnum
= ++cp
->gss_clnt_seqnum
;
947 win_setbit(cp
->gss_clnt_seqbits
, seqnum
% cp
->gss_clnt_seqwin
);
948 lck_mtx_unlock(cp
->gss_clnt_mtx
);
950 MALLOC(gsp
, struct gss_seq
*, sizeof(*gsp
), M_TEMP
, M_WAITOK
| M_ZERO
);
954 gsp
->gss_seqnum
= seqnum
;
955 SLIST_INSERT_HEAD(&req
->r_gss_seqlist
, gsp
, gss_seqnext
);
958 /* Insert the credential */
959 nfsm_chain_add_32(error
, nmc
, RPCSEC_GSS
);
960 nfsm_chain_add_32(error
, nmc
, 5 * NFSX_UNSIGNED
+ cp
->gss_clnt_handle_len
);
961 nfsm_chain_add_32(error
, nmc
, RPCSEC_GSS_VERS_1
);
962 nfsm_chain_add_32(error
, nmc
, cp
->gss_clnt_proc
);
963 nfsm_chain_add_32(error
, nmc
, seqnum
);
964 nfsm_chain_add_32(error
, nmc
, cp
->gss_clnt_service
);
965 nfsm_chain_add_32(error
, nmc
, cp
->gss_clnt_handle_len
);
966 if (cp
->gss_clnt_handle_len
> 0) {
967 if (cp
->gss_clnt_handle
== NULL
) {
970 nfsm_chain_add_opaque(error
, nmc
, cp
->gss_clnt_handle
, cp
->gss_clnt_handle_len
);
976 * Now add the verifier
978 if (cp
->gss_clnt_proc
== RPCSEC_GSS_INIT
||
979 cp
->gss_clnt_proc
== RPCSEC_GSS_CONTINUE_INIT
) {
981 * If the context is still being created
982 * then use a null verifier.
984 nfsm_chain_add_32(error
, nmc
, RPCAUTH_NULL
); // flavor
985 nfsm_chain_add_32(error
, nmc
, 0); // length
986 nfsm_chain_build_done(error
, nmc
);
988 nfs_gss_append_chain(nmc
, args
);
993 offset
= recordmark
? NFSX_UNSIGNED
: 0; // record mark
994 nfsm_chain_build_done(error
, nmc
);
996 major
= gss_krb5_get_mic_mbuf((uint32_t *)&error
, cp
->gss_clnt_ctx_id
, 0, nmc
->nmc_mhead
, offset
, 0, &mic
);
997 if (major
!= GSS_S_COMPLETE
) {
998 printf("gss_krb5_get_mic_buf failed %d\n", error
);
1002 nfsm_chain_add_32(error
, nmc
, RPCSEC_GSS
); // flavor
1003 nfsm_chain_add_32(error
, nmc
, mic
.length
); // length
1004 nfsm_chain_add_opaque(error
, nmc
, mic
.value
, mic
.length
);
1005 (void)gss_release_buffer(NULL
, &mic
);
1006 nfsm_chain_build_done(error
, nmc
);
1012 * Now we may have to compute integrity or encrypt the call args
1013 * per RFC 2203 Section 5.3.2
1015 switch (cp
->gss_clnt_service
) {
1016 case RPCSEC_GSS_SVC_NONE
:
1018 nfs_gss_append_chain(nmc
, args
);
1021 case RPCSEC_GSS_SVC_INTEGRITY
:
		/*
		 * r_gss_arglen is the length of the args mbuf going into the routine.
		 * It's used to find the mic if we need to restore the args.
		 */
1026 /* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_integ_data_t */
1027 assert(req
->r_mrest
== args
);
1028 nfsm_chain_finish_mbuf(error
, nmc
);
1032 error
= rpc_gss_integ_data_create(cp
->gss_clnt_ctx_id
, &args
, seqnum
, &req
->r_gss_arglen
);
1036 req
->r_mrest
= args
;
1037 req
->r_gss_argoff
= nfsm_chain_offset(nmc
);
1038 nfs_gss_append_chain(nmc
, args
);
1040 case RPCSEC_GSS_SVC_PRIVACY
:
		/*
		 * r_gss_arglen is the length of the wrap token sans any padding length.
		 * It's used to find any XDR padding of the wrap token.
		 */
1045 /* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_priv_data_t */
1046 assert(req
->r_mrest
== args
);
1047 nfsm_chain_finish_mbuf(error
, nmc
);
1051 error
= rpc_gss_priv_data_create(cp
->gss_clnt_ctx_id
, &args
, seqnum
, &req
->r_gss_arglen
);
1055 req
->r_mrest
= args
;
1056 req
->r_gss_argoff
= nfsm_chain_offset(nmc
);
1057 nfs_gss_append_chain(nmc
, args
);
/*
 * When receiving a reply, the client checks the verifier
 * returned by the server. Check that the verifier is the
 * correct type, then extract the sequence number checksum
 * from the token in the credential and compare it with a
 * computed checksum of the sequence number in the request
 * that was sent.
 */
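/*
 * For reference: per RFC 2203 (sec. 5.3.3.2), the verifier the server returns
 * on a data request is a MIC computed over the XDR-encoded sequence number of
 * the request, which is why the code below verifies the checksum against each
 * sequence number recorded in the request's r_gss_seqlist.
 */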
1075 nfs_gss_clnt_verf_get(
1077 struct nfsm_chain
*nmc
,
1080 uint32_t *accepted_statusp
)
1082 gss_buffer_desc cksum
;
1083 uint32_t seqnum
= 0;
1085 struct nfs_gss_clnt_ctx
*cp
= req
->r_gss_ctx
;
1086 struct nfsm_chain nmc_tmp
;
1087 struct gss_seq
*gsp
;
1090 mbuf_t results_mbuf
, prev_mbuf
, pad_mbuf
;
1091 size_t ressize
, offset
;
1094 *accepted_statusp
= 0;
1097 return NFSERR_EAUTH
;
1100 * If it's not an RPCSEC_GSS verifier, then it has to
1101 * be a null verifier that resulted from either
1102 * a CONTINUE_NEEDED reply during context setup or
1103 * from the reply to an AUTH_UNIX call from a dummy
1104 * context that resulted from a fallback to sec=sys.
1106 if (verftype
!= RPCSEC_GSS
) {
1107 if (verftype
!= RPCAUTH_NULL
) {
1108 return NFSERR_EAUTH
;
1110 if (cp
->gss_clnt_flags
& GSS_CTX_COMPLETE
) {
1111 return NFSERR_EAUTH
;
1114 nfsm_chain_adv(error
, nmc
, nfsm_rndup(verflen
));
1116 nfsm_chain_get_32(error
, nmc
, *accepted_statusp
);
1121 * If we received an RPCSEC_GSS verifier but the
1122 * context isn't yet complete, then it must be
1123 * the context complete message from the server.
1124 * The verifier will contain an encrypted checksum
1125 * of the window but we don't have the session key
1126 * yet so we can't decrypt it. Stash the verifier
1127 * and check it later in nfs_gss_clnt_ctx_init() when
1128 * the context is complete.
1130 if (!(cp
->gss_clnt_flags
& GSS_CTX_COMPLETE
)) {
1131 if (verflen
> KRB5_MAX_MIC_SIZE
) {
1134 MALLOC(cp
->gss_clnt_verf
, u_char
*, verflen
, M_TEMP
, M_WAITOK
| M_ZERO
);
1135 if (cp
->gss_clnt_verf
== NULL
) {
1138 cp
->gss_clnt_verflen
= verflen
;
1139 nfsm_chain_get_opaque(error
, nmc
, verflen
, cp
->gss_clnt_verf
);
1140 nfsm_chain_get_32(error
, nmc
, *accepted_statusp
);
1144 if (verflen
> KRB5_MAX_MIC_SIZE
) {
1147 cksum
.length
= verflen
;
1148 MALLOC(cksum
.value
, void *, verflen
, M_TEMP
, M_WAITOK
);
1153 nfsm_chain_get_opaque(error
, nmc
, verflen
, cksum
.value
);
1155 FREE(cksum
.value
, M_TEMP
);
1160 * Search the request sequence numbers for this reply, starting
1161 * with the most recent, looking for a checksum that matches
1162 * the one in the verifier returned by the server.
1164 SLIST_FOREACH(gsp
, &req
->r_gss_seqlist
, gss_seqnext
) {
1165 gss_buffer_desc seqnum_buf
;
1166 uint32_t network_seqnum
= htonl(gsp
->gss_seqnum
);
1168 seqnum_buf
.length
= sizeof(network_seqnum
);
1169 seqnum_buf
.value
= &network_seqnum
;
1170 major
= gss_krb5_verify_mic(NULL
, cp
->gss_clnt_ctx_id
, &seqnum_buf
, &cksum
, NULL
);
1171 if (major
== GSS_S_COMPLETE
) {
1175 FREE(cksum
.value
, M_TEMP
);
1177 return NFSERR_EAUTH
;
1181 * Get the RPC accepted status
1183 nfsm_chain_get_32(error
, nmc
, *accepted_statusp
);
1184 if (*accepted_statusp
!= RPC_SUCCESS
) {
1189 * Now we may have to check integrity or decrypt the results
1190 * per RFC 2203 Section 5.3.2
1192 switch (cp
->gss_clnt_service
) {
1193 case RPCSEC_GSS_SVC_NONE
:
1196 case RPCSEC_GSS_SVC_INTEGRITY
:
1198 * Here's what we expect in the integrity results from RFC 2203:
1200 * - length of seq num + results (4 bytes)
1201 * - sequence number (4 bytes)
1202 * - results (variable bytes)
1203 * - length of checksum token
1204 * - checksum of seqnum + results
1207 nfsm_chain_get_32(error
, nmc
, reslen
); // length of results
1208 if (reslen
> NFS_MAXPACKET
) {
1213 /* Advance and fetch the mic */
1215 nfsm_chain_adv(error
, &nmc_tmp
, reslen
); // skip over the results
1216 nfsm_chain_get_32(error
, &nmc_tmp
, cksum
.length
);
1217 if (cksum
.length
> KRB5_MAX_MIC_SIZE
) {
1221 MALLOC(cksum
.value
, void *, cksum
.length
, M_TEMP
, M_WAITOK
);
1222 nfsm_chain_get_opaque(error
, &nmc_tmp
, cksum
.length
, cksum
.value
);
		//XXX chop off the cksum?
1225 /* Call verify mic */
1226 offset
= nfsm_chain_offset(nmc
);
1227 major
= gss_krb5_verify_mic_mbuf((uint32_t *)&error
, cp
->gss_clnt_ctx_id
, nmc
->nmc_mhead
, offset
, reslen
, &cksum
, NULL
);
1228 FREE(cksum
.value
, M_TEMP
);
1229 if (major
!= GSS_S_COMPLETE
) {
1230 printf("client results: gss_krb5_verify_mic_mbuf failed %d\n", error
);
1236 * Get the sequence number prepended to the results
1237 * and compare it against the header.
1239 nfsm_chain_get_32(error
, nmc
, seqnum
);
1240 if (gsp
->gss_seqnum
!= seqnum
) {
1245 SLIST_FOREACH(gsp
, &req
->r_gss_seqlist
, gss_seqnext
) {
1246 if (seqnum
== gsp
->gss_seqnum
) {
1256 case RPCSEC_GSS_SVC_PRIVACY
:
		 * Here's what we expect in the privacy results:
		 *
		 * opaque encoding of the wrap token
		 * - length of wrap token
1264 prev_mbuf
= nmc
->nmc_mcur
;
1265 nfsm_chain_get_32(error
, nmc
, reslen
); // length of results
1266 if (reslen
== 0 || reslen
> NFS_MAXPACKET
) {
1271 /* Get the wrap token (current mbuf in the chain starting at the current offset) */
1272 offset
= nmc
->nmc_ptr
- (caddr_t
)mbuf_data(nmc
->nmc_mcur
);
1274 /* split out the wrap token */
1276 error
= gss_normalize_mbuf(nmc
->nmc_mcur
, offset
, &ressize
, &results_mbuf
, &pad_mbuf
, 0);
1282 assert(nfsm_pad(reslen
) == mbuf_len(pad_mbuf
));
1283 mbuf_free(pad_mbuf
);
1286 major
= gss_krb5_unwrap_mbuf((uint32_t *)&error
, cp
->gss_clnt_ctx_id
, &results_mbuf
, 0, ressize
, NULL
, NULL
);
			printf("%s unwrap failed %d\n", __func__, error);
1292 /* Now replace the wrapped arguments with the unwrapped ones */
1293 mbuf_setnext(prev_mbuf
, results_mbuf
);
1294 nmc
->nmc_mcur
= results_mbuf
;
1295 nmc
->nmc_ptr
= mbuf_data(results_mbuf
);
1296 nmc
->nmc_left
= mbuf_len(results_mbuf
);
1299 * Get the sequence number prepended to the results
1300 * and compare it against the header
1302 nfsm_chain_get_32(error
, nmc
, seqnum
);
1303 if (gsp
->gss_seqnum
!= seqnum
) {
1304 printf("%s bad seqnum\n", __func__
);
1309 SLIST_FOREACH(gsp
, &req
->r_gss_seqlist
, gss_seqnext
) {
1310 if (seqnum
== gsp
->gss_seqnum
) {
/*
 * An RPCSEC_GSS request with no integrity or privacy consists
 * of just the header mbufs followed by the arg mbufs.
 *
 * However, with integrity or privacy the original mbufs have mbufs
 * prepended and appended to them, which means we have to do some work to
 * restore the arg mbuf chain to its previous state in case we need to
 * retransmit the request.
 *
 * The location and length of the args is marked by two fields
 * in the request structure: r_gss_argoff and r_gss_arglen,
 * which are stashed when the NFS request is built.
 */
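/*
 * In other words (a sketch of what restore undoes): for integrity the args
 * were rebuilt as [len][seqnum][args...][mic], and for privacy as
 * [len][wrap token][pad]; nfs_gss_clnt_args_restore() strips those wrappers so
 * that req->r_mrest once again points at the bare XDR args before a retransmit.
 */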
1339 nfs_gss_clnt_args_restore(struct nfsreq
*req
)
1341 struct nfs_gss_clnt_ctx
*cp
= req
->r_gss_ctx
;
1342 struct nfsm_chain mchain
, *nmc
= &mchain
;
1343 int error
= 0, merr
;
1346 return NFSERR_EAUTH
;
1349 if ((cp
->gss_clnt_flags
& GSS_CTX_COMPLETE
) == 0) {
1353 /* Nothing to restore for SVC_NONE */
1354 if (cp
->gss_clnt_service
== RPCSEC_GSS_SVC_NONE
) {
1358 nfsm_chain_dissect_init(error
, nmc
, req
->r_mhead
); // start at RPC header
1359 nfsm_chain_adv(error
, nmc
, req
->r_gss_argoff
); // advance to args
1364 if (cp
->gss_clnt_service
== RPCSEC_GSS_SVC_INTEGRITY
) {
1365 error
= rpc_gss_integ_data_restore(cp
->gss_clnt_ctx_id
, &req
->r_mrest
, req
->r_gss_arglen
);
1367 error
= rpc_gss_priv_data_restore(cp
->gss_clnt_ctx_id
, &req
->r_mrest
, req
->r_gss_arglen
);
1370 merr
= mbuf_setnext(nmc
->nmc_mcur
, req
->r_mrest
); /* Should always succeed */
1373 return error
? error
: merr
;
/*
 * This function sets up a new context on the client.
 * Context setup alternates upcalls to the gssd with NFS nullproc calls
 * to the server. Each of these calls exchanges an opaque token, obtained
 * via the gssd's calls into the GSS-API on either the client or the server.
 * This cycle of calls ends when the client's upcall to the gssd and the
 * server's response both return GSS_S_COMPLETE. At this point, the client
 * should have its session key and a handle that it can use to refer to its
 * new context on the server.
 */
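/*
 * A sketch of the exchange this loop implements (see RFC 2203 sec. 5.2):
 *
 *	gssd: gss_init_sec_context  --> output token
 *	client --- NULL proc (RPCSEC_GSS_INIT or CONTINUE_INIT, token) ---> server
 *	server: gss_accept_sec_context --> reply token, handle, seq window
 *	... repeat with RPCSEC_GSS_CONTINUE_INIT until both sides return
 *	GSS_S_COMPLETE, at which point the server's verifier (a MIC of the
 *	sequence window) is checked and the context enters the DATA phase.
 */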
1387 nfs_gss_clnt_ctx_init(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
)
1389 struct nfsmount
*nmp
= req
->r_nmp
;
1390 gss_buffer_desc cksum
, window
;
1391 uint32_t network_seqnum
;
1392 int client_complete
= 0;
1393 int server_complete
= 0;
1398 /* Initialize a new client context */
1400 if (cp
->gss_clnt_svcname
== NULL
) {
1401 cp
->gss_clnt_svcname
= nfs_gss_clnt_svcname(nmp
, &cp
->gss_clnt_svcnt
, &cp
->gss_clnt_svcnamlen
);
1402 if (cp
->gss_clnt_svcname
== NULL
) {
1403 error
= NFSERR_EAUTH
;
1408 cp
->gss_clnt_proc
= RPCSEC_GSS_INIT
;
1410 cp
->gss_clnt_service
=
1411 req
->r_auth
== RPCAUTH_KRB5
? RPCSEC_GSS_SVC_NONE
:
1412 req
->r_auth
== RPCAUTH_KRB5I
? RPCSEC_GSS_SVC_INTEGRITY
:
1413 req
->r_auth
== RPCAUTH_KRB5P
? RPCSEC_GSS_SVC_PRIVACY
: 0;
1416 * Now loop around alternating gss_init_sec_context and
1417 * gss_accept_sec_context upcalls to the gssd on the client
1418 * and server side until the context is complete - or fails.
1422 /* Upcall to the gss_init_sec_context in the gssd */
1423 error
= nfs_gss_clnt_gssd_upcall(req
, cp
, retrycnt
);
1428 if (cp
->gss_clnt_major
== GSS_S_COMPLETE
) {
1429 client_complete
= 1;
1430 NFS_GSS_DBG("Client complete\n");
1431 if (server_complete
) {
1434 } else if (cp
->gss_clnt_major
!= GSS_S_CONTINUE_NEEDED
) {
1436 * We may have gotten here because the accept sec context
1437 * from the server failed and sent back a GSS token that
1438 * encapsulates a kerberos error token per RFC 1964/4121
1439 * with a status of GSS_S_CONTINUE_NEEDED. That caused us
1440 * to loop to the above up call and received the now
1444 cp
->gss_clnt_gssd_flags
|= GSSD_RESTART
;
1445 NFS_GSS_DBG("Retrying major = %x minor = %d\n", cp
->gss_clnt_major
, (int)cp
->gss_clnt_minor
);
1450 * Pass the token to the server.
1452 error
= nfs_gss_clnt_ctx_callserver(req
, cp
);
1454 if (error
== ENEEDAUTH
&&
1455 (cp
->gss_clnt_proc
== RPCSEC_GSS_INIT
||
1456 cp
->gss_clnt_proc
== RPCSEC_GSS_CONTINUE_INIT
)) {
 * We got here because the server had a problem
 * trying to establish a context and reported that there
 * was a context problem at the RPCSEC_GSS layer. Perhaps
 * gss_accept_sec_context succeeded in user space,
 * but the kernel could not handle the etype
 * to generate the mic for the verifier of the rpc_sec
1467 cp
->gss_clnt_gssd_flags
|= GSSD_RESTART
;
1468 NFS_GSS_DBG("Retrying major = %x minor = %d\n", cp
->gss_clnt_major
, (int)cp
->gss_clnt_minor
);
1473 if (cp
->gss_clnt_major
== GSS_S_COMPLETE
) {
1474 NFS_GSS_DBG("Server complete\n");
1475 server_complete
= 1;
1476 if (client_complete
) {
1479 } else if (cp
->gss_clnt_major
== GSS_S_CONTINUE_NEEDED
) {
1480 cp
->gss_clnt_proc
= RPCSEC_GSS_CONTINUE_INIT
;
1482 /* Server didn't like us. Try something else */
1484 cp
->gss_clnt_gssd_flags
|= GSSD_RESTART
;
1485 NFS_GSS_DBG("Retrying major = %x minor = %d\n", cp
->gss_clnt_major
, (int)cp
->gss_clnt_minor
);
1490 * The context is apparently established successfully
1492 lck_mtx_lock(cp
->gss_clnt_mtx
);
1493 cp
->gss_clnt_flags
|= GSS_CTX_COMPLETE
;
1494 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1495 cp
->gss_clnt_proc
= RPCSEC_GSS_DATA
;
1497 network_seqnum
= htonl(cp
->gss_clnt_seqwin
);
1498 window
.length
= sizeof(cp
->gss_clnt_seqwin
);
1499 window
.value
= &network_seqnum
;
1500 cksum
.value
= cp
->gss_clnt_verf
;
1501 cksum
.length
= cp
->gss_clnt_verflen
;
1502 major
= gss_krb5_verify_mic((uint32_t *)&error
, cp
->gss_clnt_ctx_id
, &window
, &cksum
, NULL
);
1503 cp
->gss_clnt_verflen
= 0;
1504 FREE(cp
->gss_clnt_verf
, M_TEMP
);
1505 cp
->gss_clnt_verf
= NULL
;
1506 if (major
!= GSS_S_COMPLETE
) {
1507 printf("%s: could not verify window\n", __func__
);
1508 error
= NFSERR_EAUTH
;
1513 * Set an initial sequence number somewhat randomized.
1514 * Start small so we don't overflow GSS_MAXSEQ too quickly.
1515 * Add the size of the sequence window so seqbits arithmetic
1516 * doesn't go negative.
1518 cp
->gss_clnt_seqnum
= (random() & 0xffff) + cp
->gss_clnt_seqwin
;
1521 * Allocate a bitmap to keep track of which requests
1522 * are pending within the sequence number window.
1524 MALLOC(cp
->gss_clnt_seqbits
, uint32_t *,
1525 nfsm_rndup((cp
->gss_clnt_seqwin
+ 7) / 8), M_TEMP
, M_WAITOK
| M_ZERO
);
1526 if (cp
->gss_clnt_seqbits
== NULL
) {
1527 error
= NFSERR_EAUTH
;
1532 * If the error is ENEEDAUTH we're not done, so no need
1533 * to wake up other threads again. This thread will retry in
1534 * the find or renew routines.
1536 if (error
== ENEEDAUTH
) {
1537 NFS_GSS_DBG("Returning ENEEDAUTH\n");
1542 * If there's an error, just mark it as invalid.
1543 * It will be removed when the reference count
1546 lck_mtx_lock(cp
->gss_clnt_mtx
);
1548 cp
->gss_clnt_flags
|= GSS_CTX_INVAL
;
1552 * Wake any threads waiting to use the context
1554 cp
->gss_clnt_thread
= NULL
;
1555 if (cp
->gss_clnt_flags
& GSS_NEEDCTX
) {
1556 cp
->gss_clnt_flags
&= ~GSS_NEEDCTX
;
1559 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1561 NFS_GSS_DBG("Returning error = %d\n", error
);
1566 * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
1567 * But if there's a failure in trying to establish the context it keeps
1568 * retrying at progressively longer intervals in case the failure is
1569 * due to some transient condition. For instance, the server might be
1570 * failing the context setup because directory services is not coming
1571 * up in a timely fashion.
1574 nfs_gss_clnt_ctx_init_retry(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
)
1576 struct nfsmount
*nmp
= req
->r_nmp
;
1581 int timeo
= NFS_TRYLATERDEL
;
1583 if (nfs_mount_gone(nmp
)) {
1588 /* For an "intr" mount allow a signal to interrupt the retries */
1589 slpflag
= (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
1591 while ((error
= nfs_gss_clnt_ctx_init(req
, cp
)) == ENEEDAUTH
) {
1593 waituntil
= now
.tv_sec
+ timeo
;
1594 while (now
.tv_sec
< waituntil
) {
1595 tsleep(NULL
, PSOCK
| slpflag
, "nfs_gss_clnt_ctx_init_retry", hz
);
1597 error
= nfs_sigintr(req
->r_nmp
, req
, current_thread(), 0);
1605 /* If it's a soft mount just give up after a while */
1606 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && (retries
> nmp
->nm_retry
)) {
1617 return 0; // success
1621 * Give up on this context
1623 lck_mtx_lock(cp
->gss_clnt_mtx
);
1624 cp
->gss_clnt_flags
|= GSS_CTX_INVAL
;
1627 * Wake any threads waiting to use the context
1629 cp
->gss_clnt_thread
= NULL
;
1630 if (cp
->gss_clnt_flags
& GSS_NEEDCTX
) {
1631 cp
->gss_clnt_flags
&= ~GSS_NEEDCTX
;
1634 lck_mtx_unlock(cp
->gss_clnt_mtx
);
/*
 * Call the NFS server using a null procedure for context setup.
 * Even though it's a null procedure and nominally has no arguments
 * RFC 2203 requires that the GSS-API token be passed as an argument
 * and received as a reply.
 */
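/*
 * For reference, the NULL procedure reply parsed below corresponds to
 * RFC 2203's rpc_gss_init_res (a sketch of the XDR layout):
 *
 *	struct rpc_gss_init_res {
 *		opaque		handle<>;	// context handle to use in later credentials
 *		unsigned int	gss_major;	// GSS-API major status
 *		unsigned int	gss_minor;	// GSS-API minor status
 *		unsigned int	seq_window;	// server's sequence window size
 *		opaque		gss_token<>;	// token to feed back into gss_init_sec_context
 *	};
 */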
1646 nfs_gss_clnt_ctx_callserver(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
)
1648 struct nfsm_chain nmreq
, nmrep
;
1649 int error
= 0, status
;
1650 uint32_t major
= cp
->gss_clnt_major
, minor
= cp
->gss_clnt_minor
;
1653 if (nfs_mount_gone(req
->r_nmp
)) {
1656 nfsm_chain_null(&nmreq
);
1657 nfsm_chain_null(&nmrep
);
1658 sz
= NFSX_UNSIGNED
+ nfsm_rndup(cp
->gss_clnt_tokenlen
);
1659 nfsm_chain_build_alloc_init(error
, &nmreq
, sz
);
1660 nfsm_chain_add_32(error
, &nmreq
, cp
->gss_clnt_tokenlen
);
1661 if (cp
->gss_clnt_tokenlen
> 0) {
1662 nfsm_chain_add_opaque(error
, &nmreq
, cp
->gss_clnt_token
, cp
->gss_clnt_tokenlen
);
1664 nfsm_chain_build_done(error
, &nmreq
);
1669 /* Call the server */
1670 error
= nfs_request_gss(req
->r_nmp
->nm_mountp
, &nmreq
, req
->r_thread
, req
->r_cred
,
1671 (req
->r_flags
& R_OPTMASK
), cp
, &nmrep
, &status
);
1672 if (cp
->gss_clnt_token
!= NULL
) {
1673 FREE(cp
->gss_clnt_token
, M_TEMP
);
1674 cp
->gss_clnt_token
= NULL
;
1683 /* Get the server's reply */
1685 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_handle_len
);
1686 if (cp
->gss_clnt_handle
!= NULL
) {
1687 FREE(cp
->gss_clnt_handle
, M_TEMP
);
1688 cp
->gss_clnt_handle
= NULL
;
1690 if (cp
->gss_clnt_handle_len
> 0 && cp
->gss_clnt_handle_len
< GSS_MAX_CTX_HANDLE_LEN
) {
1691 MALLOC(cp
->gss_clnt_handle
, u_char
*, cp
->gss_clnt_handle_len
, M_TEMP
, M_WAITOK
);
1692 if (cp
->gss_clnt_handle
== NULL
) {
1696 nfsm_chain_get_opaque(error
, &nmrep
, cp
->gss_clnt_handle_len
, cp
->gss_clnt_handle
);
1700 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_major
);
1701 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_minor
);
1702 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_seqwin
);
1703 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_tokenlen
);
1707 if (cp
->gss_clnt_tokenlen
> 0 && cp
->gss_clnt_tokenlen
< GSS_MAX_TOKEN_LEN
) {
1708 MALLOC(cp
->gss_clnt_token
, u_char
*, cp
->gss_clnt_tokenlen
, M_TEMP
, M_WAITOK
);
1709 if (cp
->gss_clnt_token
== NULL
) {
1713 nfsm_chain_get_opaque(error
, &nmrep
, cp
->gss_clnt_tokenlen
, cp
->gss_clnt_token
);
1719 * Make sure any unusual errors are expanded and logged by gssd
1721 if (cp
->gss_clnt_major
!= GSS_S_COMPLETE
&&
1722 cp
->gss_clnt_major
!= GSS_S_CONTINUE_NEEDED
) {
1723 printf("nfs_gss_clnt_ctx_callserver: gss_clnt_major = %d\n", cp
->gss_clnt_major
);
1724 nfs_gss_clnt_log_error(req
, cp
, major
, minor
);
1728 nfsm_chain_cleanup(&nmreq
);
1729 nfsm_chain_cleanup(&nmrep
);
/*
 * We construct the service principal as a GSS hostbased service principal of
 * the form nfs@<server>, unless the server's principal was passed down in the
 * mount arguments. If the arguments don't specify the service principal, the
 * server name is extracted from the location passed in the mount argument if
 * available. Otherwise assume a format of <server>:<path> in the
 * mntfromname. We don't currently support URLs or other bizarre formats like
 * path@server. Mount_url will convert the nfs url into <server>:<path> when
 * calling mount, so this works out well in practice.
 */
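/*
 * Hypothetical example: an f_mntfromname of "server.example.com:/export/home"
 * would yield the hostbased service name "nfs@server.example.com"
 * (GSSD_HOSTBASED), which gssd then resolves to the Kerberos service
 * principal, typically nfs/server.example.com@REALM.
 */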
1747 nfs_gss_clnt_svcname(struct nfsmount
*nmp
, gssd_nametype
*nt
, size_t *len
)
1749 char *svcname
, *d
, *server
;
1752 if (nfs_mount_gone(nmp
)) {
1756 if (nmp
->nm_sprinc
) {
1757 *len
= strlen(nmp
->nm_sprinc
) + 1;
1758 MALLOC(svcname
, char *, *len
, M_TEMP
, M_WAITOK
);
1759 *nt
= GSSD_HOSTBASED
;
1760 if (svcname
== NULL
) {
1763 strlcpy(svcname
, nmp
->nm_sprinc
, *len
);
1765 return (uint8_t *)svcname
;
1768 *nt
= GSSD_HOSTBASED
;
1769 if (nmp
->nm_locations
.nl_numlocs
&& !(NFS_GSS_ISDBG
&& (NFS_DEBUG_FLAGS
& 0x1))) {
1770 lindx
= nmp
->nm_locations
.nl_current
.nli_loc
;
1771 sindx
= nmp
->nm_locations
.nl_current
.nli_serv
;
1772 server
= nmp
->nm_locations
.nl_locations
[lindx
]->nl_servers
[sindx
]->ns_name
;
1773 *len
= (uint32_t)strlen(server
);
1775 /* Older binaries using older mount args end up here */
1776 server
= vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
;
1777 NFS_GSS_DBG("nfs getting gss svcname from %s\n", server
);
1778 d
= strchr(server
, ':');
1779 *len
= (uint32_t)(d
? (d
- server
) : strlen(server
));
1782 *len
+= 5; /* "nfs@" plus null */
1783 MALLOC(svcname
, char *, *len
, M_TEMP
, M_WAITOK
);
1784 strlcpy(svcname
, "nfs", *len
);
1785 strlcat(svcname
, "@", *len
);
1786 strlcat(svcname
, server
, *len
);
1787 NFS_GSS_DBG("nfs svcname = %s\n", svcname
);
1789 return (uint8_t *)svcname
;
1793 * Get a mach port to talk to gssd.
1794 * gssd lives in the root bootstrap, so we call gssd's lookup routine
1795 * to get a send right to talk to a new gssd instance that launchd has launched
1796 * based on the cred's uid and audit session id.
1800 nfs_gss_clnt_get_upcall_port(kauth_cred_t credp
)
1802 mach_port_t gssd_host_port
, uc_port
= IPC_PORT_NULL
;
1807 kr
= host_get_gssd_port(host_priv_self(), &gssd_host_port
);
1808 if (kr
!= KERN_SUCCESS
) {
1809 printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr
, kr
);
1810 return IPC_PORT_NULL
;
1812 if (!IPC_PORT_VALID(gssd_host_port
)) {
1813 printf("nfs_gss_get_upcall_port: gssd port not valid\n");
1814 return IPC_PORT_NULL
;
1817 asid
= kauth_cred_getasid(credp
);
1818 uid
= kauth_cred_getauid(credp
);
1819 if (uid
== AU_DEFAUDITID
) {
1820 uid
= kauth_cred_getuid(credp
);
1822 kr
= mach_gss_lookup(gssd_host_port
, uid
, asid
, &uc_port
);
1823 if (kr
!= KERN_SUCCESS
) {
1824 printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n", kr
, kr
);
1826 host_release_special_port(gssd_host_port
);
1833 nfs_gss_clnt_log_error(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
, uint32_t major
, uint32_t minor
)
1835 #define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
1836 struct nfsmount
*nmp
= req
->r_nmp
;
1837 char who
[] = "client";
1838 uint32_t gss_error
= GETMAJERROR(cp
->gss_clnt_major
);
	const char *procn = "unknown";
1844 if (req
->r_thread
) {
1845 proc
= (proc_t
)get_bsdthreadtask_info(req
->r_thread
);
1846 if (proc
!= NULL
&& (proc
->p_fd
== NULL
|| (proc
->p_lflag
& P_LVFORK
))) {
1850 if (*proc
->p_comm
) {
1851 procn
= proc
->p_comm
;
1861 if ((cp
->gss_clnt_major
!= major
|| cp
->gss_clnt_minor
!= minor
||
1862 cp
->gss_clnt_ptime
+ GSS_PRINT_DELAY
< now
.tv_sec
) &&
1863 (nmp
->nm_state
& NFSSTA_MOUNTED
)) {
1865 * Will let gssd do some logging in hopes that it can translate
1868 if (cp
->gss_clnt_minor
&& cp
->gss_clnt_minor
!= minor
) {
1869 (void) mach_gss_log_error(
1871 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1872 kauth_cred_getuid(cp
->gss_clnt_cred
),
1875 cp
->gss_clnt_minor
);
1877 gss_error
= gss_error
? gss_error
: cp
->gss_clnt_major
;
1880 *%%% It would be really nice to get the terminal from the proc or auditinfo_addr struct and print that here.
1882 printf("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
1883 cp
->gss_clnt_display
? cp
->gss_clnt_display
: who
, kauth_cred_getasid(req
->r_cred
), kauth_cred_getuid(req
->r_cred
),
1884 procn
, pid
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, gss_error
, (int32_t)cp
->gss_clnt_minor
);
1885 cp
->gss_clnt_ptime
= now
.tv_sec
;
1886 switch (gss_error
) {
1887 case 7: printf("NFS: gssd does not have credentials for session %d/%d, (kinit)?\n",
1888 kauth_cred_getasid(req
->r_cred
), kauth_cred_getauid(req
->r_cred
));
		case 11: printf("NFS: gssd has expired credentials for session %d/%d, (kinit)?\n",
1891 kauth_cred_getasid(req
->r_cred
), kauth_cred_getauid(req
->r_cred
));
1895 NFS_GSS_DBG("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
1896 cp
->gss_clnt_display
? cp
->gss_clnt_display
: who
, kauth_cred_getasid(req
->r_cred
), kauth_cred_getuid(req
->r_cred
),
1897 procn
, pid
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, gss_error
, (int32_t)cp
->gss_clnt_minor
);
1902 * Make an upcall to the gssd using Mach RPC
1903 * The upcall is made using a host special port.
1904 * This allows launchd to fire up the gssd in the
1905 * user's session. This is important, since gssd
1906 * must have access to the user's credential cache.
1909 nfs_gss_clnt_gssd_upcall(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
, uint32_t retrycnt
)
1912 gssd_byte_buffer octx
= NULL
;
1913 uint32_t lucidlen
= 0;
1914 void *lucid_ctx_buffer
;
1916 vm_map_copy_t itoken
= NULL
;
1917 gssd_byte_buffer otoken
= NULL
;
1918 mach_msg_type_number_t otokenlen
;
1920 uint8_t *principal
= NULL
;
1922 int32_t nt
= GSSD_STRING_NAME
;
1923 vm_map_copy_t pname
= NULL
;
1924 vm_map_copy_t svcname
= NULL
;
1925 char display_name
[MAX_DISPLAY_STR
] = "";
1927 struct nfsmount
*nmp
= req
->r_nmp
;
1928 uint32_t major
= cp
->gss_clnt_major
, minor
= cp
->gss_clnt_minor
;
1929 uint32_t selected
= (uint32_t)-1;
1930 struct nfs_etype etype
;
1932 if (nmp
== NULL
|| vfs_isforce(nmp
->nm_mountp
) || (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
))) {
1936 if (cp
->gss_clnt_gssd_flags
& GSSD_RESTART
) {
1937 if (cp
->gss_clnt_token
) {
1938 FREE(cp
->gss_clnt_token
, M_TEMP
);
1940 cp
->gss_clnt_token
= NULL
;
1941 cp
->gss_clnt_tokenlen
= 0;
1942 cp
->gss_clnt_proc
= RPCSEC_GSS_INIT
;
1943 /* Server's handle isn't valid. Don't reuse */
1944 cp
->gss_clnt_handle_len
= 0;
1945 if (cp
->gss_clnt_handle
!= NULL
) {
1946 FREE(cp
->gss_clnt_handle
, M_TEMP
);
1947 cp
->gss_clnt_handle
= NULL
;
1951 NFS_GSS_DBG("Retrycnt = %d nm_etype.count = %d\n", retrycnt
, nmp
->nm_etype
.count
);
1952 if (retrycnt
>= nmp
->nm_etype
.count
) {
	/* Copy the mount etypes to an ordered set of etypes to try */
1957 etype
= nmp
->nm_etype
;
	/*
	 * If we've already selected an etype, let's put that first in our
	 * array of etypes to try, since overwhelmingly, that is likely
	 * to be the etype we want.
	 */
1964 if (etype
.selected
< etype
.count
) {
1965 etype
.etypes
[0] = nmp
->nm_etype
.etypes
[etype
.selected
];
1966 for (uint32_t i
= 0; i
< etype
.selected
; i
++) {
1967 etype
.etypes
[i
+ 1] = nmp
->nm_etype
.etypes
[i
];
1969 for (uint32_t i
= etype
.selected
+ 1; i
< etype
.count
; i
++) {
1970 etype
.etypes
[i
] = nmp
->nm_etype
.etypes
[i
];
	/* Remove the ones we've already tried */
1975 for (uint32_t i
= retrycnt
; i
< etype
.count
; i
++) {
1976 etype
.etypes
[i
- retrycnt
] = etype
.etypes
[i
];
1978 etype
.count
= etype
.count
- retrycnt
;
1980 NFS_GSS_DBG("etype count = %d preferred etype = %d\n", etype
.count
, etype
.etypes
[0]);
	/*
	 * NFS currently only supports default principals or
	 * principals based on the uid of the caller, unless
	 * the principal to use for the mounting cred was specified
	 * in the mount arguments. If the realm to use was specified
	 * then we will send that up as the principal; since the realm is
	 * preceded by an "@", gssd will try to select the default
	 * principal for that realm.
	 */
    if (cp->gss_clnt_principal && cp->gss_clnt_prinlen) {
        principal = cp->gss_clnt_principal;
        plen = cp->gss_clnt_prinlen;
        nt = cp->gss_clnt_prinnt;
    } else if (nmp->nm_principal && IS_VALID_CRED(nmp->nm_mcred) && req->r_cred == nmp->nm_mcred) {
        plen = (uint32_t)strlen(nmp->nm_principal);
        principal = (uint8_t *)nmp->nm_principal;
        cp->gss_clnt_prinnt = nt = GSSD_USER;
    } else if (nmp->nm_realm) {
        plen = (uint32_t)strlen(nmp->nm_realm);
        principal = (uint8_t *)nmp->nm_realm;
    }

    if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
        cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred);
        if (cp->gss_clnt_mport == IPC_PORT_NULL) {
            goto out;
        }
    }

    if (plen) {
        nfs_gss_mach_alloc_buffer(principal, plen, &pname);
    }
    if (cp->gss_clnt_svcnamlen) {
        nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
    }
    if (cp->gss_clnt_tokenlen) {
        nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
    }

    /* Always want to export the lucid context */
    cp->gss_clnt_gssd_flags |= GSSD_LUCID_CONTEXT;

retry:
    kr = mach_gss_init_sec_context_v3(
        (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
        kauth_cred_getuid(cp->gss_clnt_cred),
        (gssd_byte_buffer)pname, (mach_msg_type_number_t) plen,
        (gssd_byte_buffer)svcname, (mach_msg_type_number_t) cp->gss_clnt_svcnamlen,
        (gssd_etype_list)etype.etypes, (mach_msg_type_number_t)etype.count,
        &cp->gss_clnt_gssd_flags,
        &cp->gss_clnt_context,
        &cp->gss_clnt_cred_handle,
        &octx, (mach_msg_type_number_t *) &lucidlen,
        &otoken, &otokenlen,
        cp->gss_clnt_display ? NULL : display_name,
        &cp->gss_clnt_major,
        &cp->gss_clnt_minor);
    /* Clear the RESTART flag */
    cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;
    if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
        /* We're done with the gssd handles */
        cp->gss_clnt_context = 0;
        cp->gss_clnt_cred_handle = 0;
    }

    if (kr != KERN_SUCCESS) {
        printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
        if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
            retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES &&
            !vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) {
            if (plen) {
                nfs_gss_mach_alloc_buffer(principal, plen, &pname);
            }
            if (cp->gss_clnt_svcnamlen) {
                nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
            }
            if (cp->gss_clnt_tokenlen > 0) {
                nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
            }
            goto retry;
        }

        host_release_special_port(cp->gss_clnt_mport);
        cp->gss_clnt_mport = IPC_PORT_NULL;
        goto out;
    }

    if (cp->gss_clnt_display == NULL && *display_name != '\0') {
        size_t dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1;        /* Add extra byte to include '\0' */

        if (dlen < MAX_DISPLAY_STR) {
            MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK);
            if (cp->gss_clnt_display == NULL) {
                goto skip;
            }
            bcopy(display_name, cp->gss_clnt_display, dlen);
        }
    }
skip:
    /*
     * Make sure any unusual errors are expanded and logged by gssd
     *
     * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes.
     */
    if (cp->gss_clnt_major != GSS_S_COMPLETE &&
        cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
        NFS_GSS_DBG("Up call returned error\n");
        nfs_gss_clnt_log_error(req, cp, major, minor);
        /* Server's handle isn't valid. Don't reuse */
        cp->gss_clnt_handle_len = 0;
        if (cp->gss_clnt_handle != NULL) {
            FREE(cp->gss_clnt_handle, M_TEMP);
            cp->gss_clnt_handle = NULL;
        }
    }

    if (lucidlen > 0) {
        if (lucidlen > MAX_LUCIDLEN) {
            printf("nfs_gss_clnt_gssd_upcall: bad context length (%d)\n", lucidlen);
            vm_map_copy_discard((vm_map_copy_t) octx);
            vm_map_copy_discard((vm_map_copy_t) otoken);
            goto out;
        }
        MALLOC(lucid_ctx_buffer, void *, lucidlen, M_TEMP, M_WAITOK | M_ZERO);
        error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer);
        if (error) {
            vm_map_copy_discard((vm_map_copy_t) otoken);
            goto out;
        }

        if (cp->gss_clnt_ctx_id) {
            gss_krb5_destroy_context(cp->gss_clnt_ctx_id);
        }
        cp->gss_clnt_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen);
        if (cp->gss_clnt_ctx_id == NULL) {
            printf("Failed to make context from lucid_ctx_buffer\n");
            goto out;
        }
        for (uint32_t i = 0; i < nmp->nm_etype.count; i++) {
            if (nmp->nm_etype.etypes[i] == cp->gss_clnt_ctx_id->gss_cryptor.etype) {
                selected = i;
                break;
            }
        }
    }

    /* Free context token used as input */
    if (cp->gss_clnt_token) {
        FREE(cp->gss_clnt_token, M_TEMP);
    }
    cp->gss_clnt_token = NULL;
    cp->gss_clnt_tokenlen = 0;

    if (otokenlen > 0) {
        /* Set context token to gss output token */
        MALLOC(cp->gss_clnt_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
        if (cp->gss_clnt_token == NULL) {
            printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen);
            vm_map_copy_discard((vm_map_copy_t) otoken);
            return ENOMEM;
        }
        error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token);
        if (error) {
            printf("Could not copyout gss token\n");
            FREE(cp->gss_clnt_token, M_TEMP);
            cp->gss_clnt_token = NULL;
            return NFSERR_EAUTH;
        }
        cp->gss_clnt_tokenlen = otokenlen;
    }

    if (selected != (uint32_t)-1) {
        nmp->nm_etype.selected = selected;
        NFS_GSS_DBG("etype selected = %d\n", nmp->nm_etype.etypes[selected]);
    }
    NFS_GSS_DBG("Up call succeeded major = %d\n", cp->gss_clnt_major);
    return 0;

out:
    if (cp->gss_clnt_token) {
        FREE(cp->gss_clnt_token, M_TEMP);
    }
    cp->gss_clnt_token = NULL;
    cp->gss_clnt_tokenlen = 0;
    /* Server's handle isn't valid. Don't reuse */
    cp->gss_clnt_handle_len = 0;
    if (cp->gss_clnt_handle != NULL) {
        FREE(cp->gss_clnt_handle, M_TEMP);
        cp->gss_clnt_handle = NULL;
    }

    NFS_GSS_DBG("Up call returned NFSERR_EAUTH");
    return NFSERR_EAUTH;
}
/*
 * Invoked at the completion of an RPC call that uses an RPCSEC_GSS
 * credential. The sequence number window that the server returns
 * at context setup indicates the maximum number of client calls that
 * can be outstanding on a context. The client maintains a bitmap that
 * represents the server's window. Each pending request has a bit set
 * in the window bitmap. When a reply comes in or times out, we reset
 * the bit in the bitmap and if there are any other threads waiting for
 * a context slot we notify the waiting thread(s).
 *
 * Note that if a request is retransmitted, it will have a single XID
 * but it may be associated with multiple sequence numbers. So we
 * may have to reset multiple sequence number bits in the window bitmap.
 */
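
/*
 * Illustration with hypothetical numbers: with a server window of 128 and
 * gss_clnt_seqnum == 1000, the window spans sequence numbers 873..1000.
 * A request that used sequence number 950 owns bit 950 % 128 == 54 in
 * gss_clnt_seqbits; when its reply arrives (or it times out) that bit is
 * cleared, and any thread blocked waiting for a free window slot is woken.
 */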
nfs_gss_clnt_rpcdone(struct nfsreq *req)
{
    struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
    struct gss_seq *gsp, *ngsp;
    int i = 0;

    if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
        return; // no context - don't bother
    }

    /*
     * Reset the bit for this request in the
     * sequence number window to indicate it's done.
     * We do this even if the request timed out.
     */
    lck_mtx_lock(cp->gss_clnt_mtx);
    gsp = SLIST_FIRST(&req->r_gss_seqlist);
    if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin)) {
        win_resetbit(cp->gss_clnt_seqbits,
            gsp->gss_seqnum % cp->gss_clnt_seqwin);
    }

    /*
     * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries
     */
    SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) {
        if (++i > GSS_CLNT_SEQLISTMAX) {
            SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext);
            FREE(gsp, M_TEMP);
        }
    }

    /*
     * If there's a thread waiting for
     * the window to advance, wake it up.
     */
    if (cp->gss_clnt_flags & GSS_NEEDSEQ) {
        cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
        wakeup(cp);
    }
    lck_mtx_unlock(cp->gss_clnt_mtx);
}
/*
 * Create a reference to a context from a request
 * and bump the reference count
 */
nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
    req->r_gss_ctx = cp;

    lck_mtx_lock(cp->gss_clnt_mtx);
    cp->gss_clnt_refcnt++;
    lck_mtx_unlock(cp->gss_clnt_mtx);
}
/*
 * Remove a context reference from a request
 * If the reference count drops to zero, and the
 * context is invalid, destroy the context.
 */
nfs_gss_clnt_ctx_unref(struct nfsreq *req)
{
    struct nfsmount *nmp = req->r_nmp;
    struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
    int on_neg_cache = 0;
    int neg_cache = 0;
    int destroy = 0;
    struct timeval now;
    char CTXBUF[NFS_CTXBUFSZ];

    if (cp == NULL) {
        return;
    }

    req->r_gss_ctx = NULL;

    lck_mtx_lock(cp->gss_clnt_mtx);
    if (--cp->gss_clnt_refcnt < 0) {
        panic("Over release of gss context!\n");
    }

    if (cp->gss_clnt_refcnt == 0) {
        if ((cp->gss_clnt_flags & GSS_CTX_INVAL) &&
            cp->gss_clnt_ctx_id) {
            gss_krb5_destroy_context(cp->gss_clnt_ctx_id);
            cp->gss_clnt_ctx_id = NULL;
        }
        if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
            destroy = 1;
            if ((cp->gss_clnt_flags & GSS_CTX_USECOUNT) && !nfs_gss_clnt_mnt_rele(nmp)) {
                cp->gss_clnt_flags &= ~GSS_CTX_USECOUNT;
            }
            if (cp->gss_clnt_nctime) {
                on_neg_cache = 1;
            }
        }
    }
    if (!destroy && cp->gss_clnt_nctime == 0 &&
        (cp->gss_clnt_flags & GSS_CTX_INVAL)) {
        microuptime(&now);
        cp->gss_clnt_nctime = now.tv_sec;
        neg_cache = 1;
    }
    lck_mtx_unlock(cp->gss_clnt_mtx);

    if (destroy) {
        NFS_GSS_DBG("Destroying context %s\n", NFS_GSS_CTX(req, cp));
        lck_mtx_lock(&nmp->nm_lock);
        if (cp->gss_clnt_entries.tqe_next != NFSNOLIST) {
            TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
        }
        if (on_neg_cache) {
            nmp->nm_ncentries--;
        }
        lck_mtx_unlock(&nmp->nm_lock);
        nfs_gss_clnt_ctx_destroy(cp);
    } else if (neg_cache) {
        NFS_GSS_DBG("Entering context %s into negative cache\n", NFS_GSS_CTX(req, cp));
        lck_mtx_lock(&nmp->nm_lock);
        nmp->nm_ncentries++;
        nfs_gss_clnt_ctx_neg_cache_reap(nmp);
        lck_mtx_unlock(&nmp->nm_lock);
    }
    NFS_GSS_CLNT_CTX_DUMP(nmp);
}
/*
 * Try and reap any old negative cache entries.
 */
nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *nmp)
{
    struct nfs_gss_clnt_ctx *cp, *tcp;
    struct timeval now;
    int reaped = 0;

    /* Try and reap old, unreferenced, expired contexts */
    microuptime(&now);

    NFS_GSS_DBG("Reaping contexts ncentries = %d\n", nmp->nm_ncentries);

    TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
        int destroy = 0;

        /* Don't reap STICKY contexts */
        if ((cp->gss_clnt_flags & GSS_CTX_STICKY) ||
            !(cp->gss_clnt_flags & GSS_CTX_INVAL)) {
            continue;
        }
        /* Keep up to GSS_MAX_NEG_CACHE_ENTRIES */
        if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES) {
            continue;
        }
        /* Contexts too young */
        if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec) {
            continue;
        }
        /* Not referenced, remove it. */
        lck_mtx_lock(cp->gss_clnt_mtx);
        if (cp->gss_clnt_refcnt == 0) {
            cp->gss_clnt_flags |= GSS_CTX_DESTROY;
            destroy = 1;
        }
        lck_mtx_unlock(cp->gss_clnt_mtx);
        if (destroy) {
            TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
            nmp->nm_ncentries--;
            reaped++;
            nfs_gss_clnt_ctx_destroy(cp);
        }
    }
    NFS_GSS_DBG("Reaped %d contexts ncentries = %d\n", reaped, nmp->nm_ncentries);
}
/*
 * Clean a context to be cached
 */
nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *cp)
{
    /* Preserve gss_clnt_mtx */
    assert(cp->gss_clnt_thread == NULL);    /* Will be set to this thread */
    /* gss_clnt_entries  we should not be on any list at this point */
    cp->gss_clnt_flags = 0;
    /* gss_clnt_refcnt should be zero */
    assert(cp->gss_clnt_refcnt == 0);
    /*
     * We are who we are preserve:
     * gss_clnt_principal
     */
    /* gss_clnt_proc will be set in nfs_gss_clnt_ctx_init */
    cp->gss_clnt_seqnum = 0;
    /* Preserve gss_clnt_service, we're not changing flavors */
    if (cp->gss_clnt_handle) {
        FREE(cp->gss_clnt_handle, M_TEMP);
        cp->gss_clnt_handle = NULL;
    }
    cp->gss_clnt_handle_len = 0;
    cp->gss_clnt_nctime = 0;
    cp->gss_clnt_seqwin = 0;
    if (cp->gss_clnt_seqbits) {
        FREE(cp->gss_clnt_seqbits, M_TEMP);
        cp->gss_clnt_seqbits = NULL;
    }
    /* Preserve gss_clnt_mport. Still talking to the same gssd */
    if (cp->gss_clnt_verf) {
        FREE(cp->gss_clnt_verf, M_TEMP);
        cp->gss_clnt_verf = NULL;
    }
    /* Service name might change on failover, so reset it */
    if (cp->gss_clnt_svcname) {
        FREE(cp->gss_clnt_svcname, M_TEMP);
        cp->gss_clnt_svcname = NULL;
        cp->gss_clnt_svcnt = 0;
    }
    cp->gss_clnt_svcnamlen = 0;
    cp->gss_clnt_cred_handle = 0;
    cp->gss_clnt_context = 0;
    if (cp->gss_clnt_token) {
        FREE(cp->gss_clnt_token, M_TEMP);
        cp->gss_clnt_token = NULL;
    }
    cp->gss_clnt_tokenlen = 0;
    /* XXX gss_clnt_ctx_id ??? */
    /*
     * gss_clnt_gssd_flags
     */
}
/*
 * Copy a source context to a new context. This is used to create a new context
 * with the identity of the old context for renewal. The old context is invalid
 * at this point but may still have references to it, so it is not safe to use
 * that context.
 */
nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dcpp)
{
    struct nfs_gss_clnt_ctx *dcp;

    *dcpp = (struct nfs_gss_clnt_ctx *)NULL;
    MALLOC(dcp, struct nfs_gss_clnt_ctx *, sizeof(struct nfs_gss_clnt_ctx), M_TEMP, M_WAITOK);
    if (dcp == NULL) {
        return ENOMEM;
    }
    bzero(dcp, sizeof(struct nfs_gss_clnt_ctx));
    dcp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
    dcp->gss_clnt_cred = scp->gss_clnt_cred;
    kauth_cred_ref(dcp->gss_clnt_cred);
    dcp->gss_clnt_prinlen = scp->gss_clnt_prinlen;
    dcp->gss_clnt_prinnt = scp->gss_clnt_prinnt;
    if (scp->gss_clnt_principal) {
        MALLOC(dcp->gss_clnt_principal, uint8_t *, dcp->gss_clnt_prinlen, M_TEMP, M_WAITOK | M_ZERO);
        if (dcp->gss_clnt_principal == NULL) {
            FREE(dcp, M_TEMP);
            return ENOMEM;
        }
        bcopy(scp->gss_clnt_principal, dcp->gss_clnt_principal, dcp->gss_clnt_prinlen);
    }
    /* Note we don't preserve the display name, that will be set by a successful up call */
    dcp->gss_clnt_service = scp->gss_clnt_service;
    dcp->gss_clnt_mport = host_copy_special_port(scp->gss_clnt_mport);
    dcp->gss_clnt_ctx_id = NULL;        /* Will be set from successful upcall */
    dcp->gss_clnt_gssd_flags = scp->gss_clnt_gssd_flags;
    dcp->gss_clnt_major = scp->gss_clnt_major;
    dcp->gss_clnt_minor = scp->gss_clnt_minor;
    dcp->gss_clnt_ptime = scp->gss_clnt_ptime;

    *dcpp = dcp;
    return 0;
}
nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp)
{
    NFS_GSS_DBG("Destroying context %d/%d\n",
        kauth_cred_getasid(cp->gss_clnt_cred),
        kauth_cred_getauid(cp->gss_clnt_cred));

    host_release_special_port(cp->gss_clnt_mport);
    cp->gss_clnt_mport = IPC_PORT_NULL;

    if (cp->gss_clnt_mtx) {
        lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
        cp->gss_clnt_mtx = (lck_mtx_t *)NULL;
    }
    if (IS_VALID_CRED(cp->gss_clnt_cred)) {
        kauth_cred_unref(&cp->gss_clnt_cred);
    }
    cp->gss_clnt_entries.tqe_next = NFSNOLIST;
    cp->gss_clnt_entries.tqe_prev = NFSNOLIST;
    if (cp->gss_clnt_principal) {
        FREE(cp->gss_clnt_principal, M_TEMP);
        cp->gss_clnt_principal = NULL;
    }
    if (cp->gss_clnt_display) {
        FREE(cp->gss_clnt_display, M_TEMP);
        cp->gss_clnt_display = NULL;
    }
    if (cp->gss_clnt_ctx_id) {
        gss_krb5_destroy_context(cp->gss_clnt_ctx_id);
        cp->gss_clnt_ctx_id = NULL;
    }

    nfs_gss_clnt_ctx_clean(cp);
    FREE(cp, M_TEMP);
}
/*
 * The context for a user is invalid.
 * Mark the context as invalid, then
 * create a new context.
 */
nfs_gss_clnt_ctx_renew(struct nfsreq *req)
{
    struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
    struct nfs_gss_clnt_ctx *ncp;
    struct nfsmount *nmp;
    int error = 0;
    char CTXBUF[NFS_CTXBUFSZ];

    if (req->r_nmp == NULL) {
        return ENXIO;
    }
    nmp = req->r_nmp;

    lck_mtx_lock(cp->gss_clnt_mtx);
    if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
        lck_mtx_unlock(cp->gss_clnt_mtx);
        nfs_gss_clnt_ctx_unref(req);
        return 0;   // already being renewed
    }

    cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);

    if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
        cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
        wakeup(cp);
    }
    lck_mtx_unlock(cp->gss_clnt_mtx);

    if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
        return EACCES;  /* Destroying a context is best effort. Don't renew. */
    }
    /*
     * If we're setting up a context let nfs_gss_clnt_ctx_init know this is not working
     * and to try some other etype.
     */
    if (cp->gss_clnt_proc != RPCSEC_GSS_DATA) {
        return ENEEDAUTH;
    }
    error = nfs_gss_clnt_ctx_copy(cp, &ncp);
    NFS_GSS_DBG("Renewing context %s\n", NFS_GSS_CTX(req, ncp));
    nfs_gss_clnt_ctx_unref(req);
    if (error) {
        return error;
    }

    lck_mtx_lock(&nmp->nm_lock);
    /*
     * Note we don't bother taking the new context mutex as we're
     * not findable at the moment.
     */
    ncp->gss_clnt_thread = current_thread();
    nfs_gss_clnt_ctx_ref(req, ncp);
    TAILQ_INSERT_HEAD(&nmp->nm_gsscl, ncp, gss_clnt_entries);
    lck_mtx_unlock(&nmp->nm_lock);

    error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context
    if (error) {
        nfs_gss_clnt_ctx_unref(req);
    }
    return error;
}
/*
 * Destroy all the contexts associated with a mount.
 * The contexts are also destroyed by the server.
 */
nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp)
{
    struct nfs_gss_clnt_ctx *cp;
    struct nfsm_chain nmreq, nmrep;
    int error, status;
    struct nfsreq *req;

    req = zalloc(nfs_req_zone);
    req->r_nmp = nmp;

    lck_mtx_lock(&nmp->nm_lock);
    while ((cp = TAILQ_FIRST(&nmp->nm_gsscl))) {
        TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
        cp->gss_clnt_entries.tqe_next = NFSNOLIST;
        lck_mtx_lock(cp->gss_clnt_mtx);
        if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
            lck_mtx_unlock(cp->gss_clnt_mtx);
            continue;
        }
        cp->gss_clnt_refcnt++;
        lck_mtx_unlock(cp->gss_clnt_mtx);
        req->r_gss_ctx = cp;

        lck_mtx_unlock(&nmp->nm_lock);
        /*
         * Tell the server to destroy its context.
         * But don't bother if it's a forced unmount.
         */
        if (!nfs_mount_gone(nmp) &&
            (cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY | GSS_CTX_COMPLETE)) == GSS_CTX_COMPLETE) {
            cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;

            error = 0;
            nfsm_chain_null(&nmreq);
            nfsm_chain_null(&nmrep);
            nfsm_chain_build_alloc_init(error, &nmreq, 0);
            nfsm_chain_build_done(error, &nmreq);
            nfs_request_gss(nmp->nm_mountp, &nmreq,
                current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status);
            nfsm_chain_cleanup(&nmreq);
            nfsm_chain_cleanup(&nmrep);
        }

        /*
         * Mark the context invalid then drop
         * the reference to remove it if its
         * refcount is zero.
         */
        lck_mtx_lock(cp->gss_clnt_mtx);
        cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
        lck_mtx_unlock(cp->gss_clnt_mtx);
        nfs_gss_clnt_ctx_unref(req);
        lck_mtx_lock(&nmp->nm_lock);
    }
    lck_mtx_unlock(&nmp->nm_lock);
    assert(TAILQ_EMPTY(&nmp->nm_gsscl));
    NFS_ZFREE(nfs_req_zone, req);
}
/*
 * Removes a mount's context for a credential
 */
nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred)
{
    struct nfs_gss_clnt_ctx *cp, *tcp;
    struct nfsreq *req;

    req = zalloc(nfs_req_zone);
    req->r_nmp = nmp;

    NFS_GSS_DBG("Enter\n");
    NFS_GSS_CLNT_CTX_DUMP(nmp);
    lck_mtx_lock(&nmp->nm_lock);
    TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
        lck_mtx_lock(cp->gss_clnt_mtx);
        if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
            if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
                NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
                    kauth_cred_getasid(cp->gss_clnt_cred),
                    kauth_cred_getauid(cp->gss_clnt_cred),
                    cp->gss_clnt_refcnt);
                lck_mtx_unlock(cp->gss_clnt_mtx);
                continue;
            }
            cp->gss_clnt_refcnt++;
            cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
            lck_mtx_unlock(cp->gss_clnt_mtx);
            req->r_gss_ctx = cp;
            lck_mtx_unlock(&nmp->nm_lock);
            /*
             * Drop the reference to remove it if its
             * refcount is zero.
             */
            NFS_GSS_DBG("Removed context %d/%d refcnt = %d\n",
                kauth_cred_getasid(cp->gss_clnt_cred),
                kauth_cred_getuid(cp->gss_clnt_cred),
                cp->gss_clnt_refcnt);
            nfs_gss_clnt_ctx_unref(req);
            NFS_ZFREE(nfs_req_zone, req);
            return 0;
        }
        lck_mtx_unlock(cp->gss_clnt_mtx);
    }

    lck_mtx_unlock(&nmp->nm_lock);

    NFS_ZFREE(nfs_req_zone, req);
    NFS_GSS_DBG("Returning ENOENT\n");
    return ENOENT;
}
/*
 * Sets a mount's principal for a session associated with cred.
 */
nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx,
    uint8_t *principal, size_t princlen, uint32_t nametype)
{
    struct nfsreq *req;
    int error;

    NFS_GSS_DBG("Enter:\n");

    req = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
    req->r_nmp = nmp;
    req->r_auth = nmp->nm_auth;
    req->r_thread = vfs_context_thread(ctx);
    req->r_cred = vfs_context_ucred(ctx);

    error = nfs_gss_clnt_ctx_find_principal(req, principal, princlen, nametype);
    NFS_GSS_DBG("nfs_gss_clnt_ctx_find_principal returned %d\n", error);
    /*
     * We don't care about auth errors. Those would indicate that the context is in the
     * negative cache and if and when the user has credentials for the principal
     * we should be good to go in that we will select those credentials for this principal.
     */
    if (error == EACCES || error == EAUTH || error == ENEEDAUTH) {
        error = 0;
    }

    /* We're done with this request */
    nfs_gss_clnt_ctx_unref(req);
    NFS_ZFREE(nfs_req_zone, req);
    return error;
}
/*
 * Gets a mount's principal from a session associated with cred
 */
nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx,
    struct user_nfs_gss_principal *p)
{
    struct nfsreq *req;
    struct nfs_gss_clnt_ctx *cp;
    kauth_cred_t cred = vfs_context_ucred(ctx);
    const char *princ = NULL;
    char CTXBUF[NFS_CTXBUFSZ];

    /* Make sure the members of the struct user_nfs_gss_principal are initialized */
    p->nametype = GSSD_STRING_NAME;
    p->principal = USER_ADDR_NULL;
    p->princlen = 0;
    p->flags = 0;

    req = zalloc_flags(nfs_req_zone, Z_WAITOK);
    req->r_nmp = nmp;
    lck_mtx_lock(&nmp->nm_lock);
    TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
        lck_mtx_lock(cp->gss_clnt_mtx);
        if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
            NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
                NFS_GSS_CTX(req, cp),
                cp->gss_clnt_refcnt);
            lck_mtx_unlock(cp->gss_clnt_mtx);
            continue;
        }
        if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
            cp->gss_clnt_refcnt++;
            lck_mtx_unlock(cp->gss_clnt_mtx);
            goto out;
        }
        lck_mtx_unlock(cp->gss_clnt_mtx);
    }

    lck_mtx_unlock(&nmp->nm_lock);
    p->flags |= NFS_IOC_NO_CRED_FLAG;   /* No credentials, valid or invalid on this mount */
    NFS_GSS_DBG("No context found for session %d by uid %d\n",
        kauth_cred_getasid(cred), kauth_cred_getuid(cred));
    NFS_ZFREE(nfs_req_zone, req);
    return 0;

out:
    /* Indicate if the cred is INVALID */
    if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
        p->flags |= NFS_IOC_INVALID_CRED_FLAG;
    }

    /* We have set a principal on the mount */
    if (cp->gss_clnt_principal) {
        princ = (char *)cp->gss_clnt_principal;
        p->princlen = cp->gss_clnt_prinlen;
        p->nametype = cp->gss_clnt_prinnt;
    } else if (cp->gss_clnt_display) {
        /* We have a successful use of the default credential */
        princ = cp->gss_clnt_display;
        p->princlen = strlen(cp->gss_clnt_display);
    }

    /*
     * If neither of the above is true we have an invalid default credential
     * So from above p->principal is USER_ADDR_NULL and princ is NULL
     */
    if (princ) {
        char *pp;

        MALLOC(pp, char *, p->princlen, M_TEMP, M_WAITOK);
        bcopy(princ, pp, p->princlen);
        p->principal = CAST_USER_ADDR_T(pp);
    }
    lck_mtx_unlock(&nmp->nm_lock);

    req->r_gss_ctx = cp;
    NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(req, NULL));
    nfs_gss_clnt_ctx_unref(req);
    NFS_ZFREE(nfs_req_zone, req);
    return 0;
}
#if CONFIG_NFS_SERVER

/*
 * Find a server context based on a handle value received
 * in an RPCSEC_GSS credential.
 */
static struct nfs_gss_svc_ctx *
nfs_gss_svc_ctx_find(uint32_t handle)
{
    struct nfs_gss_svc_ctx_hashhead *head;
    struct nfs_gss_svc_ctx *cp;
    uint64_t timenow;

    head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
    /*
     * Don't return a context that is going to expire in GSS_CTX_PEND seconds
     */
    clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);

    lck_mtx_lock(nfs_gss_svc_ctx_mutex);

    LIST_FOREACH(cp, head, gss_svc_entries) {
        if (cp->gss_svc_handle == handle) {
            if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
                /*
                 * Context has or is about to expire. Don't use.
                 * We'll return null and the client will have to create
                 * a new context.
                 */
                cp->gss_svc_handle = 0;
                /*
                 * Make sure though that we stay around for GSS_CTX_PEND seconds
                 * for other threads that might be using the context.
                 */
                cp->gss_svc_incarnation = timenow;
                cp = NULL;
                break;
            }
            lck_mtx_lock(cp->gss_svc_mtx);
            cp->gss_svc_refcnt++;
            lck_mtx_unlock(cp->gss_svc_mtx);
            break;
        }
    }

    lck_mtx_unlock(nfs_gss_svc_ctx_mutex);

    return cp;
}
/*
 * Insert a new server context into the hash table
 * and start the context reap thread if necessary.
 */
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
    struct nfs_gss_svc_ctx_hashhead *head;
    struct nfs_gss_svc_ctx *p;

    lck_mtx_lock(nfs_gss_svc_ctx_mutex);

    /*
     * Give the client a random handle so that if we reboot
     * it's unlikely the client will get a bad context match.
     * Make sure it's not zero or already assigned.
     */
retry:
    cp->gss_svc_handle = random();
    if (cp->gss_svc_handle == 0) {
        goto retry;
    }
    head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
    LIST_FOREACH(p, head, gss_svc_entries)
    if (p->gss_svc_handle == cp->gss_svc_handle) {
        goto retry;
    }

    clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
        &cp->gss_svc_incarnation);
    LIST_INSERT_HEAD(head, cp, gss_svc_entries);
    nfs_gss_ctx_count++;

    if (!nfs_gss_timer_on) {
        nfs_gss_timer_on = 1;

        nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
            min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
    }
    lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
/*
 * This function is called via the kernel's callout
 * mechanism. It runs only when there are
 * cached RPCSEC_GSS contexts.
 */
nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
{
    struct nfs_gss_svc_ctx *cp, *next;
    uint64_t timenow;
    int contexts = 0;
    int i;

    lck_mtx_lock(nfs_gss_svc_ctx_mutex);
    clock_get_uptime(&timenow);

    NFS_GSS_DBG("is running\n");

    /*
     * Scan all the hash chains
     */
    for (i = 0; i < SVC_CTX_HASHSZ; i++) {
        /*
         * For each hash chain, look for entries
         * that haven't been used in a while.
         */
        LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
            contexts++;
            if (timenow > cp->gss_svc_incarnation +
                (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
                && cp->gss_svc_refcnt == 0) {
                /*
                 * A stale context - remove it
                 */
                contexts--;
                LIST_REMOVE(cp, gss_svc_entries);
                NFS_GSS_DBG("Removing context for %d\n", cp->gss_svc_uid);
                if (cp->gss_svc_seqbits) {
                    FREE(cp->gss_svc_seqbits, M_TEMP);
                }
                lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
                FREE(cp, M_TEMP);
            }
        }
    }

    nfs_gss_ctx_count = contexts;

    /*
     * If there are still some cached contexts left,
     * set up another callout to check on them later.
     */
    nfs_gss_timer_on = nfs_gss_ctx_count > 0;
    if (nfs_gss_timer_on) {
        nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
            min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
    }

    lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
/*
 * Here the server receives an RPCSEC_GSS credential in an
 * RPC call header. First there's some checking to make sure
 * the credential is appropriate - whether the context is still
 * being set up, or is complete. Then we use the handle to find
 * the server's context and validate the verifier, which contains
 * a signed checksum of the RPC header. If the verifier checks
 * out, we extract the user's UID and groups from the context
 * and use it to set up a UNIX credential for the user's request.
 */
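
/*
 * For reference, the RPCSEC_GSS credential body parsed below is laid out as
 * described in RFC 2203 (all fields XDR 32-bit words except the opaque handle):
 *
 *   version (always 1)
 *   gss procedure (DATA, INIT, CONTINUE_INIT, or DESTROY)
 *   sequence number
 *   service (none, integrity, or privacy)
 *   handle length, then the opaque context handle
 *
 * The verifier that follows the credential carries the MIC checked below.
 */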
nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
    uint32_t vers, proc, seqnum, service;
    uint32_t handle, handle_len;
    uint32_t major;
    struct nfs_gss_svc_ctx *cp = NULL;
    uint32_t flavor = 0;
    int error = 0;
    uint32_t arglen, verflen;
    size_t argsize, start, header_len;
    gss_buffer_desc cksum;
    struct nfsm_chain nmc_tmp;
    mbuf_t reply_mbuf, prev_mbuf, pad_mbuf;

    vers = proc = seqnum = service = handle_len = 0;
    arglen = verflen = 0;

    nfsm_chain_get_32(error, nmc, vers);
    if (vers != RPCSEC_GSS_VERS_1) {
        error = NFSERR_AUTHERR | AUTH_REJECTCRED;
        goto nfsmout;
    }

    nfsm_chain_get_32(error, nmc, proc);
    nfsm_chain_get_32(error, nmc, seqnum);
    nfsm_chain_get_32(error, nmc, service);
    nfsm_chain_get_32(error, nmc, handle_len);
    if (error) {
        goto nfsmout;
    }

    /*
     * Make sure context setup/destroy is being done with a nullproc
     */
    if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
        error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
        goto nfsmout;
    }

    /*
     * If the sequence number is greater than the max
     * allowable, reject and have the client init a
     * new context.
     */
    if (seqnum > GSS_MAXSEQ) {
        error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
        goto nfsmout;
    }

    nd->nd_sec =
        service == RPCSEC_GSS_SVC_NONE ? RPCAUTH_KRB5 :
        service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
        service == RPCSEC_GSS_SVC_PRIVACY ? RPCAUTH_KRB5P : 0;

    if (proc == RPCSEC_GSS_INIT) {
        /*
         * Limit the total number of contexts
         */
        if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
            error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
            goto nfsmout;
        }

        /*
         * Set up a new context
         */
        MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK | M_ZERO);
        cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
        cp->gss_svc_refcnt = 1;
    } else {
        /*
         * Use the handle to find the context
         */
        if (handle_len != sizeof(handle)) {
            error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
            goto nfsmout;
        }
        nfsm_chain_get_32(error, nmc, handle);
        if (error) {
            goto nfsmout;
        }
        cp = nfs_gss_svc_ctx_find(handle);
        if (cp == NULL) {
            error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
            goto nfsmout;
        }
    }

    cp->gss_svc_proc = proc;

    if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
        struct posix_cred temp_pcred;

        if (cp->gss_svc_seqwin == 0) {
            /*
             * Context isn't complete
             */
            error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
            goto nfsmout;
        }

        if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
            /*
             * Sequence number is bad
             */
            error = EINVAL; // drop the request
            goto nfsmout;
        }

        /*
         * Validate the verifier.
         * The verifier contains an encrypted checksum
         * of the call header from the XID up to and
         * including the credential. We compute the
         * checksum and compare it with what came in
         * the request.
         */
        header_len = nfsm_chain_offset(nmc);
        nfsm_chain_get_32(error, nmc, flavor);
        nfsm_chain_get_32(error, nmc, cksum.length);
        if (error) {
            goto nfsmout;
        }
        if (flavor != RPCSEC_GSS || cksum.length > KRB5_MAX_MIC_SIZE) {
            error = NFSERR_AUTHERR | AUTH_BADVERF;
        } else {
            MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK);
            nfsm_chain_get_opaque(error, nmc, cksum.length, cksum.value);
        }
        if (error) {
            goto nfsmout;
        }

        /* Now verify the client's call header checksum */
        major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, nmc->nmc_mhead, 0, header_len, &cksum, NULL);
        (void)gss_release_buffer(NULL, &cksum);
        if (major != GSS_S_COMPLETE) {
            printf("Server header: gss_krb5_verify_mic_mbuf failed %d\n", error);
            error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
            goto nfsmout;
        }

        nd->nd_gss_seqnum = seqnum;

        /*
         * Set up the user's cred
         */
        bzero(&temp_pcred, sizeof(temp_pcred));
        temp_pcred.cr_uid = cp->gss_svc_uid;
        bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
            sizeof(gid_t) * cp->gss_svc_ngroups);
        temp_pcred.cr_ngroups = (short)cp->gss_svc_ngroups;

        nd->nd_cr = posix_cred_create(&temp_pcred);
        if (nd->nd_cr == NULL) {
            error = ENOMEM;
            goto nfsmout;
        }
        clock_get_uptime(&cp->gss_svc_incarnation);

        /*
         * If the call arguments are integrity or privacy protected
         * then we need to check them here.
         */
        switch (service) {
        case RPCSEC_GSS_SVC_NONE:
            break;
        case RPCSEC_GSS_SVC_INTEGRITY:
            /*
             * Here's what we expect in the integrity call args:
             *
             * - length of seq num + call args (4 bytes)
             * - sequence number (4 bytes)
             * - call args (variable bytes)
             * - length of checksum token
             * - checksum of seqnum + call args
             */
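            /*
             * Example with hypothetical sizes: a 120-byte NFS call body arrives
             * as a 4-byte length word of 124 (4-byte sequence number + 120 bytes
             * of args), the 124 bytes themselves, then a 4-byte checksum length
             * and the MIC computed over those same 124 bytes.
             */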
            nfsm_chain_get_32(error, nmc, arglen);              // length of args
            if (arglen > NFS_MAXPACKET) {
                error = EBADRPC;
                goto nfsmout;
            }

            nmc_tmp = *nmc;
            nfsm_chain_adv(error, &nmc_tmp, arglen);
            nfsm_chain_get_32(error, &nmc_tmp, cksum.length);
            cksum.value = NULL;
            if (cksum.length > 0 && cksum.length < GSS_MAX_MIC_LEN) {
                MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK);
            }

            if (cksum.value == NULL) {
                error = EBADRPC;
                goto nfsmout;
            }
            nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value);

            /* Verify the checksum over the call args */
            start = nfsm_chain_offset(nmc);

            major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id,
                nmc->nmc_mhead, start, arglen, &cksum, NULL);
            FREE(cksum.value, M_TEMP);
            if (major != GSS_S_COMPLETE) {
                printf("Server args: gss_krb5_verify_mic_mbuf failed %d\n", error);
                error = EBADRPC;
                goto nfsmout;
            }

            /*
             * Get the sequence number prepended to the args
             * and compare it against the one sent in the
             * call credential.
             */
            nfsm_chain_get_32(error, nmc, seqnum);
            if (seqnum != nd->nd_gss_seqnum) {
                error = EBADRPC;                        // returns as GARBAGEARGS
                goto nfsmout;
            }
            break;
        case RPCSEC_GSS_SVC_PRIVACY:
            /*
             * Here's what we expect in the privacy call args:
             *
             * - length of wrap token
             * - wrap token (37-40 bytes)
             */
            prev_mbuf = nmc->nmc_mcur;
            nfsm_chain_get_32(error, nmc, arglen);              // length of args
            if (arglen > NFS_MAXPACKET) {
                error = EBADRPC;
                goto nfsmout;
            }

            /* Get the wrap token (current mbuf in the chain starting at the current offset) */
            start = nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur);

            /* split out the wrap token */
            argsize = arglen;
            error = gss_normalize_mbuf(nmc->nmc_mcur, start, &argsize, &reply_mbuf, &pad_mbuf, 0);
            if (error) {
                goto nfsmout;
            }

            assert(argsize == arglen);
            if (pad_mbuf) {
                assert(nfsm_pad(arglen) == mbuf_len(pad_mbuf));
                mbuf_free(pad_mbuf);
            } else {
                assert(nfsm_pad(arglen) == 0);
            }

            major = gss_krb5_unwrap_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, &reply_mbuf, 0, arglen, NULL, NULL);
            if (major != GSS_S_COMPLETE) {
                printf("%s: gss_krb5_unwrap_mbuf fails %d\n", __func__, error);
                goto nfsmout;
            }

            /* Now replace the wrapped arguments with the unwrapped ones */
            mbuf_setnext(prev_mbuf, reply_mbuf);
            nmc->nmc_mcur = reply_mbuf;
            nmc->nmc_ptr = mbuf_data(reply_mbuf);
            nmc->nmc_left = mbuf_len(reply_mbuf);

            /*
             * - sequence number (4 bytes)
             */
            // nfsm_chain_reverse(nmc, nfsm_pad(toklen));

            /*
             * Get the sequence number prepended to the args
             * and compare it against the one sent in the
             * call credential.
             */
            nfsm_chain_get_32(error, nmc, seqnum);
            if (seqnum != nd->nd_gss_seqnum) {
                printf("%s: Sequence number mismatch seqnum = %d nd->nd_gss_seqnum = %d\n",
                    __func__, seqnum, nd->nd_gss_seqnum);
                printmbuf("reply_mbuf", nmc->nmc_mhead, 0, 0);
                printf("reply_mbuf %p nmc_head %p\n", reply_mbuf, nmc->nmc_mhead);
                error = EBADRPC;                        // returns as GARBAGEARGS
                goto nfsmout;
            }
            break;
        }
    } else {
        /*
         * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
         * then we expect a null verifier.
         */
        nfsm_chain_get_32(error, nmc, flavor);
        nfsm_chain_get_32(error, nmc, verflen);
        if (error || flavor != RPCAUTH_NULL || verflen > 0) {
            error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
        }
        if (error) {
            if (proc == RPCSEC_GSS_INIT) {
                lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
                FREE(cp, M_TEMP);
                cp = NULL;
            }
            goto nfsmout;
        }
    }

    nd->nd_gss_context = cp;
    return 0;

nfsmout:
    if (cp) {
        nfs_gss_svc_ctx_deref(cp);
    }
    return error;
}
/*
 * Insert the server's verifier into the RPC reply header.
 * It contains a signed checksum of the sequence number that
 * was received in the RPC call.
 * Then go on to add integrity or privacy if necessary.
 */
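
/*
 * Sketch of the verifier emitted below: the flavor word RPCSEC_GSS, a 4-byte
 * token length, then the MIC of the 4-byte network-order sequence number (or
 * of the sequence window on the final context-setup reply).
 */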
nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
    struct nfs_gss_svc_ctx *cp;
    int error = 0;
    uint32_t major;
    gss_buffer_desc cksum, seqbuf;
    uint32_t network_seqnum;

    cp = nd->nd_gss_context;

    if (cp->gss_svc_major != GSS_S_COMPLETE) {
        /*
         * If the context isn't yet complete
         * then return a null verifier.
         */
        nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
        nfsm_chain_add_32(error, nmc, 0);
        return error;
    }

    /*
     * Compute checksum of the request seq number
     * If it's the final reply of context setup
     * then return the checksum of the context
     * window size.
     */
    seqbuf.length = NFSX_UNSIGNED;
    if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
        cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
        network_seqnum = htonl(cp->gss_svc_seqwin);
    } else {
        network_seqnum = htonl(nd->nd_gss_seqnum);
    }
    seqbuf.value = &network_seqnum;

    major = gss_krb5_get_mic((uint32_t *)&error, cp->gss_svc_ctx_id, 0, &seqbuf, &cksum);
    if (major != GSS_S_COMPLETE) {
        return error;
    }

    /*
     * Now wrap it in a token and add
     * the verifier to the reply.
     */
    nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
    nfsm_chain_add_32(error, nmc, cksum.length);
    nfsm_chain_add_opaque(error, nmc, cksum.value, cksum.length);
    gss_release_buffer(NULL, &cksum);

    return error;
}
/*
 * The results aren't available yet, but if they need to be
 * checksummed for integrity protection or encrypted, then
 * we can record the start offset here, insert a place-holder
 * for the results length, as well as the sequence number.
 * The rest of the work is done later by nfs_gss_svc_protect_reply()
 * when the results are available.
 */
nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
    struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
    int error = 0;

    if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
        cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
        return 0;
    }

    switch (nd->nd_sec) {
    case RPCAUTH_KRB5:
        break;
    case RPCAUTH_KRB5I:
    case RPCAUTH_KRB5P:
        nd->nd_gss_mb = nmc->nmc_mcur;                  // record current mbuf
        nfsm_chain_finish_mbuf(error, nmc);             // split the chain here
        break;
    }

    return error;
}
/*
 * The results are checksummed or encrypted for return to the client
 */
nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep __unused)
{
    struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
    struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
    mbuf_t mb, results;
    uint32_t reslen;
    int error = 0;

    /*
     * Using a reference to the mbuf where we previously split the reply
     * mbuf chain, we split the mbuf chain argument into two mbuf chains,
     * one that allows us to prepend a length field or token, (nmc_pre)
     * and the second which holds just the results that we're going to
     * checksum and/or encrypt. When we're done, we join the chains back
     * together.
     */

    mb = nd->nd_gss_mb;                             // the mbuf where we split
    results = mbuf_next(mb);                        // first mbuf in the results
    error = mbuf_setnext(mb, NULL);                 // disconnect the chains
    if (error) {
        return error;
    }

    nfs_gss_nfsm_chain(nmc_res, mb);                // set up the prepend chain
    nfsm_chain_build_done(error, nmc_res);
    if (error) {
        return error;
    }

    if (nd->nd_sec == RPCAUTH_KRB5I) {
        error = rpc_gss_integ_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
    } else {
        /* RPCAUTH_KRB5P */
        error = rpc_gss_priv_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
    }
    nfs_gss_append_chain(nmc_res, results);         // Append the results mbufs
    nfsm_chain_build_done(error, nmc_res);

    return error;
}
/*
 * This function handles the context setup calls from the client.
 * Essentially, it implements the NFS null procedure calls when
 * an RPCSEC_GSS credential is used.
 * This is the context maintenance function. It creates and
 * destroys server contexts at the whim of the client.
 * During context creation, it receives GSS-API tokens from the
 * client, passes them up to gssd, and returns a received token
 * back to the client in the null procedure reply.
 */
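
/*
 * Rough shape of the exchange handled here, assuming a typical Kerberos setup:
 * the client's RPCSEC_GSS_INIT null call carries the first GSS token, which is
 * handed to gssd via the accept_sec_context upcall; the reply returns the new
 * handle, the GSS major/minor status, the sequence window, and any output
 * token. RPCSEC_GSS_CONTINUE_INIT repeats the token exchange until gssd
 * reports GSS_S_COMPLETE, after which the sequence-window bitmap is allocated
 * and the context is ready for RPCSEC_GSS_DATA traffic.
 */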
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
{
    struct nfs_gss_svc_ctx *cp = NULL;
    int error = 0;
    int autherr = 0;
    struct nfsm_chain *nmreq, nmrep;
    size_t sz;

    nmreq = &nd->nd_nmreq;
    nfsm_chain_null(&nmrep);

    cp = nd->nd_gss_context;

    switch (cp->gss_svc_proc) {
    case RPCSEC_GSS_INIT:
        nfs_gss_svc_ctx_insert(cp);
        /* FALLTHRU */

    case RPCSEC_GSS_CONTINUE_INIT:
        /* Get the token from the request */
        nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
        cp->gss_svc_token = NULL;
        if (cp->gss_svc_tokenlen > 0 && cp->gss_svc_tokenlen < GSS_MAX_TOKEN_LEN) {
            MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
        }
        if (cp->gss_svc_token == NULL) {
            autherr = RPCSEC_GSS_CREDPROBLEM;
            break;
        }
        nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);

        /* Use the token in a gss_accept_sec_context upcall */
        error = nfs_gss_svc_gssd_upcall(cp);
        if (error) {
            autherr = RPCSEC_GSS_CREDPROBLEM;
            if (error == NFSERR_EAUTH) {
                error = 0;
            }
            break;
        }

        /*
         * If the context isn't complete, pass the new token
         * back to the client for another round.
         */
        if (cp->gss_svc_major != GSS_S_COMPLETE) {
            break;
        }

        /*
         * Now the server context is complete.
         */
        clock_get_uptime(&cp->gss_svc_incarnation);

        cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
        MALLOC(cp->gss_svc_seqbits, uint32_t *,
            nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK | M_ZERO);
        if (cp->gss_svc_seqbits == NULL) {
            autherr = RPCSEC_GSS_CREDPROBLEM;
            break;
        }
        break;

    case RPCSEC_GSS_DATA:
        /* Just a nullproc ping - do nothing */
        break;

    case RPCSEC_GSS_DESTROY:
        /*
         * Don't destroy the context immediately because
         * other active requests might still be using it.
         * Instead, schedule it for destruction after
         * GSS_CTX_PEND time has elapsed.
         */
        cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
        if (cp != NULL) {
            cp->gss_svc_handle = 0; // so it can't be found
            lck_mtx_lock(cp->gss_svc_mtx);
            clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
                &cp->gss_svc_incarnation);
            lck_mtx_unlock(cp->gss_svc_mtx);
        }
        break;

    default:
        autherr = RPCSEC_GSS_CREDPROBLEM;
        break;
    }

    /* Now build the reply  */

    if (nd->nd_repstat == 0) {
        nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
    }
    sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results
    error = nfsrv_rephead(nd, slp, &nmrep, sz);
    *mrepp = nmrep.nmc_mhead;
    if (error || autherr) {
        goto nfsmout;
    }

    if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
        cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
        nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);

        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);

        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
        if (cp->gss_svc_token != NULL) {
            nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
            FREE(cp->gss_svc_token, M_TEMP);
            cp->gss_svc_token = NULL;
        }
    }

nfsmout:
    if (autherr != 0) {
        nd->nd_gss_context = NULL;
        LIST_REMOVE(cp, gss_svc_entries);
        if (cp->gss_svc_seqbits != NULL) {
            FREE(cp->gss_svc_seqbits, M_TEMP);
        }
        if (cp->gss_svc_token != NULL) {
            FREE(cp->gss_svc_token, M_TEMP);
        }
        lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
        FREE(cp, M_TEMP);
    }

    nfsm_chain_build_done(error, &nmrep);
    if (error) {
        nfsm_chain_cleanup(&nmrep);
    }
    return error;
}
/*
 * This is almost a mirror-image of the client side upcall.
 * It passes and receives a token, but invokes gss_accept_sec_context.
 * If it's the final call of the context setup, then gssd also returns
 * the session key and the user's UID.
 */
nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
{
    kern_return_t kr;
    mach_port_t mp;
    int retry_cnt = 0;
    gssd_byte_buffer octx = NULL;
    uint32_t lucidlen = 0;
    void *lucid_ctx_buffer;
    int error = 0;
    vm_map_copy_t itoken = NULL;
    gssd_byte_buffer otoken = NULL;
    mach_msg_type_number_t otokenlen;
    char svcname[] = "nfs";

    kr = host_get_gssd_port(host_priv_self(), &mp);
    if (kr != KERN_SUCCESS) {
        printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
        goto out;
    }
    if (!IPC_PORT_VALID(mp)) {
        printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
        goto out;
    }

retry:
    if (cp->gss_svc_tokenlen > 0) {
        nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
    }

    printf("Calling mach_gss_accept_sec_context\n");
    kr = mach_gss_accept_sec_context(
        (gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
        &cp->gss_svc_context,
        &cp->gss_svc_cred_handle,
        &cp->gss_svc_ngroups,
        &octx, (mach_msg_type_number_t *) &lucidlen,
        &otoken, &otokenlen,
        &cp->gss_svc_minor);

    printf("mach_gss_accept_sec_context returned %d\n", kr);
    if (kr != KERN_SUCCESS) {
        printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
        if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
            retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
            if (cp->gss_svc_tokenlen > 0) {
                nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
            }
            goto retry;
        }
        host_release_special_port(mp);
        goto out;
    }

    host_release_special_port(mp);

    if (lucidlen > MAX_LUCIDLEN) {
        printf("nfs_gss_svc_gssd_upcall: bad context length (%d)\n", lucidlen);
        vm_map_copy_discard((vm_map_copy_t) octx);
        vm_map_copy_discard((vm_map_copy_t) otoken);
        goto out;
    }

    MALLOC(lucid_ctx_buffer, void *, lucidlen, M_TEMP, M_WAITOK | M_ZERO);
    error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer);
    if (error) {
        vm_map_copy_discard((vm_map_copy_t) otoken);
        FREE(lucid_ctx_buffer, M_TEMP);
        goto out;
    }

    if (cp->gss_svc_ctx_id) {
        gss_krb5_destroy_context(cp->gss_svc_ctx_id);
    }
    cp->gss_svc_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen);
    if (cp->gss_svc_ctx_id == NULL) {
        printf("Failed to make context from lucid_ctx_buffer\n");
        goto out;
    }

    /* Free context token used as input */
    if (cp->gss_svc_token) {
        FREE(cp->gss_svc_token, M_TEMP);
    }
    cp->gss_svc_token = NULL;
    cp->gss_svc_tokenlen = 0;

    if (otokenlen > 0) {
        /* Set context token to gss output token */
        MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
        if (cp->gss_svc_token == NULL) {
            printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
            vm_map_copy_discard((vm_map_copy_t) otoken);
            return ENOMEM;
        }
        error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
        if (error) {
            FREE(cp->gss_svc_token, M_TEMP);
            cp->gss_svc_token = NULL;
            return NFSERR_EAUTH;
        }
        cp->gss_svc_tokenlen = otokenlen;
    }

    return 0;

out:
    FREE(cp->gss_svc_token, M_TEMP);
    cp->gss_svc_tokenlen = 0;
    cp->gss_svc_token = NULL;

    return NFSERR_EAUTH;
}
/*
 * Validate the sequence number in the credential as described
 * in RFC 2203 Section 5.3.3.1
 *
 * Here the window of valid sequence numbers is represented by
 * a bitmap. As each sequence number is received, its bit is
 * set in the bitmap. An invalid sequence number lies below
 * the lower bound of the window, or is within the window but
 * has its bit already set.
 */
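
/*
 * Worked example with hypothetical numbers: with a window of 128 and
 * gss_svc_seqmax == 200, sequence 250 advances the window (bits for 201..249
 * are cleared, bit 250 % 128 is set, and seqmax becomes 250); sequence 90 is
 * rejected as below the lower bound 250 - 128 == 122; a replay of 250 is
 * rejected because its bit is already set.
 */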
nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
{
    uint32_t *bits = cp->gss_svc_seqbits;
    uint32_t win = cp->gss_svc_seqwin;
    uint32_t i;

    lck_mtx_lock(cp->gss_svc_mtx);

    /*
     * If greater than the window upper bound,
     * move the window up, and set the bit.
     */
    if (seq > cp->gss_svc_seqmax) {
        if (seq - cp->gss_svc_seqmax > win) {
            bzero(bits, nfsm_rndup((win + 7) / 8));
        } else {
            for (i = cp->gss_svc_seqmax + 1; i < seq; i++) {
                win_resetbit(bits, i % win);
            }
        }
        win_setbit(bits, seq % win);
        cp->gss_svc_seqmax = seq;
        lck_mtx_unlock(cp->gss_svc_mtx);
        return 1;
    }

    /*
     * Invalid if below the lower bound of the window
     */
    if (seq <= cp->gss_svc_seqmax - win) {
        lck_mtx_unlock(cp->gss_svc_mtx);
        return 0;
    }

    /*
     * In the window, invalid if the bit is already set
     */
    if (win_getbit(bits, seq % win)) {
        lck_mtx_unlock(cp->gss_svc_mtx);
        return 0;
    }
    win_setbit(bits, seq % win);
    lck_mtx_unlock(cp->gss_svc_mtx);
    return 1;
}
/*
 * Drop a reference to a context
 *
 * Note that it's OK for the context to exist
 * with a refcount of zero. The refcount isn't
 * checked until we're about to reap an expired one.
 */
nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
{
    lck_mtx_lock(cp->gss_svc_mtx);
    if (cp->gss_svc_refcnt > 0) {
        cp->gss_svc_refcnt--;
    } else {
        printf("nfs_gss_ctx_deref: zero refcount\n");
    }
    lck_mtx_unlock(cp->gss_svc_mtx);
}
/*
 * Called at NFS server shutdown - destroy all contexts
 */
nfs_gss_svc_cleanup(void)
{
    struct nfs_gss_svc_ctx_hashhead *head;
    struct nfs_gss_svc_ctx *cp, *ncp;
    int i;

    lck_mtx_lock(nfs_gss_svc_ctx_mutex);

    /*
     * Run through all the buckets
     */
    for (i = 0; i < SVC_CTX_HASHSZ; i++) {
        /*
         * Remove and free all entries in the bucket
         */
        head = &nfs_gss_svc_ctx_hashtbl[i];
        LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
            LIST_REMOVE(cp, gss_svc_entries);
            if (cp->gss_svc_seqbits) {
                FREE(cp->gss_svc_seqbits, M_TEMP);
            }
            lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
            FREE(cp, M_TEMP);
        }
    }

    lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}

#endif /* CONFIG_NFS_SERVER */
/*
 * The following functions are used by both client and server.
 */

/*
 * Release a host special port that was obtained by host_get_special_port
 * or one of its macros (host_get_gssd_port in this case).
 * This really should be in a public kpi.
 */

/* This should be in a public header if this routine is not */
extern void ipc_port_release_send(ipc_port_t);
extern ipc_port_t ipc_port_copy_send(ipc_port_t);

host_release_special_port(mach_port_t mp)
{
    if (IPC_PORT_VALID(mp)) {
        ipc_port_release_send(mp);
    }
}

host_copy_special_port(mach_port_t mp)
{
    return ipc_port_copy_send(mp);
}
/*
 * The token that is sent and received in the gssd upcall
 * has unbounded variable length. Mach RPC does not pass
 * the token in-line. Instead it uses page mapping to handle
 * these parameters. This function allocates a VM buffer
 * to hold the token for an upcall and copies the token
 * (received from the client) into it. The VM buffer is
 * marked with a src_destroy flag so that the upcall will
 * automatically de-allocate the buffer when the upcall is
 * complete.
 */
nfs_gss_mach_alloc_buffer(u_char *buf, size_t buflen, vm_map_copy_t *addr)
{
    kern_return_t kr;
    vm_offset_t kmem_buf;
    vm_size_t tbuflen;

    *addr = NULL;
    if (buf == NULL || buflen == 0) {
        return;
    }

    tbuflen = vm_map_round_page(buflen,
        vm_map_page_mask(ipc_kernel_map));

    if (tbuflen < buflen) {
        printf("nfs_gss_mach_alloc_buffer: vm_map_round_page failed\n");
        return;
    }

    kr = vm_allocate_kernel(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_FILE);
    if (kr != 0) {
        printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
        return;
    }

    kr = vm_map_wire_kernel(ipc_kernel_map,
        vm_map_trunc_page(kmem_buf,
        vm_map_page_mask(ipc_kernel_map)),
        vm_map_round_page(kmem_buf + tbuflen,
        vm_map_page_mask(ipc_kernel_map)),
        VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_FILE, FALSE);
    if (kr != 0) {
        printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
        return;
    }

    bcopy(buf, (void *) kmem_buf, buflen);
    // Shouldn't need to bzero below since vm_allocate returns zeroed pages
    // bzero(kmem_buf + buflen, tbuflen - buflen);

    kr = vm_map_unwire(ipc_kernel_map,
        vm_map_trunc_page(kmem_buf,
        vm_map_page_mask(ipc_kernel_map)),
        vm_map_round_page(kmem_buf + tbuflen,
        vm_map_page_mask(ipc_kernel_map)),
        FALSE);
    if (kr != 0) {
        printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
        return;
    }

    kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
        (vm_map_size_t) buflen, TRUE, addr);
    if (kr != 0) {
        printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
        return;
    }
}
/*
 * Here we handle a token received from the gssd via an upcall.
 * The received token resides in an allocate VM buffer.
 * We copy the token out of this buffer to a chunk of malloc'ed
 * memory of the right size, then de-allocate the VM buffer.
 */
nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
{
    vm_map_offset_t map_data;
    vm_offset_t data;
    int error;

    error = vm_map_copyout(ipc_kernel_map, &map_data, in);
    if (error) {
        return error;
    }

    data = CAST_DOWN(vm_offset_t, map_data);
    bcopy((void *) data, out, len);
    vm_deallocate(ipc_kernel_map, data, len);

    return 0;
}
/*
 * Return the number of bytes in an mbuf chain.
 */
nfs_gss_mchain_length(mbuf_t mhead)
{
    mbuf_t mb;
    size_t len = 0;

    for (mb = mhead; mb; mb = mbuf_next(mb)) {
        len += mbuf_len(mb);
    }

    return len;
}
4020 nfs_gss_append_chain(struct nfsm_chain
*nmc
, mbuf_t mc
)
4025 /* Connect the mbuf chains */
4026 error
= mbuf_setnext(nmc
->nmc_mcur
, mc
);
4031 /* Find the last mbuf in the chain */
4033 for (mb
= mc
; mb
; mb
= mbuf_next(mb
)) {
4037 nmc
->nmc_mcur
= tail
;
4038 nmc
->nmc_ptr
= (caddr_t
) mbuf_data(tail
) + mbuf_len(tail
);
4039 nmc
->nmc_left
= mbuf_trailingspace(tail
);
#if CONFIG_NFS_SERVER /* Only used by CONFIG_NFS_SERVER */
/*
 * Convert an mbuf chain to an NFS mbuf chain
 */
nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
{
    mbuf_t mb, tail;

    /* Find the last mbuf in the chain */
    tail = NULL;
    for (mb = mc; mb; mb = mbuf_next(mb)) {
        tail = mb;
    }

    nmc->nmc_mhead = mc;
    nmc->nmc_mcur = tail;
    nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
    nmc->nmc_left = mbuf_trailingspace(tail);
}
#endif /* CONFIG_NFS_SERVER */
#define DISPLAYLEN 16
#define MAXDISPLAYLEN 256

hexdump(const char *msg, void *data, size_t len)
{
    size_t i, j;
    u_char *d = data;
    char *p, disbuf[3 * DISPLAYLEN + 1];

    printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
    if (len > MAXDISPLAYLEN) {
        len = MAXDISPLAYLEN;
    }

    for (i = 0; i < len; i += DISPLAYLEN) {
        for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) {
            snprintf(p, 4, "%02x ", d[i + j]);
        }
        printf("\t%s\n", disbuf);
    }
}

#endif /* CONFIG_NFS */