2 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 * These functions implement RPCSEC_GSS security for the NFS client and server.
31 * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5
32 * protection as described in Internet RFC 2203 and 2623.
34 * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
35 * It requires the client and server negotiate a secure connection as part of a
36 * security context. The context state is maintained in client and server structures.
37 * On the client side, each user of an NFS mount is assigned their own context,
38 * identified by UID, on their first use of the mount, and it persists until the
39 * unmount or until the context is renewed. Each user context has a corresponding
40 * server context which the server maintains until the client destroys it, or
41 * until the context expires.
43 * The client and server contexts are set up dynamically. When a user attempts
44 * to send an NFS request, if there is no context for the user, then one is
45 * set up via an exchange of NFS null procedure calls as described in RFC 2203.
46 * During this exchange, the client and server pass a security token that is
47 * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
48 * the user to the server (and vice-versa). The client and server also receive
49 * a unique session key that can be used to digitally sign the credentials and
50 * verifier or optionally to provide data integrity and/or privacy.
52 * Once the context is complete, the client and server enter a normal data
53 * exchange phase - beginning with the NFS request that prompted the context
54 * creation. During this phase, the client's RPC header contains an RPCSEC_GSS
55 * credential and verifier, and the server returns a verifier as well.
56 * For simple authentication, the verifier contains a signed checksum of the
57 * RPC header, including the credential. The server's verifier has a signed
58 * checksum of the current sequence number.
60 * Each client call contains a sequence number that nominally increases by one
61 * on each request. The sequence number is intended to prevent replay attacks.
62 * Since the protocol can be used over UDP, there is some allowance for
63 * out-of-sequence requests, so the server checks whether the sequence numbers
64 * are within a sequence "window". If a sequence number is outside the lower
65 * bound of the window, the server silently drops the request. This has some
66 * implications for retransmission. If a request needs to be retransmitted, the
67 * client must bump the sequence number even if the request XID is unchanged.
69 * When the NFS mount is unmounted, the client sends a "destroy" credential
70 * to delete the server's context for each user of the mount. Since it's
71 * possible for the client to crash or disconnect without sending the destroy
72 * message, the server has a thread that reaps contexts that have been idle
77 #include <sys/param.h>
78 #include <sys/systm.h>
80 #include <sys/kauth.h>
81 #include <sys/kernel.h>
82 #include <sys/mount_internal.h>
83 #include <sys/vnode.h>
85 #include <sys/malloc.h>
86 #include <sys/kpi_mbuf.h>
87 #include <sys/ucred.h>
89 #include <kern/host.h>
90 #include <libkern/libkern.h>
92 #include <mach/task.h>
93 #include <mach/host_special_ports.h>
94 #include <mach/host_priv.h>
95 #include <mach/thread_act.h>
96 #include <mach/mig_errors.h>
97 #include <mach/vm_map.h>
98 #include <vm/vm_map.h>
99 #include <vm/vm_kern.h>
100 #include <gssd/gssd_mach.h>
102 #include <nfs/rpcv2.h>
103 #include <nfs/nfsproto.h>
105 #include <nfs/nfsnode.h>
106 #include <nfs/nfs_gss.h>
107 #include <nfs/nfsmount.h>
108 #include <nfs/xdr_subs.h>
109 #include <nfs/nfsm_subs.h>
110 #include <nfs/nfs_gss.h>
112 #include "nfs_gss_crypto.h"
/* NOTE(review): extraction artifact — each line below carries its original source line number. */
/* Maximum number of Mach upcall retries to the gssd before giving up on context setup. */
114 #define NFS_GSS_MACH_MAX_RETRIES 3
/* GSS-facility debug logging at level 7 via the NFS debug machinery. */
116 #define NFS_GSS_DBG(...) NFS_DBG(NFS_FAC_GSS, 7, ## __VA_ARGS__)
/* Nonzero when the GSS facility bit is enabled in the NFS debug mask. */
117 #define NFS_GSS_ISDBG (NFS_DEBUG_FACILITY & NFS_FAC_GSS)
/*
 * NOTE(review): these two members appear to belong to a digest-context
 * aggregate (presumably the union inside GSS_DIGEST_CTX) whose enclosing
 * declaration lines were dropped by the extraction — confirm against the
 * original nfs_gss.c / nfs_gss_crypto.h.
 */
/* MD5 with DES-CBC checksum context (single-DES key type). */
122 MD5_DESCBC_CTX m_ctx
;
/* HMAC-SHA1 DES3-KD checksum context (triple-DES key type). */
123 HMAC_SHA1_DES3KD_CTX h_ctx
;
/* Largest supported digest size — SHA-1 (larger of the two hash types used). */
127 #define MAX_DIGEST SHA_DIGEST_LENGTH
/*
 * HASHLEN clamps an out-of-range key-info hash length to MAX_DIGEST:
 * debug kernels panic on the invalid value, release kernels just log it.
 * NOTE(review): the #else (original line 131) and #endif (original line 134)
 * separating the two definitions were dropped by the extraction.
 */
128 #ifdef NFS_KERNEL_DEBUG
129 #define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
130 (panic("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
132 #define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
133 (printf("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
/* Server-side GSS context table: hash mask and hash-table head array. */
137 u_long nfs_gss_svc_ctx_hash
;
138 struct nfs_gss_svc_ctx_hashhead
*nfs_gss_svc_ctx_hashtbl
;
/* Mutex guarding the server context hash table, and its lock group. */
139 lck_mtx_t
*nfs_gss_svc_ctx_mutex
;
140 lck_grp_t
*nfs_gss_svc_grp
;
/* Idle time-to-live (seconds) for server contexts; reaped by the context timer. */
141 uint32_t nfsrv_gss_context_ttl
= GSS_CTX_EXPIRE
;
/* TTL in nanoseconds, never less than twice the pending-context grace period. */
142 #define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
143 #endif /* NFSSERVER */
/* Lock group for client-side GSS context mutexes. */
146 lck_grp_t
*nfs_gss_clnt_grp
;
148 #endif /* NFSCLIENT */
151 * These octet strings are used to encode/decode ASN.1 tokens
152 * in the RPCSEC_GSS verifiers.
/* ASN.1 application tag (0x60) + DER length byte for a 35-byte Kerberos token. */
154 static u_char krb5_tokhead
[] __attribute__((unused
)) = { 0x60, 0x23 };
/* DER-encoded Kerberos v5 mechanism OID (1.2.840.113554.1.2.2). */
155 u_char krb5_mech
[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
/* MIC token header: TOK_ID 01 01, SGN_ALG DES-MAC-MD5 (00 00), filler ff ff ff ff. */
156 static u_char krb5_mic
[] = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
/* MIC token header for 3DES: SGN_ALG HMAC-SHA1-DES3-KD (04 00). */
157 static u_char krb5_mic3
[] = { 0x01, 0x01, 0x04, 0x00, 0xff, 0xff, 0xff, 0xff };
/* Wrap token header: TOK_ID 02 01, SGN_ALG 00 00, SEAL_ALG DES (00 00). */
158 static u_char krb5_wrap
[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
/* Wrap token header for 3DES: SGN_ALG 04 00, SEAL_ALG DES3-KD (02 00). */
159 static u_char krb5_wrap3
[] = { 0x02, 0x01, 0x04, 0x00, 0x02, 0x00, 0xff, 0xff };
160 static u_char iv0
[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; // DES MAC Initialization Vector
/* Select the MIC / wrap algorithm header matching the context's key type. */
162 #define ALG_MIC(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_mic : krb5_mic3)
163 #define ALG_WRAP(ki) (((ki)->type == NFS_GSS_1DES) ? krb5_wrap : krb5_wrap3)
166 * The size of the Kerberos v5 ASN.1 token
169 * Note that the second octet of the krb5_tokhead (0x23) is a
170 * DER-encoded size field that has variable length. If the size
171 * is 128 bytes or greater, then it uses two bytes, three bytes
172 * if 65536 or greater, and so on. Since the MIC tokens are
173 * separate from the data, the size is always the same: 35 bytes (0x23).
174 * However, the wrap token is different. Its size field includes the
175 * size of the token + the encrypted data that follows. So the size
176 * field may be two, three or four bytes.
/* Component sizes of a Kerberos v5 ASN.1 token (see RFC 1964 section 1). */
178 #define KRB5_SZ_TOKHEAD sizeof(krb5_tokhead)
179 #define KRB5_SZ_MECH sizeof(krb5_mech)
180 #define KRB5_SZ_ALG sizeof(krb5_mic) // 8 - same as krb5_wrap
/* Encrypted sequence-number field is always 8 octets. */
181 #define KRB5_SZ_SEQ 8
182 #define KRB5_SZ_EXTRA 3 // a wrap token may be longer by up to this many octets
/* Token size without / with the trailing checksum, and the wrap-token maximum. */
183 #define KRB5_SZ_TOKEN_NOSUM (KRB5_SZ_TOKHEAD + KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ)
184 #define KRB5_SZ_TOKEN(cksumlen) ((cksumlen) + KRB5_SZ_TOKEN_NOSUM)
185 #define KRB5_SZ_TOKMAX(cksumlen) (KRB5_SZ_TOKEN(cksumlen) + KRB5_SZ_EXTRA)
188 static int nfs_gss_clnt_ctx_find(struct nfsreq
*);
189 static int nfs_gss_clnt_ctx_failover(struct nfsreq
*);
190 static int nfs_gss_clnt_ctx_init(struct nfsreq
*, struct nfs_gss_clnt_ctx
*);
191 static int nfs_gss_clnt_ctx_init_retry(struct nfsreq
*, struct nfs_gss_clnt_ctx
*);
192 static int nfs_gss_clnt_ctx_callserver(struct nfsreq
*, struct nfs_gss_clnt_ctx
*);
193 static uint8_t *nfs_gss_clnt_svcname(struct nfsmount
*, gssd_nametype
*, uint32_t *);
194 static int nfs_gss_clnt_gssd_upcall(struct nfsreq
*, struct nfs_gss_clnt_ctx
*);
195 static void nfs_gss_clnt_ctx_remove(struct nfsmount
*, struct nfs_gss_clnt_ctx
*);
196 #endif /* NFSCLIENT */
/* Server-side forward declarations: context lookup/insert, idle-context
 * reaper timer, gssd upcall, and sequence-window validation. */
199 static struct nfs_gss_svc_ctx
*nfs_gss_svc_ctx_find(uint32_t);
200 static void nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx
*);
201 static void nfs_gss_svc_ctx_timer(void *, void *);
202 static int nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx
*);
203 static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx
*, uint32_t);
204 #endif /* NFSSERVER */
206 static void host_release_special_port(mach_port_t
);
207 static mach_port_t
host_copy_special_port(mach_port_t
);
208 static void nfs_gss_mach_alloc_buffer(u_char
*, uint32_t, vm_map_copy_t
*);
209 static int nfs_gss_mach_vmcopyout(vm_map_copy_t
, uint32_t, u_char
*);
210 static int nfs_gss_token_get(gss_key_info
*ki
, u_char
*, u_char
*, int, uint32_t *, u_char
*);
211 static int nfs_gss_token_put(gss_key_info
*ki
, u_char
*, u_char
*, int, int, u_char
*);
212 static int nfs_gss_der_length_size(int);
213 static void nfs_gss_der_length_put(u_char
**, int);
214 static int nfs_gss_der_length_get(u_char
**);
215 static int nfs_gss_mchain_length(mbuf_t
);
216 static int nfs_gss_append_chain(struct nfsm_chain
*, mbuf_t
);
217 static void nfs_gss_nfsm_chain(struct nfsm_chain
*, mbuf_t
);
218 static void nfs_gss_cksum_mchain(gss_key_info
*, mbuf_t
, u_char
*, int, int, u_char
*);
219 static void nfs_gss_cksum_chain(gss_key_info
*, struct nfsm_chain
*, u_char
*, int, int, u_char
*);
220 static void nfs_gss_cksum_rep(gss_key_info
*, uint32_t, u_char
*);
221 static void nfs_gss_encrypt_mchain(gss_key_info
*, mbuf_t
, int, int, int);
222 static void nfs_gss_encrypt_chain(gss_key_info
*, struct nfsm_chain
*, int, int, int);
224 static void gss_digest_Init(GSS_DIGEST_CTX
*, gss_key_info
*);
225 static void gss_digest_Update(GSS_DIGEST_CTX
*, void *, size_t);
226 static void gss_digest_Final(GSS_DIGEST_CTX
*, void *);
227 static void gss_des_crypt(gss_key_info
*, des_cblock
*, des_cblock
*,
228 int32_t, des_cblock
*, des_cblock
*, int, int);
229 static int gss_key_init(gss_key_info
*, uint32_t);
/* Thread call used to schedule the server's idle-context reaper. */
232 thread_call_t nfs_gss_svc_ctx_timer_call
;
/* Nonzero while the reaper timer is armed. */
233 int nfs_gss_timer_on
= 0;
/* Current number of server contexts, bounded by nfs_gss_ctx_max. */
234 uint32_t nfs_gss_ctx_count
= 0;
235 const uint32_t nfs_gss_ctx_max
= GSS_SVC_MAXCONTEXTS
;
236 #endif /* NFSSERVER */
239 * Initialization when NFS starts
245 nfs_gss_clnt_grp
= lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL
);
246 #endif /* NFSCLIENT */
249 nfs_gss_svc_grp
= lck_grp_alloc_init("rpcsec_gss_svc", LCK_GRP_ATTR_NULL
);
251 nfs_gss_svc_ctx_hashtbl
= hashinit(SVC_CTX_HASHSZ
, M_TEMP
, &nfs_gss_svc_ctx_hash
);
252 nfs_gss_svc_ctx_mutex
= lck_mtx_alloc_init(nfs_gss_svc_grp
, LCK_ATTR_NULL
);
254 nfs_gss_svc_ctx_timer_call
= thread_call_allocate(nfs_gss_svc_ctx_timer
, NULL
);
255 #endif /* NFSSERVER */
/*
 * nfs_gss_sysok — decide whether this request may fall back to AUTH_SYS.
 * Not OK while handling a wrongsec error; OK if the mount advertises no
 * explicit security flavor list; otherwise OK only if RPCAUTH_SYS appears
 * in the mount's flavor list.
 * NOTE(review): the opening brace, return statements and loop body braces
 * were dropped by the extraction — reconstruct from the original source.
 */
261 * Is it OK to fall back to using AUTH_SYS?
264 nfs_gss_sysok(struct nfsreq
*req
)
266 struct nfsmount
*nmp
= req
->r_nmp
;
269 if (req
->r_wrongsec
) /* Not OK if we're trying to handle a wrongsec error */
271 if (!nmp
->nm_sec
.count
) /* assume it's OK if we don't have a set of flavors */
273 for (i
=0; i
< nmp
->nm_sec
.count
; i
++)
274 if (nmp
->nm_sec
.flavors
[i
] == RPCAUTH_SYS
)
280 * Find the context for a particular user.
282 * If the context doesn't already exist
283 * then create a new context for this user.
285 * Note that the code allows superuser (uid == 0)
286 * to adopt the context of another user.
288 * We'll match on the audit session ids, since those
289 * processes will have acccess to the same credential cache.
/* Accessors for the audit session id / audit uid carried in a kauth credential. */
292 #define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
293 #define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)
/*
 * nfs_gss_clnt_ctx_cred_match — two credentials match when they share the
 * same audit session id (processes in one session share a credential cache).
 * NOTE(review): the return statements and braces were dropped by the extraction.
 */
296 nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1
, kauth_cred_t cred2
)
298 if (kauth_cred_getasid(cred1
) == kauth_cred_getasid(cred2
))
305 nfs_gss_clnt_ctx_find(struct nfsreq
*req
)
307 struct nfsmount
*nmp
= req
->r_nmp
;
308 struct nfs_gss_clnt_ctx
*cp
;
311 lck_mtx_lock(&nmp
->nm_lock
);
312 TAILQ_FOREACH(cp
, &nmp
->nm_gsscl
, gss_clnt_entries
) {
313 if (nfs_gss_clnt_ctx_cred_match(cp
->gss_clnt_cred
, req
->r_cred
)) {
314 if (cp
->gss_clnt_flags
& GSS_CTX_INVAL
)
316 nfs_gss_clnt_ctx_ref(req
, cp
);
317 lck_mtx_unlock(&nmp
->nm_lock
);
322 if (kauth_cred_getuid(req
->r_cred
) == 0) {
324 * If superuser is trying to get access, then co-opt
325 * the first valid context in the list.
326 * XXX Ultimately, we need to allow superuser to
327 * go ahead and attempt to set up its own context
328 * in case one is set up for it.
330 TAILQ_FOREACH(cp
, &nmp
->nm_gsscl
, gss_clnt_entries
) {
331 if (!(cp
->gss_clnt_flags
& GSS_CTX_INVAL
)) {
332 nfs_gss_clnt_ctx_ref(req
, cp
);
333 lck_mtx_unlock(&nmp
->nm_lock
);
340 * Not found - create a new context
344 * If the thread is async, then it cannot get
345 * kerberos creds and set up a proper context.
346 * If no sec= mount option is given, attempt
347 * to failover to sec=sys.
349 if (req
->r_thread
== NULL
) {
350 if (nfs_gss_sysok(req
)) {
351 error
= nfs_gss_clnt_ctx_failover(req
);
353 printf("nfs_gss_clnt_ctx_find: no context for async\n");
354 error
= NFSERR_EAUTH
;
357 lck_mtx_unlock(&nmp
->nm_lock
);
361 MALLOC(cp
, struct nfs_gss_clnt_ctx
*, sizeof(*cp
), M_TEMP
, M_WAITOK
|M_ZERO
);
363 lck_mtx_unlock(&nmp
->nm_lock
);
367 cp
->gss_clnt_cred
= req
->r_cred
;
368 kauth_cred_ref(cp
->gss_clnt_cred
);
369 cp
->gss_clnt_mtx
= lck_mtx_alloc_init(nfs_gss_clnt_grp
, LCK_ATTR_NULL
);
370 cp
->gss_clnt_thread
= current_thread();
371 nfs_gss_clnt_ctx_ref(req
, cp
);
372 TAILQ_INSERT_TAIL(&nmp
->nm_gsscl
, cp
, gss_clnt_entries
);
373 lck_mtx_unlock(&nmp
->nm_lock
);
375 error
= nfs_gss_clnt_ctx_init_retry(req
, cp
); // Initialize new context
377 nfs_gss_clnt_ctx_unref(req
);
380 * If we failed to set up a Kerberos context for this
381 * user and no sec= mount option was given, but the
382 * server indicated that it could support AUTH_SYS, then set
383 * up a dummy context that allows this user to attempt
386 if (error
&& nfs_gss_sysok(req
) &&
387 (error
!= ENXIO
) && (error
!= ETIMEDOUT
)) {
388 lck_mtx_lock(&nmp
->nm_lock
);
389 error
= nfs_gss_clnt_ctx_failover(req
);
390 lck_mtx_unlock(&nmp
->nm_lock
);
397 * Set up a dummy context to allow the use of sec=sys
398 * for this user, if the server allows sec=sys.
399 * The context is valid for GSS_CLNT_SYS_VALID seconds,
400 * so that the user will periodically attempt to fail back
401 * and get a real credential.
403 * Assumes context list (nm_lock) is locked
406 nfs_gss_clnt_ctx_failover(struct nfsreq
*req
)
408 struct nfsmount
*nmp
= req
->r_nmp
;
409 struct nfs_gss_clnt_ctx
*cp
;
/* Allocate a zeroed dummy context.
 * NOTE(review): the allocation-failure check (original ~line 413-414) and the
 * microtime() call filling 'now' (original ~line 420) were dropped by the
 * extraction — confirm against the original source. */
412 MALLOC(cp
, struct nfs_gss_clnt_ctx
*, sizeof(*cp
), M_TEMP
, M_WAITOK
|M_ZERO
);
/* Mark the context as sec=sys fallback and take a reference on the cred. */
416 cp
->gss_clnt_service
= RPCSEC_GSS_SVC_SYS
;
417 cp
->gss_clnt_cred
= req
->r_cred
;
418 kauth_cred_ref(cp
->gss_clnt_cred
);
419 cp
->gss_clnt_mtx
= lck_mtx_alloc_init(nfs_gss_clnt_grp
, LCK_ATTR_NULL
);
/* Stamp creation time so callers can expire it after GSS_CLNT_SYS_VALID. */
421 cp
->gss_clnt_ctime
= now
.tv_sec
; // time stamp
/* Reference the context for this request and link it onto the mount's list. */
422 nfs_gss_clnt_ctx_ref(req
, cp
);
423 TAILQ_INSERT_TAIL(&nmp
->nm_gsscl
, cp
, gss_clnt_entries
);
429 * Inserts an RPCSEC_GSS credential into an RPC header.
430 * After the credential is inserted, the code continues
431 * to build the verifier which contains a signed checksum
435 nfs_gss_clnt_cred_put(struct nfsreq
*req
, struct nfsm_chain
*nmc
, mbuf_t args
)
437 struct nfs_gss_clnt_ctx
*cp
;
440 int slpflag
, recordmark
= 0;
441 int start
, len
, offset
= 0;
443 struct nfsm_chain nmc_tmp
;
445 u_char tokbuf
[KRB5_SZ_TOKMAX(MAX_DIGEST
)];
446 u_char cksum
[MAX_DIGEST
];
452 slpflag
|= (NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
453 recordmark
= (req
->r_nmp
->nm_sotype
== SOCK_STREAM
);
456 if (req
->r_gss_ctx
== NULL
) {
458 * Find the context for this user.
459 * If no context is found, one will
462 error
= nfs_gss_clnt_ctx_find(req
);
469 * If it's a dummy context for a user that's using
470 * a fallback to sec=sys, then just return an error
471 * so rpchead can encode an RPCAUTH_UNIX cred.
473 if (cp
->gss_clnt_service
== RPCSEC_GSS_SVC_SYS
) {
475 * The dummy context is valid for just
476 * GSS_CLNT_SYS_VALID seconds. If the context
477 * is older than this, mark it invalid and try
478 * again to get a real one.
480 lck_mtx_lock(cp
->gss_clnt_mtx
);
482 if (now
.tv_sec
> cp
->gss_clnt_ctime
+ GSS_CLNT_SYS_VALID
) {
483 cp
->gss_clnt_flags
|= GSS_CTX_INVAL
;
484 lck_mtx_unlock(cp
->gss_clnt_mtx
);
485 nfs_gss_clnt_ctx_unref(req
);
488 lck_mtx_unlock(cp
->gss_clnt_mtx
);
493 * If the context thread isn't null, then the context isn't
494 * yet complete and is for the exclusive use of the thread
495 * doing the context setup. Wait until the context thread
498 lck_mtx_lock(cp
->gss_clnt_mtx
);
499 if (cp
->gss_clnt_thread
&& cp
->gss_clnt_thread
!= current_thread()) {
500 cp
->gss_clnt_flags
|= GSS_NEEDCTX
;
501 msleep(cp
, cp
->gss_clnt_mtx
, slpflag
| PDROP
, "ctxwait", NULL
);
503 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0)))
505 nfs_gss_clnt_ctx_unref(req
);
508 lck_mtx_unlock(cp
->gss_clnt_mtx
);
510 ki
= &cp
->gss_clnt_kinfo
;
511 if (cp
->gss_clnt_flags
& GSS_CTX_COMPLETE
) {
513 * Get a sequence number for this request.
514 * Check whether the oldest request in the window is complete.
515 * If it's still pending, then wait until it's done before
516 * we allocate a new sequence number and allow this request
519 lck_mtx_lock(cp
->gss_clnt_mtx
);
520 while (win_getbit(cp
->gss_clnt_seqbits
,
521 ((cp
->gss_clnt_seqnum
- cp
->gss_clnt_seqwin
) + 1) % cp
->gss_clnt_seqwin
)) {
522 cp
->gss_clnt_flags
|= GSS_NEEDSEQ
;
523 msleep(cp
, cp
->gss_clnt_mtx
, slpflag
| PDROP
, "seqwin", NULL
);
525 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
528 lck_mtx_lock(cp
->gss_clnt_mtx
);
529 if (cp
->gss_clnt_flags
& GSS_CTX_INVAL
) {
530 /* Renewed while while we were waiting */
531 lck_mtx_unlock(cp
->gss_clnt_mtx
);
532 nfs_gss_clnt_ctx_unref(req
);
536 seqnum
= ++cp
->gss_clnt_seqnum
;
537 win_setbit(cp
->gss_clnt_seqbits
, seqnum
% cp
->gss_clnt_seqwin
);
538 lck_mtx_unlock(cp
->gss_clnt_mtx
);
540 MALLOC(gsp
, struct gss_seq
*, sizeof(*gsp
), M_TEMP
, M_WAITOK
|M_ZERO
);
543 gsp
->gss_seqnum
= seqnum
;
544 SLIST_INSERT_HEAD(&req
->r_gss_seqlist
, gsp
, gss_seqnext
);
547 /* Insert the credential */
548 nfsm_chain_add_32(error
, nmc
, RPCSEC_GSS
);
549 nfsm_chain_add_32(error
, nmc
, 5 * NFSX_UNSIGNED
+ cp
->gss_clnt_handle_len
);
550 nfsm_chain_add_32(error
, nmc
, RPCSEC_GSS_VERS_1
);
551 nfsm_chain_add_32(error
, nmc
, cp
->gss_clnt_proc
);
552 nfsm_chain_add_32(error
, nmc
, seqnum
);
553 nfsm_chain_add_32(error
, nmc
, cp
->gss_clnt_service
);
554 nfsm_chain_add_32(error
, nmc
, cp
->gss_clnt_handle_len
);
555 if (cp
->gss_clnt_handle_len
> 0) {
556 if (cp
->gss_clnt_handle
== NULL
)
558 nfsm_chain_add_opaque(error
, nmc
, cp
->gss_clnt_handle
, cp
->gss_clnt_handle_len
);
563 * Now add the verifier
565 if (cp
->gss_clnt_proc
== RPCSEC_GSS_INIT
||
566 cp
->gss_clnt_proc
== RPCSEC_GSS_CONTINUE_INIT
) {
568 * If the context is still being created
569 * then use a null verifier.
571 nfsm_chain_add_32(error
, nmc
, RPCAUTH_NULL
); // flavor
572 nfsm_chain_add_32(error
, nmc
, 0); // length
573 nfsm_chain_build_done(error
, nmc
);
575 nfs_gss_append_chain(nmc
, args
);
579 offset
= recordmark
? NFSX_UNSIGNED
: 0; // record mark
580 nfsm_chain_build_done(error
, nmc
);
581 nfs_gss_cksum_chain(ki
, nmc
, ALG_MIC(ki
), offset
, 0, cksum
);
583 toklen
= nfs_gss_token_put(ki
, ALG_MIC(ki
), tokbuf
, 1, 0, cksum
);
584 nfsm_chain_add_32(error
, nmc
, RPCSEC_GSS
); // flavor
585 nfsm_chain_add_32(error
, nmc
, toklen
); // length
586 nfsm_chain_add_opaque(error
, nmc
, tokbuf
, toklen
);
587 nfsm_chain_build_done(error
, nmc
);
592 * Now we may have to compute integrity or encrypt the call args
593 * per RFC 2203 Section 5.3.2
595 switch (cp
->gss_clnt_service
) {
596 case RPCSEC_GSS_SVC_NONE
:
597 nfs_gss_append_chain(nmc
, args
);
599 case RPCSEC_GSS_SVC_INTEGRITY
:
600 len
= nfs_gss_mchain_length(args
); // Find args length
601 req
->r_gss_arglen
= len
; // Stash the args len
602 len
+= NFSX_UNSIGNED
; // Add seqnum length
603 nfsm_chain_add_32(error
, nmc
, len
); // and insert it
604 start
= nfsm_chain_offset(nmc
);
605 nfsm_chain_add_32(error
, nmc
, seqnum
); // Insert seqnum
606 req
->r_gss_argoff
= nfsm_chain_offset(nmc
); // Offset to args
607 nfsm_chain_build_done(error
, nmc
);
610 nfs_gss_append_chain(nmc
, args
); // Append the args mbufs
612 /* Now compute a checksum over the seqnum + args */
613 nfs_gss_cksum_chain(ki
, nmc
, ALG_MIC(ki
), start
, len
, cksum
);
615 /* Insert it into a token and append to the request */
616 toklen
= nfs_gss_token_put(ki
, ALG_MIC(ki
), tokbuf
, 1, 0, cksum
);
617 nfsm_chain_finish_mbuf(error
, nmc
); // force checksum into new mbuf
618 nfsm_chain_add_32(error
, nmc
, toklen
);
619 nfsm_chain_add_opaque(error
, nmc
, tokbuf
, toklen
);
620 nfsm_chain_build_done(error
, nmc
);
622 case RPCSEC_GSS_SVC_PRIVACY
:
623 /* Prepend a new mbuf with the confounder & sequence number */
624 nfsm_chain_build_alloc_init(error
, &nmc_tmp
, 3 * NFSX_UNSIGNED
);
625 nfsm_chain_add_32(error
, &nmc_tmp
, random()); // confounder bytes 1-4
626 nfsm_chain_add_32(error
, &nmc_tmp
, random()); // confounder bytes 4-8
627 nfsm_chain_add_32(error
, &nmc_tmp
, seqnum
);
628 nfsm_chain_build_done(error
, &nmc_tmp
);
631 nfs_gss_append_chain(&nmc_tmp
, args
); // Append the args mbufs
633 len
= nfs_gss_mchain_length(args
); // Find args length
634 len
+= 3 * NFSX_UNSIGNED
; // add confounder & seqnum
635 req
->r_gss_arglen
= len
; // Stash length
638 * Append a pad trailer - per RFC 1964 section 1.2.2.3
639 * Since XDR data is always 32-bit aligned, it
640 * needs to be padded either by 4 bytes or 8 bytes.
642 nfsm_chain_finish_mbuf(error
, &nmc_tmp
); // force padding into new mbuf
644 nfsm_chain_add_32(error
, &nmc_tmp
, 0x04040404);
645 len
+= NFSX_UNSIGNED
;
647 nfsm_chain_add_32(error
, &nmc_tmp
, 0x08080808);
648 nfsm_chain_add_32(error
, &nmc_tmp
, 0x08080808);
649 len
+= 2 * NFSX_UNSIGNED
;
651 nfsm_chain_build_done(error
, &nmc_tmp
);
653 /* Now compute a checksum over the confounder + seqnum + args */
654 nfs_gss_cksum_chain(ki
, &nmc_tmp
, ALG_WRAP(ki
), 0, len
, cksum
);
656 /* Insert it into a token */
657 toklen
= nfs_gss_token_put(ki
, ALG_WRAP(ki
), tokbuf
, 1, len
, cksum
);
658 nfsm_chain_add_32(error
, nmc
, toklen
+ len
); // token + args length
659 nfsm_chain_add_opaque_nopad(error
, nmc
, tokbuf
, toklen
);
660 req
->r_gss_argoff
= nfsm_chain_offset(nmc
); // Stash offset
661 nfsm_chain_build_done(error
, nmc
);
664 nfs_gss_append_chain(nmc
, nmc_tmp
.nmc_mhead
); // Append the args mbufs
666 /* Finally, encrypt the args */
667 nfs_gss_encrypt_chain(ki
, &nmc_tmp
, 0, len
, DES_ENCRYPT
);
669 /* Add null XDR pad if the ASN.1 token misaligned the data */
670 pad
= nfsm_pad(toklen
+ len
);
672 nfsm_chain_add_opaque_nopad(error
, nmc
, iv0
, pad
);
673 nfsm_chain_build_done(error
, nmc
);
682 * When receiving a reply, the client checks the verifier
683 * returned by the server. Check that the verifier is the
684 * correct type, then extract the sequence number checksum
685 * from the token in the credential and compare it with a
686 * computed checksum of the sequence number in the request
690 nfs_gss_clnt_verf_get(
692 struct nfsm_chain
*nmc
,
695 uint32_t *accepted_statusp
)
697 u_char tokbuf
[KRB5_SZ_TOKMAX(MAX_DIGEST
)];
698 u_char cksum1
[MAX_DIGEST
], cksum2
[MAX_DIGEST
];
700 struct nfs_gss_clnt_ctx
*cp
= req
->r_gss_ctx
;
701 struct nfsm_chain nmc_tmp
;
703 uint32_t reslen
, start
, cksumlen
, toklen
;
705 gss_key_info
*ki
= &cp
->gss_clnt_kinfo
;
707 reslen
= cksumlen
= 0;
708 *accepted_statusp
= 0;
711 return (NFSERR_EAUTH
);
713 * If it's not an RPCSEC_GSS verifier, then it has to
714 * be a null verifier that resulted from either
715 * a CONTINUE_NEEDED reply during context setup or
716 * from the reply to an AUTH_UNIX call from a dummy
717 * context that resulted from a fallback to sec=sys.
719 if (verftype
!= RPCSEC_GSS
) {
720 if (verftype
!= RPCAUTH_NULL
)
721 return (NFSERR_EAUTH
);
722 if (cp
->gss_clnt_flags
& GSS_CTX_COMPLETE
&&
723 cp
->gss_clnt_service
!= RPCSEC_GSS_SVC_SYS
)
724 return (NFSERR_EAUTH
);
726 nfsm_chain_adv(error
, nmc
, nfsm_rndup(verflen
));
727 nfsm_chain_get_32(error
, nmc
, *accepted_statusp
);
732 * If we received an RPCSEC_GSS verifier but the
733 * context isn't yet complete, then it must be
734 * the context complete message from the server.
735 * The verifier will contain an encrypted checksum
736 * of the window but we don't have the session key
737 * yet so we can't decrypt it. Stash the verifier
738 * and check it later in nfs_gss_clnt_ctx_init() when
739 * the context is complete.
741 if (!(cp
->gss_clnt_flags
& GSS_CTX_COMPLETE
)) {
742 MALLOC(cp
->gss_clnt_verf
, u_char
*, verflen
, M_TEMP
, M_WAITOK
|M_ZERO
);
743 if (cp
->gss_clnt_verf
== NULL
)
745 nfsm_chain_get_opaque(error
, nmc
, verflen
, cp
->gss_clnt_verf
);
746 nfsm_chain_get_32(error
, nmc
, *accepted_statusp
);
750 if (verflen
!= KRB5_SZ_TOKEN(ki
->hash_len
))
751 return (NFSERR_EAUTH
);
754 * Get the 8 octet sequence number
755 * checksum out of the verifier token.
757 nfsm_chain_get_opaque(error
, nmc
, verflen
, tokbuf
);
760 error
= nfs_gss_token_get(ki
, ALG_MIC(ki
), tokbuf
, 0, NULL
, cksum1
);
765 * Search the request sequence numbers for this reply, starting
766 * with the most recent, looking for a checksum that matches
767 * the one in the verifier returned by the server.
769 SLIST_FOREACH(gsp
, &req
->r_gss_seqlist
, gss_seqnext
) {
770 nfs_gss_cksum_rep(ki
, gsp
->gss_seqnum
, cksum2
);
771 if (bcmp(cksum1
, cksum2
, HASHLEN(ki
)) == 0)
775 return (NFSERR_EAUTH
);
778 * Get the RPC accepted status
780 nfsm_chain_get_32(error
, nmc
, *accepted_statusp
);
781 if (*accepted_statusp
!= RPC_SUCCESS
)
785 * Now we may have to check integrity or decrypt the results
786 * per RFC 2203 Section 5.3.2
788 switch (cp
->gss_clnt_service
) {
789 case RPCSEC_GSS_SVC_NONE
:
792 case RPCSEC_GSS_SVC_INTEGRITY
:
794 * Here's what we expect in the integrity results:
796 * - length of seq num + results (4 bytes)
797 * - sequence number (4 bytes)
798 * - results (variable bytes)
799 * - length of checksum token (37)
800 * - checksum of seqnum + results (37 bytes)
802 nfsm_chain_get_32(error
, nmc
, reslen
); // length of results
803 if (reslen
> NFS_MAXPACKET
) {
808 /* Compute a checksum over the sequence number + results */
809 start
= nfsm_chain_offset(nmc
);
810 nfs_gss_cksum_chain(ki
, nmc
, ALG_MIC(ki
), start
, reslen
, cksum1
);
813 * Get the sequence number prepended to the results
814 * and compare it against the list in the request.
816 nfsm_chain_get_32(error
, nmc
, seqnum
);
817 SLIST_FOREACH(gsp
, &req
->r_gss_seqlist
, gss_seqnext
) {
818 if (seqnum
== gsp
->gss_seqnum
)
827 * Advance to the end of the results and
828 * fetch the checksum computed by the server.
831 reslen
-= NFSX_UNSIGNED
; // already skipped seqnum
832 nfsm_chain_adv(error
, &nmc_tmp
, reslen
); // skip over the results
833 nfsm_chain_get_32(error
, &nmc_tmp
, cksumlen
); // length of checksum
834 if (cksumlen
!= KRB5_SZ_TOKEN(ki
->hash_len
)) {
838 nfsm_chain_get_opaque(error
, &nmc_tmp
, cksumlen
, tokbuf
);
841 error
= nfs_gss_token_get(ki
, ALG_MIC(ki
), tokbuf
, 0, NULL
, cksum2
);
845 /* Verify that the checksums are the same */
846 if (bcmp(cksum1
, cksum2
, HASHLEN(ki
)) != 0) {
851 case RPCSEC_GSS_SVC_PRIVACY
:
853 * Here's what we expect in the privacy results:
855 * - length of confounder + seq num + token + results
856 * - wrap token (37-40 bytes)
857 * - confounder (8 bytes)
858 * - sequence number (4 bytes)
859 * - results (encrypted)
861 nfsm_chain_get_32(error
, nmc
, reslen
); // length of results
862 if (reslen
> NFS_MAXPACKET
) {
867 /* Get the token that prepends the encrypted results */
868 nfsm_chain_get_opaque(error
, nmc
, KRB5_SZ_TOKMAX(ki
->hash_len
), tokbuf
);
871 error
= nfs_gss_token_get(ki
, ALG_WRAP(ki
), tokbuf
, 0,
875 nfsm_chain_reverse(nmc
, nfsm_pad(toklen
));
876 reslen
-= toklen
; // size of confounder + seqnum + results
878 /* decrypt the confounder + sequence number + results */
879 start
= nfsm_chain_offset(nmc
);
880 nfs_gss_encrypt_chain(ki
, nmc
, start
, reslen
, DES_DECRYPT
);
882 /* Compute a checksum over the confounder + sequence number + results */
883 nfs_gss_cksum_chain(ki
, nmc
, ALG_WRAP(ki
), start
, reslen
, cksum2
);
885 /* Verify that the checksums are the same */
886 if (bcmp(cksum1
, cksum2
, HASHLEN(ki
)) != 0) {
891 nfsm_chain_adv(error
, nmc
, 8); // skip over the confounder
894 * Get the sequence number prepended to the results
895 * and compare it against the list in the request.
897 nfsm_chain_get_32(error
, nmc
, seqnum
);
898 SLIST_FOREACH(gsp
, &req
->r_gss_seqlist
, gss_seqnext
) {
899 if (seqnum
== gsp
->gss_seqnum
)
914 * An RPCSEC_GSS request with no integrity or privacy consists
915 * of just the header mbufs followed by the arg mbufs.
917 * However, integrity or privacy both trailer mbufs to the args,
918 * which means we have to do some work to restore the arg mbuf
919 * chain to its previous state in case we need to retransmit.
921 * The location and length of the args is marked by two fields
922 * in the request structure: r_gss_argoff and r_gss_arglen,
923 * which are stashed when the NFS request is built.
926 nfs_gss_clnt_args_restore(struct nfsreq
*req
)
928 struct nfs_gss_clnt_ctx
*cp
= req
->r_gss_ctx
;
929 struct nfsm_chain mchain
, *nmc
= &mchain
;
933 return (NFSERR_EAUTH
);
935 if ((cp
->gss_clnt_flags
& GSS_CTX_COMPLETE
) == 0)
938 nfsm_chain_dissect_init(error
, nmc
, req
->r_mhead
); // start at RPC header
939 nfsm_chain_adv(error
, nmc
, req
->r_gss_argoff
); // advance to args
943 switch (cp
->gss_clnt_service
) {
944 case RPCSEC_GSS_SVC_NONE
:
947 case RPCSEC_GSS_SVC_INTEGRITY
:
949 * All we have to do here is remove the appended checksum mbufs.
950 * We know that the checksum starts in a new mbuf beyond the end
953 nfsm_chain_adv(error
, nmc
, req
->r_gss_arglen
); // adv to last args mbuf
957 mbuf_freem(mbuf_next(nmc
->nmc_mcur
)); // free the cksum mbuf
958 error
= mbuf_setnext(nmc
->nmc_mcur
, NULL
);
960 case RPCSEC_GSS_SVC_PRIVACY
:
962 * The args are encrypted along with prepended confounders and seqnum.
963 * First we decrypt, the confounder, seqnum and args then skip to the
964 * final mbuf of the args.
965 * The arglen includes 8 bytes of confounder and 4 bytes of seqnum.
966 * Finally, we remove between 4 and 8 bytes of encryption padding
967 * as well as any alignment padding in the trailing mbuf.
969 len
= req
->r_gss_arglen
;
970 len
+= len
% 8 > 0 ? 4 : 8; // add DES padding length
971 nfs_gss_encrypt_chain(&cp
->gss_clnt_kinfo
, nmc
,
972 req
->r_gss_argoff
, len
, DES_DECRYPT
);
973 nfsm_chain_adv(error
, nmc
, req
->r_gss_arglen
);
976 mbuf_freem(mbuf_next(nmc
->nmc_mcur
)); // free the pad mbuf
977 error
= mbuf_setnext(nmc
->nmc_mcur
, NULL
);
985 * This function sets up a new context on the client.
986 * Context setup alternates upcalls to the gssd with NFS nullproc calls
987 * to the server. Each of these calls exchanges an opaque token, obtained
988 * via the gssd's calls into the GSS-API on either the client or the server.
989 * This cycle of calls ends when the client's upcall to the gssd and the
990 * server's response both return GSS_S_COMPLETE. At this point, the client
991 * should have its session key and a handle that it can use to refer to its
992 * new context on the server.
995 nfs_gss_clnt_ctx_init(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
)
997 struct nfsmount
*nmp
= req
->r_nmp
;
998 int client_complete
= 0;
999 int server_complete
= 0;
1000 u_char cksum1
[MAX_DIGEST
], cksum2
[MAX_DIGEST
];
1003 gss_key_info
*ki
= &cp
->gss_clnt_kinfo
;
1005 /* Initialize a new client context */
1007 cp
->gss_clnt_svcname
= nfs_gss_clnt_svcname(nmp
, &cp
->gss_clnt_svcnt
, &cp
->gss_clnt_svcnamlen
);
1008 if (cp
->gss_clnt_svcname
== NULL
) {
1009 error
= NFSERR_EAUTH
;
1013 cp
->gss_clnt_proc
= RPCSEC_GSS_INIT
;
1015 cp
->gss_clnt_service
=
1016 req
->r_auth
== RPCAUTH_KRB5
? RPCSEC_GSS_SVC_NONE
:
1017 req
->r_auth
== RPCAUTH_KRB5I
? RPCSEC_GSS_SVC_INTEGRITY
:
1018 req
->r_auth
== RPCAUTH_KRB5P
? RPCSEC_GSS_SVC_PRIVACY
: 0;
1020 cp
->gss_clnt_gssd_flags
= (nfs_single_des
? GSSD_NFS_1DES
: 0);
1022 * Now loop around alternating gss_init_sec_context and
1023 * gss_accept_sec_context upcalls to the gssd on the client
1024 * and server side until the context is complete - or fails.
1029 /* Upcall to the gss_init_sec_context in the gssd */
1030 error
= nfs_gss_clnt_gssd_upcall(req
, cp
);
1034 if (cp
->gss_clnt_major
== GSS_S_COMPLETE
) {
1035 client_complete
= 1;
1036 if (server_complete
)
1038 } else if (cp
->gss_clnt_major
!= GSS_S_CONTINUE_NEEDED
) {
1039 error
= NFSERR_EAUTH
;
1044 * Pass the token to the server.
1046 error
= nfs_gss_clnt_ctx_callserver(req
, cp
);
1048 if (error
== ENEEDAUTH
&& cp
->gss_clnt_proc
== RPCSEC_GSS_INIT
&&
1049 (cp
->gss_clnt_gssd_flags
& (GSSD_RESTART
| GSSD_NFS_1DES
)) == 0) {
1050 NFS_GSS_DBG("Retrying with single DES for req %p\n", req
);
1051 cp
->gss_clnt_gssd_flags
= (GSSD_RESTART
| GSSD_NFS_1DES
);
1052 if (cp
->gss_clnt_token
)
1053 FREE(cp
->gss_clnt_token
, M_TEMP
);
1054 cp
->gss_clnt_token
= NULL
;
1055 cp
->gss_clnt_tokenlen
= 0;
1058 // Reset flags, if error = ENEEDAUTH we will try 3des again
1059 cp
->gss_clnt_gssd_flags
= 0;
1062 if (cp
->gss_clnt_major
== GSS_S_COMPLETE
) {
1063 server_complete
= 1;
1064 if (client_complete
)
1066 } else if (cp
->gss_clnt_major
!= GSS_S_CONTINUE_NEEDED
) {
1067 error
= NFSERR_EAUTH
;
1071 cp
->gss_clnt_proc
= RPCSEC_GSS_CONTINUE_INIT
;
1075 * The context is apparently established successfully
1077 lck_mtx_lock(cp
->gss_clnt_mtx
);
1078 cp
->gss_clnt_flags
|= GSS_CTX_COMPLETE
;
1079 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1080 cp
->gss_clnt_proc
= RPCSEC_GSS_DATA
;
1082 cp
->gss_clnt_ctime
= now
.tv_sec
; // time stamp
1086 * Compute checksum of the server's window
1088 nfs_gss_cksum_rep(ki
, cp
->gss_clnt_seqwin
, cksum1
);
1091 * and see if it matches the one in the
1092 * verifier the server returned.
1094 error
= nfs_gss_token_get(ki
, ALG_MIC(ki
), cp
->gss_clnt_verf
, 0,
1096 FREE(cp
->gss_clnt_verf
, M_TEMP
);
1097 cp
->gss_clnt_verf
= NULL
;
1099 if (error
|| bcmp(cksum1
, cksum2
, HASHLEN(ki
)) != 0) {
1100 error
= NFSERR_EAUTH
;
1105 * Set an initial sequence number somewhat randomized.
1106 * Start small so we don't overflow GSS_MAXSEQ too quickly.
1107 * Add the size of the sequence window so seqbits arithmetic
1108 * doesn't go negative.
1110 cp
->gss_clnt_seqnum
= (random() & 0xffff) + cp
->gss_clnt_seqwin
;
1113 * Allocate a bitmap to keep track of which requests
1114 * are pending within the sequence number window.
1116 MALLOC(cp
->gss_clnt_seqbits
, uint32_t *,
1117 nfsm_rndup((cp
->gss_clnt_seqwin
+ 7) / 8), M_TEMP
, M_WAITOK
|M_ZERO
);
1118 if (cp
->gss_clnt_seqbits
== NULL
)
1119 error
= NFSERR_EAUTH
;
1122 * If the error is ENEEDAUTH we're not done, so no need
1123 * to wake up other threads again. This thread will retry in
1124 * the find or renew routines.
1126 if (error
== ENEEDAUTH
)
1130 * If there's an error, just mark it as invalid.
1131 * It will be removed when the reference count
1134 lck_mtx_lock(cp
->gss_clnt_mtx
);
1136 cp
->gss_clnt_flags
|= GSS_CTX_INVAL
;
1139 * Wake any threads waiting to use the context
1141 cp
->gss_clnt_thread
= NULL
;
1142 if (cp
->gss_clnt_flags
& GSS_NEEDCTX
) {
1143 cp
->gss_clnt_flags
&= ~GSS_NEEDCTX
;
1146 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1152 * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
1153 * But if there's a failure in trying to establish the context it keeps
1154 * retrying at progressively longer intervals in case the failure is
1155 * due to some transient condition. For instance, the server might be
1156 * failing the context setup because directory services is not coming
1157 * up in a timely fashion.
1160 nfs_gss_clnt_ctx_init_retry(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
)
1162 struct nfsmount
*nmp
= req
->r_nmp
;
1167 int timeo
= NFS_TRYLATERDEL
;
1174 /* For an "intr" mount allow a signal to interrupt the retries */
1175 slpflag
= (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
1177 while ((error
= nfs_gss_clnt_ctx_init(req
, cp
)) == ENEEDAUTH
) {
1179 waituntil
= now
.tv_sec
+ timeo
;
1180 while (now
.tv_sec
< waituntil
) {
1181 tsleep(NULL
, PSOCK
| slpflag
, "nfs_gss_clnt_ctx_init_retry", hz
);
1183 error
= nfs_sigintr(req
->r_nmp
, req
, current_thread(), 0);
1190 /* If it's a soft mount just give up after a while */
1191 if (NMFLAG(nmp
, SOFT
) && (retries
> nmp
->nm_retry
)) {
1201 return 0; // success
1204 * Give up on this context
1206 lck_mtx_lock(cp
->gss_clnt_mtx
);
1207 cp
->gss_clnt_flags
|= GSS_CTX_INVAL
;
1210 * Wake any threads waiting to use the context
1212 cp
->gss_clnt_thread
= NULL
;
1213 if (cp
->gss_clnt_flags
& GSS_NEEDCTX
) {
1214 cp
->gss_clnt_flags
&= ~GSS_NEEDCTX
;
1217 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1223 * Call the NFS server using a null procedure for context setup.
1224 * Even though it's a null procedure and nominally has no arguments
1225 * RFC 2203 requires that the GSS-API token be passed as an argument
1226 * and received as a reply.
1229 nfs_gss_clnt_ctx_callserver(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
)
1231 struct nfsm_chain nmreq
, nmrep
;
1232 int error
= 0, status
;
1237 nfsm_chain_null(&nmreq
);
1238 nfsm_chain_null(&nmrep
);
1239 sz
= NFSX_UNSIGNED
+ nfsm_rndup(cp
->gss_clnt_tokenlen
);
1240 nfsm_chain_build_alloc_init(error
, &nmreq
, sz
);
1241 nfsm_chain_add_32(error
, &nmreq
, cp
->gss_clnt_tokenlen
);
1242 if (cp
->gss_clnt_tokenlen
> 0)
1243 nfsm_chain_add_opaque(error
, &nmreq
, cp
->gss_clnt_token
, cp
->gss_clnt_tokenlen
);
1244 nfsm_chain_build_done(error
, &nmreq
);
1248 /* Call the server */
1249 error
= nfs_request_gss(req
->r_nmp
->nm_mountp
, &nmreq
, req
->r_thread
, req
->r_cred
,
1250 (req
->r_flags
& R_OPTMASK
), cp
, &nmrep
, &status
);
1251 if (cp
->gss_clnt_token
!= NULL
) {
1252 FREE(cp
->gss_clnt_token
, M_TEMP
);
1253 cp
->gss_clnt_token
= NULL
;
1260 /* Get the server's reply */
1262 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_handle_len
);
1263 if (cp
->gss_clnt_handle
!= NULL
) {
1264 FREE(cp
->gss_clnt_handle
, M_TEMP
);
1265 cp
->gss_clnt_handle
= NULL
;
1267 if (cp
->gss_clnt_handle_len
> 0) {
1268 MALLOC(cp
->gss_clnt_handle
, u_char
*, cp
->gss_clnt_handle_len
, M_TEMP
, M_WAITOK
);
1269 if (cp
->gss_clnt_handle
== NULL
) {
1273 nfsm_chain_get_opaque(error
, &nmrep
, cp
->gss_clnt_handle_len
, cp
->gss_clnt_handle
);
1275 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_major
);
1276 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_minor
);
1277 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_seqwin
);
1278 nfsm_chain_get_32(error
, &nmrep
, cp
->gss_clnt_tokenlen
);
1281 if (cp
->gss_clnt_tokenlen
> 0) {
1282 MALLOC(cp
->gss_clnt_token
, u_char
*, cp
->gss_clnt_tokenlen
, M_TEMP
, M_WAITOK
);
1283 if (cp
->gss_clnt_token
== NULL
) {
1287 nfsm_chain_get_opaque(error
, &nmrep
, cp
->gss_clnt_tokenlen
, cp
->gss_clnt_token
);
1291 * Make sure any unusual errors are expanded and logged by gssd
1293 if (cp
->gss_clnt_major
!= GSS_S_COMPLETE
&&
1294 cp
->gss_clnt_major
!= GSS_S_CONTINUE_NEEDED
) {
1295 char who
[] = "server";
1296 char unknown
[] = "<unknown>";
1298 (void) mach_gss_log_error(
1300 !req
->r_nmp
? unknown
:
1301 vfs_statfs(req
->r_nmp
->nm_mountp
)->f_mntfromname
,
1302 kauth_cred_getuid(cp
->gss_clnt_cred
),
1305 cp
->gss_clnt_minor
);
1309 nfsm_chain_cleanup(&nmreq
);
1310 nfsm_chain_cleanup(&nmrep
);
1316 * We construct the service principal as a gss hostbased service principal of
1317 * the form nfs@<server>, unless the servers principal was passed down in the
1318 * mount arguments. If the arguments don't specify the service principal, the
1319 * server name is extracted the location passed in the mount argument if
1320 * available. Otherwise assume a format of <server>:<path> in the
1321 * mntfromname. We don't currently support url's or other bizarre formats like
1322 * path@server. Mount_url will convert the nfs url into <server>:<path> when
1323 * calling mount, so this works out well in practice.
1328 nfs_gss_clnt_svcname(struct nfsmount
*nmp
, gssd_nametype
*nt
, uint32_t *len
)
1330 char *svcname
, *d
, *server
;
1336 if (nmp
->nm_sprinc
) {
1337 *len
= strlen(nmp
->nm_sprinc
) + 1;
1338 MALLOC(svcname
, char *, *len
, M_TEMP
, M_WAITOK
);
1339 *nt
= GSSD_HOSTBASED
;
1340 if (svcname
== NULL
)
1342 strlcpy(svcname
, nmp
->nm_sprinc
, *len
);
1344 return ((uint8_t *)svcname
);
1347 *nt
= GSSD_HOSTBASED
;
1348 if (nmp
->nm_locations
.nl_numlocs
&& !(NFS_GSS_ISDBG
&& (NFS_DEBUG_FLAGS
& 0x1))) {
1349 lindx
= nmp
->nm_locations
.nl_current
.nli_loc
;
1350 sindx
= nmp
->nm_locations
.nl_current
.nli_serv
;
1351 server
= nmp
->nm_locations
.nl_locations
[lindx
]->nl_servers
[sindx
]->ns_name
;
1352 *len
= (uint32_t)strlen(server
);
1354 /* Older binaries using older mount args end up here */
1355 server
= vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
;
1356 NFS_GSS_DBG("nfs getting gss svcname from %s\n", server
);
1357 d
= strchr(server
, ':');
1358 *len
= (uint32_t)(d
? (d
- server
) : strlen(server
));
1361 *len
+= 5; /* "nfs@" plus null */
1362 MALLOC(svcname
, char *, *len
, M_TEMP
, M_WAITOK
);
1363 strlcpy(svcname
, "nfs", *len
);
1364 strlcat(svcname
, "@", *len
);
1365 strlcat(svcname
, server
, *len
);
1366 NFS_GSS_DBG("nfs svcname = %s\n", svcname
);
1368 return ((uint8_t *)svcname
);
1372 * Get a mach port to talk to gssd.
1373 * gssd lives in the root bootstrap, so we call gssd's lookup routine
1374 * to get a send right to talk to a new gssd instance that launchd has launched
1375 * based on the cred's uid and audit session id.
1379 nfs_gss_clnt_get_upcall_port(kauth_cred_t credp
)
1381 mach_port_t gssd_host_port
, uc_port
= IPC_PORT_NULL
;
1386 kr
= host_get_gssd_port(host_priv_self(), &gssd_host_port
);
1387 if (kr
!= KERN_SUCCESS
) {
1388 printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr
, kr
);
1389 return (IPC_PORT_NULL
);
1391 if (!IPC_PORT_VALID(gssd_host_port
)) {
1392 printf("nfs_gss_get_upcall_port: gssd port not valid\n");
1393 return (IPC_PORT_NULL
);
1396 asid
= kauth_cred_getasid(credp
);
1397 uid
= kauth_cred_getauid(credp
);
1398 if (uid
== AU_DEFAUDITID
)
1399 uid
= kauth_cred_getuid(credp
);
1400 kr
= mach_gss_lookup(gssd_host_port
, uid
, asid
, &uc_port
);
1401 if (kr
!= KERN_SUCCESS
)
1402 printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n", kr
, kr
);
1408 * Make an upcall to the gssd using Mach RPC
1409 * The upcall is made using a host special port.
1410 * This allows launchd to fire up the gssd in the
1411 * user's session. This is important, since gssd
1412 * must have access to the user's credential cache.
1415 nfs_gss_clnt_gssd_upcall(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
)
1418 gssd_byte_buffer okey
= NULL
;
1419 uint32_t skeylen
= 0;
1421 vm_map_copy_t itoken
= NULL
;
1422 gssd_byte_buffer otoken
= NULL
;
1423 mach_msg_type_number_t otokenlen
;
1425 uint8_t *principal
= NULL
;
1427 int32_t nt
= GSSD_STRING_NAME
;
1428 vm_map_copy_t pname
= NULL
;
1429 vm_map_copy_t svcname
= NULL
;
1430 char display_name
[MAX_DISPLAY_STR
] = "";
1432 uint32_t nfs_1des
= (cp
->gss_clnt_gssd_flags
& GSSD_NFS_1DES
);
1433 struct nfsmount
*nmp
;
1436 * NFS currently only supports default principals or
1437 * principals based on the uid of the caller, unless
1438 * the principal to use for the mounting cred was specified
1439 * in the mount argmuments. If the realm to use was specified
1440 * then will send that up as the principal since the realm is
1441 * preceed by an "@" gssd that will try and select the default
1442 * principal for that realm.
1446 if (nmp
== NULL
|| vfs_isforce(nmp
->nm_mountp
) || (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)))
1449 if (cp
->gss_clnt_principal
&& cp
->gss_clnt_prinlen
) {
1450 principal
= cp
->gss_clnt_principal
;
1451 plen
= cp
->gss_clnt_prinlen
;
1452 nt
= cp
->gss_clnt_prinnt
;
1453 } else if (nmp
->nm_principal
&& IS_VALID_CRED(nmp
->nm_mcred
) && req
->r_cred
== nmp
->nm_mcred
) {
1454 plen
= (uint32_t)strlen(nmp
->nm_principal
);
1455 MALLOC(principal
, uint8_t *, plen
, M_TEMP
, M_WAITOK
| M_ZERO
);
1456 if (principal
== NULL
)
1458 bcopy(nmp
->nm_principal
, principal
, plen
);
1459 cp
->gss_clnt_prinnt
= nt
= GSSD_USER
;
1461 else if (nmp
->nm_realm
) {
1462 plen
= (uint32_t)strlen(nmp
->nm_realm
);
1463 principal
= (uint8_t *)nmp
->nm_realm
;
1467 if (!IPC_PORT_VALID(cp
->gss_clnt_mport
)) {
1468 cp
->gss_clnt_mport
= nfs_gss_clnt_get_upcall_port(req
->r_cred
);
1469 if (cp
->gss_clnt_mport
== IPC_PORT_NULL
)
1474 nfs_gss_mach_alloc_buffer(principal
, plen
, &pname
);
1475 if (cp
->gss_clnt_svcnamlen
)
1476 nfs_gss_mach_alloc_buffer(cp
->gss_clnt_svcname
, cp
->gss_clnt_svcnamlen
, &svcname
);
1477 if (cp
->gss_clnt_tokenlen
)
1478 nfs_gss_mach_alloc_buffer(cp
->gss_clnt_token
, cp
->gss_clnt_tokenlen
, &itoken
);
1481 kr
= mach_gss_init_sec_context_v2(
1484 (gssd_byte_buffer
) itoken
, (mach_msg_type_number_t
) cp
->gss_clnt_tokenlen
,
1485 kauth_cred_getuid(cp
->gss_clnt_cred
),
1487 (gssd_byte_buffer
)pname
, (mach_msg_type_number_t
) plen
,
1489 (gssd_byte_buffer
)svcname
, (mach_msg_type_number_t
) cp
->gss_clnt_svcnamlen
,
1491 &cp
->gss_clnt_gssd_flags
,
1492 &cp
->gss_clnt_context
,
1493 &cp
->gss_clnt_cred_handle
,
1495 &okey
, (mach_msg_type_number_t
*) &skeylen
,
1496 &otoken
, &otokenlen
,
1497 cp
->gss_clnt_display
? NULL
: display_name
,
1498 &cp
->gss_clnt_major
,
1499 &cp
->gss_clnt_minor
);
1501 /* Should be cleared and set in gssd ? */
1502 cp
->gss_clnt_gssd_flags
&= ~GSSD_RESTART
;
1503 cp
->gss_clnt_gssd_flags
|= nfs_1des
;
1505 if (kr
!= KERN_SUCCESS
) {
1506 printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr
, kr
);
1507 if (kr
== MIG_SERVER_DIED
&& cp
->gss_clnt_cred_handle
== 0 &&
1508 retry_cnt
++ < NFS_GSS_MACH_MAX_RETRIES
&&
1509 !vfs_isforce(nmp
->nm_mountp
) && (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) == 0) {
1511 nfs_gss_mach_alloc_buffer(principal
, plen
, &pname
);
1512 if (cp
->gss_clnt_svcnamlen
)
1513 nfs_gss_mach_alloc_buffer(cp
->gss_clnt_svcname
, cp
->gss_clnt_svcnamlen
, &svcname
);
1514 if (cp
->gss_clnt_tokenlen
> 0)
1515 nfs_gss_mach_alloc_buffer(cp
->gss_clnt_token
, cp
->gss_clnt_tokenlen
, &itoken
);
1519 host_release_special_port(cp
->gss_clnt_mport
);
1520 cp
->gss_clnt_mport
= IPC_PORT_NULL
;
1524 if (cp
->gss_clnt_display
== NULL
&& *display_name
!= '\0') {
1525 int dlen
= strnlen(display_name
, MAX_DISPLAY_STR
) + 1; /* Add extra byte to include '\0' */
1527 if (dlen
< MAX_DISPLAY_STR
) {
1528 MALLOC(cp
->gss_clnt_display
, char *, dlen
, M_TEMP
, M_WAITOK
);
1529 if (cp
->gss_clnt_display
== NULL
)
1531 bcopy(display_name
, cp
->gss_clnt_display
, dlen
);
1538 * Make sure any unusual errors are expanded and logged by gssd
1540 * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes.
1542 if (cp
->gss_clnt_major
!= GSS_S_COMPLETE
&&
1543 cp
->gss_clnt_major
!= GSS_S_CONTINUE_NEEDED
) {
1544 #define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
1545 char who
[] = "client";
1546 uint32_t gss_error
= GETMAJERROR(cp
->gss_clnt_major
);
1548 (void) mach_gss_log_error(
1550 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1551 kauth_cred_getuid(cp
->gss_clnt_cred
),
1554 cp
->gss_clnt_minor
);
1555 gss_error
= gss_error
? gss_error
: cp
->gss_clnt_major
;
1556 printf("NFS gssd auth failure mount %s for %s major = %d minor = %d\n",
1557 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, cp
->gss_clnt_display
? cp
->gss_clnt_display
: who
,
1558 gss_error
, (int32_t)cp
->gss_clnt_minor
);
1562 if (skeylen
!= SKEYLEN
&& skeylen
!= SKEYLEN3
) {
1563 printf("nfs_gss_clnt_gssd_upcall: bad key length (%d)\n", skeylen
);
1564 vm_map_copy_discard((vm_map_copy_t
) okey
);
1565 vm_map_copy_discard((vm_map_copy_t
) otoken
);
1568 error
= nfs_gss_mach_vmcopyout((vm_map_copy_t
) okey
, skeylen
,
1569 cp
->gss_clnt_kinfo
.skey
);
1571 vm_map_copy_discard((vm_map_copy_t
) otoken
);
1575 error
= gss_key_init(&cp
->gss_clnt_kinfo
, skeylen
);
1580 /* Free context token used as input */
1581 if (cp
->gss_clnt_token
)
1582 FREE(cp
->gss_clnt_token
, M_TEMP
);
1583 cp
->gss_clnt_token
= NULL
;
1584 cp
->gss_clnt_tokenlen
= 0;
1586 if (otokenlen
> 0) {
1587 /* Set context token to gss output token */
1588 MALLOC(cp
->gss_clnt_token
, u_char
*, otokenlen
, M_TEMP
, M_WAITOK
);
1589 if (cp
->gss_clnt_token
== NULL
) {
1590 printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen
);
1591 vm_map_copy_discard((vm_map_copy_t
) otoken
);
1594 error
= nfs_gss_mach_vmcopyout((vm_map_copy_t
) otoken
, otokenlen
, cp
->gss_clnt_token
);
1596 FREE(cp
->gss_clnt_token
, M_TEMP
);
1597 cp
->gss_clnt_token
= NULL
;
1598 return (NFSERR_EAUTH
);
1600 cp
->gss_clnt_tokenlen
= otokenlen
;
1606 if (cp
->gss_clnt_token
)
1607 FREE(cp
->gss_clnt_token
, M_TEMP
);
1608 cp
->gss_clnt_token
= NULL
;
1609 cp
->gss_clnt_tokenlen
= 0;
1611 return (NFSERR_EAUTH
);
1615 * Invoked at the completion of an RPC call that uses an RPCSEC_GSS
1616 * credential. The sequence number window that the server returns
1617 * at context setup indicates the maximum number of client calls that
1618 * can be outstanding on a context. The client maintains a bitmap that
1619 * represents the server's window. Each pending request has a bit set
1620 * in the window bitmap. When a reply comes in or times out, we reset
1621 * the bit in the bitmap and if there are any other threads waiting for
1622 * a context slot we notify the waiting thread(s).
1624 * Note that if a request is retransmitted, it will have a single XID
1625 * but it may be associated with multiple sequence numbers. So we
1626 * may have to reset multiple sequence number bits in the window bitmap.
1629 nfs_gss_clnt_rpcdone(struct nfsreq
*req
)
1631 struct nfs_gss_clnt_ctx
*cp
= req
->r_gss_ctx
;
1632 struct gss_seq
*gsp
, *ngsp
;
1635 if (cp
== NULL
|| !(cp
->gss_clnt_flags
& GSS_CTX_COMPLETE
))
1636 return; // no context - don't bother
1638 * Reset the bit for this request in the
1639 * sequence number window to indicate it's done.
1640 * We do this even if the request timed out.
1642 lck_mtx_lock(cp
->gss_clnt_mtx
);
1643 gsp
= SLIST_FIRST(&req
->r_gss_seqlist
);
1644 if (gsp
&& gsp
->gss_seqnum
> (cp
->gss_clnt_seqnum
- cp
->gss_clnt_seqwin
))
1645 win_resetbit(cp
->gss_clnt_seqbits
,
1646 gsp
->gss_seqnum
% cp
->gss_clnt_seqwin
);
1649 * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries
1651 SLIST_FOREACH_SAFE(gsp
, &req
->r_gss_seqlist
, gss_seqnext
, ngsp
) {
1652 if (++i
> GSS_CLNT_SEQLISTMAX
) {
1653 SLIST_REMOVE(&req
->r_gss_seqlist
, gsp
, gss_seq
, gss_seqnext
);
1659 * If there's a thread waiting for
1660 * the window to advance, wake it up.
1662 if (cp
->gss_clnt_flags
& GSS_NEEDSEQ
) {
1663 cp
->gss_clnt_flags
&= ~GSS_NEEDSEQ
;
1666 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1670 * Create a reference to a context from a request
1671 * and bump the reference count
1674 nfs_gss_clnt_ctx_ref(struct nfsreq
*req
, struct nfs_gss_clnt_ctx
*cp
)
1676 req
->r_gss_ctx
= cp
;
1678 lck_mtx_lock(cp
->gss_clnt_mtx
);
1679 cp
->gss_clnt_refcnt
++;
1680 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1684 * Remove a context reference from a request
1685 * If the reference count drops to zero, and the
1686 * context is invalid, destroy the context
1689 nfs_gss_clnt_ctx_unref(struct nfsreq
*req
)
1691 struct nfsmount
*nmp
= req
->r_nmp
;
1692 struct nfs_gss_clnt_ctx
*cp
= req
->r_gss_ctx
;
1697 req
->r_gss_ctx
= NULL
;
1699 lck_mtx_lock(cp
->gss_clnt_mtx
);
1700 if (--cp
->gss_clnt_refcnt
== 0
1701 && cp
->gss_clnt_flags
& GSS_CTX_INVAL
) {
1702 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1705 lck_mtx_lock(&nmp
->nm_lock
);
1706 nfs_gss_clnt_ctx_remove(nmp
, cp
);
1708 lck_mtx_unlock(&nmp
->nm_lock
);
1712 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1719 nfs_gss_clnt_ctx_remove(struct nfsmount
*nmp
, struct nfs_gss_clnt_ctx
*cp
)
1722 * If dequeueing, assume nmp->nm_lock is held
1725 TAILQ_REMOVE(&nmp
->nm_gsscl
, cp
, gss_clnt_entries
);
1727 host_release_special_port(cp
->gss_clnt_mport
);
1729 if (cp
->gss_clnt_mtx
)
1730 lck_mtx_destroy(cp
->gss_clnt_mtx
, nfs_gss_clnt_grp
);
1731 if (IS_VALID_CRED(cp
->gss_clnt_cred
))
1732 kauth_cred_unref(&cp
->gss_clnt_cred
);
1733 if (cp
->gss_clnt_principal
)
1734 FREE(cp
->gss_clnt_principal
, M_TEMP
);
1735 if (cp
->gss_clnt_display
)
1736 FREE(cp
->gss_clnt_display
, M_TEMP
);
1737 if (cp
->gss_clnt_handle
)
1738 FREE(cp
->gss_clnt_handle
, M_TEMP
);
1739 if (cp
->gss_clnt_seqbits
)
1740 FREE(cp
->gss_clnt_seqbits
, M_TEMP
);
1741 if (cp
->gss_clnt_token
)
1742 FREE(cp
->gss_clnt_token
, M_TEMP
);
1743 if (cp
->gss_clnt_svcname
)
1744 FREE(cp
->gss_clnt_svcname
, M_TEMP
);
1749 * The context for a user is invalid.
1750 * Mark the context as invalid, then
1751 * create a new context.
1754 nfs_gss_clnt_ctx_renew(struct nfsreq
*req
)
1756 struct nfs_gss_clnt_ctx
*cp
= req
->r_gss_ctx
;
1757 struct nfsmount
*nmp
= req
->r_nmp
;
1758 struct nfs_gss_clnt_ctx
*ncp
;
1760 kauth_cred_t saved_cred
;
1761 mach_port_t saved_mport
;
1766 lck_mtx_lock(cp
->gss_clnt_mtx
);
1767 if (cp
->gss_clnt_flags
& GSS_CTX_INVAL
) {
1768 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1769 nfs_gss_clnt_ctx_unref(req
);
1770 return (0); // already being renewed
1772 saved_cred
= cp
->gss_clnt_cred
;
1773 kauth_cred_ref(saved_cred
);
1774 saved_mport
= host_copy_special_port(cp
->gss_clnt_mport
);
1776 /* Remove the old context */
1777 cp
->gss_clnt_flags
|= GSS_CTX_INVAL
;
1780 * If there's a thread waiting
1781 * in the old context, wake it up.
1783 if (cp
->gss_clnt_flags
& (GSS_NEEDCTX
| GSS_NEEDSEQ
)) {
1784 cp
->gss_clnt_flags
&= ~GSS_NEEDSEQ
;
1787 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1790 * Create a new context
1792 MALLOC(ncp
, struct nfs_gss_clnt_ctx
*, sizeof(*ncp
),
1793 M_TEMP
, M_WAITOK
|M_ZERO
);
1799 ncp
->gss_clnt_cred
= saved_cred
;
1800 kauth_cred_ref(ncp
->gss_clnt_cred
);
1801 ncp
->gss_clnt_mport
= host_copy_special_port(saved_mport
); // re-use the gssd port
1802 ncp
->gss_clnt_mtx
= lck_mtx_alloc_init(nfs_gss_clnt_grp
, LCK_ATTR_NULL
);
1803 ncp
->gss_clnt_thread
= current_thread();
1804 lck_mtx_lock(&nmp
->nm_lock
);
1805 TAILQ_INSERT_TAIL(&nmp
->nm_gsscl
, ncp
, gss_clnt_entries
);
1806 lck_mtx_unlock(&nmp
->nm_lock
);
1808 /* Adjust reference counts to new and old context */
1809 nfs_gss_clnt_ctx_unref(req
);
1810 nfs_gss_clnt_ctx_ref(req
, ncp
);
1812 error
= nfs_gss_clnt_ctx_init_retry(req
, ncp
); // Initialize new context
1814 host_release_special_port(saved_mport
);
1815 kauth_cred_unref(&saved_cred
);
1817 nfs_gss_clnt_ctx_unref(req
);
1823 * Destroy all the contexts associated with a mount.
1824 * The contexts are also destroyed by the server.
1827 nfs_gss_clnt_ctx_unmount(struct nfsmount
*nmp
)
1829 struct nfs_gss_clnt_ctx
*cp
;
1830 struct nfsm_chain nmreq
, nmrep
;
1837 lck_mtx_lock(&nmp
->nm_lock
);
1838 cp
= TAILQ_FIRST(&nmp
->nm_gsscl
);
1840 lck_mtx_lock(cp
->gss_clnt_mtx
);
1841 cp
->gss_clnt_refcnt
++;
1842 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1845 lck_mtx_unlock(&nmp
->nm_lock
);
1850 * Tell the server to destroy its context.
1851 * But don't bother if it's a forced unmount
1852 * or if it's a dummy sec=sys context.
1854 if (!(nmp
->nm_state
& NFSSTA_FORCE
) && (cp
->gss_clnt_service
!= RPCSEC_GSS_SVC_SYS
)) {
1855 cp
->gss_clnt_proc
= RPCSEC_GSS_DESTROY
;
1858 nfsm_chain_null(&nmreq
);
1859 nfsm_chain_null(&nmrep
);
1860 nfsm_chain_build_alloc_init(error
, &nmreq
, 0);
1861 nfsm_chain_build_done(error
, &nmreq
);
1863 nfs_request_gss(nmp
->nm_mountp
, &nmreq
,
1864 current_thread(), cp
->gss_clnt_cred
, 0, cp
, &nmrep
, &status
);
1865 nfsm_chain_cleanup(&nmreq
);
1866 nfsm_chain_cleanup(&nmrep
);
1870 * Mark the context invalid then drop
1871 * the reference to remove it if its
1874 lck_mtx_lock(cp
->gss_clnt_mtx
);
1875 cp
->gss_clnt_flags
|= GSS_CTX_INVAL
;
1876 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1877 nfs_gss_clnt_ctx_unref(&req
);
1882 * Destroy a mounts context for a credential
1885 nfs_gss_clnt_ctx_destroy(struct nfsmount
*nmp
, kauth_cred_t cred
)
1887 struct nfs_gss_clnt_ctx
*cp
;
1892 lck_mtx_lock(&nmp
->nm_lock
);
1893 TAILQ_FOREACH(cp
, &nmp
->nm_gsscl
, gss_clnt_entries
) {
1894 if (nfs_gss_clnt_ctx_cred_match(cp
->gss_clnt_cred
, cred
)) {
1895 if (cp
->gss_clnt_flags
& GSS_CTX_INVAL
)
1897 lck_mtx_lock(cp
->gss_clnt_mtx
);
1898 cp
->gss_clnt_refcnt
++;
1899 cp
->gss_clnt_flags
|= GSS_CTX_INVAL
;
1900 lck_mtx_unlock(cp
->gss_clnt_mtx
);
1905 lck_mtx_unlock(&nmp
->nm_lock
);
1911 * Drop the reference to remove it if its
1914 nfs_gss_clnt_ctx_unref(&req
);
1920 #endif /* NFSCLIENT */
1930 * Find a server context based on a handle value received
1931 * in an RPCSEC_GSS credential.
1933 static struct nfs_gss_svc_ctx
*
1934 nfs_gss_svc_ctx_find(uint32_t handle
)
1936 struct nfs_gss_svc_ctx_hashhead
*head
;
1937 struct nfs_gss_svc_ctx
*cp
;
1943 head
= &nfs_gss_svc_ctx_hashtbl
[SVC_CTX_HASH(handle
)];
1945 * Don't return a context that is going to expire in GSS_CTX_PEND seconds
1947 clock_interval_to_deadline(GSS_CTX_PEND
, NSEC_PER_SEC
, &timenow
);
1949 lck_mtx_lock(nfs_gss_svc_ctx_mutex
);
1951 LIST_FOREACH(cp
, head
, gss_svc_entries
) {
1952 if (cp
->gss_svc_handle
== handle
) {
1953 if (timenow
> cp
->gss_svc_incarnation
+ GSS_SVC_CTX_TTL
) {
1955 * Context has or is about to expire. Don't use.
1956 * We'll return null and the client will have to create
1959 cp
->gss_svc_handle
= 0;
1961 * Make sure though that we stay around for GSS_CTX_PEND seconds
1962 * for other threads that might be using the context.
1964 cp
->gss_svc_incarnation
= timenow
;
1969 lck_mtx_lock(cp
->gss_svc_mtx
);
1970 cp
->gss_svc_refcnt
++;
1971 lck_mtx_unlock(cp
->gss_svc_mtx
);
1976 lck_mtx_unlock(nfs_gss_svc_ctx_mutex
);
1982 * Insert a new server context into the hash table
1983 * and start the context reap thread if necessary.
1986 nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx
*cp
)
1988 struct nfs_gss_svc_ctx_hashhead
*head
;
1989 struct nfs_gss_svc_ctx
*p
;
1991 lck_mtx_lock(nfs_gss_svc_ctx_mutex
);
1994 * Give the client a random handle so that if we reboot
1995 * it's unlikely the client will get a bad context match.
1996 * Make sure it's not zero or already assigned.
1999 cp
->gss_svc_handle
= random();
2000 if (cp
->gss_svc_handle
== 0)
2002 head
= &nfs_gss_svc_ctx_hashtbl
[SVC_CTX_HASH(cp
->gss_svc_handle
)];
2003 LIST_FOREACH(p
, head
, gss_svc_entries
)
2004 if (p
->gss_svc_handle
== cp
->gss_svc_handle
)
2007 clock_interval_to_deadline(GSS_CTX_PEND
, NSEC_PER_SEC
,
2008 &cp
->gss_svc_incarnation
);
2009 LIST_INSERT_HEAD(head
, cp
, gss_svc_entries
);
2010 nfs_gss_ctx_count
++;
2012 if (!nfs_gss_timer_on
) {
2013 nfs_gss_timer_on
= 1;
2015 nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call
,
2016 min(GSS_TIMER_PERIOD
, max(GSS_CTX_TTL_MIN
, nfsrv_gss_context_ttl
)) * MSECS_PER_SEC
);
2019 lck_mtx_unlock(nfs_gss_svc_ctx_mutex
);
2023 * This function is called via the kernel's callout
2024 * mechanism. It runs only when there are
2025 * cached RPCSEC_GSS contexts.
2028 nfs_gss_svc_ctx_timer(__unused
void *param1
, __unused
void *param2
)
2030 struct nfs_gss_svc_ctx
*cp
, *next
;
2035 lck_mtx_lock(nfs_gss_svc_ctx_mutex
);
2036 clock_get_uptime(&timenow
);
2039 * Scan all the hash chains
2041 for (i
= 0; i
< SVC_CTX_HASHSZ
; i
++) {
2043 * For each hash chain, look for entries
2044 * that haven't been used in a while.
2046 LIST_FOREACH_SAFE(cp
, &nfs_gss_svc_ctx_hashtbl
[i
], gss_svc_entries
, next
) {
2048 if (timenow
> cp
->gss_svc_incarnation
+
2049 (cp
->gss_svc_handle
? GSS_SVC_CTX_TTL
: 0)
2050 && cp
->gss_svc_refcnt
== 0) {
2052 * A stale context - remove it
2054 LIST_REMOVE(cp
, gss_svc_entries
);
2055 if (cp
->gss_svc_seqbits
)
2056 FREE(cp
->gss_svc_seqbits
, M_TEMP
);
2057 lck_mtx_destroy(cp
->gss_svc_mtx
, nfs_gss_svc_grp
);
2064 nfs_gss_ctx_count
= contexts
;
2067 * If there are still some cached contexts left,
2068 * set up another callout to check on them later.
2070 nfs_gss_timer_on
= nfs_gss_ctx_count
> 0;
2071 if (nfs_gss_timer_on
)
2072 nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call
,
2073 min(GSS_TIMER_PERIOD
, max(GSS_CTX_TTL_MIN
, nfsrv_gss_context_ttl
)) * MSECS_PER_SEC
);
2075 lck_mtx_unlock(nfs_gss_svc_ctx_mutex
);
2079 * Here the server receives an RPCSEC_GSS credential in an
2080 * RPC call header. First there's some checking to make sure
2081 * the credential is appropriate - whether the context is still
2082 * being set up, or is complete. Then we use the handle to find
2083 * the server's context and validate the verifier, which contains
2084 * a signed checksum of the RPC header. If the verifier checks
2085 * out, we extract the user's UID and groups from the context
2086 * and use it to set up a UNIX credential for the user's request.
2089 nfs_gss_svc_cred_get(struct nfsrv_descript
*nd
, struct nfsm_chain
*nmc
)
2091 uint32_t vers
, proc
, seqnum
, service
;
2092 uint32_t handle
, handle_len
;
2093 struct nfs_gss_svc_ctx
*cp
= NULL
;
2094 uint32_t flavor
= 0, verflen
= 0;
2096 uint32_t arglen
, start
, toklen
, cksumlen
;
2097 u_char tokbuf
[KRB5_SZ_TOKMAX(MAX_DIGEST
)];
2098 u_char cksum1
[MAX_DIGEST
], cksum2
[MAX_DIGEST
];
2099 struct nfsm_chain nmc_tmp
;
2102 vers
= proc
= seqnum
= service
= handle_len
= 0;
2103 arglen
= cksumlen
= 0;
2105 nfsm_chain_get_32(error
, nmc
, vers
);
2106 if (vers
!= RPCSEC_GSS_VERS_1
) {
2107 error
= NFSERR_AUTHERR
| AUTH_REJECTCRED
;
2111 nfsm_chain_get_32(error
, nmc
, proc
);
2112 nfsm_chain_get_32(error
, nmc
, seqnum
);
2113 nfsm_chain_get_32(error
, nmc
, service
);
2114 nfsm_chain_get_32(error
, nmc
, handle_len
);
2119 * Make sure context setup/destroy is being done with a nullproc
2121 if (proc
!= RPCSEC_GSS_DATA
&& nd
->nd_procnum
!= NFSPROC_NULL
) {
2122 error
= NFSERR_AUTHERR
| RPCSEC_GSS_CREDPROBLEM
;
2127 * If the sequence number is greater than the max
2128 * allowable, reject and have the client init a
2131 if (seqnum
> GSS_MAXSEQ
) {
2132 error
= NFSERR_AUTHERR
| RPCSEC_GSS_CTXPROBLEM
;
2137 service
== RPCSEC_GSS_SVC_NONE
? RPCAUTH_KRB5
:
2138 service
== RPCSEC_GSS_SVC_INTEGRITY
? RPCAUTH_KRB5I
:
2139 service
== RPCSEC_GSS_SVC_PRIVACY
? RPCAUTH_KRB5P
: 0;
2141 if (proc
== RPCSEC_GSS_INIT
) {
2143 * Limit the total number of contexts
2145 if (nfs_gss_ctx_count
> nfs_gss_ctx_max
) {
2146 error
= NFSERR_AUTHERR
| RPCSEC_GSS_CTXPROBLEM
;
2151 * Set up a new context
2153 MALLOC(cp
, struct nfs_gss_svc_ctx
*, sizeof(*cp
), M_TEMP
, M_WAITOK
|M_ZERO
);
2158 cp
->gss_svc_mtx
= lck_mtx_alloc_init(nfs_gss_svc_grp
, LCK_ATTR_NULL
);
2159 cp
->gss_svc_refcnt
= 1;
2163 * Use the handle to find the context
2165 if (handle_len
!= sizeof(handle
)) {
2166 error
= NFSERR_AUTHERR
| RPCSEC_GSS_CREDPROBLEM
;
2169 nfsm_chain_get_32(error
, nmc
, handle
);
2172 cp
= nfs_gss_svc_ctx_find(handle
);
2174 error
= NFSERR_AUTHERR
| RPCSEC_GSS_CTXPROBLEM
;
2179 cp
->gss_svc_proc
= proc
;
2180 ki
= &cp
->gss_svc_kinfo
;
2182 if (proc
== RPCSEC_GSS_DATA
|| proc
== RPCSEC_GSS_DESTROY
) {
2183 struct posix_cred temp_pcred
;
2185 if (cp
->gss_svc_seqwin
== 0) {
2187 * Context isn't complete
2189 error
= NFSERR_AUTHERR
| RPCSEC_GSS_CTXPROBLEM
;
2193 if (!nfs_gss_svc_seqnum_valid(cp
, seqnum
)) {
2195 * Sequence number is bad
2197 error
= EINVAL
; // drop the request
2201 /* Now compute the client's call header checksum */
2202 nfs_gss_cksum_chain(ki
, nmc
, ALG_MIC(ki
), 0, 0, cksum1
);
2205 * Validate the verifier.
2206 * The verifier contains an encrypted checksum
2207 * of the call header from the XID up to and
2208 * including the credential. We compute the
2209 * checksum and compare it with what came in
2212 nfsm_chain_get_32(error
, nmc
, flavor
);
2213 nfsm_chain_get_32(error
, nmc
, verflen
);
2216 if (flavor
!= RPCSEC_GSS
|| verflen
!= KRB5_SZ_TOKEN(ki
->hash_len
))
2217 error
= NFSERR_AUTHERR
| AUTH_BADVERF
;
2218 nfsm_chain_get_opaque(error
, nmc
, verflen
, tokbuf
);
2222 /* Get the checksum from the token inside the verifier */
2223 error
= nfs_gss_token_get(ki
, ALG_MIC(ki
), tokbuf
, 1,
2228 if (bcmp(cksum1
, cksum2
, HASHLEN(ki
)) != 0) {
2229 error
= NFSERR_AUTHERR
| RPCSEC_GSS_CTXPROBLEM
;
2233 nd
->nd_gss_seqnum
= seqnum
;
2236 * Set up the user's cred
2238 bzero(&temp_pcred
, sizeof(temp_pcred
));
2239 temp_pcred
.cr_uid
= cp
->gss_svc_uid
;
2240 bcopy(cp
->gss_svc_gids
, temp_pcred
.cr_groups
,
2241 sizeof(gid_t
) * cp
->gss_svc_ngroups
);
2242 temp_pcred
.cr_ngroups
= cp
->gss_svc_ngroups
;
2244 nd
->nd_cr
= posix_cred_create(&temp_pcred
);
2245 if (nd
->nd_cr
== NULL
) {
2249 clock_get_uptime(&cp
->gss_svc_incarnation
);
2252 * If the call arguments are integrity or privacy protected
2253 * then we need to check them here.
2256 case RPCSEC_GSS_SVC_NONE
:
2259 case RPCSEC_GSS_SVC_INTEGRITY
:
2261 * Here's what we expect in the integrity call args:
2263 * - length of seq num + call args (4 bytes)
2264 * - sequence number (4 bytes)
2265 * - call args (variable bytes)
2266 * - length of checksum token (37)
2267 * - checksum of seqnum + call args (37 bytes)
2269 nfsm_chain_get_32(error
, nmc
, arglen
); // length of args
2270 if (arglen
> NFS_MAXPACKET
) {
2275 /* Compute the checksum over the call args */
2276 start
= nfsm_chain_offset(nmc
);
2277 nfs_gss_cksum_chain(ki
, nmc
, ALG_MIC(ki
), start
, arglen
, cksum1
);
2280 * Get the sequence number prepended to the args
2281 * and compare it against the one sent in the
2284 nfsm_chain_get_32(error
, nmc
, seqnum
);
2285 if (seqnum
!= nd
->nd_gss_seqnum
) {
2286 error
= EBADRPC
; // returns as GARBAGEARGS
2291 * Advance to the end of the args and
2292 * fetch the checksum computed by the client.
2295 arglen
-= NFSX_UNSIGNED
; // skipped seqnum
2296 nfsm_chain_adv(error
, &nmc_tmp
, arglen
); // skip args
2297 nfsm_chain_get_32(error
, &nmc_tmp
, cksumlen
); // length of checksum
2298 if (cksumlen
!= KRB5_SZ_TOKEN(ki
->hash_len
)) {
2302 nfsm_chain_get_opaque(error
, &nmc_tmp
, cksumlen
, tokbuf
);
2305 error
= nfs_gss_token_get(ki
, ALG_MIC(ki
), tokbuf
, 1,
2308 /* Verify that the checksums are the same */
2309 if (error
|| bcmp(cksum1
, cksum2
, HASHLEN(ki
)) != 0) {
2314 case RPCSEC_GSS_SVC_PRIVACY
:
2316 * Here's what we expect in the privacy call args:
2318 * - length of confounder + seq num + token + call args
2319 * - wrap token (37-40 bytes)
2320 * - confounder (8 bytes)
2321 * - sequence number (4 bytes)
2322 * - call args (encrypted)
2324 nfsm_chain_get_32(error
, nmc
, arglen
); // length of args
2325 if (arglen
> NFS_MAXPACKET
) {
2330 /* Get the token that prepends the encrypted args */
2331 nfsm_chain_get_opaque(error
, nmc
, KRB5_SZ_TOKMAX(ki
->hash_len
), tokbuf
);
2334 error
= nfs_gss_token_get(ki
, ALG_WRAP(ki
), tokbuf
, 1,
2338 nfsm_chain_reverse(nmc
, nfsm_pad(toklen
));
2340 /* decrypt the 8 byte confounder + seqnum + args */
2341 start
= nfsm_chain_offset(nmc
);
2343 nfs_gss_encrypt_chain(ki
, nmc
, start
, arglen
, DES_DECRYPT
);
2345 /* Compute a checksum over the sequence number + results */
2346 nfs_gss_cksum_chain(ki
, nmc
, ALG_WRAP(ki
), start
, arglen
, cksum2
);
2348 /* Verify that the checksums are the same */
2349 if (bcmp(cksum1
, cksum2
, HASHLEN(ki
)) != 0) {
2355 * Get the sequence number prepended to the args
2356 * and compare it against the one sent in the
2359 nfsm_chain_adv(error
, nmc
, 8); // skip over the confounder
2360 nfsm_chain_get_32(error
, nmc
, seqnum
);
2361 if (seqnum
!= nd
->nd_gss_seqnum
) {
2362 error
= EBADRPC
; // returns as GARBAGEARGS
2369 * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
2370 * then we expect a null verifier.
2372 nfsm_chain_get_32(error
, nmc
, flavor
);
2373 nfsm_chain_get_32(error
, nmc
, verflen
);
2374 if (error
|| flavor
!= RPCAUTH_NULL
|| verflen
> 0)
2375 error
= NFSERR_AUTHERR
| RPCSEC_GSS_CREDPROBLEM
;
2377 if (proc
== RPCSEC_GSS_INIT
) {
2378 lck_mtx_destroy(cp
->gss_svc_mtx
, nfs_gss_svc_grp
);
2386 nd
->nd_gss_context
= cp
;
2390 nfs_gss_svc_ctx_deref(cp
);
2395 * Insert the server's verifier into the RPC reply header.
2396 * It contains a signed checksum of the sequence number that
2397 * was received in the RPC call.
2398 * Then go on to add integrity or privacy if necessary.
2401 nfs_gss_svc_verf_put(struct nfsrv_descript
*nd
, struct nfsm_chain
*nmc
)
2403 struct nfs_gss_svc_ctx
*cp
;
2405 u_char tokbuf
[KRB5_SZ_TOKEN(MAX_DIGEST
)];
2407 u_char cksum
[MAX_DIGEST
];
2410 cp
= nd
->nd_gss_context
;
2411 ki
= &cp
->gss_svc_kinfo
;
2413 if (cp
->gss_svc_major
!= GSS_S_COMPLETE
) {
2415 * If the context isn't yet complete
2416 * then return a null verifier.
2418 nfsm_chain_add_32(error
, nmc
, RPCAUTH_NULL
);
2419 nfsm_chain_add_32(error
, nmc
, 0);
2424 * Compute checksum of the request seq number
2425 * If it's the final reply of context setup
2426 * then return the checksum of the context
2429 if (cp
->gss_svc_proc
== RPCSEC_GSS_INIT
||
2430 cp
->gss_svc_proc
== RPCSEC_GSS_CONTINUE_INIT
)
2431 nfs_gss_cksum_rep(ki
, cp
->gss_svc_seqwin
, cksum
);
2433 nfs_gss_cksum_rep(ki
, nd
->nd_gss_seqnum
, cksum
);
2435 * Now wrap it in a token and add
2436 * the verifier to the reply.
2438 toklen
= nfs_gss_token_put(ki
, ALG_MIC(ki
), tokbuf
, 0, 0, cksum
);
2439 nfsm_chain_add_32(error
, nmc
, RPCSEC_GSS
);
2440 nfsm_chain_add_32(error
, nmc
, toklen
);
2441 nfsm_chain_add_opaque(error
, nmc
, tokbuf
, toklen
);
2447 * The results aren't available yet, but if they need to be
2448 * checksummed for integrity protection or encrypted, then
2449 * we can record the start offset here, insert a place-holder
2450 * for the results length, as well as the sequence number.
2451 * The rest of the work is done later by nfs_gss_svc_protect_reply()
2452 * when the results are available.
2455 nfs_gss_svc_prepare_reply(struct nfsrv_descript
*nd
, struct nfsm_chain
*nmc
)
2457 struct nfs_gss_svc_ctx
*cp
= nd
->nd_gss_context
;
2460 if (cp
->gss_svc_proc
== RPCSEC_GSS_INIT
||
2461 cp
->gss_svc_proc
== RPCSEC_GSS_CONTINUE_INIT
)
2464 switch (nd
->nd_sec
) {
2469 nd
->nd_gss_mb
= nmc
->nmc_mcur
; // record current mbuf
2470 nfsm_chain_finish_mbuf(error
, nmc
); // split the chain here
2471 nfsm_chain_add_32(error
, nmc
, nd
->nd_gss_seqnum
); // req sequence number
2474 nd
->nd_gss_mb
= nmc
->nmc_mcur
; // record current mbuf
2475 nfsm_chain_finish_mbuf(error
, nmc
); // split the chain here
2476 nfsm_chain_add_32(error
, nmc
, random()); // confounder bytes 1-4
2477 nfsm_chain_add_32(error
, nmc
, random()); // confounder bytes 5-8
2478 nfsm_chain_add_32(error
, nmc
, nd
->nd_gss_seqnum
); // req sequence number
2486 * The results are checksummed or encrypted for return to the client
2489 nfs_gss_svc_protect_reply(struct nfsrv_descript
*nd
, mbuf_t mrep
)
2491 struct nfs_gss_svc_ctx
*cp
= nd
->nd_gss_context
;
2492 struct nfsm_chain nmrep_res
, *nmc_res
= &nmrep_res
;
2493 struct nfsm_chain nmrep_pre
, *nmc_pre
= &nmrep_pre
;
2496 u_char tokbuf
[KRB5_SZ_TOKMAX(MAX_DIGEST
)];
2498 u_char cksum
[MAX_DIGEST
];
2500 gss_key_info
*ki
= &cp
->gss_svc_kinfo
;
2503 * Using a reference to the mbuf where we previously split the reply
2504 * mbuf chain, we split the mbuf chain argument into two mbuf chains,
2505 * one that allows us to prepend a length field or token, (nmc_pre)
2506 * and the second which holds just the results that we're going to
2507 * checksum and/or encrypt. When we're done, we join the chains back
2510 nfs_gss_nfsm_chain(nmc_res
, mrep
); // set up the results chain
2511 mb
= nd
->nd_gss_mb
; // the mbuf where we split
2512 results
= mbuf_next(mb
); // first mbuf in the results
2513 reslen
= nfs_gss_mchain_length(results
); // length of results
2514 error
= mbuf_setnext(mb
, NULL
); // disconnect the chains
2517 nfs_gss_nfsm_chain(nmc_pre
, mb
); // set up the prepend chain
2519 if (nd
->nd_sec
== RPCAUTH_KRB5I
) {
2520 nfsm_chain_add_32(error
, nmc_pre
, reslen
);
2521 nfsm_chain_build_done(error
, nmc_pre
);
2524 nfs_gss_append_chain(nmc_pre
, results
); // Append the results mbufs
2526 /* Now compute the checksum over the results data */
2527 nfs_gss_cksum_mchain(ki
, results
, ALG_MIC(ki
), 0, reslen
, cksum
);
2529 /* Put it into a token and append to the request */
2530 toklen
= nfs_gss_token_put(ki
, ALG_MIC(ki
), tokbuf
, 0, 0, cksum
);
2531 nfsm_chain_add_32(error
, nmc_res
, toklen
);
2532 nfsm_chain_add_opaque(error
, nmc_res
, tokbuf
, toklen
);
2533 nfsm_chain_build_done(error
, nmc_res
);
2537 * Append a pad trailer - per RFC 1964 section 1.2.2.3
2538 * Since XDR data is always 32-bit aligned, it
2539 * needs to be padded either by 4 bytes or 8 bytes.
2541 if (reslen
% 8 > 0) {
2542 nfsm_chain_add_32(error
, nmc_res
, 0x04040404);
2543 reslen
+= NFSX_UNSIGNED
;
2545 nfsm_chain_add_32(error
, nmc_res
, 0x08080808);
2546 nfsm_chain_add_32(error
, nmc_res
, 0x08080808);
2547 reslen
+= 2 * NFSX_UNSIGNED
;
2549 nfsm_chain_build_done(error
, nmc_res
);
2551 /* Now compute the checksum over the results data */
2552 nfs_gss_cksum_mchain(ki
, results
, ALG_WRAP(ki
), 0, reslen
, cksum
);
2554 /* Put it into a token and insert in the reply */
2555 toklen
= nfs_gss_token_put(ki
, ALG_WRAP(ki
), tokbuf
, 0, reslen
, cksum
);
2556 nfsm_chain_add_32(error
, nmc_pre
, toklen
+ reslen
);
2557 nfsm_chain_add_opaque_nopad(error
, nmc_pre
, tokbuf
, toklen
);
2558 nfsm_chain_build_done(error
, nmc_pre
);
2561 nfs_gss_append_chain(nmc_pre
, results
); // Append the results mbufs
2563 /* Encrypt the confounder + seqnum + results */
2564 nfs_gss_encrypt_mchain(ki
, results
, 0, reslen
, DES_ENCRYPT
);
2566 /* Add null XDR pad if the ASN.1 token misaligned the data */
2567 pad
= nfsm_pad(toklen
+ reslen
);
2569 nfsm_chain_add_opaque_nopad(error
, nmc_pre
, iv0
, pad
);
2570 nfsm_chain_build_done(error
, nmc_pre
);
2578 * This function handles the context setup calls from the client.
2579 * Essentially, it implements the NFS null procedure calls when
2580 * an RPCSEC_GSS credential is used.
2581 * This is the context maintenance function. It creates and
2582 * destroys server contexts at the whim of the client.
2583 * During context creation, it receives GSS-API tokens from the
2584 * client, passes them up to gssd, and returns a received token
2585 * back to the client in the null procedure reply.
2588 nfs_gss_svc_ctx_init(struct nfsrv_descript
*nd
, struct nfsrv_sock
*slp
, mbuf_t
*mrepp
)
2590 struct nfs_gss_svc_ctx
*cp
= NULL
;
2593 struct nfsm_chain
*nmreq
, nmrep
;
2596 nmreq
= &nd
->nd_nmreq
;
2597 nfsm_chain_null(&nmrep
);
2599 cp
= nd
->nd_gss_context
;
2602 switch (cp
->gss_svc_proc
) {
2603 case RPCSEC_GSS_INIT
:
2604 nfs_gss_svc_ctx_insert(cp
);
2607 case RPCSEC_GSS_CONTINUE_INIT
:
2608 /* Get the token from the request */
2609 nfsm_chain_get_32(error
, nmreq
, cp
->gss_svc_tokenlen
);
2610 if (cp
->gss_svc_tokenlen
== 0) {
2611 autherr
= RPCSEC_GSS_CREDPROBLEM
;
2614 MALLOC(cp
->gss_svc_token
, u_char
*, cp
->gss_svc_tokenlen
, M_TEMP
, M_WAITOK
);
2615 if (cp
->gss_svc_token
== NULL
) {
2616 autherr
= RPCSEC_GSS_CREDPROBLEM
;
2619 nfsm_chain_get_opaque(error
, nmreq
, cp
->gss_svc_tokenlen
, cp
->gss_svc_token
);
2621 /* Use the token in a gss_accept_sec_context upcall */
2622 error
= nfs_gss_svc_gssd_upcall(cp
);
2624 autherr
= RPCSEC_GSS_CREDPROBLEM
;
2625 if (error
== NFSERR_EAUTH
)
2631 * If the context isn't complete, pass the new token
2632 * back to the client for another round.
2634 if (cp
->gss_svc_major
!= GSS_S_COMPLETE
)
2638 * Now the server context is complete.
2641 clock_get_uptime(&cp
->gss_svc_incarnation
);
2643 cp
->gss_svc_seqwin
= GSS_SVC_SEQWINDOW
;
2644 MALLOC(cp
->gss_svc_seqbits
, uint32_t *,
2645 nfsm_rndup((cp
->gss_svc_seqwin
+ 7) / 8), M_TEMP
, M_WAITOK
|M_ZERO
);
2646 if (cp
->gss_svc_seqbits
== NULL
) {
2647 autherr
= RPCSEC_GSS_CREDPROBLEM
;
2652 case RPCSEC_GSS_DATA
:
2653 /* Just a nullproc ping - do nothing */
2656 case RPCSEC_GSS_DESTROY
:
2658 * Don't destroy the context immediately because
2659 * other active requests might still be using it.
2660 * Instead, schedule it for destruction after
2661 * GSS_CTX_PEND time has elapsed.
2663 cp
= nfs_gss_svc_ctx_find(cp
->gss_svc_handle
);
2665 cp
->gss_svc_handle
= 0; // so it can't be found
2666 lck_mtx_lock(cp
->gss_svc_mtx
);
2667 clock_interval_to_deadline(GSS_CTX_PEND
, NSEC_PER_SEC
,
2668 &cp
->gss_svc_incarnation
);
2669 lck_mtx_unlock(cp
->gss_svc_mtx
);
2673 autherr
= RPCSEC_GSS_CREDPROBLEM
;
2677 /* Now build the reply */
2679 if (nd
->nd_repstat
== 0)
2680 nd
->nd_repstat
= autherr
? (NFSERR_AUTHERR
| autherr
) : NFSERR_RETVOID
;
2681 sz
= 7 * NFSX_UNSIGNED
+ nfsm_rndup(cp
->gss_svc_tokenlen
); // size of results
2682 error
= nfsrv_rephead(nd
, slp
, &nmrep
, sz
);
2683 *mrepp
= nmrep
.nmc_mhead
;
2684 if (error
|| autherr
)
2687 if (cp
->gss_svc_proc
== RPCSEC_GSS_INIT
||
2688 cp
->gss_svc_proc
== RPCSEC_GSS_CONTINUE_INIT
) {
2689 nfsm_chain_add_32(error
, &nmrep
, sizeof(cp
->gss_svc_handle
));
2690 nfsm_chain_add_32(error
, &nmrep
, cp
->gss_svc_handle
);
2692 nfsm_chain_add_32(error
, &nmrep
, cp
->gss_svc_major
);
2693 nfsm_chain_add_32(error
, &nmrep
, cp
->gss_svc_minor
);
2694 nfsm_chain_add_32(error
, &nmrep
, cp
->gss_svc_seqwin
);
2696 nfsm_chain_add_32(error
, &nmrep
, cp
->gss_svc_tokenlen
);
2697 if (cp
->gss_svc_token
!= NULL
) {
2698 nfsm_chain_add_opaque(error
, &nmrep
, cp
->gss_svc_token
, cp
->gss_svc_tokenlen
);
2699 FREE(cp
->gss_svc_token
, M_TEMP
);
2700 cp
->gss_svc_token
= NULL
;
2706 nd
->nd_gss_context
= NULL
;
2707 LIST_REMOVE(cp
, gss_svc_entries
);
2708 if (cp
->gss_svc_seqbits
!= NULL
)
2709 FREE(cp
->gss_svc_seqbits
, M_TEMP
);
2710 if (cp
->gss_svc_token
!= NULL
)
2711 FREE(cp
->gss_svc_token
, M_TEMP
);
2712 lck_mtx_destroy(cp
->gss_svc_mtx
, nfs_gss_svc_grp
);
2716 nfsm_chain_build_done(error
, &nmrep
);
2718 nfsm_chain_cleanup(&nmrep
);
2725 * This is almost a mirror-image of the client side upcall.
2726 * It passes and receives a token, but invokes gss_accept_sec_context.
2727 * If it's the final call of the context setup, then gssd also returns
2728 * the session key and the user's UID.
2731 nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx
*cp
)
2736 gssd_byte_buffer okey
= NULL
;
2737 uint32_t skeylen
= 0;
2739 vm_map_copy_t itoken
= NULL
;
2740 gssd_byte_buffer otoken
= NULL
;
2741 mach_msg_type_number_t otokenlen
;
2743 char svcname
[] = "nfs";
2745 kr
= host_get_gssd_port(host_priv_self(), &mp
);
2746 if (kr
!= KERN_SUCCESS
) {
2747 printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr
, kr
);
2750 if (!IPC_PORT_VALID(mp
)) {
2751 printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
2755 if (cp
->gss_svc_tokenlen
> 0)
2756 nfs_gss_mach_alloc_buffer(cp
->gss_svc_token
, cp
->gss_svc_tokenlen
, &itoken
);
2759 kr
= mach_gss_accept_sec_context(
2761 (gssd_byte_buffer
) itoken
, (mach_msg_type_number_t
) cp
->gss_svc_tokenlen
,
2764 &cp
->gss_svc_context
,
2765 &cp
->gss_svc_cred_handle
,
2769 &cp
->gss_svc_ngroups
,
2770 &okey
, (mach_msg_type_number_t
*) &skeylen
,
2771 &otoken
, &otokenlen
,
2773 &cp
->gss_svc_minor
);
2775 if (kr
!= KERN_SUCCESS
) {
2776 printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr
, kr
);
2777 if (kr
== MIG_SERVER_DIED
&& cp
->gss_svc_context
== 0 &&
2778 retry_cnt
++ < NFS_GSS_MACH_MAX_RETRIES
) {
2779 if (cp
->gss_svc_tokenlen
> 0)
2780 nfs_gss_mach_alloc_buffer(cp
->gss_svc_token
, cp
->gss_svc_tokenlen
, &itoken
);
2783 host_release_special_port(mp
);
2787 host_release_special_port(mp
);
2790 if (skeylen
!= SKEYLEN
&& skeylen
!= SKEYLEN3
) {
2791 printf("nfs_gss_svc_gssd_upcall: bad key length (%d)\n", skeylen
);
2792 vm_map_copy_discard((vm_map_copy_t
) okey
);
2793 vm_map_copy_discard((vm_map_copy_t
) otoken
);
2796 error
= nfs_gss_mach_vmcopyout((vm_map_copy_t
) okey
, skeylen
, cp
->gss_svc_kinfo
.skey
);
2798 vm_map_copy_discard((vm_map_copy_t
) otoken
);
2801 error
= gss_key_init(&cp
->gss_svc_kinfo
, skeylen
);
2807 /* Free context token used as input */
2808 if (cp
->gss_svc_token
)
2809 FREE(cp
->gss_svc_token
, M_TEMP
);
2810 cp
->gss_svc_token
= NULL
;
2811 cp
->gss_svc_tokenlen
= 0;
2813 if (otokenlen
> 0) {
2814 /* Set context token to gss output token */
2815 MALLOC(cp
->gss_svc_token
, u_char
*, otokenlen
, M_TEMP
, M_WAITOK
);
2816 if (cp
->gss_svc_token
== NULL
) {
2817 printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen
);
2818 vm_map_copy_discard((vm_map_copy_t
) otoken
);
2821 error
= nfs_gss_mach_vmcopyout((vm_map_copy_t
) otoken
, otokenlen
, cp
->gss_svc_token
);
2823 FREE(cp
->gss_svc_token
, M_TEMP
);
2824 cp
->gss_svc_token
= NULL
;
2825 return (NFSERR_EAUTH
);
2827 cp
->gss_svc_tokenlen
= otokenlen
;
2833 FREE(cp
->gss_svc_token
, M_TEMP
);
2834 cp
->gss_svc_tokenlen
= 0;
2835 cp
->gss_svc_token
= NULL
;
2837 return (NFSERR_EAUTH
);
2841 * Validate the sequence number in the credential as described
2842 * in RFC 2203 Section 5.3.3.1
2844 * Here the window of valid sequence numbers is represented by
2845 * a bitmap. As each sequence number is received, its bit is
2846 * set in the bitmap. An invalid sequence number lies below
2847 * the lower bound of the window, or is within the window but
2848 * has its bit already set.
2851 nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx
*cp
, uint32_t seq
)
2853 uint32_t *bits
= cp
->gss_svc_seqbits
;
2854 uint32_t win
= cp
->gss_svc_seqwin
;
2857 lck_mtx_lock(cp
->gss_svc_mtx
);
2860 * If greater than the window upper bound,
2861 * move the window up, and set the bit.
2863 if (seq
> cp
->gss_svc_seqmax
) {
2864 if (seq
- cp
->gss_svc_seqmax
> win
)
2865 bzero(bits
, nfsm_rndup((win
+ 7) / 8));
2867 for (i
= cp
->gss_svc_seqmax
+ 1; i
< seq
; i
++)
2868 win_resetbit(bits
, i
% win
);
2869 win_setbit(bits
, seq
% win
);
2870 cp
->gss_svc_seqmax
= seq
;
2871 lck_mtx_unlock(cp
->gss_svc_mtx
);
2876 * Invalid if below the lower bound of the window
2878 if (seq
<= cp
->gss_svc_seqmax
- win
) {
2879 lck_mtx_unlock(cp
->gss_svc_mtx
);
2884 * In the window, invalid if the bit is already set
2886 if (win_getbit(bits
, seq
% win
)) {
2887 lck_mtx_unlock(cp
->gss_svc_mtx
);
2890 win_setbit(bits
, seq
% win
);
2891 lck_mtx_unlock(cp
->gss_svc_mtx
);
2896 * Drop a reference to a context
2898 * Note that it's OK for the context to exist
2899 * with a refcount of zero. The refcount isn't
2900 * checked until we're about to reap an expired one.
2903 nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx
*cp
)
2905 lck_mtx_lock(cp
->gss_svc_mtx
);
2906 if (cp
->gss_svc_refcnt
> 0)
2907 cp
->gss_svc_refcnt
--;
2909 printf("nfs_gss_ctx_deref: zero refcount\n");
2910 lck_mtx_unlock(cp
->gss_svc_mtx
);
2914 * Called at NFS server shutdown - destroy all contexts
2917 nfs_gss_svc_cleanup(void)
2919 struct nfs_gss_svc_ctx_hashhead
*head
;
2920 struct nfs_gss_svc_ctx
*cp
, *ncp
;
2923 lck_mtx_lock(nfs_gss_svc_ctx_mutex
);
2926 * Run through all the buckets
2928 for (i
= 0; i
< SVC_CTX_HASHSZ
; i
++) {
2930 * Remove and free all entries in the bucket
2932 head
= &nfs_gss_svc_ctx_hashtbl
[i
];
2933 LIST_FOREACH_SAFE(cp
, head
, gss_svc_entries
, ncp
) {
2934 LIST_REMOVE(cp
, gss_svc_entries
);
2935 if (cp
->gss_svc_seqbits
)
2936 FREE(cp
->gss_svc_seqbits
, M_TEMP
);
2937 lck_mtx_destroy(cp
->gss_svc_mtx
, nfs_gss_svc_grp
);
2942 lck_mtx_unlock(nfs_gss_svc_ctx_mutex
);
2945 #endif /* NFSSERVER */
2949 * The following functions are used by both client and server.
2953 * Release a host special port that was obtained by host_get_special_port
2954 * or one of its macros (host_get_gssd_port in this case).
2955 * This really should be in a public kpi.
2958 /* This should be in a public header if this routine is not */
2959 extern void ipc_port_release_send(ipc_port_t
);
2960 extern ipc_port_t
ipc_port_copy_send(ipc_port_t
);
2963 host_release_special_port(mach_port_t mp
)
2965 if (IPC_PORT_VALID(mp
))
2966 ipc_port_release_send(mp
);
2970 host_copy_special_port(mach_port_t mp
)
2972 return (ipc_port_copy_send(mp
));
2976 * The token that is sent and received in the gssd upcall
2977 * has unbounded variable length. Mach RPC does not pass
2978 * the token in-line. Instead it uses page mapping to handle
2979 * these parameters. This function allocates a VM buffer
2980 * to hold the token for an upcall and copies the token
2981 * (received from the client) into it. The VM buffer is
2982 * marked with a src_destroy flag so that the upcall will
2983 * automatically de-allocate the buffer when the upcall is
2987 nfs_gss_mach_alloc_buffer(u_char
*buf
, uint32_t buflen
, vm_map_copy_t
*addr
)
2990 vm_offset_t kmem_buf
;
2994 if (buf
== NULL
|| buflen
== 0)
2997 tbuflen
= vm_map_round_page(buflen
,
2998 vm_map_page_mask(ipc_kernel_map
));
2999 kr
= vm_allocate(ipc_kernel_map
, &kmem_buf
, tbuflen
, VM_FLAGS_ANYWHERE
);
3001 printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
3005 kr
= vm_map_wire(ipc_kernel_map
,
3006 vm_map_trunc_page(kmem_buf
,
3007 vm_map_page_mask(ipc_kernel_map
)),
3008 vm_map_round_page(kmem_buf
+ tbuflen
,
3009 vm_map_page_mask(ipc_kernel_map
)),
3010 VM_PROT_READ
|VM_PROT_WRITE
, FALSE
);
3012 printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
3016 bcopy(buf
, (void *) kmem_buf
, buflen
);
3017 // Shouldn't need to bzero below since vm_allocate returns zeroed pages
3018 // bzero(kmem_buf + buflen, tbuflen - buflen);
3020 kr
= vm_map_unwire(ipc_kernel_map
,
3021 vm_map_trunc_page(kmem_buf
,
3022 vm_map_page_mask(ipc_kernel_map
)),
3023 vm_map_round_page(kmem_buf
+ tbuflen
,
3024 vm_map_page_mask(ipc_kernel_map
)),
3027 printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
3031 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
) kmem_buf
,
3032 (vm_map_size_t
) buflen
, TRUE
, addr
);
3034 printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
3040 * Here we handle a token received from the gssd via an upcall.
3041 * The received token resides in an allocate VM buffer.
3042 * We copy the token out of this buffer to a chunk of malloc'ed
3043 * memory of the right size, then de-allocate the VM buffer.
3046 nfs_gss_mach_vmcopyout(vm_map_copy_t in
, uint32_t len
, u_char
*out
)
3048 vm_map_offset_t map_data
;
3052 error
= vm_map_copyout(ipc_kernel_map
, &map_data
, in
);
3056 data
= CAST_DOWN(vm_offset_t
, map_data
);
3057 bcopy((void *) data
, out
, len
);
3058 vm_deallocate(ipc_kernel_map
, data
, len
);
3064 * Encode an ASN.1 token to be wrapped in an RPCSEC_GSS verifier.
3065 * Returns the size of the token, since it contains a variable
3066 * length DER encoded size field.
3077 static uint32_t seqnum
= 0;
3083 * Fill in the token header: 2 octets.
3084 * This is 0x06 - an ASN.1 tag for APPLICATION, 0, SEQUENCE
3085 * followed by the length of the token: 35 + 0 octets for a
3086 * MIC token, or 35 + encrypted octets for a wrap token;
3089 toklen
= KRB5_SZ_MECH
+ KRB5_SZ_ALG
+ KRB5_SZ_SEQ
+ HASHLEN(ki
);
3090 nfs_gss_der_length_put(&p
, toklen
+ datalen
);
3093 * Fill in the DER encoded mech OID for Kerberos v5.
3094 * This represents the Kerberos OID 1.2.840.113554.1.2.2
3095 * described in RFC 2623, section 4.2
3097 bcopy(krb5_mech
, p
, sizeof(krb5_mech
));
3098 p
+= sizeof(krb5_mech
);
3101 * Now at the token described in RFC 1964, section 1.2.1
3102 * Fill in the token ID, integrity algorithm indicator,
3103 * for DES MAC MD5, and four filler octets.
3104 * The alg string encodes the bytes to represent either
3105 * a MIC token or a WRAP token for Kerberos.
3107 bcopy(alg
, p
, KRB5_SZ_ALG
);
3111 * Now encode the sequence number according to
3112 * RFC 1964, section 1.2.1.2 which dictates 4 octets
3113 * of sequence number followed by 4 bytes of direction
3114 * indicator: 0x00 for initiator or 0xff for acceptor.
3115 * We DES CBC encrypt the sequence number using the first
3116 * 8 octets of the checksum field as an initialization
3118 * Note that this sequence number is not at all related
3119 * to the RPCSEC_GSS protocol sequence number. This
3120 * number is private to the ASN.1 token. The only
3121 * requirement is that it not be repeated in case the
3122 * server has replay detection on, which normally should
3123 * not be the case, since RFC 2203 section 5.2.3 says that
3124 * replay detection and sequence checking must be turned off.
3127 for (i
= 0; i
< 4; i
++)
3128 plain
[i
] = (u_char
) ((seqnum
>> (i
* 8)) & 0xff);
3129 for (i
= 4; i
< 8; i
++)
3130 plain
[i
] = initiator
? 0x00 : 0xff;
3131 gss_des_crypt(ki
, (des_cblock
*) plain
, (des_cblock
*) p
, 8,
3132 (des_cblock
*) cksum
, NULL
, DES_ENCRYPT
, KG_USAGE_SEQ
);
3136 * Finally, append the octets of the
3137 * checksum of the alg + plaintext data.
3138 * The plaintext could be an RPC call header,
3139 * the window value, or a sequence number.
3141 bcopy(cksum
, p
, HASHLEN(ki
));
/*
 * Determine size of ASN.1 DER length
 *
 * Returns the number of octets needed to DER-encode a length field
 * for a value of `len` bytes: 1 octet for short form (< 128), or
 * 1 + N octets for long form (N big-endian length bytes).
 */
static int
nfs_gss_der_length_size(int len)
{
	return
		len < (1 <<  7) ? 1 :
		len < (1 <<  8) ? 2 :
		len < (1 << 16) ? 3 :
		len < (1 << 24) ? 4 : 5;
}
3161 * Encode an ASN.1 DER length field
3164 nfs_gss_der_length_put(u_char
**pp
, int len
)
3166 int sz
= nfs_gss_der_length_size(len
);
3170 *p
++ = (u_char
) len
;
3172 *p
++ = (u_char
) ((sz
-1) | 0x80);
3175 *p
++ = (u_char
) ((len
>> (sz
* 8)) & 0xff);
3182 * Decode an ASN.1 DER length field
3185 nfs_gss_der_length_get(u_char
**pp
)
3188 uint32_t flen
, len
= 0;
3192 if ((*p
++ & 0x80) == 0)
3195 if (flen
> sizeof(uint32_t))
3198 len
= (len
<< 8) + *p
++;
3205 * Decode an ASN.1 token from an RPCSEC_GSS verifier.
3221 * Check that we have a valid token header
3224 return (AUTH_BADCRED
);
3225 (void) nfs_gss_der_length_get(&p
); // ignore the size
3228 * Check that we have the DER encoded Kerberos v5 mech OID
3230 if (bcmp(p
, krb5_mech
, sizeof(krb5_mech
) != 0))
3231 return (AUTH_BADCRED
);
3232 p
+= sizeof(krb5_mech
);
3235 * Now check the token ID, DES MAC MD5 algorithm
3236 * indicator, and filler octets.
3238 if (bcmp(p
, alg
, KRB5_SZ_ALG
) != 0)
3239 return (AUTH_BADCRED
);
3243 * Now decrypt the sequence number.
3244 * Note that the gss decryption uses the first 8 octets
3245 * of the checksum field as an initialization vector (p + 8).
3246 * Per RFC 2203 section 5.2.2 we don't check the sequence number
3247 * in the ASN.1 token because the RPCSEC_GSS protocol has its
3248 * own sequence number described in section 5.3.3.1
3251 gss_des_crypt(ki
, (des_cblock
*)p
, (des_cblock
*) plain
, 8,
3252 (des_cblock
*) (p
+ 8), NULL
, DES_DECRYPT
, KG_USAGE_SEQ
);
3254 for (i
= 0; i
< 4; i
++)
3255 seqnum
|= plain
[i
] << (i
* 8);
3258 * Make sure the direction
3259 * indicator octets are correct.
3261 d
= initiator
? 0x00 : 0xff;
3262 for (i
= 4; i
< 8; i
++)
3264 return (AUTH_BADCRED
);
3267 * Finally, get the checksum
3269 bcopy(p
, cksum
, HASHLEN(ki
));
3279 * Return the number of bytes in an mbuf chain.
3282 nfs_gss_mchain_length(mbuf_t mhead
)
3287 for (mb
= mhead
; mb
; mb
= mbuf_next(mb
))
3288 len
+= mbuf_len(mb
);
3294 * Append an args or results mbuf chain to the header chain
3297 nfs_gss_append_chain(struct nfsm_chain
*nmc
, mbuf_t mc
)
3302 /* Connect the mbuf chains */
3303 error
= mbuf_setnext(nmc
->nmc_mcur
, mc
);
3307 /* Find the last mbuf in the chain */
3309 for (mb
= mc
; mb
; mb
= mbuf_next(mb
))
3312 nmc
->nmc_mcur
= tail
;
3313 nmc
->nmc_ptr
= (caddr_t
) mbuf_data(tail
) + mbuf_len(tail
);
3314 nmc
->nmc_left
= mbuf_trailingspace(tail
);
3320 * Convert an mbuf chain to an NFS mbuf chain
3323 nfs_gss_nfsm_chain(struct nfsm_chain
*nmc
, mbuf_t mc
)
3327 /* Find the last mbuf in the chain */
3329 for (mb
= mc
; mb
; mb
= mbuf_next(mb
))
3332 nmc
->nmc_mhead
= mc
;
3333 nmc
->nmc_mcur
= tail
;
3334 nmc
->nmc_ptr
= (caddr_t
) mbuf_data(tail
) + mbuf_len(tail
);
3335 nmc
->nmc_left
= mbuf_trailingspace(tail
);
3341 * Compute a checksum over an mbuf chain.
3342 * Start building an MD5 digest at the given offset and keep
3343 * going until the end of data in the current mbuf is reached.
3344 * Then convert the 16 byte MD5 digest to an 8 byte DES CBC
3348 nfs_gss_cksum_mchain(
3359 GSS_DIGEST_CTX context
;
3361 gss_digest_Init(&context
, ki
);
3364 * Logically prepend the first 8 bytes of the algorithm
3365 * field as required by RFC 1964, section 1.2.1.1
3367 gss_digest_Update(&context
, alg
, KRB5_SZ_ALG
);
3370 * Move down the mbuf chain until we reach the given
3371 * byte offset, then start MD5 on the mbuf data until
3372 * we've done len bytes.
3375 for (mb
= mhead
; mb
&& len
> 0; mb
= mbuf_next(mb
)) {
3376 ptr
= mbuf_data(mb
);
3377 left
= mbuf_len(mb
);
3378 if (offset
>= left
) {
3379 /* Offset not yet reached */
3383 /* At or beyond offset - checksum data */
3388 bytes
= left
< len
? left
: len
;
3390 gss_digest_Update(&context
, ptr
, bytes
);
3394 gss_digest_Final(&context
, digest
);
3398 * Compute a checksum over an NFS mbuf chain.
3399 * Start building an MD5 digest at the given offset and keep
3400 * going until the end of data in the current mbuf is reached.
3401 * Then convert the 16 byte MD5 digest to an 8 byte DES CBC
3405 nfs_gss_cksum_chain(
3407 struct nfsm_chain
*nmc
,
3414 * If the length parameter is zero, then we need
3415 * to use the length from the offset to the current
3416 * encode/decode offset.
3419 len
= nfsm_chain_offset(nmc
) - offset
;
3421 return (nfs_gss_cksum_mchain(ki
, nmc
->nmc_mhead
, alg
, offset
, len
, cksum
));
3425 * Compute a checksum of the sequence number (or sequence window)
3426 * of an RPCSEC_GSS reply.
3429 nfs_gss_cksum_rep(gss_key_info
*ki
, uint32_t seqnum
, u_char
*cksum
)
3431 GSS_DIGEST_CTX context
;
3432 uint32_t val
= htonl(seqnum
);
3434 gss_digest_Init(&context
, ki
);
3437 * Logically prepend the first 8 bytes of the MIC
3438 * token as required by RFC 1964, section 1.2.1.1
3440 gss_digest_Update(&context
, ALG_MIC(ki
), KRB5_SZ_ALG
);
3443 * Compute the digest of the seqnum in network order
3445 gss_digest_Update(&context
, &val
, 4);
3446 gss_digest_Final(&context
, cksum
);
3450 * Encrypt or decrypt data in an mbuf chain with des-cbc.
3453 nfs_gss_encrypt_mchain(
3462 u_char tmp
[8], ivec
[8];
3463 int left
, left8
, remain
;
3469 * Move down the mbuf chain until we reach the given
3470 * byte offset, then start encrypting the mbuf data until
3471 * we've done len bytes.
3474 for (mb
= mhead
; mb
&& len
> 0; mb
= mbn
) {
3475 mbn
= mbuf_next(mb
);
3476 ptr
= mbuf_data(mb
);
3477 left
= mbuf_len(mb
);
3478 if (offset
>= left
) {
3479 /* Offset not yet reached */
3483 /* At or beyond offset - encrypt data */
3489 * DES or DES3 CBC has to encrypt 8 bytes at a time.
3490 * If the number of bytes to be encrypted in this
3491 * mbuf isn't some multiple of 8 bytes, encrypt all
3492 * the 8 byte blocks, then combine the remaining
3493 * bytes with enough from the next mbuf to make up
3494 * an 8 byte block and encrypt that block separately,
3495 * i.e. that block is split across two mbufs.
3498 left8
= left
- remain
;
3499 left
= left8
< len
? left8
: len
;
3501 gss_des_crypt(ki
, (des_cblock
*) ptr
, (des_cblock
*) ptr
,
3502 left
, &ivec
, &ivec
, encrypt
, KG_USAGE_SEAL
);
3506 if (mbn
&& remain
> 0) {
3507 nptr
= mbuf_data(mbn
);
3508 offset
= 8 - remain
;
3509 bcopy(ptr
+ left
, tmp
, remain
); // grab from this mbuf
3510 bcopy(nptr
, tmp
+ remain
, offset
); // grab from next mbuf
3511 gss_des_crypt(ki
, (des_cblock
*) tmp
, (des_cblock
*) tmp
, 8,
3512 &ivec
, &ivec
, encrypt
, KG_USAGE_SEAL
);
3513 bcopy(tmp
, ptr
+ left
, remain
); // return to this mbuf
3514 bcopy(tmp
+ remain
, nptr
, offset
); // return to next mbuf
3521 * Encrypt or decrypt data in an NFS mbuf chain with des-cbc.
3524 nfs_gss_encrypt_chain(
3526 struct nfsm_chain
*nmc
,
3532 * If the length parameter is zero, then we need
3533 * to use the length from the offset to the current
3534 * encode/decode offset.
3537 len
= nfsm_chain_offset(nmc
) - offset
;
3539 return (nfs_gss_encrypt_mchain(ki
, nmc
->nmc_mhead
, offset
, len
, encrypt
));
3543 * The routines that follow provide abstractions for doing digests and crypto.
3547 gss_digest_Init(GSS_DIGEST_CTX
*ctx
, gss_key_info
*ki
)
3549 ctx
->type
= ki
->type
;
3551 case NFS_GSS_1DES
: MD5_DESCBC_Init(&ctx
->m_ctx
, &ki
->ks_u
.des
.gss_sched
);
3553 case NFS_GSS_3DES
: HMAC_SHA1_DES3KD_Init(&ctx
->h_ctx
, ki
->ks_u
.des3
.ckey
, 0);
3556 printf("gss_digest_Init: Unknown key info type %d\n", ki
->type
);
3561 gss_digest_Update(GSS_DIGEST_CTX
*ctx
, void *data
, size_t len
)
3563 switch (ctx
->type
) {
3564 case NFS_GSS_1DES
: MD5_DESCBC_Update(&ctx
->m_ctx
, data
, len
);
3566 case NFS_GSS_3DES
: HMAC_SHA1_DES3KD_Update(&ctx
->h_ctx
, data
, len
);
3572 gss_digest_Final(GSS_DIGEST_CTX
*ctx
, void *digest
)
3574 switch (ctx
->type
) {
3575 case NFS_GSS_1DES
: MD5_DESCBC_Final(digest
, &ctx
->m_ctx
);
3577 case NFS_GSS_3DES
: HMAC_SHA1_DES3KD_Final(digest
, &ctx
->h_ctx
);
3583 gss_des_crypt(gss_key_info
*ki
, des_cblock
*in
, des_cblock
*out
,
3584 int32_t len
, des_cblock
*iv
, des_cblock
*retiv
, int encrypt
, int usage
)
3589 des_cbc_key_schedule
*sched
= ((usage
== KG_USAGE_SEAL
) ?
3590 &ki
->ks_u
.des
.gss_sched_Ke
:
3591 &ki
->ks_u
.des
.gss_sched
);
3592 des_cbc_encrypt(in
, out
, len
, sched
, iv
, retiv
, encrypt
);
3597 des3_cbc_encrypt(in
, out
, len
, &ki
->ks_u
.des3
.gss_sched
, iv
, retiv
, encrypt
);
3603 gss_key_init(gss_key_info
*ki
, uint32_t skeylen
)
3609 ki
->keybytes
= skeylen
;
3611 case sizeof(des_cblock
):
3612 ki
->type
= NFS_GSS_1DES
;
3613 ki
->hash_len
= MD5_DESCBC_DIGEST_LENGTH
;
3614 ki
->ks_u
.des
.key
= (des_cblock
*)ki
->skey
;
3615 rc
= des_cbc_key_sched(ki
->ks_u
.des
.key
, &ki
->ks_u
.des
.gss_sched
);
3618 for (i
= 0; i
< ki
->keybytes
; i
++)
3619 k
[0][i
] = 0xf0 ^ (*ki
->ks_u
.des
.key
)[i
];
3620 rc
= des_cbc_key_sched(&k
[0], &ki
->ks_u
.des
.gss_sched_Ke
);
3622 case 3*sizeof(des_cblock
):
3623 ki
->type
= NFS_GSS_3DES
;
3624 ki
->hash_len
= SHA_DIGEST_LENGTH
;
3625 ki
->ks_u
.des3
.key
= (des_cblock (*)[3])ki
->skey
;
3626 des3_derive_key(*ki
->ks_u
.des3
.key
, ki
->ks_u
.des3
.ckey
,
3627 KEY_USAGE_DES3_SIGN
, KEY_USAGE_LEN
);
3628 rc
= des3_cbc_key_sched(*ki
->ks_u
.des3
.key
, &ki
->ks_u
.des3
.gss_sched
);
3633 printf("gss_key_init: Invalid key length %d\n", skeylen
);
3642 #define DISPLAYLEN 16
3643 #define MAXDISPLAYLEN 256
3646 hexdump(const char *msg
, void *data
, size_t len
)
3650 char *p
, disbuf
[3*DISPLAYLEN
+1];
3652 printf("NFS DEBUG %s len=%d:\n", msg
, (uint32_t)len
);
3653 if (len
> MAXDISPLAYLEN
)
3654 len
= MAXDISPLAYLEN
;
3656 for (i
= 0; i
< len
; i
+= DISPLAYLEN
) {
3657 for (p
= disbuf
, j
= 0; (j
+ i
) < len
&& j
< DISPLAYLEN
; j
++, p
+= 3)
3658 snprintf(p
, 4, "%02x ", d
[i
+ j
]);
3659 printf("\t%s\n", disbuf
);