/*
 * Copyright (c) 2007-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * These functions implement RPCSEC_GSS security for the NFS client and server.
 * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5
 * protection as described in Internet RFC 2203 and 2623.
 *
 * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
 * It requires the client and server negotiate a secure connection as part of a
 * security context. The context state is maintained in client and server structures.
 * On the client side, each user of an NFS mount is assigned their own context,
 * identified by UID, on their first use of the mount, and it persists until the
 * unmount or until the context is renewed. Each user context has a corresponding
 * server context which the server maintains until the client destroys it, or
 * until the context expires.
 *
 * The client and server contexts are set up dynamically. When a user attempts
 * to send an NFS request, if there is no context for the user, then one is
 * set up via an exchange of NFS null procedure calls as described in RFC 2203.
 * During this exchange, the client and server pass a security token that is
 * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
 * the user to the server (and vice-versa). The client and server also receive
 * a unique session key that can be used to digitally sign the credentials and
 * verifier or optionally to provide data integrity and/or privacy.
 *
 * Once the context is complete, the client and server enter a normal data
 * exchange phase - beginning with the NFS request that prompted the context
 * creation. During this phase, the client's RPC header contains an RPCSEC_GSS
 * credential and verifier, and the server returns a verifier as well.
 * For simple authentication, the verifier contains a signed checksum of the
 * RPC header, including the credential. The server's verifier has a signed
 * checksum of the current sequence number.
 *
 * Each client call contains a sequence number that nominally increases by one
 * on each request. The sequence number is intended to prevent replay attacks.
 * Since the protocol can be used over UDP, there is some allowance for
 * out-of-sequence requests, so the server checks whether the sequence numbers
 * are within a sequence "window". If a sequence number is outside the lower
 * bound of the window, the server silently drops the request. This has some
 * implications for retransmission. If a request needs to be retransmitted, the
 * client must bump the sequence number even if the request XID is unchanged.
 *
 * When the NFS mount is unmounted, the client sends a "destroy" credential
 * to delete the server's context for each user of the mount. Since it's
 * possible for the client to crash or disconnect without sending the destroy
 * message, the server has a thread that reaps contexts that have been idle
 * too long.
 */
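
/*
 * Illustrative sketch (not part of the RPC code paths below): how a server
 * could apply the sequence "window" rule described above.  The names here are
 * hypothetical; the real server-side check is nfs_gss_svc_seqnum_valid().
 */
static int
gss_seq_in_window_sketch(unsigned int seqnum, unsigned int seqmax, unsigned int win)
{
	/* Anything at or below (seqmax - win) has fallen out of the window. */
	if (seqmax >= win && seqnum <= seqmax - win)
		return (0);	/* silently drop the request */
	return (1);		/* in (or ahead of) the window; replays are caught by the bitmap */
}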
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/mount_internal.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>

#include <kern/host.h>
#include <libkern/libkern.h>

#include <mach/task.h>
#include <mach/task_special_ports.h>
#include <mach/host_priv.h>
#include <mach/thread_act.h>
#include <mach/mig_errors.h>
#include <mach/vm_map.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <gssd/gssd_mach.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>

#include "nfs_gss_crypto.h"
#define NFS_GSS_MACH_MAX_RETRIES 3

typedef struct {
	int type;
	union {
		MD5_DESCBC_CTX m_ctx;
		HMAC_SHA1_DES3KD_CTX h_ctx;
	};
} GSS_DIGEST_CTX;

#define MAX_DIGEST SHA_DIGEST_LENGTH
#ifdef NFS_KERNEL_DEBUG
#define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
	(panic("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
#else
#define HASHLEN(ki) (((ki)->hash_len > MAX_DIGEST) ? \
	(printf("nfs_gss.c:%d ki->hash_len is invalid = %d\n", __LINE__, (ki)->hash_len), MAX_DIGEST) : (ki)->hash_len)
#endif
u_long nfs_gss_svc_ctx_hash;
struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
lck_mtx_t *nfs_gss_svc_ctx_mutex;
lck_grp_t *nfs_gss_svc_grp;
uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
#define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
#endif /* NFSSERVER */

lck_grp_t *nfs_gss_clnt_grp;
#endif /* NFSCLIENT */
/*
 * These octet strings are used to encode/decode ASN.1 tokens
 * in the RPCSEC_GSS verifiers.
 */
static u_char krb5_tokhead[] = { 0x60, 0x23 };
static u_char krb5_mech[]  = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
static u_char krb5_mic[]   = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_mic3[]  = { 0x01, 0x01, 0x04, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_wrap[]  = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
static u_char krb5_wrap3[] = { 0x02, 0x01, 0x04, 0x00, 0x02, 0x00, 0xff, 0xff };
static u_char iv0[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };	// DES MAC Initialization Vector

#define ALG_MIC(ki)	(((ki)->type == NFS_GSS_1DES) ? krb5_mic : krb5_mic3)
#define ALG_WRAP(ki)	(((ki)->type == NFS_GSS_1DES) ? krb5_wrap : krb5_wrap3)
/*
 * The size of the Kerberos v5 ASN.1 token
 *
 * Note that the second octet of the krb5_tokhead (0x23) is a
 * DER-encoded size field that has variable length. If the size
 * is 128 bytes or greater, then it uses two bytes, three bytes
 * if 65536 or greater, and so on. Since the MIC tokens are
 * separate from the data, the size is always the same: 35 bytes (0x23).
 * However, the wrap token is different. Its size field includes the
 * size of the token + the encrypted data that follows. So the size
 * field may be two, three or four bytes.
 */
#define KRB5_SZ_TOKHEAD	sizeof(krb5_tokhead)
#define KRB5_SZ_MECH	sizeof(krb5_mech)
#define KRB5_SZ_ALG	sizeof(krb5_mic)	// 8 - same as krb5_wrap
#define KRB5_SZ_SEQ	8
#define KRB5_SZ_EXTRA	3	// a wrap token may be longer by up to this many octets
#define KRB5_SZ_TOKEN_NOSUM	(KRB5_SZ_TOKHEAD + KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ)
#define KRB5_SZ_TOKEN(cksumlen)		((cksumlen) + KRB5_SZ_TOKEN_NOSUM)
#define KRB5_SZ_TOKMAX(cksumlen)	(KRB5_SZ_TOKEN(cksumlen) + KRB5_SZ_EXTRA)
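
/*
 * Illustrative sketch of the DER definite-length rule the comment above
 * refers to (the real helpers are nfs_gss_der_length_size/put/get, declared
 * below).  This stand-alone example only returns how many octets the DER
 * length field occupies for a given length; it is not used by the code.
 */
static int
der_length_octets_sketch(int len)
{
	if (len < (1 << 7))
		return (1);	/* 0x00 - 0x7f encoded directly in one octet */
	if (len < (1 << 8))
		return (2);	/* 0x81 followed by one length octet */
	if (len < (1 << 16))
		return (3);	/* 0x82 followed by two length octets */
	return (4);		/* 0x83 followed by three length octets, and so on */
}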
static int	nfs_gss_clnt_ctx_find(struct nfsreq *);
static int	nfs_gss_clnt_ctx_failover(struct nfsreq *);
static int	nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int	nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static char	*nfs_gss_clnt_svcname(struct nfsmount *);
static int	nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static void	nfs_gss_clnt_ctx_remove(struct nfsmount *, struct nfs_gss_clnt_ctx *);
static int	nfs_gss_clnt_ctx_delay(struct nfsreq *, int *);
#endif /* NFSCLIENT */

static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
static void	nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
static void	nfs_gss_svc_ctx_timer(void *, void *);
static int	nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
static int	nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
#endif /* NFSSERVER */

static void	task_release_special_port(mach_port_t);
static mach_port_t task_copy_special_port(mach_port_t);
static void	nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
static int	nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
static int	nfs_gss_token_get(gss_key_info *ki, u_char *, u_char *, int, uint32_t *, u_char *);
static int	nfs_gss_token_put(gss_key_info *ki, u_char *, u_char *, int, int, u_char *);
static int	nfs_gss_der_length_size(int);
static void	nfs_gss_der_length_put(u_char **, int);
static int	nfs_gss_der_length_get(u_char **);
static int	nfs_gss_mchain_length(mbuf_t);
static int	nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
static void	nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
static void	nfs_gss_cksum_mchain(gss_key_info *, mbuf_t, u_char *, int, int, u_char *);
static void	nfs_gss_cksum_chain(gss_key_info *, struct nfsm_chain *, u_char *, int, int, u_char *);
static void	nfs_gss_cksum_rep(gss_key_info *, uint32_t, u_char *);
static void	nfs_gss_encrypt_mchain(gss_key_info *, mbuf_t, int, int, int);
static void	nfs_gss_encrypt_chain(gss_key_info *, struct nfsm_chain *, int, int, int);

static void	gss_digest_Init(GSS_DIGEST_CTX *, gss_key_info *);
static void	gss_digest_Update(GSS_DIGEST_CTX *, void *, size_t);
static void	gss_digest_Final(GSS_DIGEST_CTX *, void *);
static void	gss_des_crypt(gss_key_info *, des_cblock *, des_cblock *,
			int32_t, des_cblock *, des_cblock *, int, int);
static int	gss_key_init(gss_key_info *, uint32_t);

thread_call_t nfs_gss_svc_ctx_timer_call;
int nfs_gss_timer_on = 0;
uint32_t nfs_gss_ctx_count = 0;
const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
#endif /* NFSSERVER */
/*
 * Initialization when NFS starts
 */
	nfs_gss_clnt_grp = lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL);
#endif /* NFSCLIENT */

	nfs_gss_svc_grp = lck_grp_alloc_init("rpcsec_gss_svc", LCK_GRP_ATTR_NULL);

	nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);
	nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);

	nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
#endif /* NFSSERVER */
/*
 * Find the context for a particular user.
 *
 * If the context doesn't already exist
 * then create a new context for this user.
 *
 * Note that the code allows superuser (uid == 0)
 * to adopt the context of another user.
 */
nfs_gss_clnt_ctx_find(struct nfsreq *req)
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp;
	uid_t uid = kauth_cred_getuid(req->r_cred);

	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
		if (cp->gss_clnt_uid == uid) {
			if (cp->gss_clnt_flags & GSS_CTX_INVAL)
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_gss_clnt_ctx_ref(req, cp);

	/*
	 * If superuser is trying to get access, then co-opt
	 * the first valid context in the list.
	 * XXX Ultimately, we need to allow superuser to
	 * go ahead and attempt to set up its own context
	 * in case one is set up for it.
	 */
	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
		if (!(cp->gss_clnt_flags & GSS_CTX_INVAL)) {
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_gss_clnt_ctx_ref(req, cp);

	/*
	 * Not found - create a new context
	 */

	/*
	 * If the thread is async, then it cannot get
	 * kerberos creds and set up a proper context.
	 * If no sec= mount option is given, attempt
	 * to failover to sec=sys.
	 */
	if (req->r_thread == NULL) {
		if (nmp->nm_flag & NFSMNT_SECSYSOK) {
			error = nfs_gss_clnt_ctx_failover(req);
			printf("nfs_gss_clnt_ctx_find: no context for async\n");
			error = NFSERR_EAUTH;
		lck_mtx_unlock(&nmp->nm_lock);

	MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
		lck_mtx_unlock(&nmp->nm_lock);

	cp->gss_clnt_uid = uid;
	cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
	cp->gss_clnt_thread = current_thread();
	nfs_gss_clnt_ctx_ref(req, cp);
	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
	lck_mtx_unlock(&nmp->nm_lock);

	error = nfs_gss_clnt_ctx_init(req, cp);
	if (error == ENEEDAUTH) {
		error = nfs_gss_clnt_ctx_delay(req, &retrycnt);

	/* Giving up on this context */
	cp->gss_clnt_flags |= GSS_CTX_INVAL;

	/*
	 * Wake any threads waiting to use the context
	 */
	lck_mtx_lock(cp->gss_clnt_mtx);
	cp->gss_clnt_thread = NULL;
	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
	lck_mtx_unlock(cp->gss_clnt_mtx);

	nfs_gss_clnt_ctx_unref(req);

	/*
	 * If we failed to set up a Kerberos context for this
	 * user and no sec= mount option was given, but the
	 * server indicated that it could support AUTH_SYS, then set
	 * up a dummy context that allows this user to attempt
	 */
	if (error && (nmp->nm_flag & NFSMNT_SECSYSOK) &&
	    (error != ENXIO) && (error != ETIMEDOUT)) {
		lck_mtx_lock(&nmp->nm_lock);
		error = nfs_gss_clnt_ctx_failover(req);
		lck_mtx_unlock(&nmp->nm_lock);
/*
 * Set up a dummy context to allow the use of sec=sys
 * for this user, if the server allows sec=sys.
 * The context is valid for GSS_CLNT_SYS_VALID seconds,
 * so that the user will periodically attempt to fail back
 * and get a real credential.
 *
 * Assumes context list (nm_lock) is locked
 */
nfs_gss_clnt_ctx_failover(struct nfsreq *req)
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp;
	uid_t uid = kauth_cred_getuid(req->r_cred);

	MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);

	cp->gss_clnt_service = RPCSEC_GSS_SVC_SYS;
	cp->gss_clnt_uid = uid;
	cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
	cp->gss_clnt_ctime = now.tv_sec;	// time stamp
	nfs_gss_clnt_ctx_ref(req, cp);
	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
/*
 * Inserts an RPCSEC_GSS credential into an RPC header.
 * After the credential is inserted, the code continues
 * to build the verifier which contains a signed checksum
 */
nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args)
	struct nfs_gss_clnt_ctx *cp;
	int slpflag, recordmark = 0;
	int start, len, offset = 0;
	struct nfsm_chain nmc_tmp;
	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
	u_char cksum[MAX_DIGEST];

	slpflag |= ((req->r_nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0;
	recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);

	if (req->r_gss_ctx == NULL) {
		/*
		 * Find the context for this user.
		 * If no context is found, one will
		 */
		error = nfs_gss_clnt_ctx_find(req);

	/*
	 * If it's a dummy context for a user that's using
	 * a fallback to sec=sys, then just return an error
	 * so rpchead can encode an RPCAUTH_UNIX cred.
	 */
	if (cp->gss_clnt_service == RPCSEC_GSS_SVC_SYS) {
		/*
		 * The dummy context is valid for just
		 * GSS_CLNT_SYS_VALID seconds. If the context
		 * is older than this, mark it invalid and try
		 * again to get a real one.
		 */
		lck_mtx_lock(cp->gss_clnt_mtx);
		if (now.tv_sec > cp->gss_clnt_ctime + GSS_CLNT_SYS_VALID) {
			cp->gss_clnt_flags |= GSS_CTX_INVAL;
			lck_mtx_unlock(cp->gss_clnt_mtx);
			nfs_gss_clnt_ctx_unref(req);
		lck_mtx_unlock(cp->gss_clnt_mtx);

	/*
	 * If the context thread isn't null, then the context isn't
	 * yet complete and is for the exclusive use of the thread
	 * doing the context setup. Wait until the context thread
	 */
	lck_mtx_lock(cp->gss_clnt_mtx);
	if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
		cp->gss_clnt_flags |= GSS_NEEDCTX;
		msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
		if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
		nfs_gss_clnt_ctx_unref(req);
	lck_mtx_unlock(cp->gss_clnt_mtx);

	ki = &cp->gss_clnt_kinfo;
	if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
		/*
		 * Get a sequence number for this request.
		 * Check whether the oldest request in the window is complete.
		 * If it's still pending, then wait until it's done before
		 * we allocate a new sequence number and allow this request
		 */
		lck_mtx_lock(cp->gss_clnt_mtx);
		while (win_getbit(cp->gss_clnt_seqbits,
			((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
			cp->gss_clnt_flags |= GSS_NEEDSEQ;
			msleep(cp, cp->gss_clnt_mtx, slpflag, "seqwin", NULL);
			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
				lck_mtx_unlock(cp->gss_clnt_mtx);
			if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
				/* Renewed while we were waiting */
				lck_mtx_unlock(cp->gss_clnt_mtx);
				nfs_gss_clnt_ctx_unref(req);
		seqnum = ++cp->gss_clnt_seqnum;
		win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin);
		lck_mtx_unlock(cp->gss_clnt_mtx);

		MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK|M_ZERO);
		gsp->gss_seqnum = seqnum;
		SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext);
	/* Insert the credential */
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
	nfsm_chain_add_32(error, nmc, 5 * NFSX_UNSIGNED + cp->gss_clnt_handle_len);
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS_VERS_1);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_proc);
	nfsm_chain_add_32(error, nmc, seqnum);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_service);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len);
	if (cp->gss_clnt_handle_len > 0) {
		if (cp->gss_clnt_handle == NULL)
		nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);

	/*
	 * Now add the verifier
	 */
	if (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
	    cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) {
		/*
		 * If the context is still being created
		 * then use a null verifier.
		 */
		nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);	// flavor
		nfsm_chain_add_32(error, nmc, 0);		// length
		nfsm_chain_build_done(error, nmc);
		nfs_gss_append_chain(nmc, args);

	offset = recordmark ? NFSX_UNSIGNED : 0;	// record mark
	nfsm_chain_build_done(error, nmc);
	nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), offset, 0, cksum);

	toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);	// flavor
	nfsm_chain_add_32(error, nmc, toklen);		// length
	nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
	nfsm_chain_build_done(error, nmc);

	/*
	 * Now we may have to compute integrity or encrypt the call args
	 * per RFC 2203 Section 5.3.2
	 */
	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
		nfs_gss_append_chain(nmc, args);
	case RPCSEC_GSS_SVC_INTEGRITY:
		len = nfs_gss_mchain_length(args);	// Find args length
		req->r_gss_arglen = len;		// Stash the args len
		len += NFSX_UNSIGNED;			// Add seqnum length
		nfsm_chain_add_32(error, nmc, len);	// and insert it
		start = nfsm_chain_offset(nmc);
		nfsm_chain_add_32(error, nmc, seqnum);	// Insert seqnum
		req->r_gss_argoff = nfsm_chain_offset(nmc);	// Offset to args
		nfsm_chain_build_done(error, nmc);

		nfs_gss_append_chain(nmc, args);	// Append the args mbufs

		/* Now compute a checksum over the seqnum + args */
		nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, len, cksum);

		/* Insert it into a token and append to the request */
		toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 1, 0, cksum);
		nfsm_chain_finish_mbuf(error, nmc);	// force checksum into new mbuf
		nfsm_chain_add_32(error, nmc, toklen);
		nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
		nfsm_chain_build_done(error, nmc);
	case RPCSEC_GSS_SVC_PRIVACY:
		/* Prepend a new mbuf with the confounder & sequence number */
		nfsm_chain_build_alloc_init(error, &nmc_tmp, 3 * NFSX_UNSIGNED);
		nfsm_chain_add_32(error, &nmc_tmp, random());	// confounder bytes 1-4
		nfsm_chain_add_32(error, &nmc_tmp, random());	// confounder bytes 4-8
		nfsm_chain_add_32(error, &nmc_tmp, seqnum);
		nfsm_chain_build_done(error, &nmc_tmp);

		nfs_gss_append_chain(&nmc_tmp, args);	// Append the args mbufs

		len = nfs_gss_mchain_length(args);	// Find args length
		len += 3 * NFSX_UNSIGNED;		// add confounder & seqnum
		req->r_gss_arglen = len;		// Stash length

		/*
		 * Append a pad trailer - per RFC 1964 section 1.2.2.3
		 * Since XDR data is always 32-bit aligned, it
		 * needs to be padded either by 4 bytes or 8 bytes.
		 */
		nfsm_chain_finish_mbuf(error, &nmc_tmp);	// force padding into new mbuf
		nfsm_chain_add_32(error, &nmc_tmp, 0x04040404);
		len += NFSX_UNSIGNED;
		nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
		nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
		len += 2 * NFSX_UNSIGNED;
		nfsm_chain_build_done(error, &nmc_tmp);

		/* Now compute a checksum over the confounder + seqnum + args */
		nfs_gss_cksum_chain(ki, &nmc_tmp, ALG_WRAP(ki), 0, len, cksum);

		/* Insert it into a token */
		toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 1, len, cksum);
		nfsm_chain_add_32(error, nmc, toklen + len);	// token + args length
		nfsm_chain_add_opaque_nopad(error, nmc, tokbuf, toklen);
		req->r_gss_argoff = nfsm_chain_offset(nmc);	// Stash offset
		nfsm_chain_build_done(error, nmc);

		nfs_gss_append_chain(nmc, nmc_tmp.nmc_mhead);	// Append the args mbufs

		/* Finally, encrypt the args */
		nfs_gss_encrypt_chain(ki, &nmc_tmp, 0, len, DES_ENCRYPT);

		/* Add null XDR pad if the ASN.1 token misaligned the data */
		pad = nfsm_pad(toklen + len);
		nfsm_chain_add_opaque_nopad(error, nmc, iv0, pad);
		nfsm_chain_build_done(error, nmc);
/*
 * When receiving a reply, the client checks the verifier
 * returned by the server. Check that the verifier is the
 * correct type, then extract the sequence number checksum
 * from the token in the credential and compare it with a
 * computed checksum of the sequence number in the request
 */
nfs_gss_clnt_verf_get(
	struct nfsm_chain *nmc,
	uint32_t *accepted_statusp)
	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
	u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsm_chain nmc_tmp;
	uint32_t reslen, start, cksumlen, toklen;
	gss_key_info *ki = &cp->gss_clnt_kinfo;

	reslen = cksumlen = 0;
	*accepted_statusp = 0;

		return (NFSERR_EAUTH);
	/*
	 * If it's not an RPCSEC_GSS verifier, then it has to
	 * be a null verifier that resulted from either
	 * a CONTINUE_NEEDED reply during context setup or
	 * from the reply to an AUTH_UNIX call from a dummy
	 * context that resulted from a fallback to sec=sys.
	 */
	if (verftype != RPCSEC_GSS) {
		if (verftype != RPCAUTH_NULL)
			return (NFSERR_EAUTH);
		if (cp->gss_clnt_flags & GSS_CTX_COMPLETE &&
		    cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS)
			return (NFSERR_EAUTH);
		nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
		nfsm_chain_get_32(error, nmc, *accepted_statusp);

	/*
	 * If we received an RPCSEC_GSS verifier but the
	 * context isn't yet complete, then it must be
	 * the context complete message from the server.
	 * The verifier will contain an encrypted checksum
	 * of the window but we don't have the session key
	 * yet so we can't decrypt it. Stash the verifier
	 * and check it later in nfs_gss_clnt_ctx_init() when
	 * the context is complete.
	 */
	if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
		MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK|M_ZERO);
		if (cp->gss_clnt_verf == NULL)
		nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf);
		nfsm_chain_get_32(error, nmc, *accepted_statusp);

	if (verflen != KRB5_SZ_TOKEN(ki->hash_len))
		return (NFSERR_EAUTH);

	/*
	 * Get the 8 octet sequence number
	 * checksum out of the verifier token.
	 */
	nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
	error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum1);

	/*
	 * Search the request sequence numbers for this reply, starting
	 * with the most recent, looking for a checksum that matches
	 * the one in the verifier returned by the server.
	 */
	SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
		nfs_gss_cksum_rep(ki, gsp->gss_seqnum, cksum2);
		if (bcmp(cksum1, cksum2, HASHLEN(ki)) == 0)
		return (NFSERR_EAUTH);

	/*
	 * Get the RPC accepted status
	 */
	nfsm_chain_get_32(error, nmc, *accepted_statusp);
	if (*accepted_statusp != RPC_SUCCESS)

	/*
	 * Now we may have to check integrity or decrypt the results
	 * per RFC 2203 Section 5.3.2
	 */
	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
	case RPCSEC_GSS_SVC_INTEGRITY:
		/*
		 * Here's what we expect in the integrity results:
		 *
		 * - length of seq num + results (4 bytes)
		 * - sequence number (4 bytes)
		 * - results (variable bytes)
		 * - length of checksum token (37)
		 * - checksum of seqnum + results (37 bytes)
		 */
		nfsm_chain_get_32(error, nmc, reslen);		// length of results
		if (reslen > NFS_MAXPACKET) {

		/* Compute a checksum over the sequence number + results */
		start = nfsm_chain_offset(nmc);
		nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, reslen, cksum1);

		/*
		 * Get the sequence number prepended to the results
		 * and compare it against the list in the request.
		 */
		nfsm_chain_get_32(error, nmc, seqnum);
		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
			if (seqnum == gsp->gss_seqnum)

		/*
		 * Advance to the end of the results and
		 * fetch the checksum computed by the server.
		 */
		reslen -= NFSX_UNSIGNED;			// already skipped seqnum
		nfsm_chain_adv(error, &nmc_tmp, reslen);	// skip over the results
		nfsm_chain_get_32(error, &nmc_tmp, cksumlen);	// length of checksum
		if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {
		nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
		error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 0, NULL, cksum2);

		/* Verify that the checksums are the same */
		if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
	case RPCSEC_GSS_SVC_PRIVACY:
		/*
		 * Here's what we expect in the privacy results:
		 *
		 * - length of confounder + seq num + token + results
		 * - wrap token (37-40 bytes)
		 * - confounder (8 bytes)
		 * - sequence number (4 bytes)
		 * - results (encrypted)
		 */
		nfsm_chain_get_32(error, nmc, reslen);		// length of results
		if (reslen > NFS_MAXPACKET) {

		/* Get the token that prepends the encrypted results */
		nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
		error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 0,
		nfsm_chain_reverse(nmc, nfsm_pad(toklen));
		reslen -= toklen;				// size of confounder + seqnum + results

		/* decrypt the confounder + sequence number + results */
		start = nfsm_chain_offset(nmc);
		nfs_gss_encrypt_chain(ki, nmc, start, reslen, DES_DECRYPT);

		/* Compute a checksum over the confounder + sequence number + results */
		nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, reslen, cksum2);

		/* Verify that the checksums are the same */
		if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
		nfsm_chain_adv(error, nmc, 8);	// skip over the confounder

		/*
		 * Get the sequence number prepended to the results
		 * and compare it against the list in the request.
		 */
		nfsm_chain_get_32(error, nmc, seqnum);
		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
			if (seqnum == gsp->gss_seqnum)
/*
 * An RPCSEC_GSS request with no integrity or privacy consists
 * of just the header mbufs followed by the arg mbufs.
 *
 * However, integrity or privacy both add trailer mbufs to the args,
 * which means we have to do some work to restore the arg mbuf
 * chain to its previous state in case we need to retransmit.
 *
 * The location and length of the args is marked by two fields
 * in the request structure: r_gss_argoff and r_gss_arglen,
 * which are stashed when the NFS request is built.
 */
nfs_gss_clnt_args_restore(struct nfsreq *req)
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsm_chain mchain, *nmc = &mchain;

		return (NFSERR_EAUTH);

	if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0)

	nfsm_chain_dissect_init(error, nmc, req->r_mhead);	// start at RPC header
	nfsm_chain_adv(error, nmc, req->r_gss_argoff);		// advance to args

	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
	case RPCSEC_GSS_SVC_INTEGRITY:
		/*
		 * All we have to do here is remove the appended checksum mbufs.
		 * We know that the checksum starts in a new mbuf beyond the end
		 */
		nfsm_chain_adv(error, nmc, req->r_gss_arglen);	// adv to last args mbuf

		mbuf_freem(mbuf_next(nmc->nmc_mcur));		// free the cksum mbuf
		error = mbuf_setnext(nmc->nmc_mcur, NULL);
	case RPCSEC_GSS_SVC_PRIVACY:
		/*
		 * The args are encrypted along with prepended confounders and seqnum.
		 * First we decrypt the confounder, seqnum and args, then skip to the
		 * final mbuf of the args.
		 * The arglen includes 8 bytes of confounder and 4 bytes of seqnum.
		 * Finally, we remove between 4 and 8 bytes of encryption padding
		 * as well as any alignment padding in the trailing mbuf.
		 */
		len = req->r_gss_arglen;
		len += len % 8 > 0 ? 4 : 8;			// add DES padding length
		nfs_gss_encrypt_chain(&cp->gss_clnt_kinfo, nmc,
			req->r_gss_argoff, len, DES_DECRYPT);
		nfsm_chain_adv(error, nmc, req->r_gss_arglen);
		mbuf_freem(mbuf_next(nmc->nmc_mcur));		// free the pad mbuf
		error = mbuf_setnext(nmc->nmc_mcur, NULL);
/*
 * This function sets up a new context on the client.
 * Context setup alternates upcalls to the gssd with NFS nullproc calls
 * to the server. Each of these calls exchanges an opaque token, obtained
 * via the gssd's calls into the GSS-API on either the client or the server.
 * This cycle of calls ends when the client's upcall to the gssd and the
 * server's response both return GSS_S_COMPLETE. At this point, the client
 * should have its session key and a handle that it can use to refer to its
 * new context on the server.
 */
nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
	struct nfsmount *nmp = req->r_nmp;
	int client_complete = 0;
	int server_complete = 0;
	u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
	gss_key_info *ki = &cp->gss_clnt_kinfo;

	/* Initialize a new client context */

	cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp);
	if (cp->gss_clnt_svcname == NULL) {
		error = NFSERR_EAUTH;

	cp->gss_clnt_proc = RPCSEC_GSS_INIT;

	cp->gss_clnt_service =
		nmp->nm_auth == RPCAUTH_KRB5  ? RPCSEC_GSS_SVC_NONE :
		nmp->nm_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
		nmp->nm_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;

	cp->gss_clnt_gssd_flags = (nfs_single_des ? GSSD_NFS_1DES : 0);

	/*
	 * Now loop around alternating gss_init_sec_context and
	 * gss_accept_sec_context upcalls to the gssd on the client
	 * and server side until the context is complete - or fails.
	 */

		/* Upcall to the gss_init_sec_context in the gssd */
		error = nfs_gss_clnt_gssd_upcall(req, cp);

		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			client_complete = 1;
			if (server_complete)
		} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
			error = NFSERR_EAUTH;

		/*
		 * Pass the token to the server.
		 */
		error = nfs_gss_clnt_ctx_callserver(req, cp);
			if (cp->gss_clnt_proc == RPCSEC_GSS_INIT &&
			    (cp->gss_clnt_gssd_flags & (GSSD_RESTART | GSSD_NFS_1DES)) == 0) {
				cp->gss_clnt_gssd_flags = (GSSD_RESTART | GSSD_NFS_1DES);
				if (cp->gss_clnt_token)
					FREE(cp->gss_clnt_token, M_TEMP);
				cp->gss_clnt_token = NULL;
				cp->gss_clnt_tokenlen = 0;

		// Reset flags, if error = ENEEDAUTH we will try 3des again
		cp->gss_clnt_gssd_flags = 0;

		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			server_complete = 1;
			if (client_complete)
		} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
			error = NFSERR_EAUTH;

		cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;

	/*
	 * The context is apparently established successfully
	 */
	cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
	cp->gss_clnt_proc = RPCSEC_GSS_DATA;
	cp->gss_clnt_ctime = now.tv_sec;	// time stamp

	/*
	 * Compute checksum of the server's window
	 */
	nfs_gss_cksum_rep(ki, cp->gss_clnt_seqwin, cksum1);

	/*
	 * and see if it matches the one in the
	 * verifier the server returned.
	 */
	error = nfs_gss_token_get(ki, ALG_MIC(ki), cp->gss_clnt_verf, 0,
	FREE(cp->gss_clnt_verf, M_TEMP);
	cp->gss_clnt_verf = NULL;

	if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
		error = NFSERR_EAUTH;

	/*
	 * Set an initial sequence number somewhat randomized.
	 * Start small so we don't overflow GSS_MAXSEQ too quickly.
	 * Add the size of the sequence window so seqbits arithmetic
	 * doesn't go negative.
	 */
	cp->gss_clnt_seqnum = (random() & 0xffff) + cp->gss_clnt_seqwin;

	/*
	 * Allocate a bitmap to keep track of which requests
	 * are pending within the sequence number window.
	 */
	MALLOC(cp->gss_clnt_seqbits, uint32_t *,
		nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
	if (cp->gss_clnt_seqbits == NULL)
		error = NFSERR_EAUTH;

	/*
	 * If the error is ENEEDAUTH we're not done, so no need
	 * to wake up other threads again. This thread will retry in
	 * the find or renew routines.
	 */
	if (error == ENEEDAUTH)

	/*
	 * If there's an error, just mark it as invalid.
	 * It will be removed when the reference count
	 */
	cp->gss_clnt_flags |= GSS_CTX_INVAL;

	/*
	 * Wake any threads waiting to use the context
	 */
	lck_mtx_lock(cp->gss_clnt_mtx);
	cp->gss_clnt_thread = NULL;
	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
	lck_mtx_unlock(cp->gss_clnt_mtx);
/*
 * Call the NFS server using a null procedure for context setup.
 * Even though it's a null procedure and nominally has no arguments
 * RFC 2203 requires that the GSS-API token be passed as an argument
 * and received as a reply.
 */
nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen);
	nfsm_chain_build_alloc_init(error, &nmreq, sz);
	nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen);
	if (cp->gss_clnt_tokenlen > 0)
		nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
	nfsm_chain_build_done(error, &nmreq);

	/* Call the server */
	error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred,
			(req->r_flags & R_OPTMASK), cp, &nmrep, &status);
	if (cp->gss_clnt_token != NULL) {
		FREE(cp->gss_clnt_token, M_TEMP);
		cp->gss_clnt_token = NULL;

	/* Get the server's reply */

	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len);
	if (cp->gss_clnt_handle != NULL) {
		FREE(cp->gss_clnt_handle, M_TEMP);
		cp->gss_clnt_handle = NULL;
	if (cp->gss_clnt_handle_len > 0) {
		MALLOC(cp->gss_clnt_handle, u_char *, cp->gss_clnt_handle_len, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_handle == NULL) {
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_handle_len, cp->gss_clnt_handle);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_major);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen);
	if (cp->gss_clnt_tokenlen > 0) {
		MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_token == NULL) {
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_tokenlen, cp->gss_clnt_token);

	/*
	 * Make sure any unusual errors are expanded and logged by gssd
	 */
	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
		char who[] = "server";
		char unknown[] = "<unknown>";

		(void) mach_gss_log_error(
			!req->r_nmp ? unknown :
			vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname,
			cp->gss_clnt_minor);

	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
/*
 * Ugly hack to get the service principal from the f_mntfromname field in
 * the statfs struct. We assume a format of server:path. We don't currently
 * support url's or other bizarre formats like path@server. A better solution
 * here might be to allow passing the service principal down in the mount args.
 * For kerberos we just use the default realm.
 */
nfs_gss_clnt_svcname(struct nfsmount *nmp)
	char *svcname, *d, *mntfromhere;

	mntfromhere = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
	len = strlen(mntfromhere) + 5;	/* "nfs/" plus null */
	MALLOC(svcname, char *, len, M_TEMP, M_NOWAIT);
	if (svcname == NULL)
	strlcpy(svcname, "nfs/", len);
	strlcat(svcname, mntfromhere, len);
	d = strchr(svcname, ':');
/*
 * Make an upcall to the gssd using Mach RPC
 * The upcall is made using a task special port.
 * This allows launchd to fire up the gssd in the
 * user's session. This is important, since gssd
 * must have access to the user's credential cache.
 */
nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
	byte_buffer okey = NULL;
	uint32_t skeylen = 0;
	vm_map_copy_t itoken = NULL;
	byte_buffer otoken = NULL;
	mach_msg_type_number_t otokenlen;

	/*
	 * NFS currently only supports default principals or
	 * principals based on the uid of the caller.
	 *
	 * N.B. Note we define a one character array for the principal
	 * so that we can hold an empty string required by mach, since
	 * the kernel is being compiled with -Wwrite-strings.
	 */

	if (cp->gss_clnt_mport == NULL) {
		kr = task_get_gssd_port(get_threadtask(req->r_thread), &cp->gss_clnt_mport);
		if (kr != KERN_SUCCESS) {
			printf("nfs_gss_clnt_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
		if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
			printf("nfs_gss_clnt_gssd_upcall: gssd port not valid\n");
			cp->gss_clnt_mport = NULL;

	if (cp->gss_clnt_tokenlen > 0)
		nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);

	kr = mach_gss_init_sec_context(
		(byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
		cp->gss_clnt_svcname,
		cp->gss_clnt_gssd_flags,
		&cp->gss_clnt_context,
		&cp->gss_clnt_cred_handle,
		&okey, (mach_msg_type_number_t *) &skeylen,
		&otoken, &otokenlen,
		&cp->gss_clnt_major,
		&cp->gss_clnt_minor);

	cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;

	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
		if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
			retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
			if (cp->gss_clnt_tokenlen > 0)
				nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
		task_release_special_port(cp->gss_clnt_mport);
		cp->gss_clnt_mport = NULL;

	/*
	 * Make sure any unusual errors are expanded and logged by gssd
	 */
	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
		char who[] = "client";
		char unknown[] = "<unknown>";

		(void) mach_gss_log_error(
			!req->r_nmp ? unknown :
			vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname,
			cp->gss_clnt_minor);

	if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
		printf("nfs_gss_clnt_gssd_upcall: bad key length (%d)\n", skeylen);
		vm_map_copy_discard((vm_map_copy_t) okey);
		vm_map_copy_discard((vm_map_copy_t) otoken);
	error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen,
		cp->gss_clnt_kinfo.skey);
		vm_map_copy_discard((vm_map_copy_t) otoken);

	error = gss_key_init(&cp->gss_clnt_kinfo, skeylen);

	/* Free context token used as input */
	if (cp->gss_clnt_token)
		FREE(cp->gss_clnt_token, M_TEMP);
	cp->gss_clnt_token = NULL;
	cp->gss_clnt_tokenlen = 0;

	if (otokenlen > 0) {
		/* Set context token to gss output token */
		MALLOC(cp->gss_clnt_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_token == NULL) {
			printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen);
			vm_map_copy_discard((vm_map_copy_t) otoken);
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token);
			FREE(cp->gss_clnt_token, M_TEMP);
			cp->gss_clnt_token = NULL;
			return (NFSERR_EAUTH);
		cp->gss_clnt_tokenlen = otokenlen;

	if (cp->gss_clnt_token)
		FREE(cp->gss_clnt_token, M_TEMP);
	cp->gss_clnt_token = NULL;
	cp->gss_clnt_tokenlen = 0;

	return (NFSERR_EAUTH);
/*
 * Invoked at the completion of an RPC call that uses an RPCSEC_GSS
 * credential. The sequence number window that the server returns
 * at context setup indicates the maximum number of client calls that
 * can be outstanding on a context. The client maintains a bitmap that
 * represents the server's window. Each pending request has a bit set
 * in the window bitmap. When a reply comes in or times out, we reset
 * the bit in the bitmap and if there are any other threads waiting for
 * a context slot we notify the waiting thread(s).
 *
 * Note that if a request is retransmitted, it will have a single XID
 * but it may be associated with multiple sequence numbers. So we
 * may have to reset multiple sequence number bits in the window bitmap.
 */
nfs_gss_clnt_rpcdone(struct nfsreq *req)
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct gss_seq *gsp, *ngsp;

	if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE))
		return;	// no context - don't bother

	/*
	 * Reset the bit for this request in the
	 * sequence number window to indicate it's done.
	 * We do this even if the request timed out.
	 */
	lck_mtx_lock(cp->gss_clnt_mtx);
	gsp = SLIST_FIRST(&req->r_gss_seqlist);
	if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin))
		win_resetbit(cp->gss_clnt_seqbits,
			gsp->gss_seqnum % cp->gss_clnt_seqwin);

	/*
	 * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries
	 */
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) {
		if (++i > GSS_CLNT_SEQLISTMAX) {
			SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext);

	/*
	 * If there's a thread waiting for
	 * the window to advance, wake it up.
	 */
	if (cp->gss_clnt_flags & GSS_NEEDSEQ) {
		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
	lck_mtx_unlock(cp->gss_clnt_mtx);
/*
 * Create a reference to a context from a request
 * and bump the reference count
 */
nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
	req->r_gss_ctx = cp;

	lck_mtx_lock(cp->gss_clnt_mtx);
	cp->gss_clnt_refcnt++;
	lck_mtx_unlock(cp->gss_clnt_mtx);
/*
 * Remove a context reference from a request
 * If the reference count drops to zero, and the
 * context is invalid, destroy the context
 */
nfs_gss_clnt_ctx_unref(struct nfsreq *req)
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;

	req->r_gss_ctx = NULL;

	lck_mtx_lock(cp->gss_clnt_mtx);
	if (--cp->gss_clnt_refcnt == 0
		&& cp->gss_clnt_flags & GSS_CTX_INVAL) {
		lck_mtx_unlock(cp->gss_clnt_mtx);

		lck_mtx_lock(&nmp->nm_lock);
		nfs_gss_clnt_ctx_remove(nmp, cp);
		lck_mtx_unlock(&nmp->nm_lock);

	lck_mtx_unlock(cp->gss_clnt_mtx);
nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp)
	/*
	 * If dequeueing, assume nmp->nm_lock is held
	 */
	TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);

	if (cp->gss_clnt_mport)
		task_release_special_port(cp->gss_clnt_mport);
	if (cp->gss_clnt_mtx)
		lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
	if (cp->gss_clnt_handle)
		FREE(cp->gss_clnt_handle, M_TEMP);
	if (cp->gss_clnt_seqbits)
		FREE(cp->gss_clnt_seqbits, M_TEMP);
	if (cp->gss_clnt_token)
		FREE(cp->gss_clnt_token, M_TEMP);
	if (cp->gss_clnt_svcname)
		FREE(cp->gss_clnt_svcname, M_TEMP);
/*
 * The context for a user is invalid.
 * Mark the context as invalid, then
 * create a new context.
 */
nfs_gss_clnt_ctx_renew(struct nfsreq *req)
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *ncp;
	mach_port_t saved_mport;

	lck_mtx_lock(cp->gss_clnt_mtx);
	if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
		lck_mtx_unlock(cp->gss_clnt_mtx);
		nfs_gss_clnt_ctx_unref(req);
		return (0);	// already being renewed
	saved_uid = cp->gss_clnt_uid;
	saved_mport = task_copy_special_port(cp->gss_clnt_mport);

	/* Remove the old context */
	cp->gss_clnt_flags |= GSS_CTX_INVAL;

	/*
	 * If there's a thread waiting
	 * in the old context, wake it up.
	 */
	if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
	lck_mtx_unlock(cp->gss_clnt_mtx);

	/*
	 * Create a new context
	 */
	MALLOC(ncp, struct nfs_gss_clnt_ctx *, sizeof(*ncp),
		M_TEMP, M_WAITOK|M_ZERO);

	ncp->gss_clnt_uid = saved_uid;
	ncp->gss_clnt_mport = task_copy_special_port(saved_mport);	// re-use the gssd port
	ncp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
	ncp->gss_clnt_thread = current_thread();
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, ncp, gss_clnt_entries);
	lck_mtx_unlock(&nmp->nm_lock);

	/* Adjust reference counts to new and old context */
	nfs_gss_clnt_ctx_unref(req);
	nfs_gss_clnt_ctx_ref(req, ncp);

	error = nfs_gss_clnt_ctx_init(req, ncp);	// Initialize new context
	if (error == ENEEDAUTH) {
		error = nfs_gss_clnt_ctx_delay(req, &retrycnt);

	task_release_special_port(saved_mport);
	nfs_gss_clnt_ctx_unref(req);
/*
 * Destroy all the contexts associated with a mount.
 * The contexts are also destroyed by the server.
 */
nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp, int mntflags)
	struct nfs_gss_clnt_ctx *cp;
	struct ucred temp_cred;
	struct nfsm_chain nmreq, nmrep;

	bzero((caddr_t) &temp_cred, sizeof(temp_cred));
	temp_cred.cr_ngroups = 1;

		lck_mtx_lock(&nmp->nm_lock);
		cp = TAILQ_FIRST(&nmp->nm_gsscl);
		lck_mtx_unlock(&nmp->nm_lock);

		nfs_gss_clnt_ctx_ref(&req, cp);

		/*
		 * Tell the server to destroy its context.
		 * But don't bother if it's a forced unmount
		 * or if it's a dummy sec=sys context.
		 */
		if (!(mntflags & MNT_FORCE) && cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS) {
			temp_cred.cr_uid = cp->gss_clnt_uid;
			cred = kauth_cred_create(&temp_cred);
			cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;

			nfsm_chain_null(&nmreq);
			nfsm_chain_null(&nmrep);
			nfsm_chain_build_alloc_init(error, &nmreq, 0);
			nfsm_chain_build_done(error, &nmreq);
			nfs_request_gss(nmp->nm_mountp, &nmreq,
				current_thread(), cred, 0, cp, &nmrep, &status);
			nfsm_chain_cleanup(&nmreq);
			nfsm_chain_cleanup(&nmrep);
			kauth_cred_unref(&cred);

		/*
		 * Mark the context invalid then drop
		 * the reference to remove it if its
		 */
		cp->gss_clnt_flags |= GSS_CTX_INVAL;
		nfs_gss_clnt_ctx_unref(&req);
/*
 * If we get a failure in trying to establish a context we need to wait a
 * little while to see if the server is feeling better. In our case this is
 * probably a failure in directory services not coming up in a timely fashion.
 * This routine sort of mimics receiving a jukebox error.
 */
nfs_gss_clnt_ctx_delay(struct nfsreq *req, int *retry)
	int timeo = (1 << *retry) * NFS_TRYLATERDEL;
	struct nfsmount *nmp = req->r_nmp;

	if ((nmp->nm_flag & NFSMNT_SOFT) && *retry > nmp->nm_retry)

	waituntil = now.tv_sec + timeo;
	while (now.tv_sec < waituntil) {
		tsleep(&lbolt, PSOCK, "nfs_gss_clnt_ctx_delay", 0);
		error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);

#endif /* NFSCLIENT */
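
/*
 * Illustrative sketch of the backoff used by nfs_gss_clnt_ctx_delay() above:
 * the wait doubles with each retry, starting from NFS_TRYLATERDEL seconds.
 * This is purely an example of the arithmetic; the real routine also honors
 * soft mounts and interruption.
 */
static int
gss_ctx_retry_delay_sketch(int retry)
{
	return ((1 << retry) * NFS_TRYLATERDEL);	/* seconds to wait before retrying */
}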
/*
 * Find a server context based on a handle value received
 * in an RPCSEC_GSS credential.
 */
static struct nfs_gss_svc_ctx *
nfs_gss_svc_ctx_find(uint32_t handle)
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *cp;

	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];

	/*
	 * Don't return a context that is going to expire in GSS_CTX_PEND seconds
	 */
	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);

	lck_mtx_lock(nfs_gss_svc_ctx_mutex);

	LIST_FOREACH(cp, head, gss_svc_entries)
		if (cp->gss_svc_handle == handle) {
			if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
				/*
				 * Context has or is about to expire. Don't use.
				 * We'll return null and the client will have to create
				 */
				cp->gss_svc_handle = 0;
				/*
				 * Make sure though that we stay around for GSS_CTX_PEND seconds
				 * for other threads that might be using the context.
				 */
				cp->gss_svc_incarnation = timenow;

	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
/*
 * Insert a new server context into the hash table
 * and start the context reap thread if necessary.
 */
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
	struct nfs_gss_svc_ctx_hashhead *head;

	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];

	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
	LIST_INSERT_HEAD(head, cp, gss_svc_entries);
	nfs_gss_ctx_count++;

	if (!nfs_gss_timer_on) {
		nfs_gss_timer_on = 1;

		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
			min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, GSS_SVC_CTX_TTL)) * MSECS_PER_SEC);

	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
/*
 * This function is called via the kernel's callout
 * mechanism. It runs only when there are
 * cached RPCSEC_GSS contexts.
 */
nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *cp, *next;

	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
	clock_get_uptime(&timenow);

	/*
	 * Scan all the hash chains
	 * Assume nfs_gss_svc_ctx_mutex is held
	 */
	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
		/*
		 * For each hash chain, look for entries
		 * that haven't been used in a while.
		 */
		head = &nfs_gss_svc_ctx_hashtbl[i];
		for (cp = LIST_FIRST(head); cp; cp = next) {
			next = LIST_NEXT(cp, gss_svc_entries);
			if (timenow > cp->gss_svc_incarnation +
				(cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)) {
				/*
				 * A stale context - remove it
				 */
				LIST_REMOVE(cp, gss_svc_entries);
				if (cp->gss_svc_seqbits)
					FREE(cp->gss_svc_seqbits, M_TEMP);
				lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);

	nfs_gss_ctx_count = contexts;

	/*
	 * If there are still some cached contexts left,
	 * set up another callout to check on them later.
	 */
	nfs_gss_timer_on = nfs_gss_ctx_count > 0;
	if (nfs_gss_timer_on)
		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
			min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, GSS_SVC_CTX_TTL)) * MSECS_PER_SEC);

	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
/*
 * Here the server receives an RPCSEC_GSS credential in an
 * RPC call header. First there's some checking to make sure
 * the credential is appropriate - whether the context is still
 * being set up, or is complete. Then we use the handle to find
 * the server's context and validate the verifier, which contains
 * a signed checksum of the RPC header. If the verifier checks
 * out, we extract the user's UID and groups from the context
 * and use it to set up a UNIX credential for the user's request.
 */
nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
	uint32_t vers, proc, seqnum, service;
	uint32_t handle, handle_len;
	struct nfs_gss_svc_ctx *cp = NULL;
	uint32_t flavor = 0, verflen = 0;
	uint32_t arglen, start, toklen, cksumlen;
	u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
	u_char cksum1[MAX_DIGEST], cksum2[MAX_DIGEST];
	struct nfsm_chain nmc_tmp;

	vers = proc = seqnum = service = handle_len = 0;
	arglen = cksumlen = 0;

	nfsm_chain_get_32(error, nmc, vers);
	if (vers != RPCSEC_GSS_VERS_1) {
		error = NFSERR_AUTHERR | AUTH_REJECTCRED;

	nfsm_chain_get_32(error, nmc, proc);
	nfsm_chain_get_32(error, nmc, seqnum);
	nfsm_chain_get_32(error, nmc, service);
	nfsm_chain_get_32(error, nmc, handle_len);

	/*
	 * Make sure context setup/destroy is being done with a nullproc
	 */
	if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;

	/*
	 * If the sequence number is greater than the max
	 * allowable, reject and have the client init a
	 */
	if (seqnum > GSS_MAXSEQ) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;

		service == RPCSEC_GSS_SVC_NONE ?      RPCAUTH_KRB5 :
		service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
		service == RPCSEC_GSS_SVC_PRIVACY ?   RPCAUTH_KRB5P : 0;

	if (proc == RPCSEC_GSS_INIT) {
		/*
		 * Limit the total number of contexts
		 */
		if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;

		/*
		 * Set up a new context
		 */
		MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);

		/*
		 * Use the handle to find the context
		 */
		if (handle_len != sizeof(handle)) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
		nfsm_chain_get_32(error, nmc, handle);
		cp = nfs_gss_svc_ctx_find(handle);
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;

	cp->gss_svc_proc = proc;
	ki = &cp->gss_svc_kinfo;

	if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
		struct ucred temp_cred;

		if (cp->gss_svc_seqwin == 0) {
			/*
			 * Context isn't complete
			 */
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;

		if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
			/*
			 * Sequence number is bad
			 */
			error = EINVAL;	// drop the request

		/* Now compute the client's call header checksum */
		nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), 0, 0, cksum1);

		/*
		 * Validate the verifier.
		 * The verifier contains an encrypted checksum
		 * of the call header from the XID up to and
		 * including the credential. We compute the
		 * checksum and compare it with what came in
		 */
		nfsm_chain_get_32(error, nmc, flavor);
		nfsm_chain_get_32(error, nmc, verflen);
		if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN(ki->hash_len))
			error = NFSERR_AUTHERR | AUTH_BADVERF;
		nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);

		/* Get the checksum from the token inside the verifier */
		error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,

		if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;

		nd->nd_gss_seqnum = seqnum;
1998 * Set up the user's cred
2000 bzero(&temp_cred
, sizeof(temp_cred
));
2001 temp_cred
.cr_uid
= cp
->gss_svc_uid
;
2002 bcopy(cp
->gss_svc_gids
, temp_cred
.cr_groups
,
2003 sizeof(gid_t
) * cp
->gss_svc_ngroups
);
2004 temp_cred
.cr_ngroups
= cp
->gss_svc_ngroups
;
2006 nd
->nd_cr
= kauth_cred_create(&temp_cred
);
2007 if (nd
->nd_cr
== NULL
) {
2011 clock_get_uptime(&cp
->gss_svc_incarnation
);
2014 * If the call arguments are integrity or privacy protected
2015 * then we need to check them here.
2018 case RPCSEC_GSS_SVC_NONE
:
2021 case RPCSEC_GSS_SVC_INTEGRITY
:
2023 * Here's what we expect in the integrity call args:
2025 * - length of seq num + call args (4 bytes)
2026 * - sequence number (4 bytes)
2027 * - call args (variable bytes)
2028 * - length of checksum token (37)
2029 * - checksum of seqnum + call args (37 bytes)
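            /*
             * Editor's note (not in the original source): as a worked example,
             * a call whose plain args occupy 12 XDR bytes would arrive here as
             * arglen = 16 (4-byte seqnum + 12 arg bytes), the seqnum and args
             * themselves, a 4-byte checksum length of 37, and a 37-byte MIC
             * token padded to 40 bytes on the wire by the usual XDR opaque
             * rules. The 12-byte figure is just illustrative.
             */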
            nfsm_chain_get_32(error, nmc, arglen);        // length of args
            if (arglen > NFS_MAXPACKET) {

            /* Compute the checksum over the call args */
            start = nfsm_chain_offset(nmc);
            nfs_gss_cksum_chain(ki, nmc, ALG_MIC(ki), start, arglen, cksum1);

            /*
             * Get the sequence number prepended to the args
             * and compare it against the one sent in the
             * call credential.
             */
            nfsm_chain_get_32(error, nmc, seqnum);
            if (seqnum != nd->nd_gss_seqnum) {
                error = EBADRPC;            // returns as GARBAGEARGS

            /*
             * Advance to the end of the args and
             * fetch the checksum computed by the client.
             */
            arglen -= NFSX_UNSIGNED;            // skipped seqnum
            nfsm_chain_adv(error, &nmc_tmp, arglen);    // skip args
            nfsm_chain_get_32(error, &nmc_tmp, cksumlen);    // length of checksum
            if (cksumlen != KRB5_SZ_TOKEN(ki->hash_len)) {

            nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
            error = nfs_gss_token_get(ki, ALG_MIC(ki), tokbuf, 1,

            /* Verify that the checksums are the same */
            if (error || bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {
        case RPCSEC_GSS_SVC_PRIVACY:
            /*
             * Here's what we expect in the privacy call args:
             *
             * - length of confounder + seq num + token + call args
             * - wrap token (37-40 bytes)
             * - confounder (8 bytes)
             * - sequence number (4 bytes)
             * - call args (encrypted)
             */
            nfsm_chain_get_32(error, nmc, arglen);        // length of args
            if (arglen > NFS_MAXPACKET) {

            /* Get the token that prepends the encrypted args */
            nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX(ki->hash_len), tokbuf);
            error = nfs_gss_token_get(ki, ALG_WRAP(ki), tokbuf, 1,

            nfsm_chain_reverse(nmc, nfsm_pad(toklen));

            /* decrypt the 8 byte confounder + seqnum + args */
            start = nfsm_chain_offset(nmc);
            nfs_gss_encrypt_chain(ki, nmc, start, arglen, DES_DECRYPT);

            /* Compute a checksum over the sequence number + results */
            nfs_gss_cksum_chain(ki, nmc, ALG_WRAP(ki), start, arglen, cksum2);

            /* Verify that the checksums are the same */
            if (bcmp(cksum1, cksum2, HASHLEN(ki)) != 0) {

            /*
             * Get the sequence number prepended to the args
             * and compare it against the one sent in the
             * call credential.
             */
            nfsm_chain_adv(error, nmc, 8);            // skip over the confounder
            nfsm_chain_get_32(error, nmc, seqnum);
            if (seqnum != nd->nd_gss_seqnum) {
                error = EBADRPC;            // returns as GARBAGEARGS

        /*
         * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
         * then we expect a null verifier.
         */
        nfsm_chain_get_32(error, nmc, flavor);
        nfsm_chain_get_32(error, nmc, verflen);
        if (error || flavor != RPCAUTH_NULL || verflen > 0)
            error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;

    nd->nd_gss_context = cp;
/*
 * Insert the server's verifier into the RPC reply header.
 * It contains a signed checksum of the sequence number that
 * was received in the RPC call.
 * Then go on to add integrity or privacy if necessary.
 */
nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
    struct nfs_gss_svc_ctx *cp;
    u_char tokbuf[KRB5_SZ_TOKEN(MAX_DIGEST)];
    u_char cksum[MAX_DIGEST];

    cp = nd->nd_gss_context;
    ki = &cp->gss_svc_kinfo;

    if (cp->gss_svc_major != GSS_S_COMPLETE) {
        /*
         * If the context isn't yet complete
         * then return a null verifier.
         */
        nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
        nfsm_chain_add_32(error, nmc, 0);

    /*
     * Compute checksum of the request seq number.
     * If it's the final reply of context setup
     * then return the checksum of the context
     * sequence window.
     */
    if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
        cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
        nfs_gss_cksum_rep(ki, cp->gss_svc_seqwin, cksum);
    else
        nfs_gss_cksum_rep(ki, nd->nd_gss_seqnum, cksum);

    /*
     * Now wrap it in a token and add
     * the verifier to the reply.
     */
    toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
    nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
    nfsm_chain_add_32(error, nmc, toklen);
    nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
/*
 * The results aren't available yet, but if they need to be
 * checksummed for integrity protection or encrypted, then
 * we can record the start offset here, insert a place-holder
 * for the results length, as well as the sequence number.
 * The rest of the work is done later by nfs_gss_svc_protect_reply()
 * when the results are available.
 */
nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
    struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;

    if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
        cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)

    switch (nd->nd_sec) {
        nd->nd_gss_mb = nmc->nmc_mcur;            // record current mbuf
        nfsm_chain_finish_mbuf(error, nmc);        // split the chain here
        nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum);    // req sequence number

        nd->nd_gss_mb = nmc->nmc_mcur;            // record current mbuf
        nfsm_chain_finish_mbuf(error, nmc);        // split the chain here
        nfsm_chain_add_32(error, nmc, random());    // confounder bytes 1-4
        nfsm_chain_add_32(error, nmc, random());    // confounder bytes 5-8
        nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum);    // req sequence number
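    /*
     * Editor's note (not in the original source): after this runs, an
     * integrity (krb5i) reply chain is roughly
     *    reply header | <split> | seqnum | results...
     * and a privacy (krb5p) chain is roughly
     *    reply header | <split> | confounder | seqnum | results...
     * nfs_gss_svc_protect_reply() later prepends a length or wrap token at
     * the recorded split point and checksums or encrypts everything that
     * follows it.
     */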
/*
 * The results are checksummed or encrypted for return to the client
 */
nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep)
    struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
    struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
    struct nfsm_chain nmrep_pre, *nmc_pre = &nmrep_pre;
    u_char tokbuf[KRB5_SZ_TOKMAX(MAX_DIGEST)];
    u_char cksum[MAX_DIGEST];
    gss_key_info *ki = &cp->gss_svc_kinfo;

    /*
     * Using a reference to the mbuf where we previously split the reply
     * mbuf chain, we split the mbuf chain argument into two mbuf chains,
     * one that allows us to prepend a length field or token, (nmc_pre)
     * and the second which holds just the results that we're going to
     * checksum and/or encrypt. When we're done, we join the chains back
     * together.
     */
    nfs_gss_nfsm_chain(nmc_res, mrep);        // set up the results chain
    mb = nd->nd_gss_mb;                // the mbuf where we split
    results = mbuf_next(mb);            // first mbuf in the results
    reslen = nfs_gss_mchain_length(results);    // length of results
    error = mbuf_setnext(mb, NULL);            // disconnect the chains
    nfs_gss_nfsm_chain(nmc_pre, mb);        // set up the prepend chain

    if (nd->nd_sec == RPCAUTH_KRB5I) {
        nfsm_chain_add_32(error, nmc_pre, reslen);
        nfsm_chain_build_done(error, nmc_pre);
        nfs_gss_append_chain(nmc_pre, results);    // Append the results mbufs

        /* Now compute the checksum over the results data */
        nfs_gss_cksum_mchain(ki, results, ALG_MIC(ki), 0, reslen, cksum);

        /* Put it into a token and append to the request */
        toklen = nfs_gss_token_put(ki, ALG_MIC(ki), tokbuf, 0, 0, cksum);
        nfsm_chain_add_32(error, nmc_res, toklen);
        nfsm_chain_add_opaque(error, nmc_res, tokbuf, toklen);
        nfsm_chain_build_done(error, nmc_res);

        /*
         * Append a pad trailer - per RFC 1964 section 1.2.2.3
         * Since XDR data is always 32-bit aligned, it
         * needs to be padded either by 4 bytes or 8 bytes.
         */
        if (reslen % 8 > 0) {
            nfsm_chain_add_32(error, nmc_res, 0x04040404);
            reslen += NFSX_UNSIGNED;
            nfsm_chain_add_32(error, nmc_res, 0x08080808);
            nfsm_chain_add_32(error, nmc_res, 0x08080808);
            reslen += 2 * NFSX_UNSIGNED;
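        /*
         * Editor's note (not in the original source): worked example of the
         * pad choice above. The data handed to DES-CBC must end on an 8-byte
         * block boundary, and RFC 1964 section 1.2.2.3 pads with N octets
         * that each hold the value N. Since the XDR results are already a
         * multiple of 4 bytes, reslen % 8 is either 4 (append four 0x04
         * octets) or 0 (append a full block of eight 0x08 octets).
         */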
        nfsm_chain_build_done(error, nmc_res);

        /* Now compute the checksum over the results data */
        nfs_gss_cksum_mchain(ki, results, ALG_WRAP(ki), 0, reslen, cksum);

        /* Put it into a token and insert in the reply */
        toklen = nfs_gss_token_put(ki, ALG_WRAP(ki), tokbuf, 0, reslen, cksum);
        nfsm_chain_add_32(error, nmc_pre, toklen + reslen);
        nfsm_chain_add_opaque_nopad(error, nmc_pre, tokbuf, toklen);
        nfsm_chain_build_done(error, nmc_pre);

        nfs_gss_append_chain(nmc_pre, results);    // Append the results mbufs

        /* Encrypt the confounder + seqnum + results */
        nfs_gss_encrypt_mchain(ki, results, 0, reslen, DES_ENCRYPT);

        /* Add null XDR pad if the ASN.1 token misaligned the data */
        pad = nfsm_pad(toklen + reslen);
        nfsm_chain_add_opaque_nopad(error, nmc_pre, iv0, pad);
        nfsm_chain_build_done(error, nmc_pre);
/*
 * This function handles the context setup calls from the client.
 * Essentially, it implements the NFS null procedure calls when
 * an RPCSEC_GSS credential is used.
 * This is the context maintenance function. It creates and
 * destroys server contexts at the whim of the client.
 * During context creation, it receives GSS-API tokens from the
 * client, passes them up to gssd, and returns a received token
 * back to the client in the null procedure reply.
 */
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
    struct nfs_gss_svc_ctx *cp = NULL;
    uint32_t handle = 0;
    struct nfsm_chain *nmreq, nmrep;

    nmreq = &nd->nd_nmreq;
    nfsm_chain_null(&nmrep);
    cp = nd->nd_gss_context;

    switch (cp->gss_svc_proc) {
    case RPCSEC_GSS_INIT:
        /*
         * Give the client a random handle so that
         * if we reboot it's unlikely the client
         * will get a bad context match.
         * Make sure it's not zero, or already assigned.
         */
        } while (nfs_gss_svc_ctx_find(handle) != NULL || handle == 0);
        cp->gss_svc_handle = handle;
        cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
        clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
            &cp->gss_svc_incarnation);

        nfs_gss_svc_ctx_insert(cp);

    case RPCSEC_GSS_CONTINUE_INIT:
        /* Get the token from the request */
        nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
        if (cp->gss_svc_tokenlen == 0) {
            autherr = RPCSEC_GSS_CREDPROBLEM;

        MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
        if (cp->gss_svc_token == NULL) {
            autherr = RPCSEC_GSS_CREDPROBLEM;

        nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);

        /* Use the token in a gss_accept_sec_context upcall */
        error = nfs_gss_svc_gssd_upcall(cp);
            autherr = RPCSEC_GSS_CREDPROBLEM;
            if (error == NFSERR_EAUTH)

        /*
         * If the context isn't complete, pass the new token
         * back to the client for another round.
         */
        if (cp->gss_svc_major != GSS_S_COMPLETE)

        /*
         * Now the server context is complete.
         */
        clock_get_uptime(&cp->gss_svc_incarnation);

        cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
        MALLOC(cp->gss_svc_seqbits, uint32_t *,
            nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
        if (cp->gss_svc_seqbits == NULL) {
            autherr = RPCSEC_GSS_CREDPROBLEM;

    case RPCSEC_GSS_DATA:
        /* Just a nullproc ping - do nothing */

    case RPCSEC_GSS_DESTROY:
        /*
         * Don't destroy the context immediately because
         * other active requests might still be using it.
         * Instead, schedule it for destruction after
         * GSS_CTX_PEND time has elapsed.
         */
        cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
            cp->gss_svc_handle = 0;    // so it can't be found
            lck_mtx_lock(cp->gss_svc_mtx);
            clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
                &cp->gss_svc_incarnation);
            lck_mtx_unlock(cp->gss_svc_mtx);

        autherr = RPCSEC_GSS_CREDPROBLEM;
    /* Now build the reply */

    if (nd->nd_repstat == 0)
        nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
    sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen);    // size of results
    error = nfsrv_rephead(nd, slp, &nmrep, sz);
    *mrepp = nmrep.nmc_mhead;
    if (error || autherr)

    if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
        cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
        nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);

        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);

        nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
        if (cp->gss_svc_token != NULL) {
            nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
            FREE(cp->gss_svc_token, M_TEMP);
            cp->gss_svc_token = NULL;

    nd->nd_gss_context = NULL;
    LIST_REMOVE(cp, gss_svc_entries);
    if (cp->gss_svc_seqbits != NULL)
        FREE(cp->gss_svc_seqbits, M_TEMP);
    if (cp->gss_svc_token != NULL)
        FREE(cp->gss_svc_token, M_TEMP);
    lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);

    nfsm_chain_build_done(error, &nmrep);
    nfsm_chain_cleanup(&nmrep);
/*
 * This is almost a mirror-image of the client side upcall.
 * It passes and receives a token, but invokes gss_accept_sec_context.
 * If it's the final call of the context setup, then gssd also returns
 * the session key and the user's UID.
 */
nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
    byte_buffer okey = NULL;
    uint32_t skeylen = 0;
    vm_map_copy_t itoken = NULL;
    byte_buffer otoken = NULL;
    mach_msg_type_number_t otokenlen;
    char svcname[] = "nfs";

    kr = task_get_gssd_port(get_threadtask(current_thread()), &mp);
    if (kr != KERN_SUCCESS) {
        printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);

    if (!IPC_PORT_VALID(mp)) {
        printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");

    if (cp->gss_svc_tokenlen > 0)
        nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);

    kr = mach_gss_accept_sec_context(
        (byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
        &cp->gss_svc_context,
        &cp->gss_svc_cred_handle,
        &cp->gss_svc_ngroups,
        &okey, (mach_msg_type_number_t *) &skeylen,
        &otoken, &otokenlen,
        &cp->gss_svc_minor);

    if (kr != KERN_SUCCESS) {
        printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
        if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
            retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
            if (cp->gss_svc_tokenlen > 0)
                nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);

        task_release_special_port(mp);

    task_release_special_port(mp);

    if (skeylen != SKEYLEN && skeylen != SKEYLEN3) {
        printf("nfs_gss_svc_gssd_upcall: bad key length (%d)\n", skeylen);
        vm_map_copy_discard((vm_map_copy_t) okey);
        vm_map_copy_discard((vm_map_copy_t) otoken);

    error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_svc_kinfo.skey);
        vm_map_copy_discard((vm_map_copy_t) otoken);

    error = gss_key_init(&cp->gss_svc_kinfo, skeylen);

    /* Free context token used as input */
    if (cp->gss_svc_token)
        FREE(cp->gss_svc_token, M_TEMP);
    cp->gss_svc_token = NULL;
    cp->gss_svc_tokenlen = 0;

    if (otokenlen > 0) {
        /* Set context token to gss output token */
        MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK);
        if (cp->gss_svc_token == NULL) {
            printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
            vm_map_copy_discard((vm_map_copy_t) otoken);

        error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
            FREE(cp->gss_svc_token, M_TEMP);
            cp->gss_svc_token = NULL;
            return (NFSERR_EAUTH);

        cp->gss_svc_tokenlen = otokenlen;

    FREE(cp->gss_svc_token, M_TEMP);
    cp->gss_svc_tokenlen = 0;
    cp->gss_svc_token = NULL;

    return (NFSERR_EAUTH);
/*
 * Validate the sequence number in the credential as described
 * in RFC 2203 Section 5.3.3.1
 *
 * Here the window of valid sequence numbers is represented by
 * a bitmap. As each sequence number is received, its bit is
 * set in the bitmap. An invalid sequence number lies below
 * the lower bound of the window, or is within the window but
 * has its bit already set.
 */
nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
    uint32_t *bits = cp->gss_svc_seqbits;
    uint32_t win = cp->gss_svc_seqwin;

    lck_mtx_lock(cp->gss_svc_mtx);

    /*
     * If greater than the window upper bound,
     * move the window up, and set the bit.
     */
    if (seq > cp->gss_svc_seqmax) {
        if (seq - cp->gss_svc_seqmax > win)
            bzero(bits, nfsm_rndup((win + 7) / 8));
        for (i = cp->gss_svc_seqmax + 1; i < seq; i++)
            win_resetbit(bits, i % win);
        win_setbit(bits, seq % win);
        cp->gss_svc_seqmax = seq;
        lck_mtx_unlock(cp->gss_svc_mtx);

    /*
     * Invalid if below the lower bound of the window
     */
    if (seq <= cp->gss_svc_seqmax - win) {
        lck_mtx_unlock(cp->gss_svc_mtx);

    /*
     * In the window, invalid if the bit is already set
     */
    if (win_getbit(bits, seq % win)) {
        lck_mtx_unlock(cp->gss_svc_mtx);

    win_setbit(bits, seq % win);
    lck_mtx_unlock(cp->gss_svc_mtx);
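/*
 * Editor's note (not in the original source): a minimal user-space model of
 * the window check above, purely for illustration. It uses a single uint32_t
 * as the bitmap (so it only works for window sizes up to 32) and a shifting
 * convention instead of the modulo-indexed win_setbit()/win_getbit() macros;
 * it mirrors the accept/reject logic rather than the exact implementation.
 */
#if 0
static int
seq_window_example(uint32_t *seqmax, uint32_t *bitmap, uint32_t win, uint32_t seq)
{
    if (seq > *seqmax) {
        /* Above the window: slide it up and record the new maximum. */
        if (seq - *seqmax >= win)
            *bitmap = 0;                /* everything aged out */
        else
            *bitmap <<= (seq - *seqmax);        /* drop the oldest bits */
        *seqmax = seq;
        *bitmap |= 1;                    /* bit 0 is the newest seqnum */
        return (1);
    }
    if (*seqmax - seq >= win)
        return (0);                    /* below the window: reject */
    if (*bitmap & (1u << (*seqmax - seq)))
        return (0);                    /* already seen: replay */
    *bitmap |= 1u << (*seqmax - seq);            /* mark as seen */
    return (1);
}
#endif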
/*
 * Called at NFS server shutdown - destroy all contexts
 */
nfs_gss_svc_cleanup(void)
    struct nfs_gss_svc_ctx_hashhead *head;
    struct nfs_gss_svc_ctx *cp, *ncp;

    lck_mtx_lock(nfs_gss_svc_ctx_mutex);

    /*
     * Run through all the buckets
     */
    for (i = 0; i < SVC_CTX_HASHSZ; i++) {
        /*
         * Remove and free all entries in the bucket
         */
        head = &nfs_gss_svc_ctx_hashtbl[i];
        LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
            LIST_REMOVE(cp, gss_svc_entries);
            if (cp->gss_svc_seqbits)
                FREE(cp->gss_svc_seqbits, M_TEMP);
            lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);

    lck_mtx_unlock(nfs_gss_svc_ctx_mutex);

#endif /* NFSSERVER */
/*
 * The following functions are used by both client and server.
 */

/*
 * Release a task special port that was obtained by task_get_special_port
 * or one of its macros (task_get_gssd_port in this case).
 * This really should be in a public kpi.
 */

/* This should be in a public header if this routine is not */
extern void ipc_port_release_send(ipc_port_t);
extern ipc_port_t ipc_port_copy_send(ipc_port_t);

task_release_special_port(mach_port_t mp)
    ipc_port_release_send(mp);

task_copy_special_port(mach_port_t mp)
    return ipc_port_copy_send(mp);
/*
 * The token that is sent and received in the gssd upcall
 * has unbounded variable length. Mach RPC does not pass
 * the token in-line. Instead it uses page mapping to handle
 * these parameters. This function allocates a VM buffer
 * to hold the token for an upcall and copies the token
 * (received from the client) into it. The VM buffer is
 * marked with a src_destroy flag so that the upcall will
 * automatically de-allocate the buffer when the upcall is
 * complete.
 */
nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr)
    vm_offset_t kmem_buf;

    if (buf == NULL || buflen == 0)

    tbuflen = round_page(buflen);
    kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE);
        printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");

    kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
        vm_map_round_page(kmem_buf + tbuflen),
        VM_PROT_READ|VM_PROT_WRITE, FALSE);
        printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");

    bcopy(buf, (void *) kmem_buf, buflen);
    // Shouldn't need to bzero below since vm_allocate returns zeroed pages
    // bzero(kmem_buf + buflen, tbuflen - buflen);

    kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
        vm_map_round_page(kmem_buf + tbuflen), FALSE);
        printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");

    kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
        (vm_map_size_t) buflen, TRUE, addr);
        printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
/*
 * Here we handle a token received from the gssd via an upcall.
 * The received token resides in an allocated VM buffer.
 * We copy the token out of this buffer to a chunk of malloc'ed
 * memory of the right size, then de-allocate the VM buffer.
 */
nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
    vm_map_offset_t map_data;

    error = vm_map_copyout(ipc_kernel_map, &map_data, in);

    data = CAST_DOWN(vm_offset_t, map_data);
    bcopy((void *) data, out, len);
    vm_deallocate(ipc_kernel_map, data, len);
/*
 * Encode an ASN.1 token to be wrapped in an RPCSEC_GSS verifier.
 * Returns the size of the token, since it contains a variable
 * length DER encoded size field.
 */
    static uint32_t seqnum = 0;

    /*
     * Fill in the token header: 2 octets.
     * This is 0x06 - an ASN.1 tag for APPLICATION, 0, SEQUENCE
     * followed by the length of the token: 35 + 0 octets for a
     * MIC token, or 35 + encrypted octets for a wrap token;
     */
    toklen = KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + HASHLEN(ki);
    nfs_gss_der_length_put(&p, toklen + datalen);

    /*
     * Fill in the DER encoded mech OID for Kerberos v5.
     * This represents the Kerberos OID 1.2.840.113554.1.2.2
     * described in RFC 2623, section 4.2
     */
    bcopy(krb5_mech, p, sizeof(krb5_mech));
    p += sizeof(krb5_mech);

    /*
     * Now at the token described in RFC 1964, section 1.2.1
     * Fill in the token ID, integrity algorithm indicator,
     * for DES MAC MD5, and four filler octets.
     * The alg string encodes the bytes to represent either
     * a MIC token or a WRAP token for Kerberos.
     */
    bcopy(alg, p, KRB5_SZ_ALG);

    /*
     * Now encode the sequence number according to
     * RFC 1964, section 1.2.1.2 which dictates 4 octets
     * of sequence number followed by 4 bytes of direction
     * indicator: 0x00 for initiator or 0xff for acceptor.
     * We DES CBC encrypt the sequence number using the first
     * 8 octets of the checksum field as an initialization
     * vector.
     * Note that this sequence number is not at all related
     * to the RPCSEC_GSS protocol sequence number. This
     * number is private to the ASN.1 token. The only
     * requirement is that it not be repeated in case the
     * server has replay detection on, which normally should
     * not be the case, since RFC 2203 section 5.2.3 says that
     * replay detection and sequence checking must be turned off.
     */
    for (i = 0; i < 4; i++)
        plain[i] = (u_char) ((seqnum >> (i * 8)) & 0xff);
    for (i = 4; i < 8; i++)
        plain[i] = initiator ? 0x00 : 0xff;
    gss_des_crypt(ki, (des_cblock *) plain, (des_cblock *) p, 8,
        (des_cblock *) cksum, NULL, DES_ENCRYPT, KG_USAGE_SEQ);

    /*
     * Finally, append the octets of the
     * checksum of the alg + plaintext data.
     * The plaintext could be an RPC call header,
     * the window value, or a sequence number.
     */
    bcopy(cksum, p, HASHLEN(ki));
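    /*
     * Editor's note (not in the original source): sketch of the finished
     * token layout implied by the code above, in order:
     *
     *    token header tag + DER length   (2+ octets)
     *    Kerberos v5 mech OID            (KRB5_SZ_MECH octets)
     *    token ID / algorithm / filler   (KRB5_SZ_ALG octets)
     *    encrypted seqnum + direction    (KRB5_SZ_SEQ octets)
     *    checksum                        (HASHLEN(ki) octets)
     *
     * so the DER length field encodes toklen + datalen, i.e. everything that
     * follows the header, with datalen non-zero only for wrap tokens that
     * carry encrypted payload.
     */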
/*
 * Determine size of ASN.1 DER length
 */
nfs_gss_der_length_size(int len)
    return
        len < (1 << 7) ? 1 :
        len < (1 << 8) ? 2 :
        len < (1 << 16) ? 3 :
        len < (1 << 24) ? 4 : 5;

/*
 * Encode an ASN.1 DER length field
 */
nfs_gss_der_length_put(u_char **pp, int len)
    int sz = nfs_gss_der_length_size(len);

        *p++ = (u_char) len;

        *p++ = (u_char) ((sz-1) | 0x80);
        *p++ = (u_char) ((len >> (sz * 8)) & 0xff);
/*
 * Decode an ASN.1 DER length field
 */
nfs_gss_der_length_get(u_char **pp)
    uint32_t flen, len = 0;

    if ((*p++ & 0x80) == 0)
    if (flen > sizeof(uint32_t))
        len = (len << 8) + *p++;
/*
 * Decode an ASN.1 token from an RPCSEC_GSS verifier.
 */
    /*
     * Check that we have a valid token header
     */
        return (AUTH_BADCRED);
    (void) nfs_gss_der_length_get(&p);    // ignore the size

    /*
     * Check that we have the DER encoded Kerberos v5 mech OID
     */
    if (bcmp(p, krb5_mech, sizeof(krb5_mech)) != 0)
        return (AUTH_BADCRED);
    p += sizeof(krb5_mech);

    /*
     * Now check the token ID, DES MAC MD5 algorithm
     * indicator, and filler octets.
     */
    if (bcmp(p, alg, KRB5_SZ_ALG) != 0)
        return (AUTH_BADCRED);

    /*
     * Now decrypt the sequence number.
     * Note that the gss decryption uses the first 8 octets
     * of the checksum field as an initialization vector (p + 8).
     * Per RFC 2203 section 5.2.2 we don't check the sequence number
     * in the ASN.1 token because the RPCSEC_GSS protocol has its
     * own sequence number described in section 5.3.3.1
     */
    gss_des_crypt(ki, (des_cblock *)p, (des_cblock *) plain, 8,
        (des_cblock *) (p + 8), NULL, DES_DECRYPT, KG_USAGE_SEQ);
    for (i = 0; i < 4; i++)
        seqnum |= plain[i] << (i * 8);

    /*
     * Make sure the direction
     * indicator octets are correct.
     */
    d = initiator ? 0x00 : 0xff;
    for (i = 4; i < 8; i++)
        if (plain[i] != d)
            return (AUTH_BADCRED);

    /*
     * Finally, get the checksum
     */
    bcopy(p, cksum, HASHLEN(ki));
/*
 * Return the number of bytes in an mbuf chain.
 */
nfs_gss_mchain_length(mbuf_t mhead)
    for (mb = mhead; mb; mb = mbuf_next(mb))
        len += mbuf_len(mb);

/*
 * Append an args or results mbuf chain to the header chain
 */
nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc)
    /* Connect the mbuf chains */
    error = mbuf_setnext(nmc->nmc_mcur, mc);

    /* Find the last mbuf in the chain */
    for (mb = mc; mb; mb = mbuf_next(mb))

    nmc->nmc_mcur = tail;
    nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
    nmc->nmc_left = mbuf_trailingspace(tail);
/*
 * Convert an mbuf chain to an NFS mbuf chain
 */
nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
    /* Find the last mbuf in the chain */
    for (mb = mc; mb; mb = mbuf_next(mb))

    nmc->nmc_mhead = mc;
    nmc->nmc_mcur = tail;
    nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
    nmc->nmc_left = mbuf_trailingspace(tail);
/*
 * Compute a checksum over an mbuf chain.
 * Start building an MD5 digest at the given offset and keep
 * going until the end of data in the current mbuf is reached.
 * Then convert the 16 byte MD5 digest to an 8 byte DES CBC
 * checksum.
 */
nfs_gss_cksum_mchain(
    GSS_DIGEST_CTX context;

    gss_digest_Init(&context, ki);

    /*
     * Logically prepend the first 8 bytes of the algorithm
     * field as required by RFC 1964, section 1.2.1.1
     */
    gss_digest_Update(&context, alg, KRB5_SZ_ALG);

    /*
     * Move down the mbuf chain until we reach the given
     * byte offset, then start MD5 on the mbuf data until
     * we've done len bytes.
     */
    for (mb = mhead; mb && len > 0; mb = mbuf_next(mb)) {
        ptr = mbuf_data(mb);
        left = mbuf_len(mb);
        if (offset >= left) {
            /* Offset not yet reached */

        /* At or beyond offset - checksum data */
        bytes = left < len ? left : len;
        gss_digest_Update(&context, ptr, bytes);

    gss_digest_Final(&context, digest);
/*
 * Compute a checksum over an NFS mbuf chain.
 * Start building an MD5 digest at the given offset and keep
 * going until the end of data in the current mbuf is reached.
 * Then convert the 16 byte MD5 digest to an 8 byte DES CBC
 * checksum.
 */
nfs_gss_cksum_chain(
    struct nfsm_chain *nmc,

    /*
     * If the length parameter is zero, then we need
     * to use the length from the offset to the current
     * encode/decode offset.
     */
        len = nfsm_chain_offset(nmc) - offset;

    return (nfs_gss_cksum_mchain(ki, nmc->nmc_mhead, alg, offset, len, cksum));
/*
 * Compute a checksum of the sequence number (or sequence window)
 * of an RPCSEC_GSS reply.
 */
nfs_gss_cksum_rep(gss_key_info *ki, uint32_t seqnum, u_char *cksum)
    GSS_DIGEST_CTX context;
    uint32_t val = htonl(seqnum);

    gss_digest_Init(&context, ki);

    /*
     * Logically prepend the first 8 bytes of the MIC
     * token as required by RFC 1964, section 1.2.1.1
     */
    gss_digest_Update(&context, ALG_MIC(ki), KRB5_SZ_ALG);

    /*
     * Compute the digest of the seqnum in network order
     */
    gss_digest_Update(&context, &val, 4);
    gss_digest_Final(&context, cksum);
/*
 * Encrypt or decrypt data in an mbuf chain with des-cbc.
 */
nfs_gss_encrypt_mchain(
    u_char tmp[8], ivec[8];
    int left, left8, remain;

    /*
     * Move down the mbuf chain until we reach the given
     * byte offset, then start encrypting the mbuf data until
     * we've done len bytes.
     */
    for (mb = mhead; mb && len > 0; mb = mbn) {
        mbn = mbuf_next(mb);
        ptr = mbuf_data(mb);
        left = mbuf_len(mb);
        if (offset >= left) {
            /* Offset not yet reached */

        /* At or beyond offset - encrypt data */

        /*
         * DES or DES3 CBC has to encrypt 8 bytes at a time.
         * If the number of bytes to be encrypted in this
         * mbuf isn't some multiple of 8 bytes, encrypt all
         * the 8 byte blocks, then combine the remaining
         * bytes with enough from the next mbuf to make up
         * an 8 byte block and encrypt that block separately,
         * i.e. that block is split across two mbufs.
         */
        left8 = left - remain;
        left = left8 < len ? left8 : len;
        gss_des_crypt(ki, (des_cblock *) ptr, (des_cblock *) ptr,
            left, &ivec, &ivec, encrypt, KG_USAGE_SEAL);

        if (mbn && remain > 0) {
            nptr = mbuf_data(mbn);
            offset = 8 - remain;
            bcopy(ptr + left, tmp, remain);        // grab from this mbuf
            bcopy(nptr, tmp + remain, offset);    // grab from next mbuf
            gss_des_crypt(ki, (des_cblock *) tmp, (des_cblock *) tmp, 8,
                &ivec, &ivec, encrypt, KG_USAGE_SEAL);
            bcopy(tmp, ptr + left, remain);        // return to this mbuf
            bcopy(tmp + remain, nptr, offset);    // return to next mbuf
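        /*
         * Editor's note (not in the original source): worked example of the
         * split-block handling above. If 13 bytes remain to be ciphered in
         * this mbuf, the first 8 are processed in place, the trailing 5 are
         * copied into tmp[] along with 3 bytes from the start of the next
         * mbuf, that assembled 8-byte block is ciphered in tmp[], and the
         * two pieces are copied back to their original mbufs.
         */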
/*
 * Encrypt or decrypt data in an NFS mbuf chain with des-cbc.
 */
nfs_gss_encrypt_chain(
    struct nfsm_chain *nmc,

    /*
     * If the length parameter is zero, then we need
     * to use the length from the offset to the current
     * encode/decode offset.
     */
        len = nfsm_chain_offset(nmc) - offset;

    return (nfs_gss_encrypt_mchain(ki, nmc->nmc_mhead, offset, len, encrypt));
/*
 * The routines that follow provide abstractions for doing digests and crypto.
 */
gss_digest_Init(GSS_DIGEST_CTX *ctx, gss_key_info *ki)
    ctx->type = ki->type;
    case NFS_GSS_1DES:    MD5_DESCBC_Init(&ctx->m_ctx, &ki->ks_u.des.gss_sched);
    case NFS_GSS_3DES:    HMAC_SHA1_DES3KD_Init(&ctx->h_ctx, ki->ks_u.des3.ckey, 0);
        printf("gss_digest_Init: Unknown key info type %d\n", ki->type);

gss_digest_Update(GSS_DIGEST_CTX *ctx, void *data, size_t len)
    switch (ctx->type) {
    case NFS_GSS_1DES:    MD5_DESCBC_Update(&ctx->m_ctx, data, len);
    case NFS_GSS_3DES:    HMAC_SHA1_DES3KD_Update(&ctx->h_ctx, data, len);

gss_digest_Final(GSS_DIGEST_CTX *ctx, void *digest)
    switch (ctx->type) {
    case NFS_GSS_1DES:    MD5_DESCBC_Final(digest, &ctx->m_ctx);
    case NFS_GSS_3DES:    HMAC_SHA1_DES3KD_Final(digest, &ctx->h_ctx);
gss_des_crypt(gss_key_info *ki, des_cblock *in, des_cblock *out,
    int32_t len, des_cblock *iv, des_cblock *retiv, int encrypt, int usage)

        des_key_schedule *sched = ((usage == KG_USAGE_SEAL) ?
            &ki->ks_u.des.gss_sched_Ke :
            &ki->ks_u.des.gss_sched);
        des_cbc_encrypt(in, out, len, *sched, iv, retiv, encrypt);

        des3_cbc_encrypt(in, out, len, ki->ks_u.des3.gss_sched, iv, retiv, encrypt);
gss_key_init(gss_key_info *ki, uint32_t skeylen)
    ki->keybytes = skeylen;
    case sizeof(des_cblock):
        ki->type = NFS_GSS_1DES;
        ki->hash_len = MD5_DESCBC_DIGEST_LENGTH;
        ki->ks_u.des.key = (des_cblock *)ki->skey;
        rc = des_key_sched(ki->ks_u.des.key, ki->ks_u.des.gss_sched);
        for (i = 0; i < ki->keybytes; i++)
            k[0][i] = 0xf0 ^ (*ki->ks_u.des.key)[i];
        rc = des_key_sched(&k[0], ki->ks_u.des.gss_sched_Ke);
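        /*
         * Editor's note (not in the original source): the schedule built just
         * above appears to be the RFC 1964 "variant" key used for sealing -
         * each byte of the session key XORed with 0xf0 before des_key_sched()
         * - which gss_des_crypt() above selects when usage is KG_USAGE_SEAL,
         * while the unmodified key drives the MIC/checksum schedule.
         */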
    case 3*sizeof(des_cblock):
        ki->type = NFS_GSS_3DES;
        ki->hash_len = SHA_DIGEST_LENGTH;
        ki->ks_u.des3.key = (des_cblock (*)[3])ki->skey;
        des3_derive_key(*ki->ks_u.des3.key, ki->ks_u.des3.ckey,
            KEY_USAGE_DES3_SIGN, KEY_USAGE_LEN);
        rc = des3_key_sched(*ki->ks_u.des3.key, ki->ks_u.des3.gss_sched);

        printf("gss_key_init: Invalid key length %d\n", skeylen);
#define DISPLAYLEN 16
#define MAXDISPLAYLEN 256

hexdump(const char *msg, void *data, size_t len)
    char *p, disbuf[3*DISPLAYLEN+1];

    printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
    if (len > MAXDISPLAYLEN)
        len = MAXDISPLAYLEN;

    for (i = 0; i < len; i += DISPLAYLEN) {
        for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3)
            snprintf(p, 4, "%02x ", d[i + j]);
        printf("\t%s\n", disbuf);