1 /*
2 * Copyright (c) 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (c) 1999 Kungliga Tekniska Högskolan
31 * (Royal Institute of Technology, Stockholm, Sweden).
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 *
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 *
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 *
45 * 3. Neither the name of KTH nor the names of its contributors may be
46 * used to endorse or promote products derived from this software without
47 * specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY KTH AND ITS CONTRIBUTORS ``AS IS'' AND ANY
50 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL KTH OR ITS CONTRIBUTORS BE
53 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
56 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
57 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
58 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
59 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 */
61
62 #include <stdint.h>
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/malloc.h>
67 #include <sys/kpi_mbuf.h>
68 #include <sys/random.h>
69 #include <mach_assert.h>
70 #include <kern/assert.h>
71 #include <libkern/OSAtomic.h>
72 #include "gss_krb5_mech.h"
73
74 lck_grp_t *gss_krb5_mech_grp;
75
76 typedef struct crypt_walker_ctx {
77 size_t length;
78 const struct ccmode_cbc *ccmode;
79 cccbc_ctx *crypt_ctx;
80 cccbc_iv *iv;
81 } *crypt_walker_ctx_t;
82
83 typedef struct hmac_walker_ctx {
84 const struct ccdigest_info *di;
85 struct cchmac_ctx *hmac_ctx;
86 } *hmac_walker_ctx_t;
87
88 typedef size_t (*ccpad_func)(const struct ccmode_cbc *, cccbc_ctx *, cccbc_iv *,
89 size_t nbytes, const void *, void *);
90
91 static int krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size);
92
93 size_t gss_mbuf_len(mbuf_t, size_t);
94 errno_t gss_prepend_mbuf(mbuf_t *, uint8_t *, size_t);
95 errno_t gss_append_mbuf(mbuf_t, uint8_t *, size_t);
96 errno_t gss_strip_mbuf(mbuf_t, int);
97 int mbuf_walk(mbuf_t, size_t, size_t, size_t, int (*)(void *, uint8_t *, size_t), void *);
98
99 void do_crypt_init(crypt_walker_ctx_t, int, crypto_ctx_t, cccbc_ctx *);
100 int do_crypt(void *, uint8_t *, size_t);
101 void do_hmac_init(hmac_walker_ctx_t, crypto_ctx_t, void *);
102 int do_hmac(void *, uint8_t *, size_t);
103
104 void krb5_make_usage(uint32_t, uint8_t, uint8_t[KRB5_USAGE_LEN]);
105 void krb5_key_derivation(crypto_ctx_t, const void *, size_t, void **, size_t);
106 void cc_key_schedule_create(crypto_ctx_t);
107 void gss_crypto_ctx_free(crypto_ctx_t);
108 int gss_crypto_ctx_init(struct crypto_ctx *, lucid_context_t);
109
110 errno_t krb5_crypt_mbuf(crypto_ctx_t, mbuf_t *, size_t, int, cccbc_ctx *);
111 int krb5_mic(crypto_ctx_t, gss_buffer_t, gss_buffer_t, gss_buffer_t, uint8_t *, int *, int, int);
112 int krb5_mic_mbuf(crypto_ctx_t, gss_buffer_t, mbuf_t, size_t, size_t, gss_buffer_t, uint8_t *, int *, int, int);
113
114 uint32_t gss_krb5_cfx_get_mic(uint32_t *, gss_ctx_id_t, gss_qop_t, gss_buffer_t, gss_buffer_t);
115 uint32_t gss_krb5_cfx_verify_mic(uint32_t *, gss_ctx_id_t, gss_buffer_t, gss_buffer_t, gss_qop_t *);
116 uint32_t gss_krb5_cfx_get_mic_mbuf(uint32_t *, gss_ctx_id_t, gss_qop_t, mbuf_t, size_t, size_t, gss_buffer_t);
117 uint32_t gss_krb5_cfx_verify_mic_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t, size_t, size_t, gss_buffer_t, gss_qop_t *);
118 errno_t krb5_cfx_crypt_mbuf(crypto_ctx_t, mbuf_t *, size_t *, int, int);
119 uint32_t gss_krb5_cfx_wrap_mbuf(uint32_t *, gss_ctx_id_t, int, gss_qop_t, mbuf_t *, size_t, int *);
120 uint32_t gss_krb5_cfx_unwrap_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t *, size_t, int *, gss_qop_t *);
121
122 int gss_krb5_mech_is_initialized(void);
123 void gss_krb5_mech_init(void);
124
125 /* Debugging routines */
126 void
127 printmbuf(const char *str, mbuf_t mb, uint32_t offset, uint32_t len)
128 {
129 size_t i;
130 int cout = 1;
131
132 len = len ? len : ~0;
133 printf("%s mbuf = %p offset = %d len = %d:\n", str ? str : "mbuf", mb, offset, len);
134 for (; mb && len; mb = mbuf_next(mb)) {
135 if (offset >= mbuf_len(mb)) {
136 offset -= mbuf_len(mb);
137 continue;
138 }
139 for (i = offset; len && i < mbuf_len(mb); i++) {
140 			const char *s = (cout % 8) ? " " : (cout % 16) ? "    " : "\n";
141 printf("%02x%s", ((uint8_t *)mbuf_data(mb))[i], s);
142 len--;
143 cout++;
144 }
145 offset = 0;
146 }
147 if ((cout - 1) % 16) {
148 printf("\n");
149 }
150 printf("Count chars %d\n", cout - 1);
151 }
152
153 void
154 printgbuf(const char *str, gss_buffer_t buf)
155 {
156 size_t i;
157 size_t len = buf->length > 128 ? 128 : buf->length;
158
159 printf("%s: len = %d value = %p\n", str ? str : "buffer", (int)buf->length, buf->value);
160 for (i = 0; i < len; i++) {
161 		const char *s = ((i + 1) % 8) ? " " : ((i + 1) % 16) ? "    " : "\n";
162 printf("%02x%s", ((uint8_t *)buf->value)[i], s);
163 }
164 if (i % 16) {
165 printf("\n");
166 }
167 }
168
169 /*
170 * Initialize the data structures for the gss kerberos mech.
171 */
172 #define GSS_KRB5_NOT_INITIALIZED 0
173 #define GSS_KRB5_INITIALIZING 1
174 #define GSS_KRB5_INITIALIZED 2
175 static volatile uint32_t gss_krb5_mech_initted = GSS_KRB5_NOT_INITIALIZED;
176
177 int
178 gss_krb5_mech_is_initialized(void)
179 {
180 	return gss_krb5_mech_initted == GSS_KRB5_INITIALIZED;
181 }
182
183 void
184 gss_krb5_mech_init(void)
185 {
186 extern void IOSleep(int);
187
188 /* Once initted always initted */
189 if (gss_krb5_mech_initted == GSS_KRB5_INITIALIZED) {
190 return;
191 }
192
193 /* make sure we init only once */
194 if (!OSCompareAndSwap(GSS_KRB5_NOT_INITIALIZED, GSS_KRB5_INITIALIZING, &gss_krb5_mech_initted)) {
195 /* wait until initialization is complete */
196 while (!gss_krb5_mech_is_initialized()) {
197 IOSleep(10);
198 }
199 return;
200 }
201 gss_krb5_mech_grp = lck_grp_alloc_init("gss_krb5_mech", LCK_GRP_ATTR_NULL);
202 gss_krb5_mech_initted = GSS_KRB5_INITIALIZED;
203 }
204
205 uint32_t
206 gss_release_buffer(uint32_t *minor, gss_buffer_t buf)
207 {
208 if (minor) {
209 *minor = 0;
210 }
211 if (buf->value) {
212 FREE(buf->value, M_TEMP);
213 }
214 buf->value = NULL;
215 buf->length = 0;
216 return GSS_S_COMPLETE;
217 }
218
219 /*
220 * GSS mbuf routines
221 */
222
223 size_t
224 gss_mbuf_len(mbuf_t mb, size_t offset)
225 {
226 size_t len;
227
228 for (len = 0; mb; mb = mbuf_next(mb)) {
229 len += mbuf_len(mb);
230 }
231 return (offset > len) ? 0 : len - offset;
232 }
233
234 /*
235  * Split an mbuf in a chain into two mbufs such that the original
236  * mbuf holds the first offset bytes and the new mbuf, returned in
237  * *nmb, holds the rest of the chain. If offset is zero or equal to
238  * mbuf_len(mb) then don't create a new mbuf; we are already at an
239  * mbuf boundary. In either case return in *nmb the mbuf that starts
240  * at the offset.
241  */
242 static errno_t
243 split_one_mbuf(mbuf_t mb, size_t offset, mbuf_t *nmb, int join)
244 {
245 errno_t error;
246
247 *nmb = mb;
248 	/* We don't have an mbuf or we're already on an mbuf boundary */
249 if (mb == NULL || offset == 0) {
250 return 0;
251 }
252
253 /* If the mbuf length is offset then the next mbuf is the one we want */
254 if (mbuf_len(mb) == offset) {
255 *nmb = mbuf_next(mb);
256 if (!join) {
257 mbuf_setnext(mb, NULL);
258 }
259 return 0;
260 }
261
262 if (offset > mbuf_len(mb)) {
263 return EINVAL;
264 }
265
266 error = mbuf_split(mb, offset, MBUF_WAITOK, nmb);
267 if (error) {
268 return error;
269 }
270
271 if (mbuf_flags(*nmb) & MBUF_PKTHDR) {
272 		/* mbuf_split copies the pkthdr to the new mbuf; we don't want it there, so clear the flag. */
273 error = mbuf_setflags_mask(*nmb, ~MBUF_PKTHDR, MBUF_PKTHDR);
274 }
275
276 if (join) {
277 /* Join the chain again */
278 mbuf_setnext(mb, *nmb);
279 }
280
281 return 0;
282 }
283
284 /*
285  * Given an mbuf chain, an offset and a requested length, rearrange the
286  * chain so that offset and offset + *subchain_length fall on mbuf
287  * boundaries. If *subchain_length is zero, or larger than the number
288  * of bytes remaining after offset, set it to the length of the chain
289  * starting at offset. The sub chain starting at offset is returned in
290  * *subchain, and the remainder of the chain is returned in *tail when
291  * tail is non-NULL. If an error occurs return the corresponding
292  * errno. Note that if the chain contains fewer than offset bytes,
293  * *subchain will be set to NULL. The join parameter indicates whether
294  * the chain should be kept joined as one chain, merely rearranged so
295  * that offset and *subchain_length land on mbuf boundaries.
296  */
297
298 errno_t
299 gss_normalize_mbuf(mbuf_t chain, size_t offset, size_t *subchain_length, mbuf_t *subchain, mbuf_t *tail, int join)
300 {
301 size_t length = *subchain_length ? *subchain_length : ~0;
302 size_t len;
303 mbuf_t mb, nmb;
304 errno_t error;
305
306 if (tail == NULL) {
307 tail = &nmb;
308 }
309 *tail = NULL;
310 *subchain = NULL;
311
312 for (len = offset, mb = chain; mb && len > mbuf_len(mb); mb = mbuf_next(mb)) {
313 len -= mbuf_len(mb);
314 }
315
316 /* if we don't have offset bytes just return */
317 if (mb == NULL) {
318 return 0;
319 }
320
321 error = split_one_mbuf(mb, len, subchain, join);
322 if (error) {
323 return error;
324 }
325
326 assert(subchain != NULL && *subchain != NULL);
327 assert(offset == 0 ? mb == *subchain : 1);
328
329 len = gss_mbuf_len(*subchain, 0);
330 length = (length > len) ? len : length;
331 *subchain_length = length;
332
333 for (len = length, mb = *subchain; mb && len > mbuf_len(mb); mb = mbuf_next(mb)) {
334 len -= mbuf_len(mb);
335 }
336
337 error = split_one_mbuf(mb, len, tail, join);
338
339 return error;
340 }
341
342 mbuf_t
343 gss_join_mbuf(mbuf_t head, mbuf_t body, mbuf_t tail)
344 {
345 mbuf_t mb;
346
347 for (mb = head; mb && mbuf_next(mb); mb = mbuf_next(mb)) {
348 ;
349 }
350 if (mb) {
351 mbuf_setnext(mb, body);
352 }
353 for (mb = body; mb && mbuf_next(mb); mb = mbuf_next(mb)) {
354 ;
355 }
356 if (mb) {
357 mbuf_setnext(mb, tail);
358 }
359 mb = head ? head : (body ? body : tail);
360 return mb;
361 }
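
/*
 * Illustrative sketch (not compiled into the build): how gss_normalize_mbuf()
 * and gss_join_mbuf() are typically paired to isolate a byte range of a
 * chain, operate on it in place, and splice the chain back together. The
 * example_operate_on_range() helper and the work done on the isolated range
 * are hypothetical; the mbuf routines are the ones defined in this file.
 * The offset is assumed to be non-zero so that head and subchain differ.
 */
#if 0
static errno_t
example_operate_on_range(mbuf_t chain, size_t offset, size_t range_len)
{
	mbuf_t subchain, tail;
	errno_t error;

	/* Carve the chain so offset and offset + range_len fall on mbuf boundaries */
	error = gss_normalize_mbuf(chain, offset, &range_len, &subchain, &tail, 0);
	if (error) {
		return error;
	}

	/* ... operate on the range_len bytes now isolated in subchain ... */

	/* Splice head, body, and tail back into a single chain */
	gss_join_mbuf(chain, subchain, tail);
	return 0;
}
#endif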
362
363 /*
364 * Prepend size bytes to the mbuf chain.
365 */
366 errno_t
367 gss_prepend_mbuf(mbuf_t *chain, uint8_t *bytes, size_t size)
368 {
369 uint8_t *data = mbuf_data(*chain);
370 size_t leading = mbuf_leadingspace(*chain);
371 size_t trailing = mbuf_trailingspace(*chain);
372 size_t mlen = mbuf_len(*chain);
373 errno_t error;
374
375 if (size > leading && size <= leading + trailing) {
376 data = memmove(data + size - leading, data, mlen);
377 mbuf_setdata(*chain, data, mlen);
378 }
379
380 error = mbuf_prepend(chain, size, MBUF_WAITOK);
381 if (error) {
382 return error;
383 }
384 data = mbuf_data(*chain);
385 memcpy(data, bytes, size);
386
387 return 0;
388 }
389
390 errno_t
391 gss_append_mbuf(mbuf_t chain, uint8_t *bytes, size_t size)
392 {
393 size_t len = 0;
394 mbuf_t mb;
395
396 if (chain == NULL) {
397 return EINVAL;
398 }
399
400 for (mb = chain; mb; mb = mbuf_next(mb)) {
401 len += mbuf_len(mb);
402 }
403
404 return mbuf_copyback(chain, len, size, bytes, MBUF_WAITOK);
405 }
406
407 errno_t
408 gss_strip_mbuf(mbuf_t chain, int size)
409 {
410 if (chain == NULL) {
411 return EINVAL;
412 }
413
414 mbuf_adj(chain, size);
415
416 return 0;
417 }
418
419
420 /*
421 * Kerberos mech generic crypto support for mbufs
422 */
423
424 /*
425  * Walk the mbuf chain starting at the given offset, calling the passed in
426  * crypto function on len bytes. Note that len should be a multiple of the
427  * blocksize and there should be at least len bytes available after the
428  * offset in the mbuf chain. Any padding should be done before calling
429  * this routine.
430  */
430 int
431 mbuf_walk(mbuf_t mbp, size_t offset, size_t len, size_t blocksize, int (*crypto_fn)(void *, uint8_t *data, size_t length), void *ctx)
432 {
433 mbuf_t mb;
434 size_t mlen, residue;
435 uint8_t *ptr;
436 int error = 0;
437
438 /* Move to the start of the chain */
439 for (mb = mbp; mb && len > 0; mb = mbuf_next(mb)) {
440 ptr = mbuf_data(mb);
441 mlen = mbuf_len(mb);
442 if (offset >= mlen) {
443 /* Offset not yet reached */
444 offset -= mlen;
445 continue;
446 }
447 /* Found starting point in chain */
448 ptr += offset;
449 mlen -= offset;
450 offset = 0;
451
452 /*
453 * Handle the data in this mbuf. If the length to
454 * walk is less than the data in the mbuf, set
455 * the mbuf length left to be the length left
456 */
457 mlen = mlen < len ? mlen : len;
458 		/* Figure out how much is a multiple of blocksize */
459 		residue = mlen % blocksize;
460 		/* And adjust the length to be the largest multiple of blocksize */
461 		mlen -= residue;
462 		/* run our hash/encrypt/decrypt function */
463 if (mlen > 0) {
464 error = crypto_fn(ctx, ptr, mlen);
465 if (error) {
466 break;
467 }
468 ptr += mlen;
469 len -= mlen;
470 }
471 		/*
472 		 * If we have a residue then to get a full block for our crypto
473 		 * function, we need to copy the residue into our blocksize
474 		 * block and use the next mbuf to get the rest of the data for
475 		 * the block. N.B. We generally assume that, from the offset
476 		 * passed in, the total length, len, is a multiple of
477 		 * blocksize and that there are at least len bytes in the chain
478 		 * from the offset. We also assume there are at least (blocksize
479 		 * - residue) bytes of data in the next mbuf for residue > 0. If
480 		 * not we attempt to pull up bytes from down the chain.
481 		 */
482 if (residue) {
483 mbuf_t nmb = mbuf_next(mb);
484 uint8_t *nptr = NULL, block[blocksize];
485
486 assert(nmb);
487 len -= residue;
488 offset = blocksize - residue;
489 if (len < offset) {
490 offset = len;
491 /*
492 * We don't have enough bytes so zero the block
493 * so that any trailing bytes will be zero.
494 */
495 cc_clear(sizeof(block), block);
496 }
497 memcpy(block, ptr, residue);
498 if (len && nmb) {
499 mlen = mbuf_len(nmb);
500 if (mlen < offset) {
501 error = mbuf_pullup(&nmb, offset - mlen);
502 if (error) {
503 mbuf_setnext(mb, NULL);
504 return error;
505 }
506 }
507 nptr = mbuf_data(nmb);
508 memcpy(block + residue, nptr, offset);
509 }
510 len -= offset;
511 error = crypto_fn(ctx, block, sizeof(block));
512 if (error) {
513 break;
514 }
515 memcpy(ptr, block, residue);
516 if (nptr) {
517 memcpy(nptr, block + residue, offset);
518 }
519 }
520 }
521
522 return error;
523 }
524
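/*
 * Illustrative sketch (not compiled into the build): the callback contract
 * for mbuf_walk(). The walker hands the callback contiguous runs of data
 * whose length is always a multiple of the blocksize passed to mbuf_walk();
 * partial blocks that straddle mbufs are gathered into a temporary block
 * before the callback sees them. The xor walker and its context below are
 * hypothetical.
 */
#if 0
struct example_xor_ctx {
	uint8_t pad;
};

static int
example_xor_walker(void *arg, uint8_t *data, size_t len)
{
	struct example_xor_ctx *ctx = arg;

	for (size_t i = 0; i < len; i++) {
		data[i] ^= ctx->pad;
	}
	return 0;       /* a non-zero return aborts the walk */
}

/* XOR len bytes starting at offset with a one byte pad, 16 bytes at a time */
static int
example_xor_range(mbuf_t chain, size_t offset, size_t len)
{
	struct example_xor_ctx ctx = { .pad = 0x5a };

	return mbuf_walk(chain, offset, len, 16, example_xor_walker, &ctx);
}
#endif
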
525 void
526 do_crypt_init(crypt_walker_ctx_t wctx, int encrypt, crypto_ctx_t cctx, cccbc_ctx *ks)
527 {
528 memset(wctx, 0, sizeof(*wctx));
529 wctx->length = 0;
530 wctx->ccmode = encrypt ? cctx->enc_mode : cctx->dec_mode;
531 wctx->crypt_ctx = ks;
532 MALLOC(wctx->iv, cccbc_iv *, wctx->ccmode->block_size, M_TEMP, M_WAITOK | M_ZERO);
533 cccbc_set_iv(wctx->ccmode, wctx->iv, NULL);
534 }
535
536 int
537 do_crypt(void *walker, uint8_t *data, size_t len)
538 {
539 struct crypt_walker_ctx *wctx = (crypt_walker_ctx_t)walker;
540 size_t nblocks;
541
542 nblocks = len / wctx->ccmode->block_size;
543 assert(len % wctx->ccmode->block_size == 0);
544 cccbc_update(wctx->ccmode, wctx->crypt_ctx, wctx->iv, nblocks, data, data);
545 wctx->length += len;
546
547 return 0;
548 }
549
550 void
551 do_hmac_init(hmac_walker_ctx_t wctx, crypto_ctx_t cctx, void *key)
552 {
553 size_t alloc_size = cchmac_di_size(cctx->di);
554
555 wctx->di = cctx->di;
556 MALLOC(wctx->hmac_ctx, struct cchmac_ctx *, alloc_size, M_TEMP, M_WAITOK | M_ZERO);
557 cchmac_init(cctx->di, wctx->hmac_ctx, cctx->keylen, key);
558 }
559
560 int
561 do_hmac(void *walker, uint8_t *data, size_t len)
562 {
563 hmac_walker_ctx_t wctx = (hmac_walker_ctx_t)walker;
564
565 cchmac_update(wctx->di, wctx->hmac_ctx, len, data);
566
567 return 0;
568 }
569
570
571 int
572 krb5_mic(crypto_ctx_t ctx, gss_buffer_t header, gss_buffer_t bp, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse)
573 {
574 uint8_t digest[ctx->di->output_size];
575 cchmac_di_decl(ctx->di, hmac_ctx);
576 int kdx = (verify == NULL) ? (reverse ? GSS_RCV : GSS_SND) : (reverse ? GSS_SND : GSS_RCV);
577 void *key2use;
578
579 if (ikey) {
580 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
581 lck_mtx_lock(ctx->lock);
582 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
583 cc_key_schedule_create(ctx);
584 }
585 ctx->flags |= CRYPTO_KS_ALLOCED;
586 lck_mtx_unlock(ctx->lock);
587 }
588 key2use = ctx->ks.ikey[kdx];
589 } else {
590 key2use = ctx->ckey[kdx];
591 }
592
593 cchmac_init(ctx->di, hmac_ctx, ctx->keylen, key2use);
594
595 if (header) {
596 cchmac_update(ctx->di, hmac_ctx, header->length, header->value);
597 }
598
599 cchmac_update(ctx->di, hmac_ctx, bp->length, bp->value);
600
601 if (trailer) {
602 cchmac_update(ctx->di, hmac_ctx, trailer->length, trailer->value);
603 }
604
605 cchmac_final(ctx->di, hmac_ctx, digest);
606
607 if (verify) {
608 *verify = (memcmp(mic, digest, ctx->digest_size) == 0);
609 } else {
610 memcpy(mic, digest, ctx->digest_size);
611 }
612
613 return 0;
614 }
615
616 int
617 krb5_mic_mbuf(crypto_ctx_t ctx, gss_buffer_t header,
618 mbuf_t mbp, size_t offset, size_t len, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse)
619 {
620 struct hmac_walker_ctx wctx;
621 uint8_t digest[ctx->di->output_size];
622 int error;
623 int kdx = (verify == NULL) ? (reverse ? GSS_RCV : GSS_SND) : (reverse ? GSS_SND : GSS_RCV);
624 void *key2use;
625
626 if (ikey) {
627 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
628 lck_mtx_lock(ctx->lock);
629 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
630 cc_key_schedule_create(ctx);
631 }
632 ctx->flags |= CRYPTO_KS_ALLOCED;
633 lck_mtx_unlock(ctx->lock);
634 }
635 key2use = ctx->ks.ikey[kdx];
636 } else {
637 key2use = ctx->ckey[kdx];
638 }
639
640 do_hmac_init(&wctx, ctx, key2use);
641
642 if (header) {
643 cchmac_update(ctx->di, wctx.hmac_ctx, header->length, header->value);
644 }
645
646 error = mbuf_walk(mbp, offset, len, 1, do_hmac, &wctx);
647
648 if (error) {
649 return error;
650 }
651 if (trailer) {
652 cchmac_update(ctx->di, wctx.hmac_ctx, trailer->length, trailer->value);
653 }
654
655 cchmac_final(ctx->di, wctx.hmac_ctx, digest);
656 FREE(wctx.hmac_ctx, M_TEMP);
657
658 if (verify) {
659 *verify = (memcmp(mic, digest, ctx->digest_size) == 0);
660 if (!*verify) {
661 return EBADRPC;
662 }
663 } else {
664 memcpy(mic, digest, ctx->digest_size);
665 }
666
667 return 0;
668 }
669
670 errno_t
671 /* __attribute__((optnone)) */
672 krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t len, int encrypt, cccbc_ctx *ks)
673 {
674 struct crypt_walker_ctx wctx;
675 const struct ccmode_cbc *ccmode = encrypt ? ctx->enc_mode : ctx->dec_mode;
676 size_t plen = len;
677 size_t cts_len = 0;
678 mbuf_t mb, lmb = NULL;
679 int error;
680
681 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
682 lck_mtx_lock(ctx->lock);
683 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
684 cc_key_schedule_create(ctx);
685 }
686 ctx->flags |= CRYPTO_KS_ALLOCED;
687 lck_mtx_unlock(ctx->lock);
688 }
689 if (!ks) {
690 ks = encrypt ? ctx->ks.enc : ctx->ks.dec;
691 }
692
693 if ((ctx->flags & CRYPTO_CTS_ENABLE) && ctx->mpad == 1) {
694 uint8_t block[ccmode->block_size];
695 		/* If the length is less than or equal to a blocksize, we just encrypt one block */
696 if (len <= ccmode->block_size) {
697 if (len < ccmode->block_size) {
698 memset(block, 0, sizeof(block));
699 gss_append_mbuf(*mbp, block, ccmode->block_size);
700 }
701 plen = ccmode->block_size;
702 } else {
703 /* determine where the last two blocks are */
704 size_t r = len % ccmode->block_size;
705
706 cts_len = r ? r + ccmode->block_size : 2 * ccmode->block_size;
707 plen = len - cts_len;
708 /* If plen is 0 we only have two blocks to crypt with ccpad below */
709 if (plen == 0) {
710 lmb = *mbp;
711 } else {
712 gss_normalize_mbuf(*mbp, 0, &plen, &mb, &lmb, 0);
713 assert(*mbp == mb);
714 assert(plen == len - cts_len);
715 assert(gss_mbuf_len(mb, 0) == plen);
716 assert(gss_mbuf_len(lmb, 0) == cts_len);
717 }
718 }
719 } else if (len % ctx->mpad) {
720 uint8_t pad_block[ctx->mpad];
721 size_t padlen = ctx->mpad - (len % ctx->mpad);
722
723 memset(pad_block, 0, padlen);
724 error = gss_append_mbuf(*mbp, pad_block, padlen);
725 if (error) {
726 return error;
727 }
728 plen = len + padlen;
729 }
730 do_crypt_init(&wctx, encrypt, ctx, ks);
731 if (plen) {
732 error = mbuf_walk(*mbp, 0, plen, ccmode->block_size, do_crypt, &wctx);
733 if (error) {
734 return error;
735 }
736 }
737
738 if ((ctx->flags & CRYPTO_CTS_ENABLE) && cts_len) {
739 uint8_t cts_pad[2 * ccmode->block_size];
740 ccpad_func do_ccpad = encrypt ? ccpad_cts3_encrypt : ccpad_cts3_decrypt;
741
742 assert(cts_len <= 2 * ccmode->block_size && cts_len > ccmode->block_size);
743 memset(cts_pad, 0, sizeof(cts_pad));
744 mbuf_copydata(lmb, 0, cts_len, cts_pad);
745 mbuf_freem(lmb);
746 do_ccpad(ccmode, wctx.crypt_ctx, wctx.iv, cts_len, cts_pad, cts_pad);
747 gss_append_mbuf(*mbp, cts_pad, cts_len);
748 }
749 FREE(wctx.iv, M_TEMP);
750
751 return 0;
752 }
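
/*
 * Worked example of the CTS split above, using the 16 byte AES blocksize:
 *
 *   len = 45:  r = 45 % 16 = 13, cts_len = 13 + 16 = 29, plen = 16
 *              -> 16 bytes go through plain CBC via mbuf_walk()/do_crypt(),
 *                 the trailing 29 bytes through ccpad_cts3_{en,de}crypt().
 *   len = 64:  r = 0, cts_len = 2 * 16 = 32, plen = 32
 *   len <= 16: the data is zero padded to a single block and encrypted
 *              directly (plen = 16, cts_len = 0).
 */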
753
754 /*
755 * Key derivation routines
756 */
757
758 static int
759 rr13(unsigned char *buf, size_t len)
760 {
761 size_t bytes = (len + 7) / 8;
762 unsigned char tmp[bytes];
763 size_t i;
764
765 if (len == 0) {
766 return 0;
767 }
768
769 {
770 const int bits = 13 % len;
771 const int lbit = len % 8;
772
773 memcpy(tmp, buf, bytes);
774 if (lbit) {
775 			/* pad final byte with initial bits */
776 tmp[bytes - 1] &= 0xff << (8 - lbit);
777 for (i = lbit; i < 8; i += len) {
778 tmp[bytes - 1] |= buf[0] >> i;
779 }
780 }
781 for (i = 0; i < bytes; i++) {
782 ssize_t bb;
783 ssize_t b1, s1, b2, s2;
784
785 /* calculate first bit position of this byte */
786 bb = 8 * i - bits;
787 while (bb < 0) {
788 bb += len;
789 }
790 /* byte offset and shift count */
791 b1 = bb / 8;
792 s1 = bb % 8;
793 if ((size_t)bb + 8 > bytes * 8) {
794 /* watch for wraparound */
795 s2 = (len + 8 - s1) % 8;
796 } else {
797 s2 = 8 - s1;
798 }
799 b2 = (b1 + 1) % bytes;
800 buf[i] = 0xff & ((tmp[b1] << s1) | (tmp[b2] >> s2));
801 }
802 }
803 return 0;
804 }
805
806
807 /* Add `b' to `a', both being one's complement numbers. */
808 static void
809 add1(unsigned char *a, unsigned char *b, size_t len)
810 {
811 ssize_t i;
812 int carry = 0;
813
814 for (i = len - 1; i >= 0; i--) {
815 int x = a[i] + b[i] + carry;
816 carry = x > 0xff;
817 a[i] = x & 0xff;
818 }
819 for (i = len - 1; carry && i >= 0; i--) {
820 int x = a[i] + carry;
821 carry = x > 0xff;
822 a[i] = x & 0xff;
823 }
824 }
825
826
827 static int
828 krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size)
829 {
830 /* if len < size we need at most N * len bytes, ie < 2 * size;
831 * if len > size we need at most 2 * len */
832 int ret = 0;
833 size_t maxlen = 2 * lmax(size, len);
834 size_t l = 0;
835 unsigned char tmp[maxlen];
836 unsigned char buf[len];
837
838 memcpy(buf, instr, len);
839 memset(foldstr, 0, size);
840 do {
841 memcpy(tmp + l, buf, len);
842 l += len;
843 ret = rr13(buf, len * 8);
844 if (ret) {
845 goto out;
846 }
847 while (l >= size) {
848 add1(foldstr, tmp, size);
849 l -= size;
850 if (l == 0) {
851 break;
852 }
853 memmove(tmp, tmp + size, l);
854 }
855 } while (l != 0);
856 out:
857
858 return ret;
859 }
860
861 void
862 krb5_make_usage(uint32_t usage_no, uint8_t suffix, uint8_t usage_string[KRB5_USAGE_LEN])
863 {
864 uint32_t i;
865
866 for (i = 0; i < 4; i++) {
867 usage_string[i] = ((usage_no >> 8 * (3 - i)) & 0xff);
868 }
869 usage_string[i] = suffix;
870 }
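
/*
 * Worked example: the usage constant is the 32 bit usage number in big
 * endian order followed by the one byte suffix from RFC 3961 section 5.3
 * (0xAA for the Ke encryption key, 0x55 for the Ki integrity key, 0x99 for
 * the Kc checksum key). Assuming KRB5_USAGE_INITIATOR_SEAL has the RFC 4121
 * value 24 (0x18):
 *
 *   krb5_make_usage(KRB5_USAGE_INITIATOR_SEAL, 0xAA, u)
 *       -> u = { 0x00, 0x00, 0x00, 0x18, 0xAA }
 */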
871
872 void
873 krb5_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, void **dkey, size_t dklen)
874 {
875 size_t blocksize = ctx->enc_mode->block_size;
876 cccbc_iv_decl(blocksize, iv);
877 cccbc_ctx_decl(ctx->enc_mode->size, enc_ctx);
878 size_t ksize = 8 * dklen;
879 size_t nblocks = (ksize + 8 * blocksize - 1) / (8 * blocksize);
880 uint8_t *dkptr;
881 uint8_t block[blocksize];
882
883 MALLOC(*dkey, void *, nblocks * blocksize, M_TEMP, M_WAITOK | M_ZERO);
884 dkptr = *dkey;
885
886 krb5_n_fold(cons, conslen, block, blocksize);
887 cccbc_init(ctx->enc_mode, enc_ctx, ctx->keylen, ctx->key);
888 for (size_t i = 0; i < nblocks; i++) {
889 cccbc_set_iv(ctx->enc_mode, iv, NULL);
890 cccbc_update(ctx->enc_mode, enc_ctx, iv, 1, block, block);
891 memcpy(dkptr, block, blocksize);
892 dkptr += blocksize;
893 }
894 }
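
/*
 * Illustrative sketch (not compiled into the build): deriving a sub key
 * with the two helpers above, as gss_crypto_ctx_init() and
 * cc_key_schedule_create() do. The local names are hypothetical.
 */
#if 0
static void
example_derive_checksum_key(crypto_ctx_t ctx, void **ckey)
{
	uint8_t usage[KRB5_USAGE_LEN];

	/* "Kc" constant: usage number followed by 0x99 */
	krb5_make_usage(KRB5_USAGE_INITIATOR_SIGN, 0x99, usage);
	/* Expand it to a key of the base key's length; the caller FREEs *ckey */
	krb5_key_derivation(ctx, usage, KRB5_USAGE_LEN, ckey, ctx->keylen);
}
#endif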
895
896 static void
897 des_make_key(const uint8_t rawkey[7], uint8_t deskey[8])
898 {
899 uint8_t val = 0;
900
901 memcpy(deskey, rawkey, 7);
902 for (int i = 0; i < 7; i++) {
903 val |= ((deskey[i] & 1) << (i + 1));
904 }
905 deskey[7] = val;
906 ccdes_key_set_odd_parity(deskey, 8);
907 }
908
909 static void
910 krb5_3des_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, void **des3key)
911 {
912 const struct ccmode_cbc *cbcmode = ctx->enc_mode;
913 void *rawkey;
914 uint8_t *kptr, *rptr;
915
916 MALLOC(*des3key, void *, 3 * cbcmode->block_size, M_TEMP, M_WAITOK | M_ZERO);
917 krb5_key_derivation(ctx, cons, conslen, &rawkey, 3 * (cbcmode->block_size - 1));
918 kptr = (uint8_t *)*des3key;
919 rptr = (uint8_t *)rawkey;
920
921 for (int i = 0; i < 3; i++) {
922 des_make_key(rptr, kptr);
923 rptr += cbcmode->block_size - 1;
924 kptr += cbcmode->block_size;
925 }
926
927 cc_clear(3 * (cbcmode->block_size - 1), rawkey);
928 FREE(rawkey, M_TEMP);
929 }
930
931 /*
932  * Create a key schedule
933 *
934 */
935 void
936 cc_key_schedule_create(crypto_ctx_t ctx)
937 {
938 uint8_t usage_string[KRB5_USAGE_LEN];
939 lucid_context_t lctx = ctx->gss_ctx;
940 void *ekey;
941
942 switch (lctx->key_data.proto) {
943 case 0: {
944 if (ctx->ks.enc == NULL) {
945 MALLOC(ctx->ks.enc, cccbc_ctx *, ctx->enc_mode->size, M_TEMP, M_WAITOK | M_ZERO);
946 cccbc_init(ctx->enc_mode, ctx->ks.enc, ctx->keylen, ctx->key);
947 }
948 if (ctx->ks.dec == NULL) {
949 MALLOC(ctx->ks.dec, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO);
950 cccbc_init(ctx->dec_mode, ctx->ks.dec, ctx->keylen, ctx->key);
951 }
952 }
953 OS_FALLTHROUGH;
954 case 1: {
955 if (ctx->ks.enc == NULL) {
956 krb5_make_usage(lctx->initiate ?
957 KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL,
958 0xAA, usage_string);
959 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen);
960 MALLOC(ctx->ks.enc, cccbc_ctx *, ctx->enc_mode->size, M_TEMP, M_WAITOK | M_ZERO);
961 cccbc_init(ctx->enc_mode, ctx->ks.enc, ctx->keylen, ekey);
962 FREE(ekey, M_TEMP);
963 }
964 if (ctx->ks.dec == NULL) {
965 krb5_make_usage(lctx->initiate ?
966 KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL,
967 0xAA, usage_string);
968 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen);
969 MALLOC(ctx->ks.dec, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO);
970 cccbc_init(ctx->dec_mode, ctx->ks.dec, ctx->keylen, ekey);
971 FREE(ekey, M_TEMP);
972 }
973 if (ctx->ks.ikey[GSS_SND] == NULL) {
974 krb5_make_usage(lctx->initiate ?
975 KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL,
976 0x55, usage_string);
977 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ks.ikey[GSS_SND], ctx->keylen);
978 }
979 if (ctx->ks.ikey[GSS_RCV] == NULL) {
980 krb5_make_usage(lctx->initiate ?
981 KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL,
982 0x55, usage_string);
983 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ks.ikey[GSS_RCV], ctx->keylen);
984 }
985 }
986 }
987 }
988
989 void
990 gss_crypto_ctx_free(crypto_ctx_t ctx)
991 {
992 ctx->ks.ikey[GSS_SND] = NULL;
993 if (ctx->ks.ikey[GSS_RCV] && ctx->key != ctx->ks.ikey[GSS_RCV]) {
994 cc_clear(ctx->keylen, ctx->ks.ikey[GSS_RCV]);
995 FREE(ctx->ks.ikey[GSS_RCV], M_TEMP);
996 }
997 ctx->ks.ikey[GSS_RCV] = NULL;
998 if (ctx->ks.enc) {
999 cccbc_ctx_clear(ctx->enc_mode->size, ctx->ks.enc);
1000 FREE(ctx->ks.enc, M_TEMP);
1001 ctx->ks.enc = NULL;
1002 }
1003 if (ctx->ks.dec) {
1004 cccbc_ctx_clear(ctx->dec_mode->size, ctx->ks.dec);
1005 FREE(ctx->ks.dec, M_TEMP);
1006 ctx->ks.dec = NULL;
1007 }
1008 if (ctx->ckey[GSS_SND] && ctx->ckey[GSS_SND] != ctx->key) {
1009 cc_clear(ctx->keylen, ctx->ckey[GSS_SND]);
1010 FREE(ctx->ckey[GSS_SND], M_TEMP);
1011 }
1012 ctx->ckey[GSS_SND] = NULL;
1013 if (ctx->ckey[GSS_RCV] && ctx->ckey[GSS_RCV] != ctx->key) {
1014 cc_clear(ctx->keylen, ctx->ckey[GSS_RCV]);
1015 FREE(ctx->ckey[GSS_RCV], M_TEMP);
1016 }
1017 ctx->ckey[GSS_RCV] = NULL;
1018 ctx->key = NULL;
1019 ctx->keylen = 0;
1020 }
1021
1022 int
1023 gss_crypto_ctx_init(struct crypto_ctx *ctx, lucid_context_t lucid)
1024 {
1025 ctx->gss_ctx = lucid;
1026 void *key;
1027 uint8_t usage_string[KRB5_USAGE_LEN];
1028
1029 ctx->keylen = ctx->gss_ctx->ctx_key.key.key_len;
1030 key = ctx->gss_ctx->ctx_key.key.key_val;
1031 ctx->etype = ctx->gss_ctx->ctx_key.etype;
1032 ctx->key = key;
1033
1034 switch (ctx->etype) {
1035 case AES128_CTS_HMAC_SHA1_96:
1036 case AES256_CTS_HMAC_SHA1_96:
1037 ctx->enc_mode = ccaes_cbc_encrypt_mode();
1038 assert(ctx->enc_mode);
1039 ctx->dec_mode = ccaes_cbc_decrypt_mode();
1040 assert(ctx->dec_mode);
1041 ctx->ks.enc = NULL;
1042 ctx->ks.dec = NULL;
1043 ctx->di = ccsha1_di();
1044 assert(ctx->di);
1045 ctx->flags = CRYPTO_CTS_ENABLE;
1046 ctx->mpad = 1;
1047 ctx->digest_size = 12; /* 96 bits */
1048 krb5_make_usage(ctx->gss_ctx->initiate ?
1049 KRB5_USAGE_INITIATOR_SIGN : KRB5_USAGE_ACCEPTOR_SIGN,
1050 0x99, usage_string);
1051 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_SND], ctx->keylen);
1052 krb5_make_usage(ctx->gss_ctx->initiate ?
1053 KRB5_USAGE_ACCEPTOR_SIGN : KRB5_USAGE_INITIATOR_SIGN,
1054 0x99, usage_string);
1055 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_RCV], ctx->keylen);
1056 break;
1057 case DES3_CBC_SHA1_KD:
1058 ctx->enc_mode = ccdes3_cbc_encrypt_mode();
1059 assert(ctx->enc_mode);
1060 ctx->dec_mode = ccdes3_cbc_decrypt_mode();
1061 assert(ctx->dec_mode);
1062 ctx->ks.ikey[GSS_SND] = ctx->key;
1063 ctx->ks.ikey[GSS_RCV] = ctx->key;
1064 ctx->di = ccsha1_di();
1065 assert(ctx->di);
1066 ctx->flags = 0;
1067 ctx->mpad = ctx->enc_mode->block_size;
1068 ctx->digest_size = 20; /* 160 bits */
1069 krb5_make_usage(KRB5_USAGE_ACCEPTOR_SIGN, 0x99, usage_string);
1070 krb5_3des_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_SND]);
1071 krb5_3des_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_RCV]);
1072 break;
1073 default:
1074 return ENOTSUP;
1075 }
1076
1077 ctx->lock = lck_mtx_alloc_init(gss_krb5_mech_grp, LCK_ATTR_NULL);
1078
1079 return 0;
1080 }
1081
1082 /*
1083 * CFX gss support routines
1084 */
1085 /* From the Heimdal cfx.h file: RFC 4121 Crypto framework extensions */
1086 typedef struct gss_cfx_mic_token_desc_struct {
1087 uint8_t TOK_ID[2]; /* 04 04 */
1088 uint8_t Flags;
1089 uint8_t Filler[5];
1090 uint8_t SND_SEQ[8];
1091 } gss_cfx_mic_token_desc, *gss_cfx_mic_token;
1092
1093 typedef struct gss_cfx_wrap_token_desc_struct {
1094 uint8_t TOK_ID[2]; /* 05 04 */
1095 uint8_t Flags;
1096 uint8_t Filler;
1097 uint8_t EC[2];
1098 uint8_t RRC[2];
1099 uint8_t SND_SEQ[8];
1100 } gss_cfx_wrap_token_desc, *gss_cfx_wrap_token;
1101
1102 /* End of cfx.h file */
1103
1104 #define CFXSentByAcceptor (1 << 0)
1105 #define CFXSealed (1 << 1)
1106 #define CFXAcceptorSubkey (1 << 2)
1107
1108 const gss_cfx_mic_token_desc mic_cfx_token = {
1109 .TOK_ID = "\x04\x04",
1110 .Flags = 0,
1111 .Filler = "\xff\xff\xff\xff\xff",
1112 .SND_SEQ = "\x00\x00\x00\x00\x00\x00\x00\x00"
1113 };
1114
1115 const gss_cfx_wrap_token_desc wrap_cfx_token = {
1116 	.TOK_ID = "\x05\x04",
1117 .Flags = 0,
1118 .Filler = '\xff',
1119 .EC = "\x00\x00",
1120 .RRC = "\x00\x00",
1121 .SND_SEQ = "\x00\x00\x00\x00\x00\x00\x00\x00"
1122 };
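
/*
 * On the wire (RFC 4121 section 4.2.6) both token headers are 16 bytes:
 *
 *   MIC token:   04 04 | Flags | ff ff ff ff ff | SND_SEQ (8 bytes)
 *   Wrap token:  05 04 | Flags | ff | EC (2) | RRC (2) | SND_SEQ (8 bytes)
 *
 * EC and RRC are 16 bit big endian; SND_SEQ is a 64 bit big endian
 * sequence number; Flags is a bitmask of the CFX* values above.
 */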
1123
1124 static int
1125 gss_krb5_cfx_verify_mic_token(gss_ctx_id_t ctx, gss_cfx_mic_token token)
1126 {
1127 int i;
1128 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1129 uint8_t flags = 0;
1130
1131 if (token->TOK_ID[0] != mic_cfx_token.TOK_ID[0] || token->TOK_ID[1] != mic_cfx_token.TOK_ID[1]) {
1132 printf("Bad mic TOK_ID %x %x\n", token->TOK_ID[0], token->TOK_ID[1]);
1133 return EBADRPC;
1134 }
1135 if (lctx->initiate) {
1136 flags |= CFXSentByAcceptor;
1137 }
1138 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) {
1139 flags |= CFXAcceptorSubkey;
1140 }
1141 if (token->Flags != flags) {
1142 printf("Bad flags received %x exptect %x\n", token->Flags, flags);
1143 return EBADRPC;
1144 }
1145 for (i = 0; i < 5; i++) {
1146 if (token->Filler[i] != mic_cfx_token.Filler[i]) {
1147 break;
1148 }
1149 }
1150
1151 if (i != 5) {
1152 printf("Bad mic filler %x @ %d\n", token->Filler[i], i);
1153 return EBADRPC;
1154 }
1155
1156 return 0;
1157 }
1158
1159 uint32_t
1160 gss_krb5_cfx_get_mic(uint32_t *minor, /* minor_status */
1161 gss_ctx_id_t ctx, /* context_handle */
1162 gss_qop_t qop __unused, /* qop_req (ignored) */
1163 gss_buffer_t mbp, /* message mbuf */
1164 gss_buffer_t mic /* message_token */)
1165 {
1166 gss_cfx_mic_token_desc token;
1167 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1168 crypto_ctx_t cctx = &ctx->gss_cryptor;
1169 gss_buffer_desc header;
1170 uint32_t rv;
1171 uint64_t seq = htonll(lctx->send_seq);
1172
1173 if (minor == NULL) {
1174 minor = &rv;
1175 }
1176 *minor = 0;
1177 token = mic_cfx_token;
1178 mic->length = sizeof(token) + cctx->digest_size;
1179 MALLOC(mic->value, void *, mic->length, M_TEMP, M_WAITOK | M_ZERO);
1180 if (!lctx->initiate) {
1181 token.Flags |= CFXSentByAcceptor;
1182 }
1183 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) {
1184 token.Flags |= CFXAcceptorSubkey;
1185 }
1186 memcpy(&token.SND_SEQ, &seq, sizeof(lctx->send_seq));
1187 lctx->send_seq++; //XXX should only update this below on success? Heimdal seems to do it this way
1188 header.value = &token;
1189 header.length = sizeof(gss_cfx_mic_token_desc);
1190
1191 *minor = krb5_mic(cctx, NULL, mbp, &header, (uint8_t *)mic->value + sizeof(token), NULL, 0, 0);
1192
1193 if (*minor) {
1194 mic->length = 0;
1195 FREE(mic->value, M_TEMP);
1196 mic->value = NULL;
1197 } else {
1198 memcpy(mic->value, &token, sizeof(token));
1199 }
1200
1201 return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE;
1202 }
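
/*
 * Illustrative sketch (not compiled into the build): a get_mic/verify_mic
 * round trip over a flat buffer. The helper name and message handling are
 * hypothetical; the calls are the routines defined in this file.
 */
#if 0
static int
example_mic_round_trip(gss_ctx_id_t ctx, gss_buffer_t msg)
{
	gss_buffer_desc mic = { .length = 0, .value = NULL };
	uint32_t minor, major;
	gss_qop_t qop;

	major = gss_krb5_cfx_get_mic(&minor, ctx, GSS_C_QOP_DEFAULT, msg, &mic);
	if (major != GSS_S_COMPLETE) {
		return EAUTH;
	}
	major = gss_krb5_cfx_verify_mic(&minor, ctx, msg, &mic, &qop);
	gss_release_buffer(&minor, &mic);
	return (major == GSS_S_COMPLETE) ? 0 : EAUTH;
}
#endif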
1203
1204 uint32_t
1205 gss_krb5_cfx_verify_mic(uint32_t *minor, /* minor_status */
1206 gss_ctx_id_t ctx, /* context_handle */
1207 gss_buffer_t mbp, /* message_buffer */
1208 gss_buffer_t mic, /* message_token */
1209 gss_qop_t *qop /* qop_state */)
1210 {
1211 gss_cfx_mic_token token = mic->value;
1212 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1213 crypto_ctx_t cctx = &ctx->gss_cryptor;
1214 uint8_t *digest = (uint8_t *)mic->value + sizeof(gss_cfx_mic_token_desc);
1215 int verified = 0;
1216 uint64_t seq;
1217 uint32_t rv;
1218 gss_buffer_desc header;
1219
1220 if (qop) {
1221 *qop = GSS_C_QOP_DEFAULT;
1222 }
1223 if (minor == NULL) {
1224 minor = &rv;
1225 }
1226
1227 if (mic->length != sizeof(gss_cfx_mic_token_desc) + cctx->digest_size) {
1228 printf("mic token wrong length\n");
1229 *minor = EBADRPC;
1230 goto out;
1231 }
1232 *minor = gss_krb5_cfx_verify_mic_token(ctx, token);
1233 if (*minor) {
1234 return GSS_S_FAILURE;
1235 }
1236 header.value = token;
1237 header.length = sizeof(gss_cfx_mic_token_desc);
1238 *minor = krb5_mic(cctx, NULL, mbp, &header, digest, &verified, 0, 0);
1239
1240 if (verified) {
1241 //XXX errors and such? Sequencing and replay? Not supported in RPCSEC_GSS
1242 memcpy(&seq, token->SND_SEQ, sizeof(uint64_t));
1243 seq = ntohll(seq);
1244 lctx->recv_seq = seq;
1245 }
1246
1247 out:
1248 return verified ? GSS_S_COMPLETE : GSS_S_BAD_SIG;
1249 }
1250
1251 uint32_t
1252 gss_krb5_cfx_get_mic_mbuf(uint32_t *minor, /* minor_status */
1253 gss_ctx_id_t ctx, /* context_handle */
1254 gss_qop_t qop __unused, /* qop_req (ignored) */
1255 mbuf_t mbp, /* message mbuf */
1256     size_t offset, /* offset */
1257 size_t len, /* length */
1258 gss_buffer_t mic /* message_token */)
1259 {
1260 gss_cfx_mic_token_desc token;
1261 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1262 crypto_ctx_t cctx = &ctx->gss_cryptor;
1263 uint32_t rv;
1264 uint64_t seq = htonll(lctx->send_seq);
1265 gss_buffer_desc header;
1266
1267 if (minor == NULL) {
1268 minor = &rv;
1269 }
1270 *minor = 0;
1271
1272 token = mic_cfx_token;
1273 mic->length = sizeof(token) + cctx->digest_size;
1274 MALLOC(mic->value, void *, mic->length, M_TEMP, M_WAITOK | M_ZERO);
1275 if (!lctx->initiate) {
1276 token.Flags |= CFXSentByAcceptor;
1277 }
1278 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) {
1279 token.Flags |= CFXAcceptorSubkey;
1280 }
1281
1282 memcpy(&token.SND_SEQ, &seq, sizeof(lctx->send_seq));
1283 lctx->send_seq++; //XXX should only update this below on success? Heimdal seems to do it this way
1284
1285 header.length = sizeof(token);
1286 header.value = &token;
1287
1288 len = len ? len : gss_mbuf_len(mbp, offset);
1289 *minor = krb5_mic_mbuf(cctx, NULL, mbp, offset, len, &header, (uint8_t *)mic->value + sizeof(token), NULL, 0, 0);
1290
1291 if (*minor) {
1292 mic->length = 0;
1293 FREE(mic->value, M_TEMP);
1294 mic->value = NULL;
1295 } else {
1296 memcpy(mic->value, &token, sizeof(token));
1297 }
1298
1299 return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE;
1300 }
1301
1302
1303 uint32_t
1304 gss_krb5_cfx_verify_mic_mbuf(uint32_t *minor, /* minor_status */
1305 gss_ctx_id_t ctx, /* context_handle */
1306 mbuf_t mbp, /* message_buffer */
1307 size_t offset, /* offset */
1308 size_t len, /* length */
1309 gss_buffer_t mic, /* message_token */
1310 gss_qop_t *qop /* qop_state */)
1311 {
1312 gss_cfx_mic_token token = mic->value;
1313 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1314 crypto_ctx_t cctx = &ctx->gss_cryptor;
1315 uint8_t *digest = (uint8_t *)mic->value + sizeof(gss_cfx_mic_token_desc);
1316 int verified;
1317 uint64_t seq;
1318 uint32_t rv;
1319 gss_buffer_desc header;
1320
1321 if (qop) {
1322 *qop = GSS_C_QOP_DEFAULT;
1323 }
1324
1325 if (minor == NULL) {
1326 minor = &rv;
1327 }
1328
1329 *minor = gss_krb5_cfx_verify_mic_token(ctx, token);
1330 if (*minor) {
1331 return GSS_S_FAILURE;
1332 }
1333
1334 header.length = sizeof(gss_cfx_mic_token_desc);
1335 header.value = mic->value;
1336
1337 *minor = krb5_mic_mbuf(cctx, NULL, mbp, offset, len, &header, digest, &verified, 0, 0);
1338 if (*minor) {
1339 return GSS_S_FAILURE;
1340 }
1341
1342 	//XXX errors and such? Sequencing and replay? Not supported in RPCSEC_GSS
1343 memcpy(&seq, token->SND_SEQ, sizeof(uint64_t));
1344 seq = ntohll(seq);
1345 lctx->recv_seq = seq;
1346
1347 return verified ? GSS_S_COMPLETE : GSS_S_BAD_SIG;
1348 }
1349
1350 errno_t
1351 krb5_cfx_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t *len, int encrypt, int reverse)
1352 {
1353 const struct ccmode_cbc *ccmode = encrypt ? ctx->enc_mode : ctx->dec_mode;
1354 uint8_t confounder[ccmode->block_size];
1355 uint8_t digest[ctx->digest_size];
1356 size_t tlen, r = 0;
1357 errno_t error;
1358
1359 if (encrypt) {
1360 assert(ccmode->block_size <= UINT_MAX);
1361 read_random(confounder, (u_int)ccmode->block_size);
1362 error = gss_prepend_mbuf(mbp, confounder, ccmode->block_size);
1363 if (error) {
1364 return error;
1365 }
1366 tlen = *len + ccmode->block_size;
1367 if (ctx->mpad > 1) {
1368 r = ctx->mpad - (tlen % ctx->mpad);
1369 }
1370 /* We expect that r == 0 from krb5_cfx_wrap */
1371 if (r != 0) {
1372 uint8_t mpad[r];
1373 memset(mpad, 0, r);
1374 error = gss_append_mbuf(*mbp, mpad, r);
1375 if (error) {
1376 return error;
1377 }
1378 }
1379 tlen += r;
1380 error = krb5_mic_mbuf(ctx, NULL, *mbp, 0, tlen, NULL, digest, NULL, 1, 0);
1381 if (error) {
1382 return error;
1383 }
1384 error = krb5_crypt_mbuf(ctx, mbp, tlen, 1, NULL);
1385 if (error) {
1386 return error;
1387 }
1388 error = gss_append_mbuf(*mbp, digest, ctx->digest_size);
1389 if (error) {
1390 return error;
1391 }
1392 *len = tlen + ctx->digest_size;
1393 return 0;
1394 } else {
1395 int verf;
1396 cccbc_ctx *ks = NULL;
1397
1398 if (*len < ctx->digest_size + sizeof(confounder)) {
1399 return EBADRPC;
1400 }
1401 tlen = *len - ctx->digest_size;
1402 /* get the digest */
1403 		error = mbuf_copydata(*mbp, tlen, ctx->digest_size, digest);
1404 		if (error) {
1405 			return error;
1406 		}
1407 		/* Remove the digest from the mbuf chain */
1408 		error = gss_strip_mbuf(*mbp, -ctx->digest_size);
1409 		if (error) {
1410 			return error;
1411 		}
1409
1410 if (reverse) {
1411 /*
1412 * Derive a key schedule that the sender can unwrap with. This
1413 * is so that RPCSEC_GSS can restore encrypted arguments for
1414 * resending. We do that because the RPCSEC_GSS sequence number in
1415 * the rpc header is prepended to the body of the message before wrapping.
1416 */
1417 void *ekey;
1418 uint8_t usage_string[KRB5_USAGE_LEN];
1419 lucid_context_t lctx = ctx->gss_ctx;
1420
1421 krb5_make_usage(lctx->initiate ?
1422 KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL,
1423 0xAA, usage_string);
1424 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen);
1425 MALLOC(ks, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO);
1426 cccbc_init(ctx->dec_mode, ks, ctx->keylen, ekey);
1427 FREE(ekey, M_TEMP);
1428 }
1429 error = krb5_crypt_mbuf(ctx, mbp, tlen, 0, ks);
1430 FREE(ks, M_TEMP);
1431 if (error) {
1432 return error;
1433 }
1434 error = krb5_mic_mbuf(ctx, NULL, *mbp, 0, tlen, NULL, digest, &verf, 1, reverse);
1435 if (error) {
1436 return error;
1437 }
1438 if (!verf) {
1439 return EBADRPC;
1440 }
1441 /* strip off the confounder */
1442 assert(ccmode->block_size <= INT_MAX);
1443 error = gss_strip_mbuf(*mbp, (int)ccmode->block_size);
1444 if (error) {
1445 return error;
1446 }
1447 *len = tlen - ccmode->block_size;
1448 }
1449 return 0;
1450 }
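
/*
 * Sketch of the transform krb5_cfx_crypt_mbuf() applies for the sealed case
 * (the wrap path has already appended any pad bytes and a copy of the token
 * header before calling here):
 *
 *   encrypt:  plaintext
 *             -> confounder | plaintext             (random block prepended)
 *             -> confounder | plaintext | 0-pad     (padded to a multiple of mpad)
 *             -> E(confounder | plaintext | 0-pad) | HMAC_Ki(confounder | plaintext | 0-pad)
 *
 *   decrypt reverses the steps: strip and save the trailing HMAC, decrypt,
 *   recompute and compare the checksum, then strip the leading confounder.
 */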
1451
1452 uint32_t
1453 gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */
1454 gss_ctx_id_t ctx, /* context_handle */
1455 int conf_flag, /* conf_req_flag */
1456 gss_qop_t qop __unused, /* qop_req */
1457 mbuf_t *mbp, /* input/output message_buffer */
1458 size_t len, /* mbuf chain length */
1459 int *conf /* conf_state */)
1460 {
1461 gss_cfx_wrap_token_desc token;
1462 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1463 crypto_ctx_t cctx = &ctx->gss_cryptor;
1464 int error = 0;
1465 uint32_t mv;
1466 uint64_t seq = htonll(lctx->send_seq);
1467
1468 if (minor == NULL) {
1469 minor = &mv;
1470 }
1471 if (conf) {
1472 *conf = conf_flag;
1473 }
1474
1475 *minor = 0;
1476 token = wrap_cfx_token;
1477 if (!lctx->initiate) {
1478 token.Flags |= CFXSentByAcceptor;
1479 }
1480 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) {
1481 token.Flags |= CFXAcceptorSubkey;
1482 }
1483 memcpy(&token.SND_SEQ, &seq, sizeof(uint64_t));
1484 lctx->send_seq++;
1485 if (conf_flag) {
1486 uint8_t pad[cctx->mpad];
1487 size_t plen = 0;
1488
1489 token.Flags |= CFXSealed;
1490 memset(pad, 0, cctx->mpad);
1491 if (cctx->mpad > 1) {
1492 			size_t val = cctx->mpad - ((len + sizeof(gss_cfx_wrap_token_desc)) % cctx->mpad);
1493 			plen = val;
1494 			token.EC[0] = ((plen >> 8) & 0xff);
1495 			token.EC[1] = (plen & 0xff);
1496 }
1497 if (plen) {
1498 error = gss_append_mbuf(*mbp, pad, plen);
1499 len += plen;
1500 }
1501 if (error == 0) {
1502 error = gss_append_mbuf(*mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc));
1503 len += sizeof(gss_cfx_wrap_token_desc);
1504 }
1505 if (error == 0) {
1506 error = krb5_cfx_crypt_mbuf(cctx, mbp, &len, 1, 0);
1507 }
1508 if (error == 0) {
1509 error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc));
1510 }
1511 } else {
1512 uint8_t digest[cctx->digest_size];
1513 gss_buffer_desc header;
1514
1515 header.length = sizeof(token);
1516 header.value = &token;
1517
1518 error = krb5_mic_mbuf(cctx, NULL, *mbp, 0, len, &header, digest, NULL, 1, 0);
1519 if (error == 0) {
1520 error = gss_append_mbuf(*mbp, digest, cctx->digest_size);
1521 if (error == 0) {
1522 				uint32_t plen = cctx->digest_size;
1523 				token.EC[0] = ((plen >> 8) & 0xff);
1524 				token.EC[1] = (plen & 0xff);
1524 error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc));
1525 }
1526 }
1527 }
1528 if (error) {
1529 *minor = error;
1530 return GSS_S_FAILURE;
1531 }
1532
1533 return GSS_S_COMPLETE;
1534 }
1535
1536 /*
1537  * Given a wrap token that has an RRC, move the rotated trailer back to the end.
1538 */
1539 static void
1540 gss_krb5_cfx_unwrap_rrc_mbuf(mbuf_t header, size_t rrc)
1541 {
1542 mbuf_t body, trailer;
1543
1544 gss_normalize_mbuf(header, sizeof(gss_cfx_wrap_token_desc), &rrc, &trailer, &body, 0);
1545 gss_join_mbuf(header, body, trailer);
1546 }
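
/*
 * Example: a sealed token that arrives with RRC = r has the last r bytes of
 * the encrypted payload (typically the 16 byte encrypted token copy plus the
 * checksum, i.e. r = 28 for the SHA-1 AES enctypes with their 12 byte
 * checksum) sitting immediately after the 16 byte wrap token header. The
 * routine above carves those r bytes out and splices them back onto the end,
 * restoring the unrotated layout that krb5_cfx_crypt_mbuf() expects.
 */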
1547
1548 uint32_t
1549 gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */
1550 gss_ctx_id_t ctx, /* context_handle */
1551 mbuf_t *mbp, /* input/output message_buffer */
1552 size_t len, /* mbuf chain length */
1553 int *conf_flag, /* conf_state */
1554 gss_qop_t *qop /* qop state */)
1555 {
1556 gss_cfx_wrap_token_desc token;
1557 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1558 crypto_ctx_t cctx = &ctx->gss_cryptor;
1559 int error, conf;
1560 uint32_t ec = 0, rrc = 0;
1561 uint64_t seq;
1562 int reverse = (*qop == GSS_C_QOP_REVERSE);
1563 int initiate = lctx->initiate ? (reverse ? 0 : 1) : (reverse ? 1 : 0);
1564
1565 	error = mbuf_copydata(*mbp, 0, sizeof(gss_cfx_wrap_token_desc), &token);
1566 	if (error) {
1567 		goto badrpc;
1568 	}
1569 	gss_strip_mbuf(*mbp, sizeof(gss_cfx_wrap_token_desc));
1570 	len -= sizeof(gss_cfx_wrap_token_desc);
1568
1569 /* Check for valid token */
1570 if (token.TOK_ID[0] != wrap_cfx_token.TOK_ID[0] ||
1571 token.TOK_ID[1] != wrap_cfx_token.TOK_ID[1] ||
1572 token.Filler != wrap_cfx_token.Filler) {
1573 printf("Token id does not match\n");
1574 goto badrpc;
1575 }
1576 if ((initiate && !(token.Flags & CFXSentByAcceptor)) ||
1577 (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey && !(token.Flags & CFXAcceptorSubkey))) {
1578 printf("Bad flags %x\n", token.Flags);
1579 goto badrpc;
1580 }
1581
1582 /* XXX Sequence replay detection */
1583 memcpy(&seq, token.SND_SEQ, sizeof(seq));
1584 seq = ntohll(seq);
1585 lctx->recv_seq = seq;
1586
1587 ec = (token.EC[0] << 8) | token.EC[1];
1588 rrc = (token.RRC[0] << 8) | token.RRC[1];
1589 *qop = GSS_C_QOP_DEFAULT;
1590 conf = ((token.Flags & CFXSealed) == CFXSealed);
1591 if (conf_flag) {
1592 *conf_flag = conf;
1593 }
1594 if (conf) {
1595 gss_cfx_wrap_token_desc etoken;
1596
1597 if (rrc) { /* Handle Right rotation count */
1598 gss_krb5_cfx_unwrap_rrc_mbuf(*mbp, rrc);
1599 }
1600 error = krb5_cfx_crypt_mbuf(cctx, mbp, &len, 0, reverse);
1601 if (error) {
1602 printf("krb5_cfx_crypt_mbuf %d\n", error);
1603 *minor = error;
1604 return GSS_S_FAILURE;
1605 }
1606 if (len >= sizeof(gss_cfx_wrap_token_desc)) {
1607 len -= sizeof(gss_cfx_wrap_token_desc);
1608 } else {
1609 goto badrpc;
1610 }
1611 mbuf_copydata(*mbp, len, sizeof(gss_cfx_wrap_token_desc), &etoken);
1612 		/* Verify etoken against the token; they should be the same, except the RRC field is always zero */
1613 token.RRC[0] = token.RRC[1] = 0;
1614 if (memcmp(&token, &etoken, sizeof(gss_cfx_wrap_token_desc)) != 0) {
1615 printf("Encrypted token mismach\n");
1616 goto badrpc;
1617 }
1618 /* strip the encrypted token and any pad bytes */
1619 gss_strip_mbuf(*mbp, -(sizeof(gss_cfx_wrap_token_desc) + ec));
1620 len -= (sizeof(gss_cfx_wrap_token_desc) + ec);
1621 } else {
1622 uint8_t digest[cctx->digest_size];
1623 int verf;
1624 gss_buffer_desc header;
1625
1626 		if (ec != cctx->digest_size || len < cctx->digest_size) {
1627 goto badrpc;
1628 }
1629 len -= cctx->digest_size;
1630 mbuf_copydata(*mbp, len, cctx->digest_size, digest);
1631 gss_strip_mbuf(*mbp, -cctx->digest_size);
1632 		/* When calculating the mic the header fields EC and RRC must be zero */
1633 token.EC[0] = token.EC[1] = token.RRC[0] = token.RRC[1] = 0;
1634 header.value = &token;
1635 header.length = sizeof(gss_cfx_wrap_token_desc);
1636 error = krb5_mic_mbuf(cctx, NULL, *mbp, 0, len, &header, digest, &verf, 1, reverse);
1637 if (error) {
1638 goto badrpc;
1639 }
1640 }
1641 return GSS_S_COMPLETE;
1642
1643 badrpc:
1644 *minor = EBADRPC;
1645 return GSS_S_FAILURE;
1646 }
1647
1648 /*
1649 * RFC 1964 3DES support
1650 */
1651
1652 typedef struct gss_1964_mic_token_desc_struct {
1653 uint8_t TOK_ID[2]; /* 01 01 */
1654 uint8_t Sign_Alg[2];
1655 uint8_t Filler[4]; /* ff ff ff ff */
1656 } gss_1964_mic_token_desc, *gss_1964_mic_token;
1657
1658 typedef struct gss_1964_wrap_token_desc_struct {
1659 uint8_t TOK_ID[2]; /* 02 01 */
1660 uint8_t Sign_Alg[2];
1661 uint8_t Seal_Alg[2];
1662 uint8_t Filler[2]; /* ff ff */
1663 } gss_1964_wrap_token_desc, *gss_1964_wrap_token;
1664
1665 typedef struct gss_1964_delete_token_desc_struct {
1666 uint8_t TOK_ID[2]; /* 01 02 */
1667 uint8_t Sign_Alg[2];
1668 uint8_t Filler[4]; /* ff ff ff ff */
1669 } gss_1964_delete_token_desc, *gss_1964_delete_token;
1670
1671 typedef struct gss_1964_header_desc_struct {
1672 uint8_t App0; /* 0x60 Application 0 constructed */
1673 uint8_t AppLen[]; /* Variable Der length */
1674 } gss_1964_header_desc, *gss_1964_header;
1675
1676 typedef union {
1677 gss_1964_mic_token_desc mic_tok;
1678 gss_1964_wrap_token_desc wrap_tok;
1679 gss_1964_delete_token_desc del_tok;
1680 } gss_1964_tok_type __attribute__((transparent_union));
1681
1682 typedef struct gss_1964_token_body_struct {
1683 uint8_t OIDType; /* 0x06 */
1684 uint8_t OIDLen; /* 0x09 */
1685 uint8_t kerb_mech[9]; /* Der Encode kerberos mech 1.2.840.113554.1.2.2
1686 * 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 */
1687 gss_1964_tok_type body;
1688 uint8_t SND_SEQ[8];
1689 uint8_t Hash[]; /* Mic */
1690 } gss_1964_token_body_desc, *gss_1964_token_body;
1691
1692
1693 gss_1964_header_desc tok_1964_header = {
1694 .App0 = 0x60
1695 };
1696
1697 gss_1964_mic_token_desc mic_1964_token = {
1698 .TOK_ID = "\x01\x01",
1699 .Filler = "\xff\xff\xff\xff"
1700 };
1701
1702 gss_1964_wrap_token_desc wrap_1964_token = {
1703 .TOK_ID = "\x02\x01",
1704 .Filler = "\xff\xff"
1705 };
1706
1707 gss_1964_delete_token_desc del_1964_token = {
1708 	.TOK_ID = "\x01\x02",
1709 .Filler = "\xff\xff\xff\xff"
1710 };
1711
1712 gss_1964_token_body_desc body_1964_token = {
1713 .OIDType = 0x06,
1714 .OIDLen = 0x09,
1715 .kerb_mech = "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02",
1716 };
1717
1718 #define GSS_KRB5_3DES_MAXTOKSZ (sizeof(gss_1964_header_desc) + 5 /* max der length supported */ + sizeof(gss_1964_token_body_desc))
1719
1720 uint32_t gss_krb5_3des_get_mic(uint32_t *, gss_ctx_id_t, gss_qop_t, gss_buffer_t, gss_buffer_t);
1721 uint32_t gss_krb5_3des_verify_mic(uint32_t *, gss_ctx_id_t, gss_buffer_t, gss_buffer_t, gss_qop_t *);
1722 uint32_t gss_krb5_3des_get_mic_mbuf(uint32_t *, gss_ctx_id_t, gss_qop_t, mbuf_t, size_t, size_t, gss_buffer_t);
1723 uint32_t gss_krb5_3des_verify_mic_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t, size_t, size_t, gss_buffer_t, gss_qop_t *);
1724 uint32_t gss_krb5_3des_wrap_mbuf(uint32_t *, gss_ctx_id_t, int, gss_qop_t, mbuf_t *, size_t, int *);
1725 uint32_t gss_krb5_3des_unwrap_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t *, size_t, int *, gss_qop_t *);
1726
1727 /*
1728 * Decode an ASN.1 DER length field
1729 */
1730 static ssize_t
1731 gss_krb5_der_length_get(uint8_t **pp)
1732 {
1733 uint8_t *p = *pp;
1734 uint32_t flen, len = 0;
1735
1736 flen = *p & 0x7f;
1737
1738 if (*p++ & 0x80) {
1739 if (flen > sizeof(uint32_t)) {
1740 return -1;
1741 }
1742 while (flen--) {
1743 len = (len << 8) + *p++;
1744 }
1745 } else {
1746 len = flen;
1747 }
1748 *pp = p;
1749 return len;
1750 }
1751
1752 /*
1753 * Determine size of ASN.1 DER length
1754 */
1755 static int
1756 gss_krb5_der_length_size(size_t len)
1757 {
1758 return
1759 len < (1 << 7) ? 1 :
1760 len < (1 << 8) ? 2 :
1761 len < (1 << 16) ? 3 :
1762 len < (1 << 24) ? 4 : 5;
1763 }
1764
1765 /*
1766 * Encode an ASN.1 DER length field
1767 */
1768 static void
1769 gss_krb5_der_length_put(uint8_t **pp, size_t len)
1770 {
1771 int sz = gss_krb5_der_length_size(len);
1772 uint8_t *p = *pp;
1773
1774 if (sz == 1) {
1775 *p++ = (uint8_t) len;
1776 } else {
1777 *p++ = (uint8_t) ((sz - 1) | 0x80);
1778 sz -= 1;
1779 while (sz--) {
1780 *p++ = (uint8_t) ((len >> (sz * 8)) & 0xff);
1781 }
1782 }
1783
1784 *pp = p;
1785 }
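
/*
 * DER length examples for the helpers above:
 *
 *   len = 0x45   -> 45          (short form, single byte)
 *   len = 0x91   -> 81 91       (long form, one length byte)
 *   len = 0x1234 -> 82 12 34    (long form, two length bytes)
 *
 * gss_krb5_der_length_size() returns 1, 2 and 3 for these respectively,
 * and gss_krb5_der_length_get() decodes them back to the same values.
 */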
1786
1787 static void
1788 gss_krb5_3des_token_put(gss_ctx_id_t ctx, gss_1964_tok_type body, gss_buffer_t hash, size_t datalen, gss_buffer_t des3_token)
1789 {
1790 gss_1964_header token;
1791 gss_1964_token_body tokbody;
1792 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1793 crypto_ctx_t cctx = &ctx->gss_cryptor;
1794 uint32_t seq = (uint32_t) (lctx->send_seq++ & 0xffff);
1795 size_t toklen = sizeof(gss_1964_token_body_desc) + cctx->digest_size;
1796 size_t alloclen = toklen + sizeof(gss_1964_header_desc) + gss_krb5_der_length_size(toklen + datalen);
1797 uint8_t *tokptr;
1798
1799 MALLOC(token, gss_1964_header, alloclen, M_TEMP, M_WAITOK | M_ZERO);
1800 *token = tok_1964_header;
1801 tokptr = token->AppLen;
1802 gss_krb5_der_length_put(&tokptr, toklen + datalen);
1803 tokbody = (gss_1964_token_body)tokptr;
1804 	*tokbody = body_1964_token; /* Initialize the token body */
1805 tokbody->body = body; /* and now set the body to the token type passed in */
1806 seq = htonl(seq);
1807 for (int i = 0; i < 4; i++) {
1808 tokbody->SND_SEQ[i] = (uint8_t)((seq >> (i * 8)) & 0xff);
1809 }
1810 for (int i = 4; i < 8; i++) {
1811 tokbody->SND_SEQ[i] = lctx->initiate ? 0x00 : 0xff;
1812 }
1813
1814 size_t blocksize = cctx->enc_mode->block_size;
1815 cccbc_iv_decl(blocksize, iv);
1816 cccbc_ctx_decl(cctx->enc_mode->size, enc_ctx);
1817 cccbc_set_iv(cctx->enc_mode, iv, hash->value);
1818 cccbc_init(cctx->enc_mode, enc_ctx, cctx->keylen, cctx->key);
1819 cccbc_update(cctx->enc_mode, enc_ctx, iv, 1, tokbody->SND_SEQ, tokbody->SND_SEQ);
1820
1821 assert(hash->length == cctx->digest_size);
1822 memcpy(tokbody->Hash, hash->value, hash->length);
1823 des3_token->length = alloclen;
1824 des3_token->value = token;
1825 }
1826
1827 static int
1828 gss_krb5_3des_token_get(gss_ctx_id_t ctx, gss_buffer_t intok,
1829 gss_1964_tok_type body, gss_buffer_t hash, size_t *offset, size_t *len, int reverse)
1830 {
1831 gss_1964_header token = intok->value;
1832 gss_1964_token_body tokbody;
1833 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1834 crypto_ctx_t cctx = &ctx->gss_cryptor;
1835 ssize_t length;
1836 size_t toklen;
1837 uint8_t *tokptr;
1838 uint32_t seq;
1839 int initiate;
1840
1841 if (token->App0 != tok_1964_header.App0) {
1842 printf("%s: bad framing\n", __func__);
1843 printgbuf(__func__, intok);
1844 return EBADRPC;
1845 }
1846 tokptr = token->AppLen;
1847 length = gss_krb5_der_length_get(&tokptr);
1848 if (length < 0) {
1849 printf("%s: invalid length\n", __func__);
1850 printgbuf(__func__, intok);
1851 return EBADRPC;
1852 }
1853 toklen = sizeof(gss_1964_header_desc) + gss_krb5_der_length_size(length)
1854 + sizeof(gss_1964_token_body_desc);
1855
1856 if (intok->length < toklen + cctx->digest_size) {
1857 printf("%s: token to short", __func__);
1858 printf("toklen = %d, length = %d\n", (int)toklen, (int)length);
1859 printgbuf(__func__, intok);
1860 return EBADRPC;
1861 }
1862
1863 if (offset) {
1864 *offset = toklen + cctx->digest_size;
1865 }
1866
1867 if (len) {
1868 *len = length - sizeof(gss_1964_token_body_desc) - cctx->digest_size;
1869 }
1870
1871 tokbody = (gss_1964_token_body)tokptr;
1872 if (tokbody->OIDType != body_1964_token.OIDType ||
1873 tokbody->OIDLen != body_1964_token.OIDLen ||
1874 memcmp(tokbody->kerb_mech, body_1964_token.kerb_mech, tokbody->OIDLen) != 0) {
1875 printf("%s: Invalid mechanism\n", __func__);
1876 printgbuf(__func__, intok);
1877 return EBADRPC;
1878 }
1879 if (memcmp(&tokbody->body, &body, sizeof(gss_1964_tok_type)) != 0) {
1880 printf("%s: Invalid body\n", __func__);
1881 printgbuf(__func__, intok);
1882 return EBADRPC;
1883 }
1884 size_t blocksize = cctx->enc_mode->block_size;
1885 uint8_t *block = tokbody->SND_SEQ;
1886
1887 assert(blocksize == sizeof(tokbody->SND_SEQ));
1888 cccbc_iv_decl(blocksize, iv);
1889 cccbc_ctx_decl(cctx->dec_mode->size, dec_ctx);
1890 cccbc_set_iv(cctx->dec_mode, iv, tokbody->Hash);
1891 cccbc_init(cctx->dec_mode, dec_ctx, cctx->keylen, cctx->key);
1892 cccbc_update(cctx->dec_mode, dec_ctx, iv, 1, block, block);
1893
1894 initiate = lctx->initiate ? (reverse ? 0 : 1) : (reverse ? 1 : 0);
1895 for (int i = 4; i < 8; i++) {
1896 if (tokbody->SND_SEQ[i] != (initiate ? 0xff : 0x00)) {
1897 printf("%s: Invalid sequence direction bytes\n", __func__);
1898 printgbuf(__func__, intok);
1899 return EAUTH;
1900 }
1901 }
1902
1903 memcpy(&seq, tokbody->SND_SEQ, sizeof(uint32_t));
1904
1905 lctx->recv_seq = ntohl(seq);
1906
1907 assert(hash->length >= cctx->digest_size);
1908 memcpy(hash->value, tokbody->Hash, cctx->digest_size);
1909
1910 return 0;
1911 }
1912
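/*
 * Compute a MIC token over a contiguous buffer for the RFC 1964 (DES3)
 * mechanism: checksum the token header and message, then wrap the
 * result in a MIC token via gss_krb5_3des_token_put().
 */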
1913 uint32_t
1914 gss_krb5_3des_get_mic(uint32_t *minor, /* minor status */
1915 gss_ctx_id_t ctx, /* krb5 context id */
1916 gss_qop_t qop __unused, /* qop_req (ignored) */
1917 gss_buffer_t mbp, /* message buffer in */
1918 gss_buffer_t mic) /* mic token out */
1919 {
1920 gss_1964_mic_token_desc tokbody = mic_1964_token;
1921 crypto_ctx_t cctx = &ctx->gss_cryptor;
1922 gss_buffer_desc hash;
1923 gss_buffer_desc header;
1924 uint8_t hashval[cctx->digest_size];
1925
1926 hash.length = cctx->digest_size;
1927 hash.value = hashval;
1928 tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
1929 tokbody.Sign_Alg[1] = 0x00;
1930 header.length = sizeof(gss_1964_mic_token_desc);
1931 header.value = &tokbody;
1932
1933 /* Hash the data */
1934 *minor = krb5_mic(cctx, &header, mbp, NULL, hashval, NULL, 0, 0);
1935 if (*minor) {
1936 return GSS_S_FAILURE;
1937 }
1938
1939 /* Make the token */
1940 gss_krb5_3des_token_put(ctx, tokbody, &hash, 0, mic);
1941
1942 return GSS_S_COMPLETE;
1943 }
1944
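/*
 * Verify a MIC token over a contiguous buffer for the RFC 1964 (DES3)
 * mechanism: parse the supplied token, recompute the checksum over the
 * header and message, and compare it against the checksum carried in
 * the token.
 */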
1945 uint32_t
1946 gss_krb5_3des_verify_mic(uint32_t *minor,
1947 gss_ctx_id_t ctx,
1948 gss_buffer_t mbp,
1949 gss_buffer_t mic,
1950 gss_qop_t *qop)
1951 {
1952 crypto_ctx_t cctx = &ctx->gss_cryptor;
1953 uint8_t hashval[cctx->digest_size];
1954 gss_buffer_desc hash;
1955 gss_1964_mic_token_desc mtok = mic_1964_token;
1956 gss_buffer_desc header;
1957 int verf;
1958
1959 mtok.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
1960 mtok.Sign_Alg[1] = 0x00;
1961 hash.length = cctx->digest_size;
1962 hash.value = hashval;
1963 header.length = sizeof(gss_1964_mic_token_desc);
1964 header.value = &mtok;
1965
1966 if (qop) {
1967 *qop = GSS_C_QOP_DEFAULT;
1968 }
1969
1970 *minor = gss_krb5_3des_token_get(ctx, mic, mtok, &hash, NULL, NULL, 0);
1971 if (*minor) {
1972 return GSS_S_FAILURE;
1973 }
1974
1975 *minor = krb5_mic(cctx, &header, mbp, NULL, hashval, &verf, 0, 0);
1976 if (*minor) {
1977 return GSS_S_FAILURE;
1978 }
1979
1980 return verf ? GSS_S_COMPLETE : GSS_S_BAD_SIG;
1981 }
1982
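/*
 * As gss_krb5_3des_get_mic(), but the message is an mbuf chain
 * checksummed from offset for len bytes.
 */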
1983 uint32_t
1984 gss_krb5_3des_get_mic_mbuf(uint32_t *minor,
1985 gss_ctx_id_t ctx,
1986 gss_qop_t qop __unused,
1987 mbuf_t mbp,
1988 size_t offset,
1989 size_t len,
1990 gss_buffer_t mic)
1991 {
1992 gss_1964_mic_token_desc tokbody = mic_1964_token;
1993 crypto_ctx_t cctx = &ctx->gss_cryptor;
1994 gss_buffer_desc header;
1995 gss_buffer_desc hash;
1996 uint8_t hashval[cctx->digest_size];
1997
1998 hash.length = cctx->digest_size;
1999 hash.value = hashval;
2000 tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
2001 tokbody.Sign_Alg[1] = 0x00;
2002 header.length = sizeof(gss_1964_mic_token_desc);
2003 header.value = &tokbody;
2004
2005 /* Hash the data */
2006 *minor = krb5_mic_mbuf(cctx, &header, mbp, offset, len, NULL, hashval, NULL, 0, 0);
2007 if (*minor) {
2008 return GSS_S_FAILURE;
2009 }
2010
2011 /* Make the token */
2012 gss_krb5_3des_token_put(ctx, tokbody, &hash, 0, mic);
2013
2014 return GSS_S_COMPLETE;
2015 }
2016
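/*
 * As gss_krb5_3des_verify_mic(), but the message is an mbuf chain
 * checksummed from offset for len bytes.
 */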
2017 uint32_t
2018 gss_krb5_3des_verify_mic_mbuf(uint32_t *minor,
2019 gss_ctx_id_t ctx,
2020 mbuf_t mbp,
2021 size_t offset,
2022 size_t len,
2023 gss_buffer_t mic,
2024 gss_qop_t *qop)
2025 {
2026 crypto_ctx_t cctx = &ctx->gss_cryptor;
2027 uint8_t hashval[cctx->digest_size];
2028 gss_buffer_desc header;
2029 gss_buffer_desc hash;
2030 gss_1964_mic_token_desc mtok = mic_1964_token;
2031 int verf;
2032
2033 mtok.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
2034 mtok.Sign_Alg[1] = 0x00;
2035 hash.length = cctx->digest_size;
2036 hash.value = hashval;
2037 header.length = sizeof(gss_1964_mic_token_desc);
2038 header.value = &mtok;
2039
2040 if (qop) {
2041 *qop = GSS_C_QOP_DEFAULT;
2042 }
2043
2044 *minor = gss_krb5_3des_token_get(ctx, mic, mtok, &hash, NULL, NULL, 0);
2045 if (*minor) {
2046 return GSS_S_FAILURE;
2047 }
2048
2049 *minor = krb5_mic_mbuf(cctx, &header, mbp, offset, len, NULL, hashval, &verf, 0, 0);
2050 if (*minor) {
2051 return GSS_S_FAILURE;
2052 }
2053
2054 return verf ? GSS_S_COMPLETE : GSS_S_BAD_SIG;
2055 }
2056
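/*
 * Wrap an mbuf chain for the RFC 1964 (DES3) mechanism: prepend a
 * random confounder block, append 1-8 pad bytes (each holding the pad
 * length), checksum the result, build the wrap token, encrypt the
 * chain when confidentiality is requested, and prepend the token.
 */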
2057 uint32_t
2058 gss_krb5_3des_wrap_mbuf(uint32_t *minor,
2059 gss_ctx_id_t ctx,
2060 int conf_flag,
2061 gss_qop_t qop __unused,
2062 mbuf_t *mbp,
2063 size_t len,
2064 int *conf_state)
2065 {
2066 crypto_ctx_t cctx = &ctx->gss_cryptor;
2067 const struct ccmode_cbc *ccmode = cctx->enc_mode;
2068 uint8_t padlen;
2069 uint8_t pad[8];
2070 uint8_t confounder[ccmode->block_size];
2071 gss_1964_wrap_token_desc tokbody = wrap_1964_token;
2072 gss_buffer_desc header;
2073 gss_buffer_desc mic;
2074 gss_buffer_desc hash;
2075 uint8_t hashval[cctx->digest_size];
2076
2077 if (conf_state) {
2078 *conf_state = conf_flag;
2079 }
2080
2081 hash.length = cctx->digest_size;
2082 hash.value = hashval;
2083 tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
2084 tokbody.Sign_Alg[1] = 0x00;
2085 /* conf_flag ? lctx->key_data.lucid_protocol_u.data_1964.seal_alg : 0xffff */
2086 tokbody.Seal_Alg[0] = conf_flag ? 0x02 : 0xff;
2087 tokbody.Seal_Alg[1] = conf_flag ? 0x00 : 0xff;
2088 header.length = sizeof(gss_1964_wrap_token_desc);
2089 header.value = &tokbody;
2090
2091 /* Prepend confounder */
2092 assert(ccmode->block_size <= UINT_MAX);
2093 read_random(confounder, (u_int)ccmode->block_size);
2094 *minor = gss_prepend_mbuf(mbp, confounder, ccmode->block_size);
2095 if (*minor) {
2096 return GSS_S_FAILURE;
2097 }
2098
2099 /* Append trailer of up to 8 bytes and set pad length in each trailer byte */
2100 padlen = 8 - len % 8;
2101 for (int i = 0; i < padlen; i++) {
2102 pad[i] = padlen;
2103 }
2104 *minor = gss_append_mbuf(*mbp, pad, padlen);
2105 if (*minor) {
2106 return GSS_S_FAILURE;
2107 }
2108
2109 len += ccmode->block_size + padlen;
2110
2111 /* Hash the data */
2112 *minor = krb5_mic_mbuf(cctx, &header, *mbp, 0, len, NULL, hashval, NULL, 0, 0);
2113 if (*minor) {
2114 return GSS_S_FAILURE;
2115 }
2116
2117 /* Make the token */
2118 gss_krb5_3des_token_put(ctx, tokbody, &hash, len, &mic);
2119
2120 if (conf_flag) {
2121 *minor = krb5_crypt_mbuf(cctx, mbp, len, 1, 0);
2122 if (*minor) {
2123 return GSS_S_FAILURE;
2124 }
2125 }
2126
2127 *minor = gss_prepend_mbuf(mbp, mic.value, mic.length);
2128
2129 return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE;
2130 }
2131
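/*
 * Unwrap an mbuf chain for the RFC 1964 (DES3) mechanism: parse the
 * wrap token (trying the sealed form first, then the unsealed form),
 * strip the token, decrypt the chain if it was sealed, verify the
 * checksum, and remove the confounder and trailing pad bytes.
 */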
2132 uint32_t
2133 gss_krb5_3des_unwrap_mbuf(uint32_t *minor,
2134 gss_ctx_id_t ctx,
2135 mbuf_t *mbp,
2136 size_t len,
2137 int *conf_state,
2138 gss_qop_t *qop)
2139 {
2140 crypto_ctx_t cctx = &ctx->gss_cryptor;
2141 const struct ccmode_cbc *ccmode = cctx->dec_mode;
2142 size_t length = 0, offset = 0;
2143 gss_buffer_desc hash;
2144 uint8_t hashval[cctx->digest_size];
2145 gss_buffer_desc itoken;
2146 uint8_t tbuffer[GSS_KRB5_3DES_MAXTOKSZ + cctx->digest_size];
2147 itoken.length = GSS_KRB5_3DES_MAXTOKSZ + cctx->digest_size;
2148 itoken.value = tbuffer;
2149 gss_1964_wrap_token_desc wrap = wrap_1964_token;
2150 gss_buffer_desc header;
2151 uint8_t padlen;
2152 mbuf_t smb, tmb;
2153 int cflag, verified, reverse = 0;
2154
2155 if (len < GSS_KRB5_3DES_MAXTOKSZ) {
2156 *minor = EBADRPC;
2157 return GSS_S_FAILURE;
2158 }
2159
2160 if (*qop == GSS_C_QOP_REVERSE) {
2161 reverse = 1;
2162 }
2163 *qop = GSS_C_QOP_DEFAULT;
2164
2165 *minor = mbuf_copydata(*mbp, 0, itoken.length, itoken.value);
2166 if (*minor) {
2167 return GSS_S_FAILURE;
2168 }
2169
2170 hash.length = cctx->digest_size;
2171 hash.value = hashval;
2172 wrap.Sign_Alg[0] = 0x04;
2173 wrap.Sign_Alg[1] = 0x00;
2174 wrap.Seal_Alg[0] = 0x02;
2175 wrap.Seal_Alg[1] = 0x00;
2176
2177 for (cflag = 1; cflag >= 0; cflag--) {
2178 *minor = gss_krb5_3des_token_get(ctx, &itoken, wrap, &hash, &offset, &length, reverse);
2179 if (*minor == 0) {
2180 break;
2181 }
2182 wrap.Seal_Alg[0] = 0xff;
2183 wrap.Seal_Alg[1] = 0xff;
2184 }
2185 if (*minor) {
2186 return GSS_S_FAILURE;
2187 }
2188
2189 if (conf_state) {
2190 *conf_state = cflag;
2191 }
2192
2193 /*
2194 * Separate off the header
2195 */
2196 *minor = gss_normalize_mbuf(*mbp, offset, &length, &smb, &tmb, 0);
2197 if (*minor) {
2198 return GSS_S_FAILURE;
2199 }
2200
2201 assert(tmb == NULL);
2202
2203 /* Decrypt the chain if needed */
2204 if (cflag) {
2205 *minor = krb5_crypt_mbuf(cctx, &smb, length, 0, NULL);
2206 if (*minor) {
2207 return GSS_S_FAILURE;
2208 }
2209 }
2210
2211 /* Verify the mic */
2212 header.length = sizeof(gss_1964_wrap_token_desc);
2213 header.value = &wrap;
2214
2215 *minor = krb5_mic_mbuf(cctx, &header, smb, 0, length, NULL, hashval, &verified, 0, 0);
2216 if (*minor) {
2217 return GSS_S_FAILURE;
2218 }
2219 if (!verified) {
2220 return GSS_S_BAD_SIG;
2221 }
2222
2223 /* Get the pad length from the last trailer byte */
2224 *minor = mbuf_copydata(smb, length - 1, 1, &padlen);
2225 if (*minor) {
2226 return GSS_S_FAILURE;
2227 }
2228
2229 /* Strip the confounder and trailing pad bytes */
2230 gss_strip_mbuf(smb, -padlen);
2231 assert(ccmode->block_size <= INT_MAX);
2232 gss_strip_mbuf(smb, (int)ccmode->block_size);
2233
2234 if (*mbp != smb) {
2235 mbuf_freem(*mbp);
2236 *mbp = smb;
2237 }
2238
2239 return GSS_S_COMPLETE;
2240 }
2241
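/*
 * Printable name for an enctype, used in diagnostics.
 */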
2242 static const char *
2243 etype_name(etypes etype)
2244 {
2245 switch (etype) {
2246 case DES3_CBC_SHA1_KD:
2247 return "des3-cbc-sha1";
2248 case AES128_CTS_HMAC_SHA1_96:
2249 return "aes128-cts-hmac-sha1-96";
2250 case AES256_CTS_HMAC_SHA1_96:
2251 return "aes256-cts-hmac-sha1-96";
2252 default:
2253 return "unknown enctype";
2254 }
2255 }
2256
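/*
 * Check that the encryption type is one this implementation supports
 * for the given lucid protocol (RFC 1964 or RFC 4121); log and return
 * zero otherwise.
 */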
2257 static int
2258 supported_etype(uint32_t proto, etypes etype)
2259 {
2260 const char *proto_name;
2261
2262 switch (proto) {
2263 case 0:
2264 /* RFC 1964 */
2265 proto_name = "RFC 1964 krb5 gss mech";
2266 switch (etype) {
2267 case DES3_CBC_SHA1_KD:
2268 return 1;
2269 default:
2270 break;
2271 }
2272 break;
2273 case 1:
2274 /* RFC 4121 */
2275 proto_name = "RFC 4121 krb5 gss mech";
2276 switch (etype) {
2277 case AES256_CTS_HMAC_SHA1_96:
2278 case AES128_CTS_HMAC_SHA1_96:
2279 return 1;
2280 default:
2281 break;
2282 }
2283 break;
2284 default:
2285 proto_name = "Unknown krb5 gss mech";
2286 break;
2287 }
2288 printf("%s: Unsupported encryption type %s (%d) for protocol %s (%d)\n",
2289 __func__, etype_name(etype), etype, proto_name, proto);
2290 return 0;
2291 }
2292
2293 /*
2294 * Kerberos gss mech entry points
2295 */
2296 uint32_t
2297 gss_krb5_get_mic(uint32_t *minor, /* minor_status */
2298 gss_ctx_id_t ctx, /* context_handle */
2299 gss_qop_t qop, /* qop_req */
2300 gss_buffer_t mbp, /* message buffer */
2301 gss_buffer_t mic /* message_token */)
2302 {
2303 uint32_t minor_stat = 0;
2304
2305 if (minor == NULL) {
2306 minor = &minor_stat;
2307 }
2308 *minor = 0;
2309
2310 /* Validate context */
2311 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2312 return GSS_S_NO_CONTEXT;
2313 }
2314
2315 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2316 *minor = ENOTSUP;
2317 return GSS_S_FAILURE;
2318 }
2319
2320 switch (ctx->gss_lucid_ctx.key_data.proto) {
2321 case 0:
2322 /* RFC 1964 DES3 case */
2323 return gss_krb5_3des_get_mic(minor, ctx, qop, mbp, mic);
2324 case 1:
2325 /* RFC 4121 CFX case */
2326 return gss_krb5_cfx_get_mic(minor, ctx, qop, mbp, mic);
2327 }
2328
2329 return GSS_S_COMPLETE;
2330 }
2331
2332 uint32_t
2333 gss_krb5_verify_mic(uint32_t *minor, /* minor_status */
2334 gss_ctx_id_t ctx, /* context_handle */
2335 gss_buffer_t mbp, /* message_buffer */
2336 gss_buffer_t mic, /* message_token */
2337 gss_qop_t *qop /* qop_state */)
2338 {
2339 uint32_t minor_stat = 0;
2340 gss_qop_t qop_val = GSS_C_QOP_DEFAULT;
2341
2342 if (minor == NULL) {
2343 minor = &minor_stat;
2344 }
2345 if (qop == NULL) {
2346 qop = &qop_val;
2347 }
2348
2349 *minor = 0;
2350
2351 /* Validate context */
2352 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2353 return GSS_S_NO_CONTEXT;
2354 }
2355
2356 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2357 *minor = ENOTSUP;
2358 return GSS_S_FAILURE;
2359 }
2360
2361 switch (ctx->gss_lucid_ctx.key_data.proto) {
2362 case 0:
2363 /* RFC 1964 DES3 case */
2364 return gss_krb5_3des_verify_mic(minor, ctx, mbp, mic, qop);
2365 case 1:
2366 /* RFC 4121 CFX case */
2367 return gss_krb5_cfx_verify_mic(minor, ctx, mbp, mic, qop);
2368 }
2369 return GSS_S_COMPLETE;
2370 }
2371
2372 uint32_t
2373 gss_krb5_get_mic_mbuf(uint32_t *minor, /* minor_status */
2374 gss_ctx_id_t ctx, /* context_handle */
2375 gss_qop_t qop, /* qop_req */
2376 mbuf_t mbp, /* message mbuf */
2377 size_t offset, /* offset */
2378 size_t len, /* length */
2379 gss_buffer_t mic /* message_token */)
2380 {
2381 uint32_t minor_stat = 0;
2382
2383 if (minor == NULL) {
2384 minor = &minor_stat;
2385 }
2386 *minor = 0;
2387
2388 if (len == 0) {
2389 len = ~(size_t)0;
2390 }
2391
2392 /* Validate context */
2393 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2394 return GSS_S_NO_CONTEXT;
2395 }
2396
2397 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2398 *minor = ENOTSUP;
2399 return GSS_S_FAILURE;
2400 }
2401
2402 switch (ctx->gss_lucid_ctx.key_data.proto) {
2403 case 0:
2404 /* RFC 1964 DES3 case */
2405 return gss_krb5_3des_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic);
2406 case 1:
2407 /* RFC 4121 CFX case */
2408 return gss_krb5_cfx_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic);
2409 }
2410
2411 return GSS_S_COMPLETE;
2412 }
2413
2414 uint32_t
2415 gss_krb5_verify_mic_mbuf(uint32_t *minor, /* minor_status */
2416 gss_ctx_id_t ctx, /* context_handle */
2417 mbuf_t mbp, /* message_buffer */
2418 size_t offset, /* offset */
2419 size_t len, /* length */
2420 gss_buffer_t mic, /* message_token */
2421 gss_qop_t *qop /* qop_state */)
2422 {
2423 uint32_t minor_stat = 0;
2424 gss_qop_t qop_val = GSS_C_QOP_DEFAULT;
2425
2426 if (minor == NULL) {
2427 minor = &minor_stat;
2428 }
2429 if (qop == NULL) {
2430 qop = &qop_val;
2431 }
2432
2433 *minor = 0;
2434
2435 if (len == 0) {
2436 len = ~(size_t)0;
2437 }
2438
2439 /* Validate context */
2440 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2441 return GSS_S_NO_CONTEXT;
2442 }
2443
2444 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2445 *minor = ENOTSUP;
2446 return GSS_S_FAILURE;
2447 }
2448
2449 switch (ctx->gss_lucid_ctx.key_data.proto) {
2450 case 0:
2451 /* RFC 1964 DES3 case */
2452 return gss_krb5_3des_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop);
2453 case 1:
2454 /* RFC 4121 CFX case */
2455 return gss_krb5_cfx_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop);
2456 }
2457
2458 return GSS_S_COMPLETE;
2459 }
2460
2461 uint32_t
2462 gss_krb5_wrap_mbuf(uint32_t *minor, /* minor_status */
2463 gss_ctx_id_t ctx, /* context_handle */
2464 int conf_flag, /* conf_req_flag */
2465 gss_qop_t qop, /* qop_req */
2466 mbuf_t *mbp, /* input/output message_buffer */
2467 size_t offset, /* offset */
2468 size_t len, /* length */
2469 int *conf_state /* conf state */)
2470 {
2471 uint32_t major = GSS_S_FAILURE, minor_stat = 0;
2472 mbuf_t smb, tmb;
2473 int conf_val = 0;
2474
2475 if (minor == NULL) {
2476 minor = &minor_stat;
2477 }
2478 if (conf_state == NULL) {
2479 conf_state = &conf_val;
2480 }
2481
2482 *minor = 0;
2483
2484 /* Validate context */
2485 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2486 return GSS_S_NO_CONTEXT;
2487 }
2488
2489 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2490 *minor = ENOTSUP;
2491 return GSS_S_FAILURE;
2492 }
2493
2494 gss_normalize_mbuf(*mbp, offset, &len, &smb, &tmb, 0);
2495
2496 switch (ctx->gss_lucid_ctx.key_data.proto) {
2497 case 0:
2498 /* RFC 1964 DES3 case */
2499 major = gss_krb5_3des_wrap_mbuf(minor, ctx, conf_flag, qop, &smb, len, conf_state);
2500 break;
2501 case 1:
2502 /* RFC 4121 CFX case */
2503 major = gss_krb5_cfx_wrap_mbuf(minor, ctx, conf_flag, qop, &smb, len, conf_state);
2504 break;
2505 }
2506
2507 if (offset) {
2508 gss_join_mbuf(*mbp, smb, tmb);
2509 } else {
2510 *mbp = smb;
2511 gss_join_mbuf(smb, tmb, NULL);
2512 }
2513
2514 return major;
2515 }
2516
2517 uint32_t
2518 gss_krb5_unwrap_mbuf(uint32_t *minor, /* minor_status */
2519 gss_ctx_id_t ctx, /* context_handle */
2520 mbuf_t *mbp, /* input/output message_buffer */
2521 size_t offset, /* offset */
2522 size_t len, /* length */
2523 int *conf_flag, /* conf_state */
2524 gss_qop_t *qop /* qop state */)
2525 {
2526 uint32_t major = GSS_S_FAILURE, minor_stat = 0;
2527 gss_qop_t qop_val = GSS_C_QOP_DEFAULT;
2528 int conf_val = 0;
2529 mbuf_t smb, tmb;
2530
2531 if (minor == NULL) {
2532 minor = &minor_stat;
2533 }
2534 if (qop == NULL) {
2535 qop = &qop_val;
2536 }
2537 if (conf_flag == NULL) {
2538 conf_flag = &conf_val;
2539 }
2540
2541 /* Validate context */
2542 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2543 return GSS_S_NO_CONTEXT;
2544 }
2545
2546 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2547 *minor = ENOTSUP;
2548 return GSS_S_FAILURE;
2549 }
2550
2551 gss_normalize_mbuf(*mbp, offset, &len, &smb, &tmb, 0);
2552
2553 switch (ctx->gss_lucid_ctx.key_data.proto) {
2554 case 0:
2555 /* RFC 1964 DES3 case */
2556 major = gss_krb5_3des_unwrap_mbuf(minor, ctx, &smb, len, conf_flag, qop);
2557 break;
2558 case 1:
2559 /* RFC 4121 CFX case */
2560 major = gss_krb5_cfx_unwrap_mbuf(minor, ctx, &smb, len, conf_flag, qop);
2561 break;
2562 }
2563
2564 if (offset) {
2565 gss_join_mbuf(*mbp, smb, tmb);
2566 } else {
2567 *mbp = smb;
2568 gss_join_mbuf(smb, tmb, NULL);
2569 }
2570
2571 return major;
2572 }
2573
2574 #include <nfs/xdr_subs.h>
2575
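/*
 * Decode an XDR encoded lucid security context into lctx: version,
 * initiator flag, end time, sequence numbers, per-protocol data, and
 * the session key, whose length is validated against the declared
 * enctype.
 */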
2576 static int
2577 xdr_lucid_context(void *data, uint32_t length, lucid_context_t lctx)
2578 {
2579 struct xdrbuf xb;
2580 int error = 0;
2581 uint32_t keylen = 0;
2582
2583 xb_init_buffer(&xb, data, length);
2584 xb_get_32(error, &xb, lctx->vers);
2585 if (!error && lctx->vers != 1) {
2586 error = EINVAL;
2587 printf("%s: invalid version %d\n", __func__, (int)lctx->vers);
2588 goto out;
2589 }
2590 xb_get_32(error, &xb, lctx->initiate);
2591 if (error) {
2592 printf("%s: Could not decode initiate\n", __func__);
2593 goto out;
2594 }
2595 xb_get_32(error, &xb, lctx->endtime);
2596 if (error) {
2597 printf("%s: Could not decode endtime\n", __func__);
2598 goto out;
2599 }
2600 xb_get_64(error, &xb, lctx->send_seq);
2601 if (error) {
2602 printf("%s: Could not decode send_seq\n", __func__);
2603 goto out;
2604 }
2605 xb_get_64(error, &xb, lctx->recv_seq);
2606 if (error) {
2607 printf("%s: Could not decode recv_seq\n", __func__);
2608 goto out;
2609 }
2610 xb_get_32(error, &xb, lctx->key_data.proto);
2611 if (error) {
2612 printf("%s: Could not decode mech protocol\n", __func__);
2613 goto out;
2614 }
2615 switch (lctx->key_data.proto) {
2616 case 0:
2617 xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_1964.sign_alg);
2618 xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_1964.seal_alg);
2619 if (error) {
2620 printf("%s: Could not decode rfc1964 sign and seal\n", __func__);
2621 }
2622 break;
2623 case 1:
2624 xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey);
2625 if (error) {
2626 printf("%s: Could not decode rfc4121 acceptor_subkey\n", __func__);
2627 }
2628 break;
2629 default:
2630 printf("%s: Invalid mech protocol %d\n", __func__, (int)lctx->key_data.proto);
2631 error = EINVAL;
2632 }
2633 if (error) {
2634 goto out;
2635 }
2636 xb_get_32(error, &xb, lctx->ctx_key.etype);
2637 if (error) {
2638 printf("%s: Could not decode key enctype\n", __func__);
2639 goto out;
2640 }
2641 switch (lctx->ctx_key.etype) {
2642 case DES3_CBC_SHA1_KD:
2643 keylen = 24;
2644 break;
2645 case AES128_CTS_HMAC_SHA1_96:
2646 keylen = 16;
2647 break;
2648 case AES256_CTS_HMAC_SHA1_96:
2649 keylen = 32;
2650 break;
2651 default:
2652 error = ENOTSUP;
2653 goto out;
2654 }
2655 xb_get_32(error, &xb, lctx->ctx_key.key.key_len);
2656 if (error) {
2657 printf("%s: could not decode key length\n", __func__);
2658 goto out;
2659 }
2660 if (lctx->ctx_key.key.key_len != keylen) {
2661 error = EINVAL;
2662 printf("%s: etype = %d keylen = %d expected keylen = %d\n", __func__,
2663 lctx->ctx_key.etype, lctx->ctx_key.key.key_len, keylen);
2664 goto out;
2665 }
2666
2667 lctx->ctx_key.key.key_val = xb_malloc(keylen);
2668 if (lctx->ctx_key.key.key_val == NULL) {
2669 printf("%s: could not get memory for key\n", __func__);
2670 error = ENOMEM;
2671 goto out;
2672 }
2673 error = xb_get_bytes(&xb, (char *)lctx->ctx_key.key.key_val, keylen, 1);
2674 if (error) {
2675 printf("%s: could not get key value\n", __func__);
2676 xb_free(lctx->ctx_key.key.key_val);
2677 }
2678 out:
2679 return error;
2680 }
2681
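/*
 * Construct a kernel gss context from an XDR encoded lucid context and
 * set up its crypto state; returns NULL if the context cannot be
 * decoded or its enctype is not supported.
 */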
2682 gss_ctx_id_t
2683 gss_krb5_make_context(void *data, uint32_t datalen)
2684 {
2685 gss_ctx_id_t ctx;
2686
2687 if (!corecrypto_available()) {
2688 return NULL;
2689 }
2690
2691 gss_krb5_mech_init();
2692 MALLOC(ctx, gss_ctx_id_t, sizeof(struct gss_ctx_id_desc), M_TEMP, M_WAITOK | M_ZERO);
2693 if (xdr_lucid_context(data, datalen, &ctx->gss_lucid_ctx) ||
2694 !supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_lucid_ctx.ctx_key.etype)) {
2695 FREE(ctx, M_TEMP);
2696 FREE(data, M_TEMP);
2697 return NULL;
2698 }
2699
2700 /* Set up crypto context */
2701 gss_crypto_ctx_init(&ctx->gss_cryptor, &ctx->gss_lucid_ctx);
2702 FREE(data, M_TEMP);
2703
2704 return ctx;
2705 }
2706
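/*
 * Tear down a context created by gss_krb5_make_context(): release the
 * crypto state, free the session key, and clear and free the context.
 */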
2707 void
2708 gss_krb5_destroy_context(gss_ctx_id_t ctx)
2709 {
2710 if (ctx == NULL) {
2711 return;
2712 }
2713 gss_crypto_ctx_free(&ctx->gss_cryptor);
2714 FREE(ctx->gss_lucid_ctx.ctx_key.key.key_val, M_TEMP);
2715 cc_clear(sizeof(ctx->gss_lucid_ctx), &ctx->gss_lucid_ctx); /* clear the whole lucid context, not just a pointer's worth */
2716 FREE(ctx, M_TEMP);
2717 }