[apple/xnu.git] / bsd / nfs / gss / gss_krb5_mech.c (xnu-7195.101.1)
1 /*
2 * Copyright (c) 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (c) 1999 Kungliga Tekniska Högskolan
31 * (Royal Institute of Technology, Stockholm, Sweden).
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 *
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 *
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 *
45 * 3. Neither the name of KTH nor the names of its contributors may be
46 * used to endorse or promote products derived from this software without
47 * specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY KTH AND ITS CONTRIBUTORS ``AS IS'' AND ANY
50 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL KTH OR ITS CONTRIBUTORS BE
53 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
56 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
57 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
58 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
59 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 */
61
62 #include <stdint.h>
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/malloc.h>
67 #include <sys/kpi_mbuf.h>
68 #include <sys/random.h>
69 #include <mach_assert.h>
70 #include <kern/assert.h>
71 #include <libkern/OSAtomic.h>
72 #include "gss_krb5_mech.h"
73
74 LCK_GRP_DECLARE(gss_krb5_mech_grp, "gss_krb5_mech");
75
76 typedef struct crypt_walker_ctx {
77 size_t length;
78 const struct ccmode_cbc *ccmode;
79 cccbc_ctx *crypt_ctx;
80 cccbc_iv *iv;
81 } *crypt_walker_ctx_t;
82
83 typedef struct hmac_walker_ctx {
84 const struct ccdigest_info *di;
85 struct cchmac_ctx *hmac_ctx;
86 } *hmac_walker_ctx_t;
87
88 typedef size_t (*ccpad_func)(const struct ccmode_cbc *, cccbc_ctx *, cccbc_iv *,
89 size_t nbytes, const void *, void *);
90
91 static int krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size);
92
93 size_t gss_mbuf_len(mbuf_t, size_t);
94 errno_t gss_prepend_mbuf(mbuf_t *, uint8_t *, size_t);
95 errno_t gss_append_mbuf(mbuf_t, uint8_t *, size_t);
96 errno_t gss_strip_mbuf(mbuf_t, int);
97 int mbuf_walk(mbuf_t, size_t, size_t, size_t, int (*)(void *, uint8_t *, size_t), void *);
98
99 void do_crypt_init(crypt_walker_ctx_t, int, crypto_ctx_t, cccbc_ctx *);
100 int do_crypt(void *, uint8_t *, size_t);
101 void do_hmac_init(hmac_walker_ctx_t, crypto_ctx_t, void *);
102 int do_hmac(void *, uint8_t *, size_t);
103
104 void krb5_make_usage(uint32_t, uint8_t, uint8_t[KRB5_USAGE_LEN]);
105 void krb5_key_derivation(crypto_ctx_t, const void *, size_t, void **, size_t);
106 void cc_key_schedule_create(crypto_ctx_t);
107 void gss_crypto_ctx_free(crypto_ctx_t);
108 int gss_crypto_ctx_init(struct crypto_ctx *, lucid_context_t);
109
110 errno_t krb5_crypt_mbuf(crypto_ctx_t, mbuf_t *, size_t, int, cccbc_ctx *);
111 int krb5_mic(crypto_ctx_t, gss_buffer_t, gss_buffer_t, gss_buffer_t, uint8_t *, int *, int, int);
112 int krb5_mic_mbuf(crypto_ctx_t, gss_buffer_t, mbuf_t, size_t, size_t, gss_buffer_t, uint8_t *, int *, int, int);
113
114 uint32_t gss_krb5_cfx_get_mic(uint32_t *, gss_ctx_id_t, gss_qop_t, gss_buffer_t, gss_buffer_t);
115 uint32_t gss_krb5_cfx_verify_mic(uint32_t *, gss_ctx_id_t, gss_buffer_t, gss_buffer_t, gss_qop_t *);
116 uint32_t gss_krb5_cfx_get_mic_mbuf(uint32_t *, gss_ctx_id_t, gss_qop_t, mbuf_t, size_t, size_t, gss_buffer_t);
117 uint32_t gss_krb5_cfx_verify_mic_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t, size_t, size_t, gss_buffer_t, gss_qop_t *);
118 errno_t krb5_cfx_crypt_mbuf(crypto_ctx_t, mbuf_t *, size_t *, int, int);
119 uint32_t gss_krb5_cfx_wrap_mbuf(uint32_t *, gss_ctx_id_t, int, gss_qop_t, mbuf_t *, size_t, int *);
120 uint32_t gss_krb5_cfx_unwrap_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t *, size_t, int *, gss_qop_t *);
121
122 int gss_krb5_mech_is_initialized(void);
123 void gss_krb5_mech_init(void);
124
125 /* Debugging routines */
126 void
127 printmbuf(const char *str, mbuf_t mb, uint32_t offset, uint32_t len)
128 {
129 size_t i;
130 int cout = 1;
131
132 len = len ? len : ~0;
133 printf("%s mbuf = %p offset = %d len = %d:\n", str ? str : "mbuf", mb, offset, len);
134 for (; mb && len; mb = mbuf_next(mb)) {
135 if (offset >= mbuf_len(mb)) {
136 offset -= mbuf_len(mb);
137 continue;
138 }
139 for (i = offset; len && i < mbuf_len(mb); i++) {
140 const char *s = (cout % 8) ? " " : (cout % 16) ? " " : "\n";
141 printf("%02x%s", ((uint8_t *)mbuf_data(mb))[i], s);
142 len--;
143 cout++;
144 }
145 offset = 0;
146 }
147 if ((cout - 1) % 16) {
148 printf("\n");
149 }
150 printf("Count chars %d\n", cout - 1);
151 }
152
153 void
154 printgbuf(const char *str, gss_buffer_t buf)
155 {
156 size_t i;
157 size_t len = buf->length > 128 ? 128 : buf->length;
158
159 printf("%s: len = %d value = %p\n", str ? str : "buffer", (int)buf->length, buf->value);
160 for (i = 0; i < len; i++) {
161 const char *s = ((i + 1) % 8) ? " " : ((i + 1) % 16) ? " " : "\n";
162 printf("%02x%s", ((uint8_t *)buf->value)[i], s);
163 }
164 if (i % 16) {
165 printf("\n");
166 }
167 }
168
169 /*
170 * Initialize the data structures for the gss kerberos mech.
171 */
172 #define GSS_KRB5_NOT_INITIALIZED 0
173 #define GSS_KRB5_INITIALIZING 1
174 #define GSS_KRB5_INITIALIZED 2
175 static volatile uint32_t gss_krb5_mech_initted = GSS_KRB5_NOT_INITIALIZED;
176
177 int
178 gss_krb5_mech_is_initialized(void)
179 {
180      return gss_krb5_mech_initted == GSS_KRB5_INITIALIZED;
181 }
182
183 void
184 gss_krb5_mech_init(void)
185 {
186 extern void IOSleep(int);
187
188 /* Once initted always initted */
189 if (gss_krb5_mech_initted == GSS_KRB5_INITIALIZED) {
190 return;
191 }
192
193 /* make sure we init only once */
194 if (!OSCompareAndSwap(GSS_KRB5_NOT_INITIALIZED, GSS_KRB5_INITIALIZING, &gss_krb5_mech_initted)) {
195 /* wait until initialization is complete */
196 while (!gss_krb5_mech_is_initialized()) {
197 IOSleep(10);
198 }
199 return;
200 }
201 gss_krb5_mech_initted = GSS_KRB5_INITIALIZED;
202 }
203
204 uint32_t
205 gss_release_buffer(uint32_t *minor, gss_buffer_t buf)
206 {
207 if (minor) {
208 *minor = 0;
209 }
210 if (buf->value) {
211 FREE(buf->value, M_TEMP);
212 }
213 buf->value = NULL;
214 buf->length = 0;
215 return GSS_S_COMPLETE;
216 }
217
218 /*
219 * GSS mbuf routines
220 */
221
222 size_t
223 gss_mbuf_len(mbuf_t mb, size_t offset)
224 {
225 size_t len;
226
227 for (len = 0; mb; mb = mbuf_next(mb)) {
228 len += mbuf_len(mb);
229 }
230 return (offset > len) ? 0 : len - offset;
231 }
232
233 /*
234 * Split an mbuf in a chain into two mbufs such that the original mbuf
235 * holds the data up to the given offset and the new mbuf points to the
236 * rest of the chain. The first mbuf's length is the first offset bytes
237 * and the second mbuf contains the remaining bytes. If offset is zero
238 * or equals mbuf_len(mb) then don't create a new mbuf; we are already
239 * at an mbuf boundary. Return the mbuf that starts at the offset.
240 */
241 static errno_t
242 split_one_mbuf(mbuf_t mb, size_t offset, mbuf_t *nmb, int join)
243 {
244 errno_t error;
245
246 *nmb = mb;
247      /* We don't have an mbuf or we're already on an mbuf boundary */
248 if (mb == NULL || offset == 0) {
249 return 0;
250 }
251
252 /* If the mbuf length is offset then the next mbuf is the one we want */
253 if (mbuf_len(mb) == offset) {
254 *nmb = mbuf_next(mb);
255 if (!join) {
256 mbuf_setnext(mb, NULL);
257 }
258 return 0;
259 }
260
261 if (offset > mbuf_len(mb)) {
262 return EINVAL;
263 }
264
265 error = mbuf_split(mb, offset, MBUF_WAITOK, nmb);
266 if (error) {
267 return error;
268 }
269
270 if (mbuf_flags(*nmb) & MBUF_PKTHDR) {
271 /* We don't want to copy the pkthdr. mbuf_split does that. */
272 error = mbuf_setflags_mask(*nmb, ~MBUF_PKTHDR, MBUF_PKTHDR);
273 }
274
275 if (join) {
276 /* Join the chain again */
277 mbuf_setnext(mb, *nmb);
278 }
279
280 return 0;
281 }
282
283 /*
284 * Given an mbuf chain with an offset and length, rearrange the chain so
285 * that offset and offset + *subchain_length land on mbuf boundaries. If
286 * *subchain_length is greater than the length of the chain after offset,
287 * return the smaller length in *subchain_length. The mbuf sub chain
288 * starting at offset is returned in *subchain. If an error occurs return
289 * the corresponding errno. Note if there are fewer than offset bytes then
290 * subchain will be set to NULL and *subchain_length will be set to
291 * zero. If *subchain_length is 0, then set it to the length of the
292 * chain starting at offset. The join parameter indicates whether the
293 * mbuf chain will be joined again as one chain, just rearranged so
294 * that offset and subchain_length are on mbuf boundaries.
295 */
296
297 errno_t
298 gss_normalize_mbuf(mbuf_t chain, size_t offset, size_t *subchain_length, mbuf_t *subchain, mbuf_t *tail, int join)
299 {
300 size_t length = *subchain_length ? *subchain_length : ~0;
301 size_t len;
302 mbuf_t mb, nmb;
303 errno_t error;
304
305 if (tail == NULL) {
306 tail = &nmb;
307 }
308 *tail = NULL;
309 *subchain = NULL;
310
311 for (len = offset, mb = chain; mb && len > mbuf_len(mb); mb = mbuf_next(mb)) {
312 len -= mbuf_len(mb);
313 }
314
315 /* if we don't have offset bytes just return */
316 if (mb == NULL) {
317 return 0;
318 }
319
320 error = split_one_mbuf(mb, len, subchain, join);
321 if (error) {
322 return error;
323 }
324
325 assert(subchain != NULL && *subchain != NULL);
326 assert(offset == 0 ? mb == *subchain : 1);
327
328 len = gss_mbuf_len(*subchain, 0);
329 length = (length > len) ? len : length;
330 *subchain_length = length;
331
332 for (len = length, mb = *subchain; mb && len > mbuf_len(mb); mb = mbuf_next(mb)) {
333 len -= mbuf_len(mb);
334 }
335
336 error = split_one_mbuf(mb, len, tail, join);
337
338 return error;
339 }
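
/*
 * Illustrative sketch (not part of the original file), mirroring the use in
 * krb5_crypt_mbuf() below: split off the first plen bytes of a chain so the
 * trailing ciphertext-stealing bytes can be handled separately.
 *
 *	mbuf_t mb, lmb;
 *	size_t plen = len - cts_len;
 *	gss_normalize_mbuf(*mbp, 0, &plen, &mb, &lmb, 0);
 *	// *mbp (== mb) now holds exactly plen bytes; lmb holds the rest
 */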
340
341 mbuf_t
342 gss_join_mbuf(mbuf_t head, mbuf_t body, mbuf_t tail)
343 {
344 mbuf_t mb;
345
346 for (mb = head; mb && mbuf_next(mb); mb = mbuf_next(mb)) {
347 ;
348 }
349 if (mb) {
350 mbuf_setnext(mb, body);
351 }
352 for (mb = body; mb && mbuf_next(mb); mb = mbuf_next(mb)) {
353 ;
354 }
355 if (mb) {
356 mbuf_setnext(mb, tail);
357 }
358 mb = head ? head : (body ? body : tail);
359 return mb;
360 }
361
362 /*
363 * Prepend size bytes to the mbuf chain.
364 */
365 errno_t
366 gss_prepend_mbuf(mbuf_t *chain, uint8_t *bytes, size_t size)
367 {
368 uint8_t *data = mbuf_data(*chain);
369 size_t leading = mbuf_leadingspace(*chain);
370 size_t trailing = mbuf_trailingspace(*chain);
371 size_t mlen = mbuf_len(*chain);
372 errno_t error;
373
374 if (size > leading && size <= leading + trailing) {
375 data = memmove(data + size - leading, data, mlen);
376 mbuf_setdata(*chain, data, mlen);
377 }
378
379 error = mbuf_prepend(chain, size, MBUF_WAITOK);
380 if (error) {
381 return error;
382 }
383 data = mbuf_data(*chain);
384 memcpy(data, bytes, size);
385
386 return 0;
387 }
388
389 errno_t
390 gss_append_mbuf(mbuf_t chain, uint8_t *bytes, size_t size)
391 {
392 size_t len = 0;
393 mbuf_t mb;
394
395 if (chain == NULL) {
396 return EINVAL;
397 }
398
399 for (mb = chain; mb; mb = mbuf_next(mb)) {
400 len += mbuf_len(mb);
401 }
402
403 return mbuf_copyback(chain, len, size, bytes, MBUF_WAITOK);
404 }
405
406 errno_t
407 gss_strip_mbuf(mbuf_t chain, int size)
408 {
409 if (chain == NULL) {
410 return EINVAL;
411 }
412
413 mbuf_adj(chain, size);
414
415 return 0;
416 }
417
418
419 /*
420 * Kerberos mech generic crypto support for mbufs
421 */
422
423 /*
424 * Walk the mbuf chain after the given offset, calling the passed-in crypto
425 * function for len bytes. Note that len should be a multiple of the blocksize
426 * and there should be at least len bytes available after the offset in the
427 * mbuf chain. Padding should be done before calling this routine.
428 */
429 int
430 mbuf_walk(mbuf_t mbp, size_t offset, size_t len, size_t blocksize, int (*crypto_fn)(void *, uint8_t *data, size_t length), void *ctx)
431 {
432 mbuf_t mb;
433 size_t mlen, residue;
434 uint8_t *ptr;
435 int error = 0;
436
437 /* Move to the start of the chain */
438 for (mb = mbp; mb && len > 0; mb = mbuf_next(mb)) {
439 ptr = mbuf_data(mb);
440 mlen = mbuf_len(mb);
441 if (offset >= mlen) {
442 /* Offset not yet reached */
443 offset -= mlen;
444 continue;
445 }
446 /* Found starting point in chain */
447 ptr += offset;
448 mlen -= offset;
449 offset = 0;
450
451 /*
452 * Handle the data in this mbuf. If the length left to
453 * walk is less than the data in the mbuf, limit the
454 * amount processed from this mbuf to the length left.
455 */
456 mlen = mlen < len ? mlen : len;
457 /* Figure out how much is a multiple of blocksize */
458 residue = mlen % blocksize;
459 /* And adjust mlen to be the largest multiple of blocksize */
460 mlen -= residue;
461 /* run our hash/encrypt/decrypt function */
462 if (mlen > 0) {
463 error = crypto_fn(ctx, ptr, mlen);
464 if (error) {
465 break;
466 }
467 ptr += mlen;
468 len -= mlen;
469 }
470 /*
471 * If we have a residue then, to get a full block for our crypto
472 * function, we need to copy the residue into our blocksize
473 * block and use the next mbuf to get the rest of the data for
474 * the block. N.B. We generally assume that from the offset
475 * passed in, the total length, len, is a multiple of
476 * blocksize and that there are at least len bytes in the chain
477 * from the offset. We also assume there are at least (blocksize
478 * - residue) bytes of data in any next mbuf for residue > 0. If not
479 * we attempt to pull up bytes from down the chain.
480 */
481 if (residue) {
482 mbuf_t nmb = mbuf_next(mb);
483 uint8_t *nptr = NULL, block[blocksize];
484
485 assert(nmb);
486 len -= residue;
487 offset = blocksize - residue;
488 if (len < offset) {
489 offset = len;
490 /*
491 * We don't have enough bytes so zero the block
492 * so that any trailing bytes will be zero.
493 */
494 cc_clear(sizeof(block), block);
495 }
496 memcpy(block, ptr, residue);
497 if (len && nmb) {
498 mlen = mbuf_len(nmb);
499 if (mlen < offset) {
500 error = mbuf_pullup(&nmb, offset - mlen);
501 if (error) {
502 mbuf_setnext(mb, NULL);
503 return error;
504 }
505 }
506 nptr = mbuf_data(nmb);
507 memcpy(block + residue, nptr, offset);
508 }
509 len -= offset;
510 error = crypto_fn(ctx, block, sizeof(block));
511 if (error) {
512 break;
513 }
514 memcpy(ptr, block, residue);
515 if (nptr) {
516 memcpy(nptr, block + residue, offset);
517 }
518 }
519 }
520
521 return error;
522 }
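
/*
 * Illustrative sketch (not part of the original file): the walker callbacks
 * below are driven through mbuf_walk(). Hashing uses a blocksize of 1, so
 * any run of bytes is acceptable; CBC encryption uses the cipher block size,
 * so mbuf_walk() stitches partial blocks together across mbuf boundaries.
 *
 *	struct hmac_walker_ctx hwctx;
 *	do_hmac_init(&hwctx, cctx, key);
 *	error = mbuf_walk(mbp, offset, len, 1, do_hmac, &hwctx);
 *
 *	struct crypt_walker_ctx cwctx;
 *	do_crypt_init(&cwctx, 1, cctx, ks);
 *	error = mbuf_walk(mbp, 0, plen, blocksize, do_crypt, &cwctx);
 */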
523
524 void
525 do_crypt_init(crypt_walker_ctx_t wctx, int encrypt, crypto_ctx_t cctx, cccbc_ctx *ks)
526 {
527 memset(wctx, 0, sizeof(*wctx));
528 wctx->length = 0;
529 wctx->ccmode = encrypt ? cctx->enc_mode : cctx->dec_mode;
530 wctx->crypt_ctx = ks;
531 MALLOC(wctx->iv, cccbc_iv *, wctx->ccmode->block_size, M_TEMP, M_WAITOK | M_ZERO);
532 cccbc_set_iv(wctx->ccmode, wctx->iv, NULL);
533 }
534
535 int
536 do_crypt(void *walker, uint8_t *data, size_t len)
537 {
538 struct crypt_walker_ctx *wctx = (crypt_walker_ctx_t)walker;
539 size_t nblocks;
540
541 nblocks = len / wctx->ccmode->block_size;
542 assert(len % wctx->ccmode->block_size == 0);
543 cccbc_update(wctx->ccmode, wctx->crypt_ctx, wctx->iv, nblocks, data, data);
544 wctx->length += len;
545
546 return 0;
547 }
548
549 void
550 do_hmac_init(hmac_walker_ctx_t wctx, crypto_ctx_t cctx, void *key)
551 {
552 size_t alloc_size = cchmac_di_size(cctx->di);
553
554 wctx->di = cctx->di;
555 MALLOC(wctx->hmac_ctx, struct cchmac_ctx *, alloc_size, M_TEMP, M_WAITOK | M_ZERO);
556 cchmac_init(cctx->di, wctx->hmac_ctx, cctx->keylen, key);
557 }
558
559 int
560 do_hmac(void *walker, uint8_t *data, size_t len)
561 {
562 hmac_walker_ctx_t wctx = (hmac_walker_ctx_t)walker;
563
564 cchmac_update(wctx->di, wctx->hmac_ctx, len, data);
565
566 return 0;
567 }
568
569
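/*
 * Compute (or, when verify is non-NULL, check) an HMAC over an optional
 * header buffer, the message body, and an optional trailer buffer. When
 * ikey is set, the integrity subkey from the key schedule is used instead
 * of the checksum key; reverse swaps the send/receive key selection (used
 * when a peer's token has to be reconstructed).
 */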
570 int
571 krb5_mic(crypto_ctx_t ctx, gss_buffer_t header, gss_buffer_t bp, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse)
572 {
573 uint8_t digest[ctx->di->output_size];
574 cchmac_di_decl(ctx->di, hmac_ctx);
575 int kdx = (verify == NULL) ? (reverse ? GSS_RCV : GSS_SND) : (reverse ? GSS_SND : GSS_RCV);
576 void *key2use;
577
578 if (ikey) {
579 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
580 lck_mtx_lock(&ctx->lock);
581 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
582 cc_key_schedule_create(ctx);
583 }
584 ctx->flags |= CRYPTO_KS_ALLOCED;
585 lck_mtx_unlock(&ctx->lock);
586 }
587 key2use = ctx->ks.ikey[kdx];
588 } else {
589 key2use = ctx->ckey[kdx];
590 }
591
592 cchmac_init(ctx->di, hmac_ctx, ctx->keylen, key2use);
593
594 if (header) {
595 cchmac_update(ctx->di, hmac_ctx, header->length, header->value);
596 }
597
598 cchmac_update(ctx->di, hmac_ctx, bp->length, bp->value);
599
600 if (trailer) {
601 cchmac_update(ctx->di, hmac_ctx, trailer->length, trailer->value);
602 }
603
604 cchmac_final(ctx->di, hmac_ctx, digest);
605
606 if (verify) {
607 *verify = (memcmp(mic, digest, ctx->digest_size) == 0);
608 } else {
609 memcpy(mic, digest, ctx->digest_size);
610 }
611
612 return 0;
613 }
614
615 int
616 krb5_mic_mbuf(crypto_ctx_t ctx, gss_buffer_t header,
617 mbuf_t mbp, size_t offset, size_t len, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse)
618 {
619 struct hmac_walker_ctx wctx;
620 uint8_t digest[ctx->di->output_size];
621 int error;
622 int kdx = (verify == NULL) ? (reverse ? GSS_RCV : GSS_SND) : (reverse ? GSS_SND : GSS_RCV);
623 void *key2use;
624
625 if (ikey) {
626 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
627 lck_mtx_lock(&ctx->lock);
628 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
629 cc_key_schedule_create(ctx);
630 }
631 ctx->flags |= CRYPTO_KS_ALLOCED;
632 lck_mtx_unlock(&ctx->lock);
633 }
634 key2use = ctx->ks.ikey[kdx];
635 } else {
636 key2use = ctx->ckey[kdx];
637 }
638
639 do_hmac_init(&wctx, ctx, key2use);
640
641 if (header) {
642 cchmac_update(ctx->di, wctx.hmac_ctx, header->length, header->value);
643 }
644
645 error = mbuf_walk(mbp, offset, len, 1, do_hmac, &wctx);
646
647 if (error) {
648 return error;
649 }
650 if (trailer) {
651 cchmac_update(ctx->di, wctx.hmac_ctx, trailer->length, trailer->value);
652 }
653
654 cchmac_final(ctx->di, wctx.hmac_ctx, digest);
655 FREE(wctx.hmac_ctx, M_TEMP);
656
657 if (verify) {
658 *verify = (memcmp(mic, digest, ctx->digest_size) == 0);
659 if (!*verify) {
660 return EBADRPC;
661 }
662 } else {
663 memcpy(mic, digest, ctx->digest_size);
664 }
665
666 return 0;
667 }
668
669 errno_t
670 /* __attribute__((optnone)) */
671 krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t len, int encrypt, cccbc_ctx *ks)
672 {
673 struct crypt_walker_ctx wctx;
674 const struct ccmode_cbc *ccmode = encrypt ? ctx->enc_mode : ctx->dec_mode;
675 size_t plen = len;
676 size_t cts_len = 0;
677 mbuf_t mb, lmb = NULL;
678 int error;
679
680 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
681 lck_mtx_lock(&ctx->lock);
682 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
683 cc_key_schedule_create(ctx);
684 }
685 ctx->flags |= CRYPTO_KS_ALLOCED;
686 lck_mtx_unlock(&ctx->lock);
687 }
688 if (!ks) {
689 ks = encrypt ? ctx->ks.enc : ctx->ks.dec;
690 }
691
692 if ((ctx->flags & CRYPTO_CTS_ENABLE) && ctx->mpad == 1) {
693 uint8_t block[ccmode->block_size];
694 /* if the length is less than or equal to a blocksize. We just encrypt the block */
695 if (len <= ccmode->block_size) {
696 if (len < ccmode->block_size) {
697 memset(block, 0, sizeof(block));
698 gss_append_mbuf(*mbp, block, ccmode->block_size);
699 }
700 plen = ccmode->block_size;
701 } else {
702 /* determine where the last two blocks are */
703 size_t r = len % ccmode->block_size;
704
705 cts_len = r ? r + ccmode->block_size : 2 * ccmode->block_size;
706 plen = len - cts_len;
707 /* If plen is 0 we only have two blocks to crypt with ccpad below */
708 if (plen == 0) {
709 lmb = *mbp;
710 } else {
711 gss_normalize_mbuf(*mbp, 0, &plen, &mb, &lmb, 0);
712 assert(*mbp == mb);
713 assert(plen == len - cts_len);
714 assert(gss_mbuf_len(mb, 0) == plen);
715 assert(gss_mbuf_len(lmb, 0) == cts_len);
716 }
717 }
718 } else if (len % ctx->mpad) {
719 uint8_t pad_block[ctx->mpad];
720 size_t padlen = ctx->mpad - (len % ctx->mpad);
721
722 memset(pad_block, 0, padlen);
723 error = gss_append_mbuf(*mbp, pad_block, padlen);
724 if (error) {
725 return error;
726 }
727 plen = len + padlen;
728 }
729 do_crypt_init(&wctx, encrypt, ctx, ks);
730 if (plen) {
731 error = mbuf_walk(*mbp, 0, plen, ccmode->block_size, do_crypt, &wctx);
732 if (error) {
733 return error;
734 }
735 }
736
737 if ((ctx->flags & CRYPTO_CTS_ENABLE) && cts_len) {
738 uint8_t cts_pad[2 * ccmode->block_size];
739 ccpad_func do_ccpad = encrypt ? ccpad_cts3_encrypt : ccpad_cts3_decrypt;
740
741 assert(cts_len <= 2 * ccmode->block_size && cts_len > ccmode->block_size);
742 memset(cts_pad, 0, sizeof(cts_pad));
743 mbuf_copydata(lmb, 0, cts_len, cts_pad);
744 mbuf_freem(lmb);
745 do_ccpad(ccmode, wctx.crypt_ctx, wctx.iv, cts_len, cts_pad, cts_pad);
746 gss_append_mbuf(*mbp, cts_pad, cts_len);
747 }
748 FREE(wctx.iv, M_TEMP);
749
750 return 0;
751 }
752
753 /*
754 * Key derivation routines
755 */
756
757 static int
758 rr13(unsigned char *buf, size_t len)
759 {
760 size_t bytes = (len + 7) / 8;
761 unsigned char tmp[bytes];
762 size_t i;
763
764 if (len == 0) {
765 return 0;
766 }
767
768 {
769 const int bits = 13 % len;
770 const int lbit = len % 8;
771
772 memcpy(tmp, buf, bytes);
773 if (lbit) {
774      /* pad final byte with initial bits */
775 tmp[bytes - 1] &= 0xff << (8 - lbit);
776 for (i = lbit; i < 8; i += len) {
777 tmp[bytes - 1] |= buf[0] >> i;
778 }
779 }
780 for (i = 0; i < bytes; i++) {
781 ssize_t bb;
782 ssize_t b1, s1, b2, s2;
783
784 /* calculate first bit position of this byte */
785 bb = 8 * i - bits;
786 while (bb < 0) {
787 bb += len;
788 }
789 /* byte offset and shift count */
790 b1 = bb / 8;
791 s1 = bb % 8;
792 if ((size_t)bb + 8 > bytes * 8) {
793 /* watch for wraparound */
794 s2 = (len + 8 - s1) % 8;
795 } else {
796 s2 = 8 - s1;
797 }
798 b2 = (b1 + 1) % bytes;
799 buf[i] = 0xff & ((tmp[b1] << s1) | (tmp[b2] >> s2));
800 }
801 }
802 return 0;
803 }
804
805
806 /* Add `b' to `a', both being one's complement numbers. */
807 static void
808 add1(unsigned char *a, unsigned char *b, size_t len)
809 {
810 ssize_t i;
811 int carry = 0;
812
813 for (i = len - 1; i >= 0; i--) {
814 int x = a[i] + b[i] + carry;
815 carry = x > 0xff;
816 a[i] = x & 0xff;
817 }
818 for (i = len - 1; carry && i >= 0; i--) {
819 int x = a[i] + carry;
820 carry = x > 0xff;
821 a[i] = x & 0xff;
822 }
823 }
824
825
826 static int
827 krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size)
828 {
829 /* if len < size we need at most N * len bytes, ie < 2 * size;
830 * if len > size we need at most 2 * len */
831 int ret = 0;
832 size_t maxlen = 2 * lmax(size, len);
833 size_t l = 0;
834 unsigned char tmp[maxlen];
835 unsigned char buf[len];
836
837 memcpy(buf, instr, len);
838 memset(foldstr, 0, size);
839 do {
840 memcpy(tmp + l, buf, len);
841 l += len;
842 ret = rr13(buf, len * 8);
843 if (ret) {
844 goto out;
845 }
846 while (l >= size) {
847 add1(foldstr, tmp, size);
848 l -= size;
849 if (l == 0) {
850 break;
851 }
852 memmove(tmp, tmp + size, l);
853 }
854 } while (l != 0);
855 out:
856
857 return ret;
858 }
859
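/*
 * Build the 5-byte key usage constant used for key derivation: the usage
 * number as a 32-bit big-endian integer followed by a one-byte suffix
 * (0xAA for encryption keys, 0x55 for integrity keys, 0x99 for checksum
 * keys). For example, usage number 23 with suffix 0x99 encodes as
 * 00 00 00 17 99.
 */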
860 void
861 krb5_make_usage(uint32_t usage_no, uint8_t suffix, uint8_t usage_string[KRB5_USAGE_LEN])
862 {
863 uint32_t i;
864
865 for (i = 0; i < 4; i++) {
866 usage_string[i] = ((usage_no >> 8 * (3 - i)) & 0xff);
867 }
868 usage_string[i] = suffix;
869 }
870
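/*
 * RFC 3961 simplified-profile key derivation, roughly
 *
 *	DR(key, constant) = k-truncate(E(key, n-fold(constant)))
 *
 * The constant is n-folded to one cipher block, then encrypted repeatedly
 * (each output block fed back in as the next input, with a zero IV) until
 * dklen bytes of keying material have been produced.
 */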
871 void
872 krb5_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, void **dkey, size_t dklen)
873 {
874 size_t blocksize = ctx->enc_mode->block_size;
875 cccbc_iv_decl(blocksize, iv);
876 cccbc_ctx_decl(ctx->enc_mode->size, enc_ctx);
877 size_t ksize = 8 * dklen;
878 size_t nblocks = (ksize + 8 * blocksize - 1) / (8 * blocksize);
879 uint8_t *dkptr;
880 uint8_t block[blocksize];
881
882 MALLOC(*dkey, void *, nblocks * blocksize, M_TEMP, M_WAITOK | M_ZERO);
883 dkptr = *dkey;
884
885 krb5_n_fold(cons, conslen, block, blocksize);
886 cccbc_init(ctx->enc_mode, enc_ctx, ctx->keylen, ctx->key);
887 for (size_t i = 0; i < nblocks; i++) {
888 cccbc_set_iv(ctx->enc_mode, iv, NULL);
889 cccbc_update(ctx->enc_mode, enc_ctx, iv, 1, block, block);
890 memcpy(dkptr, block, blocksize);
891 dkptr += blocksize;
892 }
893 }
894
895 static void
896 des_make_key(const uint8_t rawkey[7], uint8_t deskey[8])
897 {
898 uint8_t val = 0;
899
900 memcpy(deskey, rawkey, 7);
901 for (int i = 0; i < 7; i++) {
902 val |= ((deskey[i] & 1) << (i + 1));
903 }
904 deskey[7] = val;
905 ccdes_key_set_odd_parity(deskey, 8);
906 }
907
908 static void
909 krb5_3des_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, void **des3key)
910 {
911 const struct ccmode_cbc *cbcmode = ctx->enc_mode;
912 void *rawkey;
913 uint8_t *kptr, *rptr;
914
915 MALLOC(*des3key, void *, 3 * cbcmode->block_size, M_TEMP, M_WAITOK | M_ZERO);
916 krb5_key_derivation(ctx, cons, conslen, &rawkey, 3 * (cbcmode->block_size - 1));
917 kptr = (uint8_t *)*des3key;
918 rptr = (uint8_t *)rawkey;
919
920 for (int i = 0; i < 3; i++) {
921 des_make_key(rptr, kptr);
922 rptr += cbcmode->block_size - 1;
923 kptr += cbcmode->block_size;
924 }
925
926 cc_clear(3 * (cbcmode->block_size - 1), rawkey);
927 FREE(rawkey, M_TEMP);
928 }
929
930 /*
931 * Create a key schedule
932 *
933 */
934 void
935 cc_key_schedule_create(crypto_ctx_t ctx)
936 {
937 uint8_t usage_string[KRB5_USAGE_LEN];
938 lucid_context_t lctx = ctx->gss_ctx;
939 void *ekey;
940
941 switch (lctx->key_data.proto) {
942 case 0: {
943 if (ctx->ks.enc == NULL) {
944 MALLOC(ctx->ks.enc, cccbc_ctx *, ctx->enc_mode->size, M_TEMP, M_WAITOK | M_ZERO);
945 cccbc_init(ctx->enc_mode, ctx->ks.enc, ctx->keylen, ctx->key);
946 }
947 if (ctx->ks.dec == NULL) {
948 MALLOC(ctx->ks.dec, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO);
949 cccbc_init(ctx->dec_mode, ctx->ks.dec, ctx->keylen, ctx->key);
950 }
951 }
952 OS_FALLTHROUGH;
953 case 1: {
954 if (ctx->ks.enc == NULL) {
955 krb5_make_usage(lctx->initiate ?
956 KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL,
957 0xAA, usage_string);
958 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen);
959 MALLOC(ctx->ks.enc, cccbc_ctx *, ctx->enc_mode->size, M_TEMP, M_WAITOK | M_ZERO);
960 cccbc_init(ctx->enc_mode, ctx->ks.enc, ctx->keylen, ekey);
961 FREE(ekey, M_TEMP);
962 }
963 if (ctx->ks.dec == NULL) {
964 krb5_make_usage(lctx->initiate ?
965 KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL,
966 0xAA, usage_string);
967 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen);
968 MALLOC(ctx->ks.dec, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO);
969 cccbc_init(ctx->dec_mode, ctx->ks.dec, ctx->keylen, ekey);
970 FREE(ekey, M_TEMP);
971 }
972 if (ctx->ks.ikey[GSS_SND] == NULL) {
973 krb5_make_usage(lctx->initiate ?
974 KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL,
975 0x55, usage_string);
976 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ks.ikey[GSS_SND], ctx->keylen);
977 }
978 if (ctx->ks.ikey[GSS_RCV] == NULL) {
979 krb5_make_usage(lctx->initiate ?
980 KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL,
981 0x55, usage_string);
982 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ks.ikey[GSS_RCV], ctx->keylen);
983 }
984 }
985 }
986 }
987
988 void
989 gss_crypto_ctx_free(crypto_ctx_t ctx)
990 {
991 lck_mtx_destroy(&ctx->lock, &gss_krb5_mech_grp);
992
993 ctx->ks.ikey[GSS_SND] = NULL;
994 if (ctx->ks.ikey[GSS_RCV] && ctx->key != ctx->ks.ikey[GSS_RCV]) {
995 cc_clear(ctx->keylen, ctx->ks.ikey[GSS_RCV]);
996 FREE(ctx->ks.ikey[GSS_RCV], M_TEMP);
997 }
998 ctx->ks.ikey[GSS_RCV] = NULL;
999 if (ctx->ks.enc) {
1000 cccbc_ctx_clear(ctx->enc_mode->size, ctx->ks.enc);
1001 FREE(ctx->ks.enc, M_TEMP);
1002 ctx->ks.enc = NULL;
1003 }
1004 if (ctx->ks.dec) {
1005 cccbc_ctx_clear(ctx->dec_mode->size, ctx->ks.dec);
1006 FREE(ctx->ks.dec, M_TEMP);
1007 ctx->ks.dec = NULL;
1008 }
1009 if (ctx->ckey[GSS_SND] && ctx->ckey[GSS_SND] != ctx->key) {
1010 cc_clear(ctx->keylen, ctx->ckey[GSS_SND]);
1011 FREE(ctx->ckey[GSS_SND], M_TEMP);
1012 }
1013 ctx->ckey[GSS_SND] = NULL;
1014 if (ctx->ckey[GSS_RCV] && ctx->ckey[GSS_RCV] != ctx->key) {
1015 cc_clear(ctx->keylen, ctx->ckey[GSS_RCV]);
1016 FREE(ctx->ckey[GSS_RCV], M_TEMP);
1017 }
1018 ctx->ckey[GSS_RCV] = NULL;
1019 ctx->key = NULL;
1020 ctx->keylen = 0;
1021 }
1022
1023 int
1024 gss_crypto_ctx_init(struct crypto_ctx *ctx, lucid_context_t lucid)
1025 {
1026 ctx->gss_ctx = lucid;
1027 void *key;
1028 uint8_t usage_string[KRB5_USAGE_LEN];
1029
1030 ctx->keylen = ctx->gss_ctx->ctx_key.key.key_len;
1031 key = ctx->gss_ctx->ctx_key.key.key_val;
1032 ctx->etype = ctx->gss_ctx->ctx_key.etype;
1033 ctx->key = key;
1034
1035 switch (ctx->etype) {
1036 case AES128_CTS_HMAC_SHA1_96:
1037 case AES256_CTS_HMAC_SHA1_96:
1038 ctx->enc_mode = ccaes_cbc_encrypt_mode();
1039 assert(ctx->enc_mode);
1040 ctx->dec_mode = ccaes_cbc_decrypt_mode();
1041 assert(ctx->dec_mode);
1042 ctx->ks.enc = NULL;
1043 ctx->ks.dec = NULL;
1044 ctx->di = ccsha1_di();
1045 assert(ctx->di);
1046 ctx->flags = CRYPTO_CTS_ENABLE;
1047 ctx->mpad = 1;
1048 ctx->digest_size = 12; /* 96 bits */
1049 krb5_make_usage(ctx->gss_ctx->initiate ?
1050 KRB5_USAGE_INITIATOR_SIGN : KRB5_USAGE_ACCEPTOR_SIGN,
1051 0x99, usage_string);
1052 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_SND], ctx->keylen);
1053 krb5_make_usage(ctx->gss_ctx->initiate ?
1054 KRB5_USAGE_ACCEPTOR_SIGN : KRB5_USAGE_INITIATOR_SIGN,
1055 0x99, usage_string);
1056 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_RCV], ctx->keylen);
1057 break;
1058 case DES3_CBC_SHA1_KD:
1059 ctx->enc_mode = ccdes3_cbc_encrypt_mode();
1060 assert(ctx->enc_mode);
1061 ctx->dec_mode = ccdes3_cbc_decrypt_mode();
1062 assert(ctx->dec_mode);
1063 ctx->ks.ikey[GSS_SND] = ctx->key;
1064 ctx->ks.ikey[GSS_RCV] = ctx->key;
1065 ctx->di = ccsha1_di();
1066 assert(ctx->di);
1067 ctx->flags = 0;
1068 ctx->mpad = ctx->enc_mode->block_size;
1069 ctx->digest_size = 20; /* 160 bits */
1070 krb5_make_usage(KRB5_USAGE_ACCEPTOR_SIGN, 0x99, usage_string);
1071 krb5_3des_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_SND]);
1072 krb5_3des_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_RCV]);
1073 break;
1074 default:
1075 return ENOTSUP;
1076 }
1077
1078 lck_mtx_init(&ctx->lock, &gss_krb5_mech_grp, LCK_ATTR_NULL);
1079
1080 return 0;
1081 }
1082
1083 /*
1084 * CFX gss support routines
1085 */
1086 /* From Heimdal cfx.h file RFC 4121 Crypto framework extensions */
1087 typedef struct gss_cfx_mic_token_desc_struct {
1088 uint8_t TOK_ID[2]; /* 04 04 */
1089 uint8_t Flags;
1090 uint8_t Filler[5];
1091 uint8_t SND_SEQ[8];
1092 } gss_cfx_mic_token_desc, *gss_cfx_mic_token;
1093
1094 typedef struct gss_cfx_wrap_token_desc_struct {
1095 uint8_t TOK_ID[2]; /* 05 04 */
1096 uint8_t Flags;
1097 uint8_t Filler;
1098 uint8_t EC[2];
1099 uint8_t RRC[2];
1100 uint8_t SND_SEQ[8];
1101 } gss_cfx_wrap_token_desc, *gss_cfx_wrap_token;
1102
1103 /* End of cfx.h file */
1104
1105 #define CFXSentByAcceptor (1 << 0)
1106 #define CFXSealed (1 << 1)
1107 #define CFXAcceptorSubkey (1 << 2)
1108
1109 const gss_cfx_mic_token_desc mic_cfx_token = {
1110 .TOK_ID = "\x04\x04",
1111 .Flags = 0,
1112 .Filler = "\xff\xff\xff\xff\xff",
1113 .SND_SEQ = "\x00\x00\x00\x00\x00\x00\x00\x00"
1114 };
1115
1116 const gss_cfx_wrap_token_desc wrap_cfx_token = {
1117      .TOK_ID = "\x05\x04",
1118 .Flags = 0,
1119 .Filler = '\xff',
1120 .EC = "\x00\x00",
1121 .RRC = "\x00\x00",
1122 .SND_SEQ = "\x00\x00\x00\x00\x00\x00\x00\x00"
1123 };
1124
1125 static int
1126 gss_krb5_cfx_verify_mic_token(gss_ctx_id_t ctx, gss_cfx_mic_token token)
1127 {
1128 int i;
1129 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1130 uint8_t flags = 0;
1131
1132 if (token->TOK_ID[0] != mic_cfx_token.TOK_ID[0] || token->TOK_ID[1] != mic_cfx_token.TOK_ID[1]) {
1133 printf("Bad mic TOK_ID %x %x\n", token->TOK_ID[0], token->TOK_ID[1]);
1134 return EBADRPC;
1135 }
1136 if (lctx->initiate) {
1137 flags |= CFXSentByAcceptor;
1138 }
1139 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) {
1140 flags |= CFXAcceptorSubkey;
1141 }
1142 if (token->Flags != flags) {
1143 printf("Bad flags received %x exptect %x\n", token->Flags, flags);
1144 return EBADRPC;
1145 }
1146 for (i = 0; i < 5; i++) {
1147 if (token->Filler[i] != mic_cfx_token.Filler[i]) {
1148 break;
1149 }
1150 }
1151
1152 if (i != 5) {
1153 printf("Bad mic filler %x @ %d\n", token->Filler[i], i);
1154 return EBADRPC;
1155 }
1156
1157 return 0;
1158 }
1159
1160 uint32_t
1161 gss_krb5_cfx_get_mic(uint32_t *minor, /* minor_status */
1162 gss_ctx_id_t ctx, /* context_handle */
1163 gss_qop_t qop __unused, /* qop_req (ignored) */
1164 gss_buffer_t mbp, /* message mbuf */
1165 gss_buffer_t mic /* message_token */)
1166 {
1167 gss_cfx_mic_token_desc token;
1168 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1169 crypto_ctx_t cctx = &ctx->gss_cryptor;
1170 gss_buffer_desc header;
1171 uint32_t rv;
1172 uint64_t seq = htonll(lctx->send_seq);
1173
1174 if (minor == NULL) {
1175 minor = &rv;
1176 }
1177 *minor = 0;
1178 token = mic_cfx_token;
1179 mic->length = sizeof(token) + cctx->digest_size;
1180 MALLOC(mic->value, void *, mic->length, M_TEMP, M_WAITOK | M_ZERO);
1181 if (!lctx->initiate) {
1182 token.Flags |= CFXSentByAcceptor;
1183 }
1184 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) {
1185 token.Flags |= CFXAcceptorSubkey;
1186 }
1187 memcpy(&token.SND_SEQ, &seq, sizeof(lctx->send_seq));
1188 lctx->send_seq++; //XXX should only update this below on success? Heimdal seems to do it this way
1189 header.value = &token;
1190 header.length = sizeof(gss_cfx_mic_token_desc);
1191
1192 *minor = krb5_mic(cctx, NULL, mbp, &header, (uint8_t *)mic->value + sizeof(token), NULL, 0, 0);
1193
1194 if (*minor) {
1195 mic->length = 0;
1196 FREE(mic->value, M_TEMP);
1197 mic->value = NULL;
1198 } else {
1199 memcpy(mic->value, &token, sizeof(token));
1200 }
1201
1202 return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE;
1203 }
1204
1205 uint32_t
1206 gss_krb5_cfx_verify_mic(uint32_t *minor, /* minor_status */
1207 gss_ctx_id_t ctx, /* context_handle */
1208 gss_buffer_t mbp, /* message_buffer */
1209 gss_buffer_t mic, /* message_token */
1210 gss_qop_t *qop /* qop_state */)
1211 {
1212 gss_cfx_mic_token token = mic->value;
1213 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1214 crypto_ctx_t cctx = &ctx->gss_cryptor;
1215 uint8_t *digest = (uint8_t *)mic->value + sizeof(gss_cfx_mic_token_desc);
1216 int verified = 0;
1217 uint64_t seq;
1218 uint32_t rv;
1219 gss_buffer_desc header;
1220
1221 if (qop) {
1222 *qop = GSS_C_QOP_DEFAULT;
1223 }
1224 if (minor == NULL) {
1225 minor = &rv;
1226 }
1227
1228 if (mic->length != sizeof(gss_cfx_mic_token_desc) + cctx->digest_size) {
1229 printf("mic token wrong length\n");
1230 *minor = EBADRPC;
1231 goto out;
1232 }
1233 *minor = gss_krb5_cfx_verify_mic_token(ctx, token);
1234 if (*minor) {
1235 return GSS_S_FAILURE;
1236 }
1237 header.value = token;
1238 header.length = sizeof(gss_cfx_mic_token_desc);
1239 *minor = krb5_mic(cctx, NULL, mbp, &header, digest, &verified, 0, 0);
1240
1241 if (verified) {
1242 //XXX errors and such? Sequencing and replay? Not supported in RPCSEC_GSS
1243 memcpy(&seq, token->SND_SEQ, sizeof(uint64_t));
1244 seq = ntohll(seq);
1245 lctx->recv_seq = seq;
1246 }
1247
1248 out:
1249 return verified ? GSS_S_COMPLETE : GSS_S_BAD_SIG;
1250 }
1251
1252 uint32_t
1253 gss_krb5_cfx_get_mic_mbuf(uint32_t *minor, /* minor_status */
1254 gss_ctx_id_t ctx, /* context_handle */
1255 gss_qop_t qop __unused, /* qop_req (ignored) */
1256 mbuf_t mbp, /* message mbuf */
1257     size_t offset, /* offset */
1258 size_t len, /* length */
1259 gss_buffer_t mic /* message_token */)
1260 {
1261 gss_cfx_mic_token_desc token;
1262 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1263 crypto_ctx_t cctx = &ctx->gss_cryptor;
1264 uint32_t rv;
1265 uint64_t seq = htonll(lctx->send_seq);
1266 gss_buffer_desc header;
1267
1268 if (minor == NULL) {
1269 minor = &rv;
1270 }
1271 *minor = 0;
1272
1273 token = mic_cfx_token;
1274 mic->length = sizeof(token) + cctx->digest_size;
1275 MALLOC(mic->value, void *, mic->length, M_TEMP, M_WAITOK | M_ZERO);
1276 if (!lctx->initiate) {
1277 token.Flags |= CFXSentByAcceptor;
1278 }
1279 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) {
1280 token.Flags |= CFXAcceptorSubkey;
1281 }
1282
1283 memcpy(&token.SND_SEQ, &seq, sizeof(lctx->send_seq));
1284 lctx->send_seq++; //XXX should only update this below on success? Heimdal seems to do it this way
1285
1286 header.length = sizeof(token);
1287 header.value = &token;
1288
1289 len = len ? len : gss_mbuf_len(mbp, offset);
1290 *minor = krb5_mic_mbuf(cctx, NULL, mbp, offset, len, &header, (uint8_t *)mic->value + sizeof(token), NULL, 0, 0);
1291
1292 if (*minor) {
1293 mic->length = 0;
1294 FREE(mic->value, M_TEMP);
1295 mic->value = NULL;
1296 } else {
1297 memcpy(mic->value, &token, sizeof(token));
1298 }
1299
1300 return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE;
1301 }
1302
1303
1304 uint32_t
1305 gss_krb5_cfx_verify_mic_mbuf(uint32_t *minor, /* minor_status */
1306 gss_ctx_id_t ctx, /* context_handle */
1307 mbuf_t mbp, /* message_buffer */
1308 size_t offset, /* offset */
1309 size_t len, /* length */
1310 gss_buffer_t mic, /* message_token */
1311 gss_qop_t *qop /* qop_state */)
1312 {
1313 gss_cfx_mic_token token = mic->value;
1314 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1315 crypto_ctx_t cctx = &ctx->gss_cryptor;
1316 uint8_t *digest = (uint8_t *)mic->value + sizeof(gss_cfx_mic_token_desc);
1317 int verified;
1318 uint64_t seq;
1319 uint32_t rv;
1320 gss_buffer_desc header;
1321
1322 if (qop) {
1323 *qop = GSS_C_QOP_DEFAULT;
1324 }
1325
1326 if (minor == NULL) {
1327 minor = &rv;
1328 }
1329
1330 *minor = gss_krb5_cfx_verify_mic_token(ctx, token);
1331 if (*minor) {
1332 return GSS_S_FAILURE;
1333 }
1334
1335 header.length = sizeof(gss_cfx_mic_token_desc);
1336 header.value = mic->value;
1337
1338 *minor = krb5_mic_mbuf(cctx, NULL, mbp, offset, len, &header, digest, &verified, 0, 0);
1339 if (*minor) {
1340 return GSS_S_FAILURE;
1341 }
1342
1343 //XXX errors and such? Sequencing and replay? Not Supported RPCSEC_GSS
1344 memcpy(&seq, token->SND_SEQ, sizeof(uint64_t));
1345 seq = ntohll(seq);
1346 lctx->recv_seq = seq;
1347
1348 return verified ? GSS_S_COMPLETE : GSS_S_BAD_SIG;
1349 }
1350
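/*
 * Encrypt or decrypt an mbuf chain in the RFC 3961 style used by CFX wrap
 * tokens: on encrypt, prepend a random confounder block, pad out to the
 * cipher's modulus, checksum the padded plaintext with the integrity key,
 * encrypt, and append the (unencrypted) checksum; on decrypt, undo the same
 * steps and verify the checksum. The reverse flag derives the peer's key
 * schedule so that RPCSEC_GSS can re-create wrapped arguments for resending.
 */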
1351 errno_t
1352 krb5_cfx_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t *len, int encrypt, int reverse)
1353 {
1354 const struct ccmode_cbc *ccmode = encrypt ? ctx->enc_mode : ctx->dec_mode;
1355 uint8_t confounder[ccmode->block_size];
1356 uint8_t digest[ctx->digest_size];
1357 size_t tlen, r = 0;
1358 errno_t error;
1359
1360 if (encrypt) {
1361 assert(ccmode->block_size <= UINT_MAX);
1362 read_random(confounder, (u_int)ccmode->block_size);
1363 error = gss_prepend_mbuf(mbp, confounder, ccmode->block_size);
1364 if (error) {
1365 return error;
1366 }
1367 tlen = *len + ccmode->block_size;
1368 if (ctx->mpad > 1) {
1369 r = ctx->mpad - (tlen % ctx->mpad);
1370 }
1371 /* We expect that r == 0 from krb5_cfx_wrap */
1372 if (r != 0) {
1373 uint8_t mpad[r];
1374 memset(mpad, 0, r);
1375 error = gss_append_mbuf(*mbp, mpad, r);
1376 if (error) {
1377 return error;
1378 }
1379 }
1380 tlen += r;
1381 error = krb5_mic_mbuf(ctx, NULL, *mbp, 0, tlen, NULL, digest, NULL, 1, 0);
1382 if (error) {
1383 return error;
1384 }
1385 error = krb5_crypt_mbuf(ctx, mbp, tlen, 1, NULL);
1386 if (error) {
1387 return error;
1388 }
1389 error = gss_append_mbuf(*mbp, digest, ctx->digest_size);
1390 if (error) {
1391 return error;
1392 }
1393 *len = tlen + ctx->digest_size;
1394 return 0;
1395 } else {
1396 int verf;
1397 cccbc_ctx *ks = NULL;
1398
1399 if (*len < ctx->digest_size + sizeof(confounder)) {
1400 return EBADRPC;
1401 }
1402 tlen = *len - ctx->digest_size;
1403 /* get the digest */
1404 error = mbuf_copydata(*mbp, tlen, ctx->digest_size, digest);
1405              /* Remove the digest from the mbuf chain */
1406 error = gss_strip_mbuf(*mbp, -ctx->digest_size);
1407 if (error) {
1408 return error;
1409 }
1410
1411 if (reverse) {
1412 /*
1413 * Derive a key schedule that the sender can unwrap with. This
1414 * is so that RPCSEC_GSS can restore encrypted arguments for
1415 * resending. We do that because the RPCSEC_GSS sequence number in
1416 * the rpc header is prepended to the body of the message before wrapping.
1417 */
1418 void *ekey;
1419 uint8_t usage_string[KRB5_USAGE_LEN];
1420 lucid_context_t lctx = ctx->gss_ctx;
1421
1422 krb5_make_usage(lctx->initiate ?
1423 KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL,
1424 0xAA, usage_string);
1425 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen);
1426 MALLOC(ks, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO);
1427 cccbc_init(ctx->dec_mode, ks, ctx->keylen, ekey);
1428 FREE(ekey, M_TEMP);
1429 }
1430 error = krb5_crypt_mbuf(ctx, mbp, tlen, 0, ks);
1431 FREE(ks, M_TEMP);
1432 if (error) {
1433 return error;
1434 }
1435 error = krb5_mic_mbuf(ctx, NULL, *mbp, 0, tlen, NULL, digest, &verf, 1, reverse);
1436 if (error) {
1437 return error;
1438 }
1439 if (!verf) {
1440 return EBADRPC;
1441 }
1442 /* strip off the confounder */
1443 assert(ccmode->block_size <= INT_MAX);
1444 error = gss_strip_mbuf(*mbp, (int)ccmode->block_size);
1445 if (error) {
1446 return error;
1447 }
1448 *len = tlen - ccmode->block_size;
1449 }
1450 return 0;
1451 }
1452
1453 uint32_t
1454 gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */
1455 gss_ctx_id_t ctx, /* context_handle */
1456 int conf_flag, /* conf_req_flag */
1457 gss_qop_t qop __unused, /* qop_req */
1458 mbuf_t *mbp, /* input/output message_buffer */
1459 size_t len, /* mbuf chain length */
1460 int *conf /* conf_state */)
1461 {
1462 gss_cfx_wrap_token_desc token;
1463 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1464 crypto_ctx_t cctx = &ctx->gss_cryptor;
1465 int error = 0;
1466 uint32_t mv;
1467 uint64_t seq = htonll(lctx->send_seq);
1468
1469 if (minor == NULL) {
1470 minor = &mv;
1471 }
1472 if (conf) {
1473 *conf = conf_flag;
1474 }
1475
1476 *minor = 0;
1477 token = wrap_cfx_token;
1478 if (!lctx->initiate) {
1479 token.Flags |= CFXSentByAcceptor;
1480 }
1481 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) {
1482 token.Flags |= CFXAcceptorSubkey;
1483 }
1484 memcpy(&token.SND_SEQ, &seq, sizeof(uint64_t));
1485 lctx->send_seq++;
1486 if (conf_flag) {
1487 uint8_t pad[cctx->mpad];
1488 size_t plen = 0;
1489
1490 token.Flags |= CFXSealed;
1491 memset(pad, 0, cctx->mpad);
1492 if (cctx->mpad > 1) {
1493                      plen = cctx->mpad - ((len + sizeof(gss_cfx_wrap_token_desc)) % cctx->mpad);
1494                      /* EC holds this pad count as a 16-bit big-endian value (set just below) */
1495 token.EC[0] = ((plen >> 8) & 0xff);
1496 token.EC[1] = (plen & 0xff);
1497 }
1498 if (plen) {
1499 error = gss_append_mbuf(*mbp, pad, plen);
1500 len += plen;
1501 }
1502 if (error == 0) {
1503 error = gss_append_mbuf(*mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc));
1504 len += sizeof(gss_cfx_wrap_token_desc);
1505 }
1506 if (error == 0) {
1507 error = krb5_cfx_crypt_mbuf(cctx, mbp, &len, 1, 0);
1508 }
1509 if (error == 0) {
1510 error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc));
1511 }
1512 } else {
1513 uint8_t digest[cctx->digest_size];
1514 gss_buffer_desc header;
1515
1516 header.length = sizeof(token);
1517 header.value = &token;
1518
1519 error = krb5_mic_mbuf(cctx, NULL, *mbp, 0, len, &header, digest, NULL, 1, 0);
1520 if (error == 0) {
1521 error = gss_append_mbuf(*mbp, digest, cctx->digest_size);
1522 if (error == 0) {
1523                              uint16_t plen = htons((uint16_t)cctx->digest_size);
1524                              memcpy(token.EC, &plen, 2);
1525 error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc));
1526 }
1527 }
1528 }
1529 if (error) {
1530 *minor = error;
1531 return GSS_S_FAILURE;
1532 }
1533
1534 return GSS_S_COMPLETE;
1535 }
1536
1537 /*
1538  * Given a wrap token that has a non-zero RRC (right rotation count), move the trailer back to the end.
1539  */
1540 static void
1541 gss_krb5_cfx_unwrap_rrc_mbuf(mbuf_t header, size_t rrc)
1542 {
1543 mbuf_t body, trailer;
1544
1545 gss_normalize_mbuf(header, sizeof(gss_cfx_wrap_token_desc), &rrc, &trailer, &body, 0);
1546 gss_join_mbuf(header, body, trailer);
1547 }
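
/*
 * Illustrative note (not part of the original file): RRC is the RFC 4121
 * "right rotation count". The RRC octets that the sender rotated to the
 * front (immediately after the 16-byte token header) are carved out with
 * gss_normalize_mbuf() above and re-joined at the tail, restoring the
 * ciphertext to its unrotated order before decryption.
 */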
1548
1549 uint32_t
1550 gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */
1551 gss_ctx_id_t ctx, /* context_handle */
1552 mbuf_t *mbp, /* input/output message_buffer */
1553 size_t len, /* mbuf chain length */
1554 int *conf_flag, /* conf_state */
1555 gss_qop_t *qop /* qop state */)
1556 {
1557 gss_cfx_wrap_token_desc token;
1558 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1559 crypto_ctx_t cctx = &ctx->gss_cryptor;
1560 int error, conf;
1561 uint32_t ec = 0, rrc = 0;
1562 uint64_t seq;
1563 int reverse = (*qop == GSS_C_QOP_REVERSE);
1564 int initiate = lctx->initiate ? (reverse ? 0 : 1) : (reverse ? 1 : 0);
1565
1566 error = mbuf_copydata(*mbp, 0, sizeof(gss_cfx_wrap_token_desc), &token);
1567 gss_strip_mbuf(*mbp, sizeof(gss_cfx_wrap_token_desc));
1568 len -= sizeof(gss_cfx_wrap_token_desc);
1569
1570 /* Check for valid token */
1571 if (token.TOK_ID[0] != wrap_cfx_token.TOK_ID[0] ||
1572 token.TOK_ID[1] != wrap_cfx_token.TOK_ID[1] ||
1573 token.Filler != wrap_cfx_token.Filler) {
1574 printf("Token id does not match\n");
1575 goto badrpc;
1576 }
1577 if ((initiate && !(token.Flags & CFXSentByAcceptor)) ||
1578 (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey && !(token.Flags & CFXAcceptorSubkey))) {
1579 printf("Bad flags %x\n", token.Flags);
1580 goto badrpc;
1581 }
1582
1583 /* XXX Sequence replay detection */
1584 memcpy(&seq, token.SND_SEQ, sizeof(seq));
1585 seq = ntohll(seq);
1586 lctx->recv_seq = seq;
1587
1588 ec = (token.EC[0] << 8) | token.EC[1];
1589 rrc = (token.RRC[0] << 8) | token.RRC[1];
1590 *qop = GSS_C_QOP_DEFAULT;
1591 conf = ((token.Flags & CFXSealed) == CFXSealed);
1592 if (conf_flag) {
1593 *conf_flag = conf;
1594 }
1595 if (conf) {
1596 gss_cfx_wrap_token_desc etoken;
1597
1598 if (rrc) { /* Handle Right rotation count */
1599 gss_krb5_cfx_unwrap_rrc_mbuf(*mbp, rrc);
1600 }
1601 error = krb5_cfx_crypt_mbuf(cctx, mbp, &len, 0, reverse);
1602 if (error) {
1603 printf("krb5_cfx_crypt_mbuf %d\n", error);
1604 *minor = error;
1605 return GSS_S_FAILURE;
1606 }
1607 if (len >= sizeof(gss_cfx_wrap_token_desc)) {
1608 len -= sizeof(gss_cfx_wrap_token_desc);
1609 } else {
1610 goto badrpc;
1611 }
1612 mbuf_copydata(*mbp, len, sizeof(gss_cfx_wrap_token_desc), &etoken);
1613              /* Verify etoken against the token, which should be the same except the RRC field is always zero */
1614 token.RRC[0] = token.RRC[1] = 0;
1615 if (memcmp(&token, &etoken, sizeof(gss_cfx_wrap_token_desc)) != 0) {
1616 printf("Encrypted token mismach\n");
1617 goto badrpc;
1618 }
1619 /* strip the encrypted token and any pad bytes */
1620 gss_strip_mbuf(*mbp, -(sizeof(gss_cfx_wrap_token_desc) + ec));
1621 len -= (sizeof(gss_cfx_wrap_token_desc) + ec);
1622 } else {
1623 uint8_t digest[cctx->digest_size];
1624 int verf;
1625 gss_buffer_desc header;
1626
1627              if (ec != cctx->digest_size || len < cctx->digest_size) {
1628 goto badrpc;
1629 }
1630 len -= cctx->digest_size;
1631 mbuf_copydata(*mbp, len, cctx->digest_size, digest);
1632 gss_strip_mbuf(*mbp, -cctx->digest_size);
1633              /* When calculating the mic, the header fields EC and RRC must be zero */
1634 token.EC[0] = token.EC[1] = token.RRC[0] = token.RRC[1] = 0;
1635 header.value = &token;
1636 header.length = sizeof(gss_cfx_wrap_token_desc);
1637 error = krb5_mic_mbuf(cctx, NULL, *mbp, 0, len, &header, digest, &verf, 1, reverse);
1638 if (error) {
1639 goto badrpc;
1640 }
1641 }
1642 return GSS_S_COMPLETE;
1643
1644 badrpc:
1645 *minor = EBADRPC;
1646 return GSS_S_FAILURE;
1647 }
1648
1649 /*
1650 * RFC 1964 3DES support
1651 */
1652
1653 typedef struct gss_1964_mic_token_desc_struct {
1654 uint8_t TOK_ID[2]; /* 01 01 */
1655 uint8_t Sign_Alg[2];
1656 uint8_t Filler[4]; /* ff ff ff ff */
1657 } gss_1964_mic_token_desc, *gss_1964_mic_token;
1658
1659 typedef struct gss_1964_wrap_token_desc_struct {
1660 uint8_t TOK_ID[2]; /* 02 01 */
1661 uint8_t Sign_Alg[2];
1662 uint8_t Seal_Alg[2];
1663 uint8_t Filler[2]; /* ff ff */
1664 } gss_1964_wrap_token_desc, *gss_1964_wrap_token;
1665
1666 typedef struct gss_1964_delete_token_desc_struct {
1667 uint8_t TOK_ID[2]; /* 01 02 */
1668 uint8_t Sign_Alg[2];
1669 uint8_t Filler[4]; /* ff ff ff ff */
1670 } gss_1964_delete_token_desc, *gss_1964_delete_token;
1671
1672 typedef struct gss_1964_header_desc_struct {
1673 uint8_t App0; /* 0x60 Application 0 constructed */
1674 uint8_t AppLen[]; /* Variable Der length */
1675 } gss_1964_header_desc, *gss_1964_header;
1676
1677 typedef union {
1678 gss_1964_mic_token_desc mic_tok;
1679 gss_1964_wrap_token_desc wrap_tok;
1680 gss_1964_delete_token_desc del_tok;
1681 } gss_1964_tok_type __attribute__((transparent_union));
1682
1683 typedef struct gss_1964_token_body_struct {
1684 uint8_t OIDType; /* 0x06 */
1685 uint8_t OIDLen; /* 0x09 */
1686 uint8_t kerb_mech[9]; /* Der Encode kerberos mech 1.2.840.113554.1.2.2
1687 * 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 */
1688 gss_1964_tok_type body;
1689 uint8_t SND_SEQ[8];
1690 uint8_t Hash[]; /* Mic */
1691 } gss_1964_token_body_desc, *gss_1964_token_body;
1692
1693
1694 gss_1964_header_desc tok_1964_header = {
1695 .App0 = 0x60
1696 };
1697
1698 gss_1964_mic_token_desc mic_1964_token = {
1699 .TOK_ID = "\x01\x01",
1700 .Filler = "\xff\xff\xff\xff"
1701 };
1702
1703 gss_1964_wrap_token_desc wrap_1964_token = {
1704 .TOK_ID = "\x02\x01",
1705 .Filler = "\xff\xff"
1706 };
1707
1708 gss_1964_delete_token_desc del_1964_token = {
1709      .TOK_ID = "\x01\x02",
1710 .Filler = "\xff\xff\xff\xff"
1711 };
1712
1713 gss_1964_token_body_desc body_1964_token = {
1714 .OIDType = 0x06,
1715 .OIDLen = 0x09,
1716 .kerb_mech = "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02",
1717 };
1718
1719 #define GSS_KRB5_3DES_MAXTOKSZ (sizeof(gss_1964_header_desc) + 5 /* max der length supported */ + sizeof(gss_1964_token_body_desc))
1720
1721 uint32_t gss_krb5_3des_get_mic(uint32_t *, gss_ctx_id_t, gss_qop_t, gss_buffer_t, gss_buffer_t);
1722 uint32_t gss_krb5_3des_verify_mic(uint32_t *, gss_ctx_id_t, gss_buffer_t, gss_buffer_t, gss_qop_t *);
1723 uint32_t gss_krb5_3des_get_mic_mbuf(uint32_t *, gss_ctx_id_t, gss_qop_t, mbuf_t, size_t, size_t, gss_buffer_t);
1724 uint32_t gss_krb5_3des_verify_mic_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t, size_t, size_t, gss_buffer_t, gss_qop_t *);
1725 uint32_t gss_krb5_3des_wrap_mbuf(uint32_t *, gss_ctx_id_t, int, gss_qop_t, mbuf_t *, size_t, int *);
1726 uint32_t gss_krb5_3des_unwrap_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t *, size_t, int *, gss_qop_t *);
1727
1728 /*
1729 * Decode an ASN.1 DER length field
1730 */
1731 static ssize_t
1732 gss_krb5_der_length_get(uint8_t **pp)
1733 {
1734 uint8_t *p = *pp;
1735 uint32_t flen, len = 0;
1736
1737 flen = *p & 0x7f;
1738
1739 if (*p++ & 0x80) {
1740 if (flen > sizeof(uint32_t)) {
1741 return -1;
1742 }
1743 while (flen--) {
1744 len = (len << 8) + *p++;
1745 }
1746 } else {
1747 len = flen;
1748 }
1749 *pp = p;
1750 return len;
1751 }
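
/*
 * Illustrative note (not part of the original file): DER definite lengths
 * are either a single byte 0x00-0x7f (short form) or 0x80|n followed by n
 * length octets (long form). For example, 0x2e decodes to 46 and
 * 0x82 0x01 0x00 decodes to 256; gss_krb5_der_length_size(256) below is
 * correspondingly 3.
 */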
1752
1753 /*
1754 * Determine size of ASN.1 DER length
1755 */
1756 static int
1757 gss_krb5_der_length_size(size_t len)
1758 {
1759 return
1760 len < (1 << 7) ? 1 :
1761 len < (1 << 8) ? 2 :
1762 len < (1 << 16) ? 3 :
1763 len < (1 << 24) ? 4 : 5;
1764 }
1765
1766 /*
1767 * Encode an ASN.1 DER length field
1768 */
1769 static void
1770 gss_krb5_der_length_put(uint8_t **pp, size_t len)
1771 {
1772 int sz = gss_krb5_der_length_size(len);
1773 uint8_t *p = *pp;
1774
1775 if (sz == 1) {
1776 *p++ = (uint8_t) len;
1777 } else {
1778 *p++ = (uint8_t) ((sz - 1) | 0x80);
1779 sz -= 1;
1780 while (sz--) {
1781 *p++ = (uint8_t) ((len >> (sz * 8)) & 0xff);
1782 }
1783 }
1784
1785 *pp = p;
1786 }
1787
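/*
 * Build an RFC 1964 token: the 0x60 application tag and DER length, the
 * kerberos mechanism OID, the fixed token body passed in, an 8-byte SND_SEQ
 * field (the 32-bit sequence number followed by four direction bytes, 0x00
 * from the initiator and 0xff from the acceptor) encrypted in CBC mode with
 * the checksum as the IV, and finally the checksum itself.
 */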
1788 static void
1789 gss_krb5_3des_token_put(gss_ctx_id_t ctx, gss_1964_tok_type body, gss_buffer_t hash, size_t datalen, gss_buffer_t des3_token)
1790 {
1791 gss_1964_header token;
1792 gss_1964_token_body tokbody;
1793 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1794 crypto_ctx_t cctx = &ctx->gss_cryptor;
1795 uint32_t seq = (uint32_t) (lctx->send_seq++ & 0xffff);
1796 size_t toklen = sizeof(gss_1964_token_body_desc) + cctx->digest_size;
1797 size_t alloclen = toklen + sizeof(gss_1964_header_desc) + gss_krb5_der_length_size(toklen + datalen);
1798 uint8_t *tokptr;
1799
1800 MALLOC(token, gss_1964_header, alloclen, M_TEMP, M_WAITOK | M_ZERO);
1801 *token = tok_1964_header;
1802 tokptr = token->AppLen;
1803 gss_krb5_der_length_put(&tokptr, toklen + datalen);
1804 tokbody = (gss_1964_token_body)tokptr;
1805      *tokbody = body_1964_token; /* Initialize the token body */
1806 tokbody->body = body; /* and now set the body to the token type passed in */
1807 seq = htonl(seq);
1808 for (int i = 0; i < 4; i++) {
1809 tokbody->SND_SEQ[i] = (uint8_t)((seq >> (i * 8)) & 0xff);
1810 }
1811 for (int i = 4; i < 8; i++) {
1812 tokbody->SND_SEQ[i] = lctx->initiate ? 0x00 : 0xff;
1813 }
1814
1815 size_t blocksize = cctx->enc_mode->block_size;
1816 cccbc_iv_decl(blocksize, iv);
1817 cccbc_ctx_decl(cctx->enc_mode->size, enc_ctx);
1818 cccbc_set_iv(cctx->enc_mode, iv, hash->value);
1819 cccbc_init(cctx->enc_mode, enc_ctx, cctx->keylen, cctx->key);
1820 cccbc_update(cctx->enc_mode, enc_ctx, iv, 1, tokbody->SND_SEQ, tokbody->SND_SEQ);
1821
1822 assert(hash->length == cctx->digest_size);
1823 memcpy(tokbody->Hash, hash->value, hash->length);
1824 des3_token->length = alloclen;
1825 des3_token->value = token;
1826 }
1827
1828 static int
1829 gss_krb5_3des_token_get(gss_ctx_id_t ctx, gss_buffer_t intok,
1830 gss_1964_tok_type body, gss_buffer_t hash, size_t *offset, size_t *len, int reverse)
1831 {
1832 gss_1964_header token = intok->value;
1833 gss_1964_token_body tokbody;
1834 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1835 crypto_ctx_t cctx = &ctx->gss_cryptor;
1836 ssize_t length;
1837 size_t toklen;
1838 uint8_t *tokptr;
1839 uint32_t seq;
1840 int initiate;
1841
1842 if (token->App0 != tok_1964_header.App0) {
1843 printf("%s: bad framing\n", __func__);
1844 printgbuf(__func__, intok);
1845 return EBADRPC;
1846 }
1847 tokptr = token->AppLen;
1848 length = gss_krb5_der_length_get(&tokptr);
1849 if (length < 0) {
1850 printf("%s: invalid length\n", __func__);
1851 printgbuf(__func__, intok);
1852 return EBADRPC;
1853 }
1854 toklen = sizeof(gss_1964_header_desc) + gss_krb5_der_length_size(length)
1855 + sizeof(gss_1964_token_body_desc);
1856
1857 if (intok->length < toklen + cctx->digest_size) {
1858 printf("%s: token to short", __func__);
1859 printf("toklen = %d, length = %d\n", (int)toklen, (int)length);
1860 printgbuf(__func__, intok);
1861 return EBADRPC;
1862 }
1863
1864 if (offset) {
1865 *offset = toklen + cctx->digest_size;
1866 }
1867
1868 if (len) {
1869 *len = length - sizeof(gss_1964_token_body_desc) - cctx->digest_size;
1870 }
1871
1872 tokbody = (gss_1964_token_body)tokptr;
1873 if (tokbody->OIDType != body_1964_token.OIDType ||
1874 tokbody->OIDLen != body_1964_token.OIDLen ||
1875 memcmp(tokbody->kerb_mech, body_1964_token.kerb_mech, tokbody->OIDLen) != 0) {
1876 printf("%s: Invalid mechanism\n", __func__);
1877 printgbuf(__func__, intok);
1878 return EBADRPC;
1879 }
1880 if (memcmp(&tokbody->body, &body, sizeof(gss_1964_tok_type)) != 0) {
1881 printf("%s: Invalid body\n", __func__);
1882 printgbuf(__func__, intok);
1883 return EBADRPC;
1884 }
1885 size_t blocksize = cctx->enc_mode->block_size;
1886 uint8_t *block = tokbody->SND_SEQ;
1887
1888 assert(blocksize == sizeof(tokbody->SND_SEQ));
1889 cccbc_iv_decl(blocksize, iv);
1890 cccbc_ctx_decl(cctx->dec_mode->size, dec_ctx);
1891 cccbc_set_iv(cctx->dec_mode, iv, tokbody->Hash);
1892 cccbc_init(cctx->dec_mode, dec_ctx, cctx->keylen, cctx->key);
1893 cccbc_update(cctx->dec_mode, dec_ctx, iv, 1, block, block);
1894
1895 initiate = lctx->initiate ? (reverse ? 0 : 1) : (reverse ? 1 : 0);
1896 for (int i = 4; i < 8; i++) {
1897 if (tokbody->SND_SEQ[i] != (initiate ? 0xff : 0x00)) {
1898 printf("%s: Invalid des mac\n", __func__);
1899 printgbuf(__func__, intok);
1900 return EAUTH;
1901 }
1902 }
1903
1904 memcpy(&seq, tokbody->SND_SEQ, sizeof(uint32_t));
1905
1906 lctx->recv_seq = ntohl(seq);
1907
1908 assert(hash->length >= cctx->digest_size);
1909 memcpy(hash->value, tokbody->Hash, cctx->digest_size);
1910
1911 return 0;
1912 }
1913
1914 uint32_t
1915 gss_krb5_3des_get_mic(uint32_t *minor, /* minor status */
1916 gss_ctx_id_t ctx, /* krb5 context id */
1917 gss_qop_t qop __unused, /* qop_req (ignored) */
1918 gss_buffer_t mbp, /* message buffer in */
1919 gss_buffer_t mic) /* mic token out */
1920 {
1921 gss_1964_mic_token_desc tokbody = mic_1964_token;
1922 crypto_ctx_t cctx = &ctx->gss_cryptor;
1923 gss_buffer_desc hash;
1924 gss_buffer_desc header;
1925 uint8_t hashval[cctx->digest_size];
1926
1927 hash.length = cctx->digest_size;
1928 hash.value = hashval;
1929 tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
1930 tokbody.Sign_Alg[1] = 0x00;
1931 header.length = sizeof(gss_1964_mic_token_desc);
1932 header.value = &tokbody;
1933
1934 /* Hash the data */
1935 *minor = krb5_mic(cctx, &header, mbp, NULL, hashval, NULL, 0, 0);
1936 if (*minor) {
1937 return GSS_S_FAILURE;
1938 }
1939
1940 /* Make the token */
1941 gss_krb5_3des_token_put(ctx, tokbody, &hash, 0, mic);
1942
1943 return GSS_S_COMPLETE;
1944 }
1945
1946 uint32_t
1947 gss_krb5_3des_verify_mic(uint32_t *minor,
1948 gss_ctx_id_t ctx,
1949 gss_buffer_t mbp,
1950 gss_buffer_t mic,
1951 gss_qop_t *qop)
1952 {
1953 crypto_ctx_t cctx = &ctx->gss_cryptor;
1954 uint8_t hashval[cctx->digest_size];
1955 gss_buffer_desc hash;
1956 gss_1964_mic_token_desc mtok = mic_1964_token;
1957 gss_buffer_desc header;
1958 int verf;
1959
1960 mtok.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
1961 mtok.Sign_Alg[1] = 0x00;
1962 hash.length = cctx->digest_size;
1963 hash.value = hashval;
1964 header.length = sizeof(gss_1964_mic_token_desc);
1965 header.value = &mtok;
1966
1967 if (qop) {
1968 *qop = GSS_C_QOP_DEFAULT;
1969 }
1970
1971 *minor = gss_krb5_3des_token_get(ctx, mic, mtok, &hash, NULL, NULL, 0);
1972 if (*minor) {
1973 return GSS_S_FAILURE;
1974 }
1975
1976 *minor = krb5_mic(cctx, &header, mbp, NULL, hashval, &verf, 0, 0);
1977 if (*minor) {
1978 return GSS_S_FAILURE;
1979 }
1980
1981 return verf ? GSS_S_COMPLETE : GSS_S_BAD_SIG;
1982 }
1983
1984 uint32_t
1985 gss_krb5_3des_get_mic_mbuf(uint32_t *minor,
1986 gss_ctx_id_t ctx,
1987 gss_qop_t qop __unused,
1988 mbuf_t mbp,
1989 size_t offset,
1990 size_t len,
1991 gss_buffer_t mic)
1992 {
1993 gss_1964_mic_token_desc tokbody = mic_1964_token;
1994 crypto_ctx_t cctx = &ctx->gss_cryptor;
1995 gss_buffer_desc header;
1996 gss_buffer_desc hash;
1997 uint8_t hashval[cctx->digest_size];
1998
1999 hash.length = cctx->digest_size;
2000 hash.value = hashval;
2001 tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
2002 tokbody.Sign_Alg[1] = 0x00;
2003 header.length = sizeof(gss_1964_mic_token_desc);
2004 header.value = &tokbody;
2005
2006 /* Hash the data */
2007 *minor = krb5_mic_mbuf(cctx, &header, mbp, offset, len, NULL, hashval, NULL, 0, 0);
2008 if (*minor) {
2009 return GSS_S_FAILURE;
2010 }
2011
2012 /* Make the token */
2013 gss_krb5_3des_token_put(ctx, tokbody, &hash, 0, mic);
2014
2015 return GSS_S_COMPLETE;
2016 }
2017
2018 uint32_t
2019 gss_krb5_3des_verify_mic_mbuf(uint32_t *minor,
2020 gss_ctx_id_t ctx,
2021 mbuf_t mbp,
2022 size_t offset,
2023 size_t len,
2024 gss_buffer_t mic,
2025 gss_qop_t *qop)
2026 {
2027 crypto_ctx_t cctx = &ctx->gss_cryptor;
2028 uint8_t hashval[cctx->digest_size];
2029 gss_buffer_desc header;
2030 gss_buffer_desc hash;
2031 gss_1964_mic_token_desc mtok = mic_1964_token;
2032 int verf;
2033
2034 mtok.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
2035 mtok.Sign_Alg[1] = 0x00;
2036 hash.length = cctx->digest_size;
2037 hash.value = hashval;
2038 header.length = sizeof(gss_1964_mic_token_desc);
2039 header.value = &mtok;
2040
2041 if (qop) {
2042 *qop = GSS_C_QOP_DEFAULT;
2043 }
2044
2045 *minor = gss_krb5_3des_token_get(ctx, mic, mtok, &hash, NULL, NULL, 0);
2046 if (*minor) {
2047 return GSS_S_FAILURE;
2048 }
2049
2050 *minor = krb5_mic_mbuf(cctx, &header, mbp, offset, len, NULL, hashval, &verf, 0, 0);
2051 if (*minor) {
2052 return GSS_S_FAILURE;
2053 }
2054
2055 return verf ? GSS_S_COMPLETE : GSS_S_BAD_SIG;
2056 }
2057
2058 uint32_t
2059 gss_krb5_3des_wrap_mbuf(uint32_t *minor,
2060 gss_ctx_id_t ctx,
2061 int conf_flag,
2062 gss_qop_t qop __unused,
2063 mbuf_t *mbp,
2064 size_t len,
2065 int *conf_state)
2066 {
2067 crypto_ctx_t cctx = &ctx->gss_cryptor;
2068 const struct ccmode_cbc *ccmode = cctx->enc_mode;
2069 uint8_t padlen;
2070 uint8_t pad[8];
2071 uint8_t confounder[ccmode->block_size];
2072 gss_1964_wrap_token_desc tokbody = wrap_1964_token;
2073 gss_buffer_desc header;
2074 gss_buffer_desc mic;
2075 gss_buffer_desc hash;
2076 uint8_t hashval[cctx->digest_size];
2077
2078 if (conf_state) {
2079 *conf_state = conf_flag;
2080 }
2081
2082 hash.length = cctx->digest_size;
2083 hash.value = hashval;
2084 tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
2085 tokbody.Sign_Alg[1] = 0x00;
2086 /* conf_flag ? lctx->key_data.lucid_protocol_u.data_1964.seal_alg : 0xffff */
2087 tokbody.Seal_Alg[0] = conf_flag ? 0x02 : 0xff;
2088 tokbody.Seal_Alg[1] = conf_flag ? 0x00 : 0xff;
2089 header.length = sizeof(gss_1964_wrap_token_desc);
2090 header.value = &tokbody;
2091
2092 /* Prepend confounder */
2093 assert(ccmode->block_size <= UINT_MAX);
2094 read_random(confounder, (u_int)ccmode->block_size);
2095 *minor = gss_prepend_mbuf(mbp, confounder, ccmode->block_size);
2096 if (*minor) {
2097 return GSS_S_FAILURE;
2098 }
2099
2100 /* Append trailer of up to 8 bytes and set pad length in each trailer byte */
2101 padlen = 8 - len % 8;
2102 for (int i = 0; i < padlen; i++) {
2103 pad[i] = padlen;
2104 }
2105 *minor = gss_append_mbuf(*mbp, pad, padlen);
2106 if (*minor) {
2107 return GSS_S_FAILURE;
2108 }
2109
2110 len += ccmode->block_size + padlen;
2111
2112 /* Hash the data */
2113 *minor = krb5_mic_mbuf(cctx, &header, *mbp, 0, len, NULL, hashval, NULL, 0, 0);
2114 if (*minor) {
2115 return GSS_S_FAILURE;
2116 }
2117
2118 /* Make the token */
2119 gss_krb5_3des_token_put(ctx, tokbody, &hash, len, &mic);
2120
2121 if (conf_flag) {
2122 *minor = krb5_crypt_mbuf(cctx, mbp, len, 1, 0);
2123 if (*minor) {
2124 return GSS_S_FAILURE;
2125 }
2126 }
2127
2128 *minor = gss_prepend_mbuf(mbp, mic.value, mic.length);
2129
2130 return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE;
2131 }
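/*
 * Padding sketch for the wrap path above (numbers follow the
 * padlen = 8 - len % 8 rule used in the code):
 *
 *     len % 8 == 0  ->  padlen == 8   (a full trailer block of 0x08 bytes)
 *     len % 8 == 5  ->  padlen == 3   (three trailing 0x03 bytes)
 *
 * Every trailer byte carries the pad length, so unwrap only needs to read
 * the last byte of the decrypted payload to know how much to strip, which
 * is what gss_krb5_3des_unwrap_mbuf() does with mbuf_copydata() below.
 */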
2132
2133 uint32_t
2134 gss_krb5_3des_unwrap_mbuf(uint32_t *minor,
2135 gss_ctx_id_t ctx,
2136 mbuf_t *mbp,
2137 size_t len,
2138 int *conf_state,
2139 gss_qop_t *qop)
2140 {
2141 crypto_ctx_t cctx = &ctx->gss_cryptor;
2142 const struct ccmode_cbc *ccmode = cctx->dec_mode;
2143 size_t length = 0, offset = 0;
2144 gss_buffer_desc hash;
2145 uint8_t hashval[cctx->digest_size];
2146 gss_buffer_desc itoken;
2147 uint8_t tbuffer[GSS_KRB5_3DES_MAXTOKSZ + cctx->digest_size];
2148 itoken.length = GSS_KRB5_3DES_MAXTOKSZ + cctx->digest_size;
2149 itoken.value = tbuffer;
2150 gss_1964_wrap_token_desc wrap = wrap_1964_token;
2151 gss_buffer_desc header;
2152 uint8_t padlen;
2153 mbuf_t smb, tmb;
2154 int cflag, verified, reverse = 0;
2155
2156 if (len < GSS_KRB5_3DES_MAXTOKSZ) {
2157 *minor = EBADRPC;
2158 return GSS_S_FAILURE;
2159 }
2160
2161 if (*qop == GSS_C_QOP_REVERSE) {
2162 reverse = 1;
2163 }
2164 *qop = GSS_C_QOP_DEFAULT;
2165
2166 *minor = mbuf_copydata(*mbp, 0, itoken.length, itoken.value);
2167 if (*minor) {
2168 return GSS_S_FAILURE;
2169 }
2170
2171 hash.length = cctx->digest_size;
2172 hash.value = hashval;
2173 wrap.Sign_Alg[0] = 0x04;
2174 wrap.Sign_Alg[1] = 0x00;
2175 wrap.Seal_Alg[0] = 0x02;
2176 wrap.Seal_Alg[1] = 0x00;
2177
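/*
 * The incoming token does not say on its own whether the payload was
 * sealed.  Try the confidentiality form of the wrap token first
 * (Seal_Alg 0x02 0x00, cflag == 1), then fall back to the integrity-only
 * form (Seal_Alg 0xff 0xff, cflag == 0) before giving up.
 */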
2178 for (cflag = 1; cflag >= 0; cflag--) {
2179 *minor = gss_krb5_3des_token_get(ctx, &itoken, wrap, &hash, &offset, &length, reverse);
2180 if (*minor == 0) {
2181 break;
2182 }
2183 wrap.Seal_Alg[0] = 0xff;
2184 wrap.Seal_Alg[1] = 0xff;
2185 }
2186 if (*minor) {
2187 return GSS_S_FAILURE;
2188 }
2189
2190 if (conf_state) {
2191 *conf_state = cflag;
2192 }
2193
2194 /*
2195 * Separate off the header
2196 */
2197 *minor = gss_normalize_mbuf(*mbp, offset, &length, &smb, &tmb, 0);
2198 if (*minor) {
2199 return GSS_S_FAILURE;
2200 }
2201
2202 assert(tmb == NULL);
2203
2204 /* Decrypt the chain if needed */
2205 if (cflag) {
2206 *minor = krb5_crypt_mbuf(cctx, &smb, length, 0, NULL);
2207 if (*minor) {
2208 return GSS_S_FAILURE;
2209 }
2210 }
2211
2212 /* Verify the mic */
2213 header.length = sizeof(gss_1964_wrap_token_desc);
2214 header.value = &wrap;
2215
2216 *minor = krb5_mic_mbuf(cctx, &header, smb, 0, length, NULL, hashval, &verified, 0, 0);
2217 if (*minor) {
2218 return GSS_S_FAILURE;
2219 }
2220 if (!verified) {
2221 return GSS_S_BAD_SIG;
2222 }
2223
2224 /* Get the pad bytes */
2225 *minor = mbuf_copydata(smb, length - 1, 1, &padlen);
2226 if (*minor) {
2227 return GSS_S_FAILURE;
2228 }
2229
2230 /* Strip the confounder and trailing pad bytes */
2231 gss_strip_mbuf(smb, -padlen);
2232 assert(ccmode->block_size <= INT_MAX);
2233 gss_strip_mbuf(smb, (int)ccmode->block_size);
2234
2235 if (*mbp != smb) {
2236 mbuf_freem(*mbp);
2237 *mbp = smb;
2238 }
2239
2240 return GSS_S_COMPLETE;
2241 }
2242
2243 static const char *
2244 etype_name(etypes etype)
2245 {
2246 switch (etype) {
2247 case DES3_CBC_SHA1_KD:
2248 return "des3-cbc-sha1";
2249 case AES128_CTS_HMAC_SHA1_96:
2250 return "aes128-cts-hmac-sha1-96";
2251 case AES256_CTS_HMAC_SHA1_96:
2252 return "aes-cts-hmac-sha1-96";
2253 default:
2254 return "unknown enctype";
2255 }
2256 }
2257
2258 static int
2259 supported_etype(uint32_t proto, etypes etype)
2260 {
2261 const char *proto_name;
2262
2263 switch (proto) {
2264 case 0:
2265 /* RFC 1964 */
2266 proto_name = "RFC 1964 krb5 gss mech";
2267 switch (etype) {
2268 case DES3_CBC_SHA1_KD:
2269 return 1;
2270 default:
2271 break;
2272 }
2273 break;
2274 case 1:
2275 /* RFC 4121 */
2276 proto_name = "RFC 4121 krb5 gss mech";
2277 switch (etype) {
2278 case AES256_CTS_HMAC_SHA1_96:
2279 case AES128_CTS_HMAC_SHA1_96:
2280 return 1;
2281 default:
2282 break;
2283 }
2284 break;
2285 default:
2286 proto_name = "Unknown krb5 gss mech";
2287 break;
2288 }
2289 printf("%s: Non supported encryption %s (%d) type for protocol %s (%d)\n",
2290 __func__, etype_name(etype), etype, proto_name, proto);
2291 return 0;
2292 }
2293
2294 /*
2295 * Kerberos gss mech entry points
2296 */
2297 uint32_t
2298 gss_krb5_get_mic(uint32_t *minor, /* minor_status */
2299 gss_ctx_id_t ctx, /* context_handle */
2300 gss_qop_t qop, /* qop_req */
2301 gss_buffer_t mbp, /* message buffer */
2302 gss_buffer_t mic /* message_token */)
2303 {
2304 uint32_t minor_stat = 0;
2305
2306 if (minor == NULL) {
2307 minor = &minor_stat;
2308 }
2309 *minor = 0;
2310
2311 /* Validate context */
2312 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2313 return GSS_S_NO_CONTEXT;
2314 }
2315
2316 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2317 *minor = ENOTSUP;
2318 return GSS_S_FAILURE;
2319 }
2320
2321 switch (ctx->gss_lucid_ctx.key_data.proto) {
2322 case 0:
2323 /* RFC 1964 DES3 case */
2324 return gss_krb5_3des_get_mic(minor, ctx, qop, mbp, mic);
2325 case 1:
2326 /* RFC 4121 CFX case */
2327 return gss_krb5_cfx_get_mic(minor, ctx, qop, mbp, mic);
2328 }
2329
2330 return GSS_S_COMPLETE;
2331 }
2332
2333 uint32_t
2334 gss_krb5_verify_mic(uint32_t *minor, /* minor_status */
2335 gss_ctx_id_t ctx, /* context_handle */
2336 gss_buffer_t mbp, /* message_buffer */
2337 gss_buffer_t mic, /* message_token */
2338 gss_qop_t *qop /* qop_state */)
2339 {
2340 uint32_t minor_stat = 0;
2341 gss_qop_t qop_val = GSS_C_QOP_DEFAULT;
2342
2343 if (minor == NULL) {
2344 minor = &minor_stat;
2345 }
2346 if (qop == NULL) {
2347 qop = &qop_val;
2348 }
2349
2350 *minor = 0;
2351
2352 /* Validate context */
2353 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2354 return GSS_S_NO_CONTEXT;
2355 }
2356
2357 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2358 *minor = ENOTSUP;
2359 return GSS_S_FAILURE;
2360 }
2361
2362 switch (ctx->gss_lucid_ctx.key_data.proto) {
2363 case 0:
2364 /* RFC 1964 DES3 case */
2365 return gss_krb5_3des_verify_mic(minor, ctx, mbp, mic, qop);
2366 case 1:
2367 /* RFC 4121 CFX case */
2368 return gss_krb5_cfx_verify_mic(minor, ctx, mbp, mic, qop);
2369 }
2370 return GSS_S_COMPLETE;
2371 }
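/*
 * Usage sketch for the get_mic/verify_mic entry points (illustrative only;
 * ctx is assumed to come from gss_krb5_make_context() and msgbuf/msglen are
 * caller-owned):
 *
 *     uint32_t major, minor;
 *     gss_buffer_desc msg = { .length = msglen, .value = msgbuf };
 *     gss_buffer_desc mic = { .length = 0, .value = NULL };
 *
 *     major = gss_krb5_get_mic(&minor, ctx, GSS_C_QOP_DEFAULT, &msg, &mic);
 *     ...
 *     major = gss_krb5_verify_mic(&minor, ctx, &msg, &mic, NULL);
 *     // GSS_S_COMPLETE on a matching checksum, GSS_S_BAD_SIG otherwise
 *
 * In the DES3 path the mic token is MALLOC'd with M_TEMP by
 * gss_krb5_3des_token_put(), so the caller is expected to free mic.value
 * when it is done with the token.
 */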
2372
2373 uint32_t
2374 gss_krb5_get_mic_mbuf(uint32_t *minor, /* minor_status */
2375 gss_ctx_id_t ctx, /* context_handle */
2376 gss_qop_t qop, /* qop_req */
2377 mbuf_t mbp, /* message mbuf */
2378 size_t offset, /* offset */
2379 size_t len, /* length */
2380 gss_buffer_t mic /* message_token */)
2381 {
2382 uint32_t minor_stat = 0;
2383
2384 if (minor == NULL) {
2385 minor = &minor_stat;
2386 }
2387 *minor = 0;
2388
2389 if (len == 0) {
2390 len = ~(size_t)0;
2391 }
2392
2393 /* Validate context */
2394 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2395 return GSS_S_NO_CONTEXT;
2396 }
2397
2398 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2399 *minor = ENOTSUP;
2400 return GSS_S_FAILURE;
2401 }
2402
2403 switch (ctx->gss_lucid_ctx.key_data.proto) {
2404 case 0:
2405 /* RFC 1964 DES3 case */
2406 return gss_krb5_3des_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic);
2407 case 1:
2408 /* RFC 4121 CFX case */
2409 return gss_krb5_cfx_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic);
2410 }
2411
2412 return GSS_S_COMPLETE;
2413 }
2414
2415 uint32_t
2416 gss_krb5_verify_mic_mbuf(uint32_t *minor, /* minor_status */
2417 gss_ctx_id_t ctx, /* context_handle */
2418 mbuf_t mbp, /* message_buffer */
2419 size_t offset, /* offset */
2420 size_t len, /* length */
2421 gss_buffer_t mic, /* message_token */
2422 gss_qop_t *qop /* qop_state */)
2423 {
2424 uint32_t minor_stat = 0;
2425 gss_qop_t qop_val = GSS_C_QOP_DEFAULT;
2426
2427 if (minor == NULL) {
2428 minor = &minor_stat;
2429 }
2430 if (qop == NULL) {
2431 qop = &qop_val;
2432 }
2433
2434 *minor = 0;
2435
2436 if (len == 0) {
2437 len = ~(size_t)0;
2438 }
2439
2440 /* Validate context */
2441 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2442 return GSS_S_NO_CONTEXT;
2443 }
2444
2445 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2446 *minor = ENOTSUP;
2447 return GSS_S_FAILURE;
2448 }
2449
2450 switch (ctx->gss_lucid_ctx.key_data.proto) {
2451 case 0:
2452 /* RFC 1964 DES3 case */
2453 return gss_krb5_3des_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop);
2454 case 1:
2455 /* RFC 4121 CFX case */
2456 return gss_krb5_cfx_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop);
2457 }
2458
2459 return GSS_S_COMPLETE;
2460 }
2461
2462 uint32_t
2463 gss_krb5_wrap_mbuf(uint32_t *minor, /* minor_status */
2464 gss_ctx_id_t ctx, /* context_handle */
2465 int conf_flag, /* conf_req_flag */
2466 gss_qop_t qop, /* qop_req */
2467 mbuf_t *mbp, /* input/output message_buffer */
2468 size_t offset, /* offset */
2469 size_t len, /* length */
2470 int *conf_state /* conf state */)
2471 {
2472 uint32_t major = GSS_S_FAILURE, minor_stat = 0;
2473 mbuf_t smb, tmb;
2474 int conf_val = 0;
2475
2476 if (minor == NULL) {
2477 minor = &minor_stat;
2478 }
2479 if (conf_state == NULL) {
2480 conf_state = &conf_val;
2481 }
2482
2483 *minor = 0;
2484
2485 /* Validate context */
2486 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2487 return GSS_S_NO_CONTEXT;
2488 }
2489
2490 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2491 *minor = ENOTSUP;
2492 return GSS_S_FAILURE;
2493 }
2494
2495 gss_normalize_mbuf(*mbp, offset, &len, &smb, &tmb, 0);
2496
2497 switch (ctx->gss_lucid_ctx.key_data.proto) {
2498 case 0:
2499 /* RFC 1964 DES3 case */
2500 major = gss_krb5_3des_wrap_mbuf(minor, ctx, conf_flag, qop, &smb, len, conf_state);
2501 break;
2502 case 1:
2503 /* RFC 4121 CFX case */
2504 major = gss_krb5_cfx_wrap_mbuf(minor, ctx, conf_flag, qop, &smb, len, conf_state);
2505 break;
2506 }
2507
2508 if (offset) {
2509 gss_join_mbuf(*mbp, smb, tmb);
2510 } else {
2511 *mbp = smb;
2512 gss_join_mbuf(smb, tmb, NULL);
2513 }
2514
2515 return major;
2516 }
2517
2518 uint32_t
2519 gss_krb5_unwrap_mbuf(uint32_t * minor, /* minor_status */
2520 gss_ctx_id_t ctx, /* context_handle */
2521 mbuf_t *mbp, /* input/output message_buffer */
2522 size_t offset, /* offset */
2523 size_t len, /* length */
2524 int *conf_flag, /* conf_state */
2525 gss_qop_t *qop /* qop state */)
2526 {
2527 uint32_t major = GSS_S_FAILURE, minor_stat = 0;
2528 gss_qop_t qop_val = GSS_C_QOP_DEFAULT;
2529 int conf_val = 0;
2530 mbuf_t smb, tmb;
2531
2532 if (minor == NULL) {
2533 minor = &minor_stat;
2534 }
2535 if (qop == NULL) {
2536 qop = &qop_val;
2537 }
2538 if (conf_flag == NULL) {
2539 conf_flag = &conf_val;
2540 }
2541
2542 /* Validate context */
2543 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) {
2544 return GSS_S_NO_CONTEXT;
2545 }
2546
2547 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2548 *minor = ENOTSUP;
2549 return GSS_S_FAILURE;
2550 }
2551
2552 gss_normalize_mbuf(*mbp, offset, &len, &smb, &tmb, 0);
2553
2554 switch (ctx->gss_lucid_ctx.key_data.proto) {
2555 case 0:
2556 /* RFC 1964 DES3 case */
2557 major = gss_krb5_3des_unwrap_mbuf(minor, ctx, &smb, len, conf_flag, qop);
2558 break;
2559 case 1:
2560 /* RFC 4121 CFX case */
2561 major = gss_krb5_cfx_unwrap_mbuf(minor, ctx, &smb, len, conf_flag, qop);
2562 break;
2563 }
2564
2565 if (offset) {
2566 gss_join_mbuf(*mbp, smb, tmb);
2567 } else {
2568 *mbp = smb;
2569 gss_join_mbuf(smb, tmb, NULL);
2570 }
2571
2572 return major;
2573 }
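/*
 * Round-trip sketch for the wrap/unwrap entry points (illustrative only;
 * error handling omitted, m is an mbuf chain holding len bytes of payload
 * at offset 0):
 *
 *     uint32_t major, minor;
 *     int conf = 0;
 *     gss_qop_t qop = GSS_C_QOP_DEFAULT;
 *
 *     major = gss_krb5_wrap_mbuf(&minor, ctx, 1, GSS_C_QOP_DEFAULT, &m, 0, len, &conf);
 *     ... m now carries token + confounder + (encrypted) payload + pad ...
 *     major = gss_krb5_unwrap_mbuf(&minor, ctx, &m, 0, wrapped_len, &conf, &qop);
 *     ... on GSS_S_COMPLETE, m is the original payload again ...
 *
 * Because the DES3 wrap path prepends a confounder and token and appends
 * pad bytes, the length handed to unwrap (wrapped_len above) is the length
 * of the wrapped message, not the original len.
 */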
2574
2575 #include <nfs/xdr_subs.h>
2576
2577 static int
2578 xdr_lucid_context(void *data, uint32_t length, lucid_context_t lctx)
2579 {
2580 struct xdrbuf xb;
2581 int error = 0;
2582 uint32_t keylen = 0;
2583
2584 xb_init_buffer(&xb, data, length);
2585 xb_get_32(error, &xb, lctx->vers);
2586 if (!error && lctx->vers != 1) {
2587 error = EINVAL;
2588 printf("%s: invalid version %d\n", __func__, (int)lctx->vers);
2589 goto out;
2590 }
2591 xb_get_32(error, &xb, lctx->initiate);
2592 if (error) {
2593 printf("%s: Could not decode initiate\n", __func__);
2594 goto out;
2595 }
2596 xb_get_32(error, &xb, lctx->endtime);
2597 if (error) {
2598 printf("%s: Could not decode endtime\n", __func__);
2599 goto out;
2600 }
2601 xb_get_64(error, &xb, lctx->send_seq);
2602 if (error) {
2603 printf("%s: Could not decode send_seq\n", __func__);
2604 goto out;
2605 }
2606 xb_get_64(error, &xb, lctx->recv_seq);
2607 if (error) {
2608 printf("%s: Could not decode recv_seq\n", __func__);
2609 goto out;
2610 }
2611 xb_get_32(error, &xb, lctx->key_data.proto);
2612 if (error) {
2613 printf("%s: Could not decode mech protocol\n", __func__);
2614 goto out;
2615 }
2616 switch (lctx->key_data.proto) {
2617 case 0:
2618 xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_1964.sign_alg);
2619 xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_1964.seal_alg);
2620 if (error) {
2621 printf("%s: Could not decode rfc1964 sign and seal\n", __func__);
2622 }
2623 break;
2624 case 1:
2625 xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey);
2626 if (error) {
2627 printf("%s: Could not decode rfc4121 acceptor_subkey", __func__);
2628 }
2629 break;
2630 default:
2631 printf("%s: Invalid mech protocol %d\n", __func__, (int)lctx->key_data.proto);
2632 error = EINVAL;
2633 }
2634 if (error) {
2635 goto out;
2636 }
2637 xb_get_32(error, &xb, lctx->ctx_key.etype);
2638 if (error) {
2639 printf("%s: Could not decode key enctype\n", __func__);
2640 goto out;
2641 }
2642 switch (lctx->ctx_key.etype) {
2643 case DES3_CBC_SHA1_KD:
2644 keylen = 24;
2645 break;
2646 case AES128_CTS_HMAC_SHA1_96:
2647 keylen = 16;
2648 break;
2649 case AES256_CTS_HMAC_SHA1_96:
2650 keylen = 32;
2651 break;
2652 default:
2653 error = ENOTSUP;
2654 goto out;
2655 }
2656 xb_get_32(error, &xb, lctx->ctx_key.key.key_len);
2657 if (error) {
2658 printf("%s: could not decode key length\n", __func__);
2659 goto out;
2660 }
2661 if (lctx->ctx_key.key.key_len != keylen) {
2662 error = EINVAL;
2663 printf("%s: etype = %d keylen = %d expected keylen = %d\n", __func__,
2664 lctx->ctx_key.etype, lctx->ctx_key.key.key_len, keylen);
2665 goto out;
2666 }
2667
2668 lctx->ctx_key.key.key_val = xb_malloc(keylen);
2669 if (lctx->ctx_key.key.key_val == NULL) {
2670 printf("%s: could not get memory for key\n", __func__);
2671 error = ENOMEM;
2672 goto out;
2673 }
2674 error = xb_get_bytes(&xb, (char *)lctx->ctx_key.key.key_val, keylen, 1);
2675 if (error) {
2676 printf("%s: could get key value\n", __func__);
2677 xb_free(lctx->ctx_key.key.key_val);
2678 }
2679 out:
2680 return error;
2681 }
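/*
 * Wire layout consumed by xdr_lucid_context() above (reconstructed from the
 * xb_get_* calls; all fields are XDR encoded):
 *
 *     uint32  vers                  must be 1
 *     uint32  initiate
 *     uint32  endtime
 *     uint64  send_seq
 *     uint64  recv_seq
 *     uint32  key_data.proto        0 = RFC 1964, 1 = RFC 4121
 *       proto 0: uint32 sign_alg, uint32 seal_alg
 *       proto 1: uint32 acceptor_subkey
 *     uint32  ctx_key.etype
 *     opaque  ctx_key.key<>         length must match the etype
 *                                   (24 for des3, 16 for aes128, 32 for aes256)
 */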
2682
2683 gss_ctx_id_t
2684 gss_krb5_make_context(void *data, uint32_t datalen)
2685 {
2686 gss_ctx_id_t ctx;
2687
2688 if (!corecrypto_available()) {
2689 return NULL;
2690 }
2691
2692 gss_krb5_mech_init();
2693 MALLOC(ctx, gss_ctx_id_t, sizeof(struct gss_ctx_id_desc), M_TEMP, M_WAITOK | M_ZERO);
2694 if (xdr_lucid_context(data, datalen, &ctx->gss_lucid_ctx) ||
2695 !supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_lucid_ctx.ctx_key.etype)) {
2696 FREE(ctx, M_TEMP);
2697 FREE(data, M_TEMP);
2698 return NULL;
2699 }
2700
2701 /* Set up crypto context */
2702 gss_crypto_ctx_init(&ctx->gss_cryptor, &ctx->gss_lucid_ctx);
2703 FREE(data, M_TEMP);
2704
2705 return ctx;
2706 }
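/*
 * Lifecycle sketch (illustrative): callers hand this mech an exported
 * lucid context blob and later tear the context down.
 *
 *     gss_ctx_id_t gctx = gss_krb5_make_context(blob, bloblen);
 *     if (gctx != NULL) {
 *         ... gss_krb5_get_mic() / gss_krb5_wrap_mbuf() / etc. ...
 *         gss_krb5_destroy_context(gctx);
 *     }
 *
 * Note that on both the success path and the XDR/etype failure path
 * gss_krb5_make_context() frees the blob itself, so the caller must not
 * reuse or free it afterwards.
 */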
2707
2708 void
2709 gss_krb5_destroy_context(gss_ctx_id_t ctx)
2710 {
2711 if (ctx == NULL) {
2712 return;
2713 }
2714 gss_crypto_ctx_free(&ctx->gss_cryptor);
2715 FREE(ctx->gss_lucid_ctx.ctx_key.key.key_val, M_TEMP);
2716 cc_clear(sizeof(ctx->gss_lucid_ctx), &ctx->gss_lucid_ctx);
2717 FREE(ctx, M_TEMP);
2718 }