1 /*
2 * Copyright (c) 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (c) 1999 Kungliga Tekniska Högskolan
31 * (Royal Institute of Technology, Stockholm, Sweden).
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 *
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 *
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 *
45 * 3. Neither the name of KTH nor the names of its contributors may be
46 * used to endorse or promote products derived from this software without
47 * specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY KTH AND ITS CONTRIBUTORS ``AS IS'' AND ANY
50 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL KTH OR ITS CONTRIBUTORS BE
53 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
56 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
57 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
58 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
59 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 */
61
62 #include <stdint.h>
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/malloc.h>
67 #include <sys/kpi_mbuf.h>
68 #include <sys/random.h>
69 #include <mach_assert.h>
70 #include <kern/assert.h>
71 #include <libkern/OSAtomic.h>
72 #include "gss_krb5_mech.h"
73
74 lck_grp_t *gss_krb5_mech_grp;
75
76 typedef struct crypt_walker_ctx {
77 size_t length;
78 const struct ccmode_cbc *ccmode;
79 cccbc_ctx *crypt_ctx;
80 cccbc_iv *iv;
81 } *crypt_walker_ctx_t;
82
83 typedef struct hmac_walker_ctx {
84 const struct ccdigest_info *di;
85 cchmac_ctx_t hmac_ctx;
86 } *hmac_walker_ctx_t;
87
88 typedef size_t (*ccpad_func)(const struct ccmode_cbc *, cccbc_ctx *, cccbc_iv *,
89 size_t nbytes, const void *, void *);
90
91 static int krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size);
92
93 size_t gss_mbuf_len(mbuf_t, size_t);
94 errno_t gss_prepend_mbuf(mbuf_t *, uint8_t *, size_t);
95 errno_t gss_append_mbuf(mbuf_t, uint8_t *, size_t);
96 errno_t gss_strip_mbuf(mbuf_t, ssize_t);
97 int mbuf_walk(mbuf_t, size_t, size_t, size_t, int (*)(void *, uint8_t *, uint32_t), void *);
98
99 void do_crypt_init(crypt_walker_ctx_t, int, crypto_ctx_t, cccbc_ctx *);
100 int do_crypt(void *, uint8_t *, uint32_t);
101 void do_hmac_init(hmac_walker_ctx_t, crypto_ctx_t, void *);
102 int do_hmac(void *, uint8_t *, uint32_t);
103
104 void krb5_make_usage(uint32_t, uint8_t, uint8_t [KRB5_USAGE_LEN]);
105 void krb5_key_derivation(crypto_ctx_t, const void *, size_t, void **, size_t);
106 void cc_key_schedule_create(crypto_ctx_t);
107 void gss_crypto_ctx_free(crypto_ctx_t);
108 int gss_crypto_ctx_init(struct crypto_ctx *, lucid_context_t);
109
110 errno_t krb5_crypt_mbuf(crypto_ctx_t, mbuf_t *, uint32_t, int, cccbc_ctx *);
111 int krb5_mic(crypto_ctx_t, gss_buffer_t, gss_buffer_t, gss_buffer_t, uint8_t *, int *, int, int);
112 int krb5_mic_mbuf(crypto_ctx_t, gss_buffer_t, mbuf_t, uint32_t, uint32_t, gss_buffer_t, uint8_t *, int *, int, int);
113
114 uint32_t gss_krb5_cfx_get_mic(uint32_t *, gss_ctx_id_t, gss_qop_t, gss_buffer_t, gss_buffer_t);
115 uint32_t gss_krb5_cfx_verify_mic(uint32_t *, gss_ctx_id_t, gss_buffer_t, gss_buffer_t, gss_qop_t *);
116 uint32_t gss_krb5_cfx_get_mic_mbuf(uint32_t *, gss_ctx_id_t, gss_qop_t, mbuf_t, size_t, size_t, gss_buffer_t);
117 uint32_t gss_krb5_cfx_verify_mic_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t, size_t, size_t, gss_buffer_t, gss_qop_t *);
118 errno_t krb5_cfx_crypt_mbuf(crypto_ctx_t, mbuf_t *, size_t *, int, int);
119 uint32_t gss_krb5_cfx_wrap_mbuf(uint32_t *, gss_ctx_id_t, int, gss_qop_t, mbuf_t *, size_t, int *);
120 uint32_t gss_krb5_cfx_unwrap_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t *, size_t, int *, gss_qop_t *);
121
122 int gss_krb5_mech_is_initialized(void);
123 void gss_krb5_mech_init(void);
124
125 /* Debugging routines */
126 void
127 printmbuf(const char *str, mbuf_t mb, uint32_t offset, uint32_t len)
128 {
129 size_t i;
130 int cout = 1;
131
132 len = len ? len : ~0;
133 printf("%s mbuf = %p offset = %d len = %d:\n", str ? str : "mbuf", mb, offset, len);
134 for (; mb && len; mb = mbuf_next(mb)) {
135 if (offset >= mbuf_len(mb)) {
136 offset -= mbuf_len(mb);
137 continue;
138 }
139 for(i = offset; len && i < mbuf_len(mb); i++) {
140 const char *s = (cout % 8) ? " " : (cout % 16) ? " " : "\n";
141 printf("%02x%s", ((uint8_t *)mbuf_data(mb))[i], s);
142 len--;
143 cout++;
144 }
145 offset = 0;
146 }
147 if ((cout-1) % 16)
148 printf("\n");
149 printf("Count chars %d\n", cout - 1);
150 }
151
152 void
153 printgbuf(const char *str, gss_buffer_t buf)
154 {
155 size_t i;
156 size_t len = buf->length > 128 ? 128 : buf->length;
157
158 printf("%s: len = %d value = %p\n", str ? str : "buffer", (int)buf->length, buf->value);
159 for (i = 0; i < len; i++) {
160 const char *s = ((i + 1) % 8) ? " " : ((i + 1) % 16) ? " " : "\n";
161 printf("%02x%s", ((uint8_t *)buf->value)[i], s);
162 }
163 if (i % 16)
164 printf("\n");
165 }
166
167 /*
168 * Initialize the data structures for the gss kerberos mech.
169 */
170 #define GSS_KRB5_NOT_INITIALIZED 0
171 #define GSS_KRB5_INITIALIZING 1
172 #define GSS_KRB5_INITIALIZED 2
173 static volatile uint32_t gss_krb5_mech_initted = GSS_KRB5_NOT_INITIALIZED;
174
175 int
176 gss_krb5_mech_is_initialized(void)
177 {
178 	return (gss_krb5_mech_initted == GSS_KRB5_INITIALIZED);
179 }
180
181 void
182 gss_krb5_mech_init(void)
183 {
184 extern void IOSleep(int);
185
186 /* Once initted always initted */
187 if (gss_krb5_mech_initted == GSS_KRB5_INITIALIZED)
188 return;
189
190 /* make sure we init only once */
191 if (!OSCompareAndSwap(GSS_KRB5_NOT_INITIALIZED, GSS_KRB5_INITIALIZING, &gss_krb5_mech_initted)) {
192 /* wait until initialization is complete */
193 while (!gss_krb5_mech_is_initialized())
194 IOSleep(10);
195 return;
196 }
197 gss_krb5_mech_grp = lck_grp_alloc_init("gss_krb5_mech", LCK_GRP_ATTR_NULL);
198 gss_krb5_mech_initted = GSS_KRB5_INITIALIZED;
199 }
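/*
 * Illustrative sketch (not part of the original source): the intended
 * call pattern for users of this mech. gss_krb5_mech_initted is a tiny
 * state machine, NOT_INITIALIZED -> INITIALIZING -> INITIALIZED; the
 * OSCompareAndSwap above guarantees exactly one thread performs the setup
 * while any racing thread spins in IOSleep() until the winner publishes
 * GSS_KRB5_INITIALIZED.
 *
 *	if (!gss_krb5_mech_is_initialized())
 *		gss_krb5_mech_init();	/* safe to call from multiple threads */
 */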
200
201 uint32_t
202 gss_release_buffer(uint32_t *minor, gss_buffer_t buf)
203 {
204 if (minor)
205 *minor = 0;
206 if (buf->value)
207 FREE(buf->value, M_TEMP);
208 buf->value = NULL;
209 buf->length = 0;
210 return (GSS_S_COMPLETE);
211 }
212
213 /*
214 * GSS mbuf routines
215 */
216
217 size_t
218 gss_mbuf_len(mbuf_t mb, size_t offset)
219 {
220 size_t len;
221
222 for (len = 0; mb; mb = mbuf_next(mb))
223 len += mbuf_len(mb);
224 return ((offset > len) ? 0 : len - offset);
225 }
226
227 /*
228  * Split an mbuf in a chain into two mbufs such that the original mbuf
229  * keeps the first offset bytes and the new mbuf holds the remaining
230  * bytes plus the rest of the chain. If join is set the two pieces are
231  * linked back together into a single chain. If offset is zero or equal
232  * to mbuf_len(mb) we don't create a new mbuf; we are already at an mbuf
233  * boundary. Return in *nmb the mbuf that starts at the offset.
234  */
235 static errno_t
236 split_one_mbuf(mbuf_t mb, size_t offset, mbuf_t *nmb, int join)
237 {
238 errno_t error;
239
240 *nmb = mb;
241 /* We don't have an mbuf or we're already on an mbuf boundary */
242 if (mb == NULL || offset == 0)
243 return (0);
244
245 /* If the mbuf length is offset then the next mbuf is the one we want */
246 if (mbuf_len(mb) == offset) {
247 *nmb = mbuf_next(mb);
248 if (!join)
249 mbuf_setnext(mb, NULL);
250 return (0);
251 }
252
253 if (offset > mbuf_len(mb))
254 return (EINVAL);
255
256 error = mbuf_split(mb, offset, MBUF_WAITOK, nmb);
257 if (error)
258 return (error);
259
260 if (mbuf_flags(*nmb) & MBUF_PKTHDR) {
261 /* We don't want to copy the pkthdr. mbuf_split does that. */
262 error = mbuf_setflags_mask(*nmb, ~MBUF_PKTHDR, MBUF_PKTHDR);
263 }
264
265 if (join)
266 /* Join the chain again */
267 mbuf_setnext(mb, *nmb);
268
269 return (0);
270 }
271
272 /*
273  * Given an mbuf chain, an offset, and a length, rearrange the chain so
274  * that offset and offset + *subchain_length fall on mbuf boundaries.
275  * If *subchain_length is greater than the length of the chain after
276  * offset, return the available length in *subchain_length. The mbuf
277  * sub chain starting at offset is returned in *subchain. If an error
278  * occurs return the corresponding errno. Note if there are fewer than
279  * offset bytes then *subchain will be set to NULL and *subchain_length
280  * will be set to zero. If *subchain_length is 0 on entry, set it to
281  * the length of the chain starting at offset. The join parameter
282  * indicates whether the mbuf chain is joined back into one chain, just
283  * rearranged so that offset and *subchain_length are on mbuf boundaries.
284  */
285
286 errno_t
287 gss_normalize_mbuf(mbuf_t chain, size_t offset, size_t *subchain_length, mbuf_t *subchain, mbuf_t *tail, int join)
288 {
289 size_t length = *subchain_length ? *subchain_length : ~0;
290 size_t len;
291 mbuf_t mb, nmb;
292 errno_t error;
293
294 if (tail == NULL)
295 tail = &nmb;
296 *tail = NULL;
297 *subchain = NULL;
298
299 for (len = offset, mb = chain; mb && len > mbuf_len(mb); mb = mbuf_next(mb))
300 len -= mbuf_len(mb);
301
302 /* if we don't have offset bytes just return */
303 if (mb == NULL)
304 return (0);
305
306 error = split_one_mbuf(mb, len, subchain, join);
307 if (error)
308 return (error);
309
310 assert(subchain != NULL && *subchain != NULL);
311 assert(offset == 0 ? mb == *subchain : 1);
312
313 len = gss_mbuf_len(*subchain, 0);
314 length = (length > len) ? len : length;
315 *subchain_length = length;
316
317 for (len = length, mb = *subchain; mb && len > mbuf_len(mb); mb = mbuf_next(mb))
318 len -= mbuf_len(mb);
319
320 error = split_one_mbuf(mb, len, tail, join);
321
322 return (error);
323 }
324
325 mbuf_t
326 gss_join_mbuf(mbuf_t head, mbuf_t body, mbuf_t tail)
327 {
328 mbuf_t mb;
329
330 for (mb = head; mb && mbuf_next(mb); mb = mbuf_next(mb))
331 ;
332 if (mb)
333 mbuf_setnext(mb, body);
334 for (mb = body; mb && mbuf_next(mb); mb = mbuf_next(mb))
335 ;
336 if (mb)
337 mbuf_setnext(mb, tail);
338 mb = head ? head : (body ? body : tail);
339 return (mb);
340 }
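/*
 * Illustrative sketch (not part of the original source): how
 * gss_normalize_mbuf() and gss_join_mbuf() are meant to be used together to
 * operate on a byte range of a chain in place (compare
 * gss_krb5_cfx_unwrap_rrc_mbuf() below). The names chain, body, tail and the
 * offsets are hypothetical.
 *
 *	size_t blen = 128;		/* want bytes [16, 16 + 128) */
 *	mbuf_t body, tail;
 *
 *	error = gss_normalize_mbuf(chain, 16, &blen, &body, &tail, 0);
 *	if (error == 0 && body) {
 *		/*
 *		 * body now starts and ends on mbuf boundaries; blen may have
 *		 * been trimmed to the bytes actually available after offset 16.
 *		 */
 *		... work on body ...
 *		gss_join_mbuf(chain, body, tail);	/* reassemble the chain */
 *	}
 */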
341
342 /*
343 * Prepend size bytes to the mbuf chain.
344 */
345 errno_t
346 gss_prepend_mbuf(mbuf_t *chain, uint8_t *bytes, size_t size)
347 {
348 uint8_t *data = mbuf_data(*chain);
349 size_t leading = mbuf_leadingspace(*chain);
350 size_t trailing = mbuf_trailingspace(*chain);
351 size_t mlen = mbuf_len(*chain);
352 errno_t error;
353
354 if (size > leading && size <= leading + trailing) {
355 data = memmove(data + size - leading, data, mlen);
356 mbuf_setdata(*chain, data, mlen);
357 }
358
359 error = mbuf_prepend(chain, size, MBUF_WAITOK);
360 if (error)
361 return (error);
362 data = mbuf_data(*chain);
363 memcpy(data, bytes, size);
364
365 return (0);
366 }
367
368 errno_t
369 gss_append_mbuf(mbuf_t chain, uint8_t *bytes, size_t size)
370 {
371 size_t len = 0;
372 mbuf_t mb;
373
374 if (chain == NULL)
375 return (EINVAL);
376
377 for (mb = chain; mb; mb = mbuf_next(mb))
378 len += mbuf_len(mb);
379
380 return (mbuf_copyback(chain, len, size, bytes, MBUF_WAITOK));
381 }
382
383 errno_t
384 gss_strip_mbuf(mbuf_t chain, ssize_t size)
385 {
386 if (chain == NULL)
387 return (EINVAL);
388
389 mbuf_adj(chain, size);
390
391 return (0);
392 }
393
394
395 /*
396 * Kerberos mech generic crypto support for mbufs
397 */
398
399 /*
400  * Walk the mbuf chain after the given offset, calling the passed in crypto function
401  * on len bytes. The length, len, should be a multiple of the blocksize and
402  * there should be at least len bytes available after the offset in the mbuf chain.
403  * Any padding should be done before calling this routine.
404  */
405 int
406 mbuf_walk(mbuf_t mbp, size_t offset, size_t len, size_t blocksize, int (*crypto_fn)(void *, uint8_t *data, uint32_t length), void *ctx)
407 {
408 mbuf_t mb;
409 size_t mlen, residue;
410 uint8_t *ptr;
411 int error = 0;
412
413 /* Move to the start of the chain */
414 for (mb = mbp; mb && len > 0; mb = mbuf_next(mb)) {
415 ptr = mbuf_data(mb);
416 mlen = mbuf_len(mb);
417 if (offset >= mlen) {
418 /* Offset not yet reached */
419 offset -= mlen;
420 continue;
421 }
422 /* Found starting point in chain */
423 ptr += offset;
424 mlen -= offset;
425 offset = 0;
426
427 /*
428  * Handle the data in this mbuf. If the length left
429  * to walk is less than the data in this mbuf, limit
430  * the amount we process here to the length left.
431  */
432 mlen = mlen < len ? mlen : len;
433 /* Figure out how much is a multiple of blocksize */
434 residue = mlen % blocksize;
435 /* And adjust the mbuf length to be the largest multiple of the blocksize */
436 mlen -= residue;
437 /* run our hash/encrypt/decrypt function */
438 if (mlen > 0) {
439 error = crypto_fn(ctx, ptr, mlen);
440 if (error)
441 break;
442 ptr += mlen;
443 len -= mlen;
444 }
445 /*
446  * If we have a residue then to get a full block for our crypto
447  * function, we need to copy the residue into a blocksize-sized
448  * buffer and use the next mbuf to get the rest of the data for
449  * the block. N.B. We generally assume that from the offset
450  * passed in, the total length, len, is a multiple of the
451  * blocksize and that there are at least len bytes in the chain
452  * from the offset. We also assume there is at least (blocksize
453  * - residue) bytes of data in any next mbuf for residue > 0. If
454  * not we attempt to pull up bytes from down the chain.
455  */
456 if (residue) {
457 mbuf_t nmb = mbuf_next(mb);
458 uint8_t *nptr = NULL, block[blocksize];
459
460 assert(nmb);
461 len -= residue;
462 offset = blocksize - residue;
463 if (len < offset) {
464 offset = len;
465 /*
466 * We don't have enough bytes so zero the block
467 * so that any trailing bytes will be zero.
468 */
469 cc_clear(sizeof(block), block);
470 }
471 memcpy(block, ptr, residue);
472 if (len && nmb) {
473 mlen = mbuf_len(nmb);
474 if (mlen < offset) {
475 error = mbuf_pullup(&nmb, offset - mlen);
476 if (error) {
477 mbuf_setnext(mb, NULL);
478 return (error);
479 }
480 }
481 nptr = mbuf_data(nmb);
482 memcpy(block + residue, nptr, offset);
483 }
484 len -= offset;
485 error = crypto_fn(ctx, block, sizeof(block));
486 if (error)
487 break;
488 memcpy(ptr, block, residue);
489 if (nptr)
490 memcpy(nptr, block + residue, offset);
491 }
492 }
493
494 return (error);
495 }
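/*
 * Illustrative sketch (not part of the original source): the callback contract
 * for mbuf_walk(). The walker hands the callback contiguous runs whose length
 * is always a multiple of blocksize (residues straddling mbufs are gathered
 * into a temporary block first), and a non-zero return aborts the walk. The
 * callback below is hypothetical.
 *
 *	static int
 *	count_bytes(void *arg, uint8_t *data, uint32_t len)
 *	{
 *		*(size_t *)arg += len;
 *		return (0);
 *	}
 *
 *	size_t total = 0;
 *	error = mbuf_walk(mbp, offset, len, 1, count_bytes, &total);
 *	// with blocksize 1 there is never a residue and total == len on success
 */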
496
497 void
498 do_crypt_init(crypt_walker_ctx_t wctx, int encrypt, crypto_ctx_t cctx, cccbc_ctx *ks)
499 {
500 wctx->ccmode = encrypt ? cctx->enc_mode : cctx->dec_mode;
501
502 wctx->crypt_ctx = ks;
503 MALLOC(wctx->iv, cccbc_iv *, wctx->ccmode->block_size, M_TEMP, M_WAITOK|M_ZERO);
504 cccbc_set_iv(wctx->ccmode, wctx->iv, NULL);
505 }
506
507 int
508 do_crypt(void *walker, uint8_t *data, uint32_t len)
509 {
510 struct crypt_walker_ctx *wctx = (crypt_walker_ctx_t)walker;
511 uint32_t nblocks;
512
513 nblocks = len / wctx->ccmode->block_size;
514 assert(len % wctx->ccmode->block_size == 0);
515 cccbc_update(wctx->ccmode, wctx->crypt_ctx, wctx->iv, nblocks, data, data);
516 wctx->length += len;
517
518 return (0);
519 }
520
521 void
522 do_hmac_init(hmac_walker_ctx_t wctx, crypto_ctx_t cctx, void *key)
523 {
524 size_t alloc_size = cc_ctx_n(struct cchmac_ctx, cchmac_di_size(cctx->di)) * sizeof(struct cchmac_ctx);
525
526 wctx->di = cctx->di;
527 MALLOC(wctx->hmac_ctx.hdr, struct cchmac_ctx *, alloc_size, M_TEMP, M_WAITOK|M_ZERO);
528 cchmac_init(cctx->di, wctx->hmac_ctx, cctx->keylen, key);
529 }
530
531 int
532 do_hmac(void *walker, uint8_t *data, uint32_t len)
533 {
534 hmac_walker_ctx_t wctx = (hmac_walker_ctx_t)walker;
535
536 cchmac_update(wctx->di, wctx->hmac_ctx, len, data);
537
538 return (0);
539 }
540
541
542 int
543 krb5_mic(crypto_ctx_t ctx, gss_buffer_t header, gss_buffer_t bp, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse)
544 {
545 uint8_t digest[ctx->di->output_size];
546 cchmac_di_decl(ctx->di, hmac_ctx);
547 int kdx = (verify == NULL) ? (reverse ? GSS_RCV : GSS_SND) : (reverse ? GSS_SND : GSS_RCV);
548 void *key2use;
549
550 if (ikey) {
551 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
552 lck_mtx_lock(ctx->lock);
553 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
554 cc_key_schedule_create(ctx);
555 }
556 ctx->flags |= CRYPTO_KS_ALLOCED;
557 lck_mtx_unlock(ctx->lock);
558 }
559 key2use = ctx->ks.ikey[kdx];
560 } else {
561 key2use = ctx->ckey[kdx];
562 }
563
564 cchmac_init(ctx->di, hmac_ctx, ctx->keylen, key2use);
565
566 if (header) {
567 cchmac_update(ctx->di, hmac_ctx, header->length, header->value);
568 }
569
570 cchmac_update(ctx->di, hmac_ctx, bp->length, bp->value);
571
572 if (trailer) {
573 cchmac_update(ctx->di, hmac_ctx, trailer->length, trailer->value);
574 }
575
576 cchmac_final(ctx->di, hmac_ctx, digest);
577
578 if (verify) {
579 *verify = (memcmp(mic, digest, ctx->digest_size) == 0);
580 }
581 else
582 memcpy(mic, digest, ctx->digest_size);
583
584 return (0);
585 }
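/*
 * Usage note with an illustrative sketch (not part of the original source):
 * header and trailer are optional buffers that are HMACed before and after the
 * message body. RFC 4121 computes the checksum over data || token-header,
 * which is why gss_krb5_cfx_get_mic()/gss_krb5_cfx_verify_mic() below pass the
 * 16 byte token as the trailer argument and NULL as the header. Passing a
 * non-NULL verify pointer switches the routine from signing to verifying.
 *
 *	uint8_t mic[cctx->digest_size];
 *	int ok;
 *
 *	krb5_mic(cctx, NULL, &msg, &tok, mic, NULL, 0, 0);	// sign
 *	krb5_mic(cctx, NULL, &msg, &tok, mic, &ok, 0, 0);	// verify, sets ok
 */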
586
587 int
588 krb5_mic_mbuf(crypto_ctx_t ctx, gss_buffer_t header,
589 mbuf_t mbp, uint32_t offset, uint32_t len, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse)
590 {
591 struct hmac_walker_ctx wctx;
592 uint8_t digest[ctx->di->output_size];
593 int error;
594 int kdx = (verify == NULL) ? (reverse ? GSS_RCV : GSS_SND) : (reverse ? GSS_SND : GSS_RCV);
595 void *key2use;
596
597 if (ikey) {
598 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
599 lck_mtx_lock(ctx->lock);
600 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
601 cc_key_schedule_create(ctx);
602 }
603 ctx->flags |= CRYPTO_KS_ALLOCED;
604 lck_mtx_unlock(ctx->lock);
605 }
606 key2use = ctx->ks.ikey[kdx];
607 } else {
608 key2use = ctx->ckey[kdx];
609 }
610
611 do_hmac_init(&wctx, ctx, key2use);
612
613 if (header) {
614 cchmac_update(ctx->di, wctx.hmac_ctx, header->length, header->value);
615 }
616
617 error = mbuf_walk(mbp, offset, len, 1, do_hmac, &wctx);
618
619 if (error)
620 return (error);
621 if (trailer)
622 cchmac_update(ctx->di, wctx.hmac_ctx, trailer->length, trailer->value);
623
624 cchmac_final(ctx->di, wctx.hmac_ctx, digest);
625 FREE(wctx.hmac_ctx.hdr, M_TEMP);
626
627 if (verify) {
628 *verify = (memcmp(mic, digest, ctx->digest_size) == 0);
629 if (!*verify)
630 return (EBADRPC);
631 } else
632 memcpy(mic, digest, ctx->digest_size);
633
634 return (0);
635 }
636
637 errno_t /* __attribute__((optnone)) */
638 krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, uint32_t len, int encrypt, cccbc_ctx *ks)
639 {
640 struct crypt_walker_ctx wctx;
641 const struct ccmode_cbc *ccmode = encrypt ? ctx->enc_mode : ctx->dec_mode;
642 size_t plen = len;
643 size_t cts_len = 0;
644 mbuf_t mb, lmb;
645 int error;
646
647 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
648 lck_mtx_lock(ctx->lock);
649 if (!(ctx->flags & CRYPTO_KS_ALLOCED)) {
650 cc_key_schedule_create(ctx);
651 }
652 ctx->flags |= CRYPTO_KS_ALLOCED;
653 lck_mtx_unlock(ctx->lock);
654 }
655 if (!ks)
656 ks = encrypt ? ctx->ks.enc : ctx->ks.dec;
657
658 if ((ctx->flags & CRYPTO_CTS_ENABLE) && ctx->mpad == 1) {
659 uint8_t block[ccmode->block_size];
660 /* if the length is less than or equal to a blocksize. We just encrypt the block */
661 if (len <= ccmode->block_size) {
662 if (len < ccmode->block_size) {
663 memset(block, 0, sizeof(block));
664 gss_append_mbuf(*mbp, block, ccmode->block_size);
665 }
666 plen = ccmode->block_size;
667 } else {
668 /* determine where the last two blocks are */
669 uint32_t r = len % ccmode->block_size;
670
671 cts_len = r ? r + ccmode->block_size : 2 * ccmode->block_size;
672 plen = len - cts_len;
673 /* If plen is 0 we only have two blocks to crypt with ccpad below */
674 if (plen == 0)
675 lmb = *mbp;
676 else {
677 gss_normalize_mbuf(*mbp, 0, &plen, &mb, &lmb, 0);
678 assert(*mbp == mb);
679 assert(plen == len - cts_len);
680 assert(gss_mbuf_len(mb, 0) == plen);
681 assert(gss_mbuf_len(lmb, 0) == cts_len);
682 }
683 }
684 } else if (len % ctx->mpad) {
685 uint8_t pad_block[ctx->mpad];
686 size_t padlen = ctx->mpad - (len % ctx->mpad);
687
688 memset(pad_block, 0, padlen);
689 error = gss_append_mbuf(*mbp, pad_block, padlen);
690 if (error)
691 return (error);
692 plen = len + padlen;
693 }
694 do_crypt_init(&wctx, encrypt, ctx, ks);
695 if (plen) {
696 error = mbuf_walk(*mbp, 0, plen, ccmode->block_size, do_crypt, &wctx);
697 if (error)
698 return (error);
699 }
700
701 if ((ctx->flags & CRYPTO_CTS_ENABLE) && cts_len) {
702 uint8_t cts_pad[2*ccmode->block_size];
703 ccpad_func do_ccpad = encrypt ? ccpad_cts3_encrypt : ccpad_cts3_decrypt;
704
705 assert(cts_len <= 2*ccmode->block_size && cts_len > ccmode->block_size);
706 memset(cts_pad, 0, sizeof(cts_pad));
707 mbuf_copydata(lmb, 0, cts_len, cts_pad);
708 mbuf_freem(lmb);
709 do_ccpad(ccmode, wctx.crypt_ctx, wctx.iv, cts_len, cts_pad, cts_pad);
710 gss_append_mbuf(*mbp, cts_pad, cts_len);
711 }
712 FREE(wctx.iv, M_TEMP);
713
714 return (0);
715 }
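/*
 * Worked example (not part of the original source) of the CTS split above,
 * assuming AES with a 16 byte block size and CRYPTO_CTS_ENABLE set:
 *
 *	len = 53:  r = 53 % 16 = 5, cts_len = 5 + 16 = 21, plen = 32.
 *		   The first 32 bytes go through plain CBC via mbuf_walk();
 *		   the last 21 bytes are handled by ccpad_cts3_{en,de}crypt().
 *	len = 64:  r = 0, cts_len = 32, plen = 32 (the final two full blocks
 *		   always take the ciphertext-stealing path).
 *	len <= 16: the data is zero padded up to one block and encrypted
 *		   directly, with no ciphertext stealing.
 */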
716
717 /*
718 * Key derivation routines
719 */
720
721 static int
722 rr13(unsigned char *buf, size_t len)
723 {
724 size_t bytes = (len + 7) / 8;
725 unsigned char tmp[bytes];
726 size_t i;
727
728 if(len == 0)
729 return 0;
730
731 {
732 const int bits = 13 % len;
733 const int lbit = len % 8;
734
735 memcpy(tmp, buf, bytes);
736 if(lbit) {
737 /* pad final byte with initial bits */
738 tmp[bytes - 1] &= 0xff << (8 - lbit);
739 for(i = lbit; i < 8; i += len)
740 tmp[bytes - 1] |= buf[0] >> i;
741 }
742 for(i = 0; i < bytes; i++) {
743 ssize_t bb;
744 ssize_t b1, s1, b2, s2;
745
746 /* calculate first bit position of this byte */
747 bb = 8 * i - bits;
748 while(bb < 0)
749 bb += len;
750 /* byte offset and shift count */
751 b1 = bb / 8;
752 s1 = bb % 8;
753 if((size_t)bb + 8 > bytes * 8)
754 /* watch for wraparound */
755 s2 = (len + 8 - s1) % 8;
756 else
757 s2 = 8 - s1;
758 b2 = (b1 + 1) % bytes;
759 buf[i] = (tmp[b1] << s1) | (tmp[b2] >> s2);
760 }
761 }
762 return 0;
763 }
764
765
766 /* Add `b' to `a', both being one's complement numbers. */
767 static void
768 add1(unsigned char *a, unsigned char *b, size_t len)
769 {
770 ssize_t i;
771 int carry = 0;
772
773 for(i = len - 1; i >= 0; i--){
774 int x = a[i] + b[i] + carry;
775 carry = x > 0xff;
776 a[i] = x & 0xff;
777 }
778 for(i = len - 1; carry && i >= 0; i--){
779 int x = a[i] + carry;
780 carry = x > 0xff;
781 a[i] = x & 0xff;
782 }
783 }
784
785
786 static int
787 krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size)
788 {
789 /* if len < size we need at most N * len bytes, ie < 2 * size;
790 if len > size we need at most 2 * len */
791 int ret = 0;
792 size_t maxlen = 2 * max(size, len);
793 size_t l = 0;
794 unsigned char tmp[maxlen];
795 unsigned char buf[len];
796
797 memcpy(buf, instr, len);
798 memset(foldstr, 0, size);
799 do {
800 memcpy(tmp + l, buf, len);
801 l += len;
802 ret = rr13(buf, len * 8);
803 if (ret)
804 goto out;
805 while(l >= size) {
806 add1(foldstr, tmp, size);
807 l -= size;
808 if(l == 0)
809 break;
810 memmove(tmp, tmp + size, l);
811 }
812 } while(l != 0);
813 out:
814
815 return ret;
816 }
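/*
 * Note (not part of the original source): this is the RFC 3961 n-fold
 * operation, used below to stretch or compress a key-derivation constant to
 * one cipher block before it is encrypted. A handy property, visible from the
 * loop above: when len == size the fold degenerates to a copy, since the
 * first add1() of tmp into the zeroed foldstr leaves the input unchanged.
 */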
817
818 void
819 krb5_make_usage(uint32_t usage_no, uint8_t suffix, uint8_t usage_string[KRB5_USAGE_LEN])
820 {
821 uint32_t i;
822
823 for (i = 0; i < 4; i++)
824 usage_string[i] = ((usage_no >> 8*(3-i)) & 0xff);
825 usage_string[i] = suffix;
826 }
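/*
 * Worked example (not part of the original source): the derivation constant
 * is the 32 bit usage number in big-endian order followed by a one byte key
 * type suffix, so KRB5_USAGE_LEN is 5. Assuming KRB5_USAGE_INITIATOR_SEAL is
 * 24, as assigned by RFC 4121:
 *
 *	krb5_make_usage(24, 0xAA, us);	// us = { 0x00, 0x00, 0x00, 0x18, 0xAA }
 *
 * where 0xAA requests the encryption subkey, 0x55 the integrity subkey and
 * 0x99 the checksum subkey, matching the callers below.
 */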
827
828 void
829 krb5_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, void **dkey, size_t dklen)
830 {
831 size_t blocksize = ctx->enc_mode->block_size;
832 cccbc_iv_decl(blocksize, iv);
833 cccbc_ctx_decl(ctx->enc_mode->size, enc_ctx);
834 size_t ksize = 8*dklen;
835 size_t nblocks = (ksize + 8*blocksize - 1) / (8*blocksize);
836 uint8_t *dkptr;
837 uint8_t block[blocksize];
838
839 MALLOC(*dkey, void *, nblocks * blocksize, M_TEMP, M_WAITOK | M_ZERO);
840 dkptr = *dkey;
841
842 krb5_n_fold(cons, conslen, block, blocksize);
843 cccbc_init(ctx->enc_mode, enc_ctx, ctx->keylen, ctx->key);
844 for (size_t i = 0; i < nblocks; i++) {
845 cccbc_set_iv(ctx->enc_mode, iv, NULL);
846 cccbc_update(ctx->enc_mode, enc_ctx, iv, 1, block, block);
847 memcpy(dkptr, block, blocksize);
848 dkptr += blocksize;
849 }
850 }
851
852 static void
853 des_make_key(const uint8_t rawkey[7], uint8_t deskey[8])
854 {
855 uint8_t val = 0;
856
857 memcpy(deskey, rawkey, 7);
858 for (int i = 0; i < 7; i++)
859 val |= ((deskey[i] & 1) << (i+1));
860 deskey[7] = val;
861 ccdes_key_set_odd_parity(deskey, 8);
862 }
863
864 static void
865 krb5_3des_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, void **des3key)
866 {
867 const struct ccmode_cbc *cbcmode = ctx->enc_mode;
868 void *rawkey;
869 uint8_t *kptr, *rptr;
870
871 MALLOC(*des3key, void *, 3*cbcmode->block_size, M_TEMP, M_WAITOK | M_ZERO);
872 krb5_key_derivation(ctx, cons, conslen, &rawkey, 3*(cbcmode->block_size - 1));
873 kptr = (uint8_t *)*des3key;
874 rptr = (uint8_t *)rawkey;
875
876 for (int i = 0; i < 3; i++) {
877 des_make_key(rptr, kptr);
878 rptr += cbcmode->block_size - 1;
879 kptr += cbcmode->block_size;
880 }
881
882 cc_clear(3*(cbcmode->block_size - 1), rawkey);
883 FREE(rawkey, M_TEMP);
884 }
885
886 /*
887  * Create a key schedule
888 *
889 */
890 void
891 cc_key_schedule_create(crypto_ctx_t ctx)
892 {
893 uint8_t usage_string[KRB5_USAGE_LEN];
894 lucid_context_t lctx = ctx->gss_ctx;
895 void *ekey;
896
897 switch (lctx->key_data.proto) {
898 case 0: {
899 if (ctx->ks.enc == NULL) {
900 MALLOC(ctx->ks.enc, cccbc_ctx *, ctx->enc_mode->size, M_TEMP, M_WAITOK | M_ZERO);
901 cccbc_init(ctx->enc_mode, ctx->ks.enc, ctx->keylen, ctx->key);
902 }
903 if (ctx->ks.dec == NULL) {
904 MALLOC(ctx->ks.dec, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO);
905 cccbc_init(ctx->dec_mode, ctx->ks.dec, ctx->keylen, ctx->key);
906 }
907 }
908 case 1: {
909 if (ctx->ks.enc == NULL) {
910 krb5_make_usage(lctx->initiate ?
911 KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL,
912 0xAA, usage_string);
913 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen);
914 MALLOC(ctx->ks.enc, cccbc_ctx *, ctx->enc_mode->size, M_TEMP, M_WAITOK | M_ZERO);
915 cccbc_init(ctx->enc_mode, ctx->ks.enc, ctx->keylen, ekey);
916 FREE(ekey, M_TEMP);
917 }
918 if (ctx->ks.dec == NULL) {
919 krb5_make_usage(lctx->initiate ?
920 KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL,
921 0xAA, usage_string);
922 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen);
923 MALLOC(ctx->ks.dec, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO);
924 cccbc_init(ctx->dec_mode, ctx->ks.dec, ctx->keylen, ekey);
925 FREE(ekey, M_TEMP);
926 }
927 if (ctx->ks.ikey[GSS_SND] == NULL) {
928 krb5_make_usage(lctx->initiate ?
929 KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL,
930 0x55, usage_string);
931 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ks.ikey[GSS_SND], ctx->keylen);
932 }
933 if (ctx->ks.ikey[GSS_RCV] == NULL) {
934 krb5_make_usage(lctx->initiate ?
935 KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL,
936 0x55, usage_string);
937 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ks.ikey[GSS_RCV], ctx->keylen);
938 }
939 }
940 }
941 }
942
943 void
944 gss_crypto_ctx_free(crypto_ctx_t ctx)
945 {
946 ctx->ks.ikey[GSS_SND] = NULL;
947 if (ctx->ks.ikey[GSS_RCV] && ctx->key != ctx->ks.ikey[GSS_RCV]) {
948 cc_clear(ctx->keylen, ctx->ks.ikey[GSS_RCV]);
949 FREE(ctx->ks.ikey[GSS_RCV], M_TEMP);
950 }
951 ctx->ks.ikey[GSS_RCV] = NULL;
952 if (ctx->ks.enc) {
953 cccbc_ctx_clear(ctx->enc_mode->size, ctx->ks.enc);
954 FREE(ctx->ks.enc, M_TEMP);
955 ctx->ks.enc = NULL;
956 }
957 if (ctx->ks.dec) {
958 cccbc_ctx_clear(ctx->dec_mode->size, ctx->ks.dec);
959 FREE(ctx->ks.dec, M_TEMP);
960 ctx->ks.dec = NULL;
961 }
962 if (ctx->ckey[GSS_SND] && ctx->ckey[GSS_SND] != ctx->key) {
963 cc_clear(ctx->keylen, ctx->ckey[GSS_SND]);
964 FREE(ctx->ckey[GSS_SND], M_TEMP);
965 }
966 ctx->ckey[GSS_SND] = NULL;
967 if (ctx->ckey[GSS_RCV] && ctx->ckey[GSS_RCV] != ctx->key) {
968 cc_clear(ctx->keylen, ctx->ckey[GSS_RCV]);
969 FREE(ctx->ckey[GSS_RCV], M_TEMP);
970 }
971 ctx->ckey[GSS_RCV] = NULL;
972 ctx->key = NULL;
973 ctx->keylen = 0;
974 }
975
976 int
977 gss_crypto_ctx_init(struct crypto_ctx *ctx, lucid_context_t lucid)
978 {
979 ctx->gss_ctx = lucid;
980 void *key;
981 uint8_t usage_string[KRB5_USAGE_LEN];
982
983 ctx->keylen = ctx->gss_ctx->ctx_key.key.key_len;
984 key = ctx->gss_ctx->ctx_key.key.key_val;
985 ctx->etype = ctx->gss_ctx->ctx_key.etype;
986 ctx->key = key;
987
988 switch(ctx->etype) {
989 case AES128_CTS_HMAC_SHA1_96:
990 case AES256_CTS_HMAC_SHA1_96:
991 ctx->enc_mode = ccaes_cbc_encrypt_mode();
992 assert(ctx->enc_mode);
993 ctx->dec_mode = ccaes_cbc_decrypt_mode();
994 assert(ctx->dec_mode);
995 ctx->ks.enc = NULL;
996 ctx->ks.dec = NULL;
997 ctx->di = ccsha1_di();
998 assert(ctx->di);
999 ctx->flags = CRYPTO_CTS_ENABLE;
1000 ctx->mpad = 1;
1001 ctx->digest_size = 12; /* 96 bits */
1002 krb5_make_usage(ctx->gss_ctx->initiate ?
1003 KRB5_USAGE_INITIATOR_SIGN : KRB5_USAGE_ACCEPTOR_SIGN,
1004 0x99, usage_string);
1005 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_SND], ctx->keylen);
1006 krb5_make_usage(ctx->gss_ctx->initiate ?
1007 KRB5_USAGE_ACCEPTOR_SIGN : KRB5_USAGE_INITIATOR_SIGN,
1008 0x99, usage_string);
1009 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_RCV], ctx->keylen);
1010 break;
1011 case DES3_CBC_SHA1_KD:
1012 ctx->enc_mode = ccdes3_cbc_encrypt_mode();
1013 assert(ctx->enc_mode);
1014 ctx->dec_mode = ccdes3_cbc_decrypt_mode();
1015 assert(ctx->dec_mode);
1016 ctx->ks.ikey[GSS_SND] = ctx->key;
1017 ctx->ks.ikey[GSS_RCV] = ctx->key;
1018 ctx->di = ccsha1_di();
1019 assert(ctx->di);
1020 ctx->flags = 0;
1021 ctx->mpad = ctx->enc_mode->block_size;
1022 ctx->digest_size = 20; /* 160 bits */
1023 krb5_make_usage(KRB5_USAGE_ACCEPTOR_SIGN, 0x99, usage_string);
1024 krb5_3des_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_SND]);
1025 krb5_3des_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_RCV]);
1026 break;
1027 default:
1028 return (ENOTSUP);
1029 }
1030
1031 ctx->lock = lck_mtx_alloc_init(gss_krb5_mech_grp, LCK_ATTR_NULL);
1032
1033 return (0);
1034 }
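/*
 * Illustrative sketch (not part of the original source): the expected
 * lifecycle of a crypto context. A lucid context (exported by the user-space
 * GSS-API library) supplies the session key and etype; the CBC key schedules
 * and subkeys are then derived lazily on first use by cc_key_schedule_create().
 * The lucid variable below is hypothetical.
 *
 *	struct crypto_ctx cctx;
 *
 *	error = gss_crypto_ctx_init(&cctx, lucid);
 *	if (error == 0) {
 *		... krb5_mic(&cctx, ...), krb5_crypt_mbuf(&cctx, ...) ...
 *		gss_crypto_ctx_free(&cctx);
 *	}
 */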
1035
1036 /*
1037 * CFX gss support routines
1038 */
1039 /* From Heimdal cfx.h file RFC 4121 Crypto framework extensions */
1040 typedef struct gss_cfx_mic_token_desc_struct
1041 {
1042 uint8_t TOK_ID[2]; /* 04 04 */
1043 uint8_t Flags;
1044 uint8_t Filler[5];
1045 uint8_t SND_SEQ[8];
1046 } gss_cfx_mic_token_desc, *gss_cfx_mic_token;
1047
1048 typedef struct gss_cfx_wrap_token_desc_struct
1049 {
1050 uint8_t TOK_ID[2]; /* 05 04 */
1051 uint8_t Flags;
1052 uint8_t Filler;
1053 uint8_t EC[2];
1054 uint8_t RRC[2];
1055 uint8_t SND_SEQ[8];
1056 } gss_cfx_wrap_token_desc, *gss_cfx_wrap_token;
1057
1058 /* End of cfx.h file */
1059
1060 #define CFXSentByAcceptor (1 << 0)
1061 #define CFXSealed (1 << 1)
1062 #define CFXAcceptorSubkey (1 << 2)
1063
1064 const gss_cfx_mic_token_desc mic_cfx_token = {
1065 .TOK_ID = "\x04\x04",
1066 .Flags = 0,
1067 .Filler = "\xff\xff\xff\xff\xff",
1068 .SND_SEQ = "\x00\x00\x00\x00\x00\x00\x00\x00"
1069 };
1070
1071 const gss_cfx_wrap_token_desc wrap_cfx_token = {
1072 .TOK_ID = "\x05\x04",
1073 .Flags = 0,
1074 .Filler = '\xff',
1075 .EC = "\x00\x00",
1076 .RRC = "\x00\x00",
1077 .SND_SEQ = "\x00\x00\x00\x00\x00\x00\x00\x00"
1078 };
1079
1080 static int
1081 gss_krb5_cfx_verify_mic_token(gss_ctx_id_t ctx, gss_cfx_mic_token token)
1082 {
1083 int i;
1084 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1085 uint8_t flags = 0;
1086
1087 if (token->TOK_ID[0] != mic_cfx_token.TOK_ID[0] || token->TOK_ID[1] != mic_cfx_token.TOK_ID[1]) {
1088 printf("Bad mic TOK_ID %x %x\n", token->TOK_ID[0], token->TOK_ID[1]);
1089 return (EBADRPC);
1090 }
1091 if (lctx->initiate)
1092 flags |= CFXSentByAcceptor;
1093 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey)
1094 flags |= CFXAcceptorSubkey;
1095 if (token->Flags != flags) {
1096 printf("Bad flags received %x exptect %x\n", token->Flags, flags);
1097 return (EBADRPC);
1098 }
1099 for (i = 0; i < 5; i++) {
1100 if (token->Filler[i] != mic_cfx_token.Filler[i])
1101 break;
1102 }
1103
1104 if (i != 5) {
1105 printf("Bad mic filler %x @ %d\n", token->Filler[i], i);
1106 return (EBADRPC);
1107 }
1108
1109 return (0);
1110 }
1111
1112 uint32_t
1113 gss_krb5_cfx_get_mic(uint32_t *minor, /* minor_status */
1114 gss_ctx_id_t ctx, /* context_handle */
1115 gss_qop_t qop __unused, /* qop_req (ignored) */
1116 gss_buffer_t mbp, /* message mbuf */
1117 gss_buffer_t mic /* message_token */)
1118 {
1119 gss_cfx_mic_token_desc token;
1120 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1121 crypto_ctx_t cctx = &ctx->gss_cryptor;
1122 gss_buffer_desc header;
1123 uint32_t rv;
1124 uint64_t seq = htonll(lctx->send_seq);
1125
1126 if (minor == NULL)
1127 minor = &rv;
1128 *minor = 0;
1129 token = mic_cfx_token;
1130 mic->length = sizeof (token) + cctx->digest_size;
1131 MALLOC(mic->value, void *, mic->length, M_TEMP, M_WAITOK | M_ZERO);
1132 if (!lctx->initiate)
1133 token.Flags |= CFXSentByAcceptor;
1134 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey)
1135 token.Flags |= CFXAcceptorSubkey;
1136 memcpy(&token.SND_SEQ, &seq, sizeof(lctx->send_seq));
1137 lctx->send_seq++; //XXX should only update this below on success? Heimdal seems to do it this way
1138 header.value = &token;
1139 header.length = sizeof (gss_cfx_mic_token_desc);
1140
1141 *minor = krb5_mic(cctx, NULL, mbp, &header, (uint8_t *)mic->value + sizeof(token), NULL, 0, 0);
1142
1143 if (*minor) {
1144 mic->length = 0;
1145 FREE(mic->value, M_TEMP);
1146 mic->value = NULL;
1147 } else {
1148 memcpy(mic->value, &token, sizeof(token));
1149 }
1150
1151 return (*minor ? GSS_S_FAILURE : GSS_S_COMPLETE);
1152 }
1153
1154 uint32_t
1155 gss_krb5_cfx_verify_mic(uint32_t *minor, /* minor_status */
1156 gss_ctx_id_t ctx, /* context_handle */
1157 gss_buffer_t mbp, /* message_buffer */
1158 gss_buffer_t mic, /* message_token */
1159 gss_qop_t *qop /* qop_state */)
1160 {
1161 gss_cfx_mic_token token = mic->value;
1162 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1163 crypto_ctx_t cctx = &ctx->gss_cryptor;
1164 uint8_t *digest = (uint8_t *)mic->value + sizeof (gss_cfx_mic_token_desc);
1165 int verified = 0;
1166 uint64_t seq;
1167 uint32_t rv;
1168 gss_buffer_desc header;
1169
1170 if (qop)
1171 *qop = GSS_C_QOP_DEFAULT;
1172 if (minor == NULL)
1173 minor = &rv;
1174
1175 if (mic->length != sizeof(gss_cfx_mic_token_desc) + cctx->digest_size) {
1176 printf("mic token wrong length\n");
1177 *minor = EBADRPC;
1178 goto out;
1179 }
1180 *minor = gss_krb5_cfx_verify_mic_token(ctx, token);
1181 if (*minor)
1182 return (GSS_S_FAILURE);
1183 header.value = token;
1184 header.length = sizeof (gss_cfx_mic_token_desc);
1185 *minor = krb5_mic(cctx, NULL, mbp, &header, digest, &verified, 0, 0);
1186
1187 if (verified) {
1188 //XXX errors and such? Sequencing and replay? Not supported in RPCSEC_GSS
1189 memcpy(&seq, token->SND_SEQ, sizeof (uint64_t));
1190 seq = ntohll(seq);
1191 lctx->recv_seq = seq;
1192 }
1193
1194 out:
1195 return (verified ? GSS_S_COMPLETE : GSS_S_BAD_SIG);
1196 }
1197
1198 uint32_t
1199 gss_krb5_cfx_get_mic_mbuf(uint32_t *minor, /* minor_status */
1200 gss_ctx_id_t ctx, /* context_handle */
1201 gss_qop_t qop __unused ,/* qop_req (ignored) */
1202 mbuf_t mbp, /* message mbuf */
1203 size_t offset, /* offset */
1204 size_t len, /* length */
1205 gss_buffer_t mic /* message_token */)
1206 {
1207 gss_cfx_mic_token_desc token;
1208 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1209 crypto_ctx_t cctx = &ctx->gss_cryptor;
1210 uint32_t rv;
1211 uint64_t seq = htonll(lctx->send_seq);
1212 gss_buffer_desc header;
1213
1214 if (minor == NULL)
1215 minor = &rv;
1216 *minor = 0;
1217
1218 token = mic_cfx_token;
1219 mic->length = sizeof (token) + cctx->digest_size;
1220 MALLOC(mic->value, void *, mic->length, M_TEMP, M_WAITOK | M_ZERO);
1221 if (!lctx->initiate)
1222 token.Flags |= CFXSentByAcceptor;
1223 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey)
1224 token.Flags |= CFXAcceptorSubkey;
1225
1226 memcpy(&token.SND_SEQ, &seq, sizeof(lctx->send_seq));
1227 lctx->send_seq++; //XXX should only update this below on success? Heimdal seems to do it this way
1228
1229 header.length = sizeof(token);
1230 header.value = &token;
1231
1232 len = len ? len : gss_mbuf_len(mbp, offset);
1233 *minor = krb5_mic_mbuf(cctx, NULL, mbp, offset, len, &header, (uint8_t *)mic->value + sizeof(token), NULL, 0, 0);
1234
1235 if (*minor) {
1236 mic->length = 0;
1237 FREE(mic->value, M_TEMP);
1238 mic->value = NULL;
1239 } else {
1240 memcpy(mic->value, &token, sizeof(token));
1241 }
1242
1243 return (*minor ? GSS_S_FAILURE : GSS_S_COMPLETE);
1244 }
1245
1246
1247 uint32_t
1248 gss_krb5_cfx_verify_mic_mbuf(uint32_t *minor, /* minor_status */
1249 gss_ctx_id_t ctx, /* context_handle */
1250 mbuf_t mbp, /* message_buffer */
1251 size_t offset, /* offset */
1252 size_t len, /* length */
1253 gss_buffer_t mic, /* message_token */
1254 gss_qop_t *qop /* qop_state */)
1255 {
1256 gss_cfx_mic_token token = mic->value;
1257 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1258 crypto_ctx_t cctx = &ctx->gss_cryptor;
1259 uint8_t *digest = (uint8_t *)mic->value + sizeof (gss_cfx_mic_token_desc);
1260 int verified = 0;
1261 uint64_t seq;
1262 uint32_t rv;
1263 gss_buffer_desc header;
1264
1265 if (qop)
1266 *qop = GSS_C_QOP_DEFAULT;
1267
1268 if (minor == NULL)
1269 minor = &rv;
1270
1271 *minor = gss_krb5_cfx_verify_mic_token(ctx, token);
1272 if (*minor)
1273 return (GSS_S_FAILURE);
1274
1275 header.length = sizeof(gss_cfx_mic_token_desc);
1276 header.value = mic->value;
1277
1278 *minor = krb5_mic_mbuf(cctx, NULL, mbp, offset, len, &header, digest, &verified, 0, 0);
1279
1280 //XXX errors and such? Sequencing and replay? Not Supported RPCSEC_GSS
1281 memcpy(&seq, token->SND_SEQ, sizeof (uint64_t));
1282 seq = ntohll(seq);
1283 lctx->recv_seq = seq;
1284
1285 return (verified ? GSS_S_COMPLETE : GSS_S_BAD_SIG);
1286 }
1287
1288 errno_t
1289 krb5_cfx_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t *len, int encrypt, int reverse)
1290 {
1291 const struct ccmode_cbc *ccmode = encrypt ? ctx->enc_mode : ctx->dec_mode;
1292 uint8_t confounder[ccmode->block_size];
1293 uint8_t digest[ctx->digest_size];
1294 size_t tlen, r = 0;
1295 errno_t error;
1296
1297 if (encrypt) {
1298 read_random(confounder, ccmode->block_size);
1299 error = gss_prepend_mbuf(mbp, confounder, ccmode->block_size);
1300 if (error)
1301 return (error);
1302 tlen = *len + ccmode->block_size;
1303 if (ctx->mpad > 1)
1304 r = ctx->mpad - (tlen % ctx->mpad);
1305 /* We expect that r == 0 from krb5_cfx_wrap */
1306 if (r != 0) {
1307 uint8_t mpad[r];
1308 memset(mpad, 0, r);
1309 error = gss_append_mbuf(*mbp, mpad, r);
1310 if (error)
1311 return (error);
1312 }
1313 tlen += r;
1314 error = krb5_mic_mbuf(ctx, NULL, *mbp, 0, tlen, NULL, digest, NULL, 1, 0);
1315 if (error)
1316 return (error);
1317 error = krb5_crypt_mbuf(ctx, mbp, tlen, 1, NULL);
1318 if (error)
1319 return (error);
1320 error = gss_append_mbuf(*mbp, digest, ctx->digest_size);
1321 if (error)
1322 return (error);
1323 *len = tlen + ctx->digest_size;
1324 return (0);
1325 } else {
1326 int verf;
1327 cccbc_ctx *ks = NULL;
1328
1329 if (*len < ctx->digest_size + sizeof(confounder))
1330 return (EBADRPC);
1331 tlen = *len - ctx->digest_size;
1332 /* get the digest */
1333 error = mbuf_copydata(*mbp, tlen, ctx->digest_size, digest);
1334 /* Remove the digest from the mbuf chain */
1335 error = gss_strip_mbuf(*mbp, -ctx->digest_size);
1336 if (error)
1337 return (error);
1338
1339 if (reverse) {
1340 /*
1341 * Derive a key schedule that the sender can unwrap with. This
1342 * is so that RPCSEC_GSS can restore encrypted arguments for
1343 * resending. We do that because the RPCSEC_GSS sequence number in
1344 * the rpc header is prepended to the body of the message before wrapping.
1345 */
1346 void *ekey;
1347 uint8_t usage_string[KRB5_USAGE_LEN];
1348 lucid_context_t lctx = ctx->gss_ctx;
1349
1350 krb5_make_usage(lctx->initiate ?
1351 KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL,
1352 0xAA, usage_string);
1353 krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen);
1354 MALLOC(ks, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO);
1355 cccbc_init(ctx->dec_mode, ks, ctx->keylen, ekey);
1356 FREE(ekey, M_TEMP);
1357 }
1358 error = krb5_crypt_mbuf(ctx, mbp, tlen, 0, ks);
1359 FREE(ks, M_TEMP);
1360 if (error)
1361 return (error);
1362 error = krb5_mic_mbuf(ctx, NULL, *mbp, 0, tlen, NULL, digest, &verf, 1, reverse);
1363 if (error)
1364 return (error);
1365 if (!verf)
1366 return (EBADRPC);
1367 /* strip off the confounder */
1368 error = gss_strip_mbuf(*mbp, ccmode->block_size);
1369 if (error)
1370 return (error);
1371 *len = tlen - ccmode->block_size;
1372 }
1373 return (0);
1374 }
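/*
 * Layout note (not part of the original source): for the encrypt case above
 * the mbuf chain is transformed as
 *
 *	plaintext
 *	  -> confounder | plaintext [| pad]              (prepend/append)
 *	  -> E( confounder | plaintext [| pad] ) | HMAC  (checksum taken with
 *	                                                  the ikey subkey over
 *	                                                  the plaintext before
 *	                                                  encryption)
 *
 * and the decrypt case undoes it: strip the HMAC, decrypt, verify the
 * checksum over the recovered plaintext, then strip the confounder.
 */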
1375
1376 uint32_t
1377 gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */
1378 gss_ctx_id_t ctx, /* context_handle */
1379 int conf_flag, /* conf_req_flag */
1380 gss_qop_t qop __unused, /* qop_req */
1381 mbuf_t *mbp, /* input/output message_buffer */
1382 size_t len, /* mbuf chain length */
1383 int *conf /* conf_state */)
1384 {
1385 gss_cfx_wrap_token_desc token;
1386 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1387 crypto_ctx_t cctx = &ctx->gss_cryptor;
1388 int error = 0;
1389 uint32_t mv;
1390 uint64_t seq = htonll(lctx->send_seq);
1391
1392 if (minor == NULL)
1393 minor = &mv;
1394 if (conf)
1395 *conf = conf_flag;
1396
1397 *minor = 0;
1398 token = wrap_cfx_token;
1399 if (!lctx->initiate)
1400 token.Flags |= CFXSentByAcceptor;
1401 if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey)
1402 token.Flags |= CFXAcceptorSubkey;
1403 memcpy(&token.SND_SEQ, &seq, sizeof(uint64_t));
1404 lctx->send_seq++;
1405 if (conf_flag) {
1406 uint8_t pad[cctx->mpad];
1407 uint16_t plen = 0;
1408
1409 token.Flags |= CFXSealed;
1410 memset(pad, 0, cctx->mpad);
1411 if (cctx->mpad > 1) {
1412 plen = cctx->mpad - ((len + sizeof (gss_cfx_wrap_token_desc)) % cctx->mpad);
1413 token.EC[0] = ((plen >> 8) & 0xff);
1414 token.EC[1] = (plen & 0xff);
1415 }
1416 if (plen) {
1417 error = gss_append_mbuf(*mbp, pad, plen);
1418 len += plen;
1419 }
1420 if (error == 0) {
1421 error = gss_append_mbuf(*mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc));
1422 len += sizeof (gss_cfx_wrap_token_desc);
1423 }
1424 if (error == 0)
1425 error = krb5_cfx_crypt_mbuf(cctx, mbp, &len, 1, 0);
1426 if (error == 0)
1427 error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc));
1428 } else {
1429 uint8_t digest[cctx->digest_size];
1430 gss_buffer_desc header;
1431
1432 header.length = sizeof(token);
1433 header.value = &token;
1434
1435 error = krb5_mic_mbuf(cctx, NULL, *mbp, 0, len, &header, digest, NULL, 1, 0);
1436 if (error == 0) {
1437 error = gss_append_mbuf(*mbp, digest, cctx->digest_size);
1438 if (error == 0) {
1439 uint16_t plen = htons(cctx->digest_size);
1440 memcpy(token.EC, &plen, 2);
1441 error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof (gss_cfx_wrap_token_desc));
1442 }
1443 }
1444 }
1445 if (error) {
1446 *minor = error;
1447 return (GSS_S_FAILURE);
1448 }
1449
1450 return (GSS_S_COMPLETE);
1451 }
1452
1453 /*
1454  * Given a wrap token that has an RRC (right rotation count), move the rotated trailer back to the end.
1455 */
1456 static void
1457 gss_krb5_cfx_unwrap_rrc_mbuf(mbuf_t header, size_t rrc)
1458 {
1459 mbuf_t body, trailer;
1460
1461 gss_normalize_mbuf(header, sizeof(gss_cfx_wrap_token_desc), &rrc, &trailer, &body, 0);
1462 gss_join_mbuf(header, body, trailer);
1463 }
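/*
 * Worked example (not part of the original source): RFC 4121 lets the sender
 * rotate the wrap token so that the trailing material ends up right after the
 * 16 byte token header; RRC is the rotation count in bytes. With RRC = N the
 * bytes at offsets [16, 16 + N) of the received token are the rotated
 * trailer, so the helper above splits them off as `trailer', keeps the rest
 * as `body', and rejoins the chain as header | body | trailer, restoring the
 * unrotated layout before decryption.
 */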
1464
1465 uint32_t
1466 gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */
1467 gss_ctx_id_t ctx, /* context_handle */
1468 mbuf_t *mbp, /* input/output message_buffer */
1469 size_t len, /* mbuf chain length */
1470 int *conf_flag, /* conf_state */
1471 gss_qop_t *qop /* qop state */)
1472 {
1473 gss_cfx_wrap_token_desc token;
1474 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1475 crypto_ctx_t cctx = &ctx->gss_cryptor;
1476 int error, conf;
1477 uint16_t ec = 0 , rrc = 0;
1478 uint64_t seq;
1479 int reverse = (*qop == GSS_C_QOP_REVERSE);
1480 int initiate = lctx->initiate ? (reverse ? 0 : 1) : (reverse ? 1 : 0);
1481
1482 error = mbuf_copydata(*mbp, 0, sizeof (gss_cfx_wrap_token_desc), &token);
1483 gss_strip_mbuf(*mbp, sizeof (gss_cfx_wrap_token_desc));
1484 len -= sizeof (gss_cfx_wrap_token_desc);
1485
1486 /* Check for valid token */
1487 if (token.TOK_ID[0] != wrap_cfx_token.TOK_ID[0] ||
1488 token.TOK_ID[1] != wrap_cfx_token.TOK_ID[1] ||
1489 token.Filler != wrap_cfx_token.Filler) {
1490 printf("Token id does not match\n");
1491 goto badrpc;
1492 }
1493 if ((initiate && !(token.Flags & CFXSentByAcceptor)) ||
1494 (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey && !(token.Flags & CFXAcceptorSubkey))) {
1495 printf("Bad flags %x\n", token.Flags);
1496 goto badrpc;
1497 }
1498
1499 /* XXX Sequence replay detection */
1500 memcpy(&seq, token.SND_SEQ, sizeof (seq));
1501 seq = ntohll(seq);
1502 lctx->recv_seq = seq;
1503
1504 ec = (token.EC[0] << 8) | token.EC[1];
1505 rrc = (token.RRC[0] << 8) | token.RRC[1];
1506 *qop = GSS_C_QOP_DEFAULT;
1507 conf = ((token.Flags & CFXSealed) == CFXSealed);
1508 if (conf_flag)
1509 *conf_flag = conf;
1510 if (conf) {
1511 gss_cfx_wrap_token_desc etoken;
1512
1513 if (rrc) /* Handle Right rotation count */
1514 gss_krb5_cfx_unwrap_rrc_mbuf(*mbp, rrc);
1515 error = krb5_cfx_crypt_mbuf(cctx, mbp, &len, 0, reverse);
1516 if (error) {
1517 printf("krb5_cfx_crypt_mbuf %d\n", error);
1518 *minor = error;
1519 return (GSS_S_FAILURE);
1520 }
1521 if (len >= sizeof(gss_cfx_wrap_token_desc))
1522 len -= sizeof(gss_cfx_wrap_token_desc);
1523 else
1524 goto badrpc;
1525 mbuf_copydata(*mbp, len, sizeof(gss_cfx_wrap_token_desc), &etoken);
1526 /* Verify etoken against the token, which should be the same except the RRC field is always zero */
1527 token.RRC[0] = token.RRC[1] = 0;
1528 if (memcmp(&token, &etoken, sizeof (gss_cfx_wrap_token_desc)) != 0) {
1529 printf("Encrypted token mismach\n");
1530 goto badrpc;
1531 }
1532 /* strip the encrypted token and any pad bytes */
1533 gss_strip_mbuf(*mbp, -(sizeof(gss_cfx_wrap_token_desc) + ec));
1534 len -= (sizeof(gss_cfx_wrap_token_desc) + ec);
1535 } else {
1536 uint8_t digest[cctx->digest_size];
1537 int verf;
1538 gss_buffer_desc header;
1539
1540 if (ec != cctx->digest_size || len < cctx->digest_size)
1541 goto badrpc;
1542 len -= cctx->digest_size;
1543 mbuf_copydata(*mbp, len, cctx->digest_size, digest);
1544 gss_strip_mbuf(*mbp, -cctx->digest_size);
1545 /* When calculating the mic, the header fields EC and RRC must be zero */
1546 token.EC[0] = token.EC[1] = token.RRC[0] = token.RRC[1] = 0;
1547 header.value = &token;
1548 header.length = sizeof(gss_cfx_wrap_token_desc);
1549 error = krb5_mic_mbuf(cctx, NULL, *mbp, 0, len, &header, digest, &verf, 1, reverse);
1550 if (error)
1551 goto badrpc;
1552 }
1553 return (GSS_S_COMPLETE);
1554
1555 badrpc:
1556 *minor = EBADRPC;
1557 return (GSS_S_FAILURE);
1558 }
1559
1560 /*
1561 * RFC 1964 3DES support
1562 */
1563
1564 typedef struct gss_1964_mic_token_desc_struct {
1565 uint8_t TOK_ID[2]; /* 01 01 */
1566 uint8_t Sign_Alg[2];
1567 uint8_t Filler[4]; /* ff ff ff ff */
1568 } gss_1964_mic_token_desc, *gss_1964_mic_token;
1569
1570 typedef struct gss_1964_wrap_token_desc_struct {
1571 uint8_t TOK_ID[2]; /* 02 01 */
1572 uint8_t Sign_Alg[2];
1573 uint8_t Seal_Alg[2];
1574 uint8_t Filler[2]; /* ff ff */
1575 } gss_1964_wrap_token_desc, *gss_1964_wrap_token;
1576
1577 typedef struct gss_1964_delete_token_desc_struct {
1578 uint8_t TOK_ID[2]; /* 01 02 */
1579 uint8_t Sign_Alg[2];
1580 uint8_t Filler[4]; /* ff ff ff ff */
1581 } gss_1964_delete_token_desc, *gss_1964_delete_token;
1582
1583 typedef struct gss_1964_header_desc_struct {
1584 uint8_t App0; /* 0x60 Application 0 constructed */
1585 uint8_t AppLen[]; /* Variable Der length */
1586 } gss_1964_header_desc, *gss_1964_header;
1587
1588 typedef union {
1589 gss_1964_mic_token_desc mic_tok;
1590 gss_1964_wrap_token_desc wrap_tok;
1591 gss_1964_delete_token_desc del_tok;
1592 } gss_1964_tok_type __attribute__((transparent_union));
1593
1594 typedef struct gss_1964_token_body_struct
1595 {
1596 uint8_t OIDType; /* 0x06 */
1597 uint8_t OIDLen; /* 0x09 */
1598 uint8_t kerb_mech[9]; /* DER encoded Kerberos mech 1.2.840.113554.1.2.2
1599 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 */
1600 gss_1964_tok_type body;
1601 uint8_t SND_SEQ[8];
1602 uint8_t Hash[]; /* Mic */
1603 } gss_1964_token_body_desc, *gss_1964_token_body;
1604
1605
1606 gss_1964_header_desc tok_1964_header = {
1607 .App0 = 0x60
1608 };
1609
1610 gss_1964_mic_token_desc mic_1964_token = {
1611 .TOK_ID = "\x01\x01",
1612 .Filler = "\xff\xff\xff\xff"
1613 };
1614
1615 gss_1964_wrap_token_desc wrap_1964_token = {
1616 .TOK_ID = "\x02\x01",
1617 .Filler = "\xff\xff"
1618 };
1619
1620 gss_1964_delete_token_desc del_1964_token = {
1621 .TOK_ID = "\x01\x02",
1622 .Filler = "\xff\xff\xff\xff"
1623 };
1624
1625 gss_1964_token_body_desc body_1964_token = {
1626 .OIDType = 0x06,
1627 .OIDLen = 0x09,
1628 .kerb_mech = "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02",
1629 };
1630
1631 #define GSS_KRB5_3DES_MAXTOKSZ (sizeof(gss_1964_header_desc) + 5 /* max der length supported */ + sizeof(gss_1964_token_body_desc))
1632
1633 uint32_t gss_krb5_3des_get_mic(uint32_t *, gss_ctx_id_t, gss_qop_t, gss_buffer_t, gss_buffer_t);
1634 uint32_t gss_krb5_3des_verify_mic(uint32_t *, gss_ctx_id_t, gss_buffer_t, gss_buffer_t, gss_qop_t *);
1635 uint32_t gss_krb5_3des_get_mic_mbuf(uint32_t *, gss_ctx_id_t, gss_qop_t, mbuf_t, size_t, size_t, gss_buffer_t);
1636 uint32_t gss_krb5_3des_verify_mic_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t, size_t, size_t, gss_buffer_t, gss_qop_t *);
1637 uint32_t gss_krb5_3des_wrap_mbuf(uint32_t *, gss_ctx_id_t, int, gss_qop_t, mbuf_t *, size_t, int *);
1638 uint32_t gss_krb5_3des_unwrap_mbuf(uint32_t *, gss_ctx_id_t, mbuf_t *, size_t, int *, gss_qop_t *);
1639
1640 /*
1641 * Decode an ASN.1 DER length field
1642 */
1643 static ssize_t
1644 gss_krb5_der_length_get(uint8_t **pp)
1645 {
1646 uint8_t *p = *pp;
1647 uint32_t flen, len = 0;
1648
1649 flen = *p & 0x7f;
1650
1651 if (*p++ & 0x80) {
1652 if (flen > sizeof(uint32_t))
1653 return (-1);
1654 while (flen--)
1655 len = (len << 8) + *p++;
1656 } else {
1657 len = flen;
1658 }
1659 *pp = p;
1660 return (len);
1661 }
1662
1663 /*
1664 * Determine size of ASN.1 DER length
1665 */
1666 static int
1667 gss_krb5_der_length_size(int len)
1668 {
1669 return
1670 len < (1 << 7) ? 1 :
1671 len < (1 << 8) ? 2 :
1672 len < (1 << 16) ? 3 :
1673 len < (1 << 24) ? 4 : 5;
1674 }
1675
1676 /*
1677 * Encode an ASN.1 DER length field
1678 */
1679 static void
1680 gss_krb5_der_length_put(uint8_t **pp, int len)
1681 {
1682 int sz = gss_krb5_der_length_size(len);
1683 uint8_t *p = *pp;
1684
1685 if (sz == 1) {
1686 *p++ = (uint8_t) len;
1687 } else {
1688 *p++ = (uint8_t) ((sz-1) | 0x80);
1689 sz -= 1;
1690 while (sz--)
1691 *p++ = (uint8_t) ((len >> (sz * 8)) & 0xff);
1692 }
1693
1694 *pp = p;
1695 }
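/*
 * Worked example (not part of the original source) of the DER length helpers
 * above:
 *
 *	len = 0x30 -> one byte:    0x30           (short form, len < 128)
 *	len = 300  -> three bytes: 0x82 0x01 0x2c (long form, 0x82 = two
 *	                                           length octets follow)
 *
 * gss_krb5_der_length_get() reverses the encoding and returns -1 if more than
 * sizeof(uint32_t) length octets are claimed.
 */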
1696
1697 static void
1698 gss_krb5_3des_token_put(gss_ctx_id_t ctx, gss_1964_tok_type body, gss_buffer_t hash, size_t datalen, gss_buffer_t des3_token)
1699 {
1700 gss_1964_header token;
1701 gss_1964_token_body tokbody;
1702 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1703 crypto_ctx_t cctx = &ctx->gss_cryptor;
1704 uint32_t seq = (uint32_t) (lctx->send_seq++ & 0xffff);
1705 size_t toklen = sizeof(gss_1964_token_body_desc) + cctx->digest_size;
1706 size_t alloclen = toklen + sizeof (gss_1964_header_desc) + gss_krb5_der_length_size(toklen + datalen);
1707 uint8_t *tokptr;
1708
1709 MALLOC(token, gss_1964_header, alloclen, M_TEMP, M_WAITOK|M_ZERO);
1710 *token = tok_1964_header;
1711 tokptr = token->AppLen;
1712 gss_krb5_der_length_put(&tokptr, toklen + datalen);
1713 tokbody = (gss_1964_token_body)tokptr;
1714 *tokbody = body_1964_token; /* Initialize the token body */
1715 tokbody->body = body; /* and now set the body to the token type passed in */
1716 seq = htonl(seq);
1717 for (int i = 0; i < 4; i++)
1718 tokbody->SND_SEQ[i] = (uint8_t)((seq >> (i * 8)) & 0xff);
1719 for (int i = 4; i < 8; i++)
1720 tokbody->SND_SEQ[i] = lctx->initiate ? 0x00 : 0xff;
1721
1722 size_t blocksize = cctx->enc_mode->block_size;
1723 cccbc_iv_decl(blocksize, iv);
1724 cccbc_ctx_decl(cctx->enc_mode->size, enc_ctx);
1725 cccbc_set_iv(cctx->enc_mode, iv, hash->value);
1726 cccbc_init(cctx->enc_mode, enc_ctx, cctx->keylen, cctx->key);
1727 cccbc_update(cctx->enc_mode, enc_ctx, iv, 1, tokbody->SND_SEQ, tokbody->SND_SEQ);
1728
1729 assert(hash->length == cctx->digest_size);
1730 memcpy(tokbody->Hash, hash->value, hash->length);
1731 des3_token->length = alloclen;
1732 des3_token->value = token;
1733 }
1734
1735 static int
1736 gss_krb5_3des_token_get(gss_ctx_id_t ctx, gss_buffer_t intok,
1737 gss_1964_tok_type body, gss_buffer_t hash, size_t *offset, size_t *len, int reverse)
1738 {
1739 gss_1964_header token = intok->value;
1740 gss_1964_token_body tokbody;
1741 lucid_context_t lctx = &ctx->gss_lucid_ctx;
1742 crypto_ctx_t cctx = &ctx->gss_cryptor;
1743 ssize_t length;
1744 size_t toklen;
1745 uint8_t *tokptr;
1746 uint32_t seq;
1747 int initiate;
1748
1749 if (token->App0 != tok_1964_header.App0) {
1750 printf("%s: bad framing\n", __func__);
1751 printgbuf(__func__, intok);
1752 return (EBADRPC);
1753 }
1754 tokptr = token->AppLen;
1755 length = gss_krb5_der_length_get(&tokptr);
1756 if (length < 0) {
1757 printf("%s: invalid length\n", __func__);
1758 printgbuf(__func__, intok);
1759 return (EBADRPC);
1760 }
1761 toklen = sizeof (gss_1964_header_desc) + gss_krb5_der_length_size(length)
1762 + sizeof (gss_1964_token_body_desc);
1763
1764 if (intok->length < toklen + cctx->digest_size) {
1765 printf("%s: token to short", __func__);
1766 printf("toklen = %d, length = %d\n", (int)toklen, (int)length);
1767 printgbuf(__func__, intok);
1768 return (EBADRPC);
1769 }
1770
1771 if (offset)
1772 *offset = toklen + cctx->digest_size;
1773
1774 if (len)
1775 *len = length - sizeof (gss_1964_token_body_desc) - cctx->digest_size;
1776
1777 tokbody = (gss_1964_token_body)tokptr;
1778 if (tokbody->OIDType != body_1964_token.OIDType ||
1779 tokbody->OIDLen != body_1964_token.OIDLen ||
1780 memcmp(tokbody->kerb_mech, body_1964_token.kerb_mech, tokbody->OIDLen) != 0) {
1781 printf("%s: Invalid mechanism\n", __func__);
1782 printgbuf(__func__, intok);
1783 return (EBADRPC);
1784 }
1785 if (memcmp(&tokbody->body, &body, sizeof(gss_1964_tok_type)) != 0) {
1786 printf("%s: Invalid body\n", __func__);
1787 printgbuf(__func__, intok);
1788 return (EBADRPC);
1789 }
1790 size_t blocksize = cctx->enc_mode->block_size;
1791 uint8_t *block = tokbody->SND_SEQ;
1792
1793 assert(blocksize == sizeof(tokbody->SND_SEQ));
1794 cccbc_iv_decl(blocksize, iv);
1795 cccbc_ctx_decl(cctx->dec_mode->size, dec_ctx);
1796 cccbc_set_iv(cctx->dec_mode, iv, tokbody->Hash);
1797 cccbc_init(cctx->dec_mode, dec_ctx, cctx->keylen, cctx->key);
1798 cccbc_update(cctx->dec_mode, dec_ctx, iv, 1, block, block);
1799
1800 initiate = lctx->initiate ? (reverse ? 0 : 1) : (reverse ? 1 : 0);
1801 for(int i = 4; i < 8; i++) {
1802 if (tokbody->SND_SEQ[i] != (initiate ? 0xff : 0x00)) {
1803 printf("%s: Invalid des mac\n", __func__);
1804 printgbuf(__func__, intok);
1805 return (EAUTH);
1806 }
1807 }
1808
1809 memcpy(&seq, tokbody->SND_SEQ, sizeof (uint32_t));
1810
1811 lctx->recv_seq = ntohl(seq);
1812
1813 assert(hash->length >= cctx->digest_size);
1814 memcpy(hash->value, tokbody->Hash, cctx->digest_size);
1815
1816 return (0);
1817 }
1818
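/*
 * get_mic for the RFC 1964 DES3 mech: build a MIC token pseudo-header
 * with SGN_ALG 04 00 (HMAC-SHA1 DES3-KD), checksum that header plus the
 * caller's message with krb5_mic(), and emit the checksum wrapped in a
 * token by gss_krb5_3des_token_put().
 */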
1819 uint32_t
1820 gss_krb5_3des_get_mic(uint32_t *minor, /* minor status */
1821 gss_ctx_id_t ctx, /* krb5 context id */
1822 gss_qop_t qop __unused, /* qop_req (ignored) */
1823 gss_buffer_t mbp, /* message buffer in */
1824 gss_buffer_t mic) /* mic token out */
1825 {
1826 gss_1964_mic_token_desc tokbody = mic_1964_token;
1827 crypto_ctx_t cctx = &ctx->gss_cryptor;
1828 gss_buffer_desc hash;
1829 gss_buffer_desc header;
1830 uint8_t hashval[cctx->digest_size];
1831
1832 hash.length = cctx->digest_size;
1833 hash.value = hashval;
1834 tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
1835 tokbody.Sign_Alg[1] = 0x00;
1836 header.length = sizeof (gss_1964_mic_token_desc);
1837 header.value = &tokbody;
1838
1839 /* Hash the data */
1840 *minor = krb5_mic(cctx, &header, mbp, NULL, hashval, NULL, 0, 0);
1841 if (*minor)
1842 return (GSS_S_FAILURE);
1843
1844 /* Make the token */
1845 gss_krb5_3des_token_put(ctx, tokbody, &hash, 0, mic);
1846
1847 return (GSS_S_COMPLETE);
1848 }
1849
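/*
 * verify_mic counterpart: parse the MIC token to recover the checksum it
 * carries, recompute the checksum over the pseudo-header and the
 * message, and return GSS_S_BAD_SIG if krb5_mic() reports a mismatch
 * (verf == 0).
 */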
1850 uint32_t
1851 gss_krb5_3des_verify_mic(uint32_t *minor,
1852 gss_ctx_id_t ctx,
1853 gss_buffer_t mbp,
1854 gss_buffer_t mic,
1855 gss_qop_t *qop)
1856 {
1857 crypto_ctx_t cctx = &ctx->gss_cryptor;
1858 uint8_t hashval[cctx->digest_size];
1859 gss_buffer_desc hash;
1860 gss_1964_mic_token_desc mtok = mic_1964_token;
1861 gss_buffer_desc header;
1862 int verf;
1863
1864 mtok.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
1865 mtok.Sign_Alg[1] = 0x00;
1866 hash.length = cctx->digest_size;
1867 hash.value = hashval;
1868 header.length = sizeof(gss_1964_mic_token_desc);
1869 header.value = &mtok;
1870
1871 if (qop)
1872 *qop = GSS_C_QOP_DEFAULT;
1873
1874 *minor = gss_krb5_3des_token_get(ctx, mic, mtok, &hash, NULL, NULL, 0);
1875 if (*minor)
1876 return (GSS_S_FAILURE);
1877
1878 *minor = krb5_mic(cctx, &header, mbp, NULL, hashval, &verf, 0, 0);
1879 if (*minor)
1880 return (GSS_S_FAILURE);
1881
1882 return (verf ? GSS_S_COMPLETE : GSS_S_BAD_SIG);
1883 }
1884
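/*
 * Same as gss_krb5_3des_get_mic(), except the message is an mbuf chain
 * and is checksummed with krb5_mic_mbuf() over [offset, offset + len).
 */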
1885 uint32_t
1886 gss_krb5_3des_get_mic_mbuf(uint32_t *minor,
1887 gss_ctx_id_t ctx,
1888 gss_qop_t qop __unused,
1889 mbuf_t mbp,
1890 size_t offset,
1891 size_t len,
1892 gss_buffer_t mic)
1893 {
1894 gss_1964_mic_token_desc tokbody = mic_1964_token;
1895 crypto_ctx_t cctx = &ctx->gss_cryptor;
1896 gss_buffer_desc header;
1897 gss_buffer_desc hash;
1898 uint8_t hashval[cctx->digest_size];
1899
1900 hash.length = cctx->digest_size;
1901 hash.value = hashval;
1902 tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
1903 tokbody.Sign_Alg[1] = 0x00;
1904 header.length = sizeof (gss_1964_mic_token_desc);
1905 header.value = &tokbody;
1906
1907 /* Hash the data */
1908 *minor = krb5_mic_mbuf(cctx, &header, mbp, offset, len, NULL, hashval, NULL, 0, 0);
1909 if (*minor)
1910 return (GSS_S_FAILURE);
1911
1912 /* Make the token */
1913 gss_krb5_3des_token_put(ctx, tokbody, &hash, 0, mic);
1914
1915 return (GSS_S_COMPLETE);
1916 }
1917
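/*
 * Same as gss_krb5_3des_verify_mic(), except the message is an mbuf
 * chain and is checksummed with krb5_mic_mbuf() over [offset, offset + len).
 */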
1918 uint32_t
1919 gss_krb5_3des_verify_mic_mbuf(uint32_t *minor,
1920 gss_ctx_id_t ctx,
1921 mbuf_t mbp,
1922 size_t offset,
1923 size_t len,
1924 gss_buffer_t mic,
1925 gss_qop_t *qop)
1926 {
1927 crypto_ctx_t cctx = &ctx->gss_cryptor;
1928 uint8_t hashval[cctx->digest_size];
1929 gss_buffer_desc header;
1930 gss_buffer_desc hash;
1931 gss_1964_mic_token_desc mtok = mic_1964_token;
1932 int verf;
1933
1934 mtok.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
1935 mtok.Sign_Alg[1] = 0x00;
1936 hash.length = cctx->digest_size;
1937 hash.value = hashval;
1938 header.length = sizeof(gss_1964_mic_token_desc);
1939 header.value = &mtok;
1940
1941 if (qop)
1942 *qop = GSS_C_QOP_DEFAULT;
1943
1944 *minor = gss_krb5_3des_token_get(ctx, mic, mtok, &hash, NULL, NULL, 0);
1945 if (*minor)
1946 return (GSS_S_FAILURE);
1947
1948 *minor = krb5_mic_mbuf(cctx, &header, mbp, offset, len, NULL, hashval, &verf, 0, 0);
1949 if (*minor)
1950 return (GSS_S_FAILURE);
1951
1952 return (verf ? GSS_S_COMPLETE : GSS_S_BAD_SIG);
1953 }
1954
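/*
 * wrap for the RFC 1964 DES3 mech. A random confounder block is
 * prepended to the data and 1-8 pad bytes (each holding the pad length)
 * are appended; the wrap-token pseudo-header plus the padded data are
 * checksummed; and a wrap token carrying the encrypted sequence number
 * and the checksum is built. When conf_flag is set (SEAL_ALG 02 00) the
 * payload is encrypted in place, and finally the token is prepended to
 * the mbuf chain.
 */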
1955 uint32_t
1956 gss_krb5_3des_wrap_mbuf(uint32_t *minor,
1957 gss_ctx_id_t ctx,
1958 int conf_flag,
1959 gss_qop_t qop __unused,
1960 mbuf_t *mbp,
1961 size_t len,
1962 int *conf_state)
1963 {
1964 crypto_ctx_t cctx = &ctx->gss_cryptor;
1965 const struct ccmode_cbc *ccmode = cctx->enc_mode;
1966 uint8_t padlen;
1967 uint8_t pad[8];
1968 uint8_t confounder[ccmode->block_size];
1969 gss_1964_wrap_token_desc tokbody = wrap_1964_token;
1970 gss_buffer_desc header;
1971 gss_buffer_desc mic;
1972 gss_buffer_desc hash;
1973 uint8_t hashval[cctx->digest_size];
1974
1975 if (conf_state)
1976 *conf_state = conf_flag;
1977
1978 hash.length = cctx->digest_size;
1979 hash.value = hashval;
1980 tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_1964.sign_alg */
1981 tokbody.Sign_Alg[1] = 0x00;
1982 /* conf_flag ? lctx->key_data.lucid_protocol_u.data_1964.seal_alg : 0xffff */
1983 tokbody.Seal_Alg[0] = conf_flag ? 0x02 : 0xff;
1984 tokbody.Seal_Alg[1] = conf_flag ? 0x00 : 0xff;
1985 header.length = sizeof (gss_1964_wrap_token_desc);
1986 header.value = &tokbody;
1987
1988 /* Prepend confounder */
1989 read_random(confounder, ccmode->block_size);
1990 *minor = gss_prepend_mbuf(mbp, confounder, ccmode->block_size);
1991 if (*minor)
1992 return (GSS_S_FAILURE);
1993
1994 /* Append trailer of up to 8 bytes and set pad length in each trailer byte */
1995 padlen = 8 - len % 8;
1996 for (int i = 0; i < padlen; i++)
1997 pad[i] = padlen;
1998 *minor = gss_append_mbuf(*mbp, pad, padlen);
1999 if (*minor)
2000 return (GSS_S_FAILURE);
2001
2002 len += ccmode->block_size + padlen;
2003
2004 /* Hash the data */
2005 *minor = krb5_mic_mbuf(cctx, &header, *mbp, 0, len, NULL, hashval, NULL, 0, 0);
2006 if (*minor)
2007 return (GSS_S_FAILURE);
2008
2009 /* Make the token */
2010 gss_krb5_3des_token_put(ctx, tokbody, &hash, len, &mic);
2011
2012 if (conf_flag) {
2013 *minor = krb5_crypt_mbuf(cctx, mbp, len, 1, 0);
2014 if (*minor)
2015 return (GSS_S_FAILURE);
2016 }
2017
2018 *minor = gss_prepend_mbuf(mbp, mic.value, mic.length);
2019
2020 return (*minor ? GSS_S_FAILURE : GSS_S_COMPLETE);
2021 }
2022
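/*
 * unwrap for the RFC 1964 DES3 mech. The wrap token is copied from the
 * front of the chain and parsed, first assuming a sealed payload
 * (SEAL_ALG 02 00) and then falling back to a plaintext one (ff ff).
 * The token header is split off, the payload is decrypted if it was
 * sealed, the checksum is verified, and the trailing pad bytes (whose
 * count is carried in the last data byte) and the leading confounder
 * block are stripped.
 */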
2023 uint32_t
2024 gss_krb5_3des_unwrap_mbuf(uint32_t *minor,
2025 gss_ctx_id_t ctx,
2026 mbuf_t *mbp,
2027 size_t len,
2028 int *conf_state,
2029 gss_qop_t *qop)
2030 {
2031 crypto_ctx_t cctx = &ctx->gss_cryptor;
2032 const struct ccmode_cbc *ccmode = cctx->dec_mode;
2033 size_t length = 0, offset;
2034 gss_buffer_desc hash;
2035 uint8_t hashval[cctx->digest_size];
2036 gss_buffer_desc itoken;
2037 uint8_t tbuffer[GSS_KRB5_3DES_MAXTOKSZ + cctx->digest_size];
2038 itoken.length = GSS_KRB5_3DES_MAXTOKSZ + cctx->digest_size;
2039 itoken.value = tbuffer;
2040 gss_1964_wrap_token_desc wrap = wrap_1964_token;
2041 gss_buffer_desc header;
2042 uint8_t padlen;
2043 mbuf_t smb, tmb;
2044 int cflag, verified, reverse = 0;
2045
2046 if (len < GSS_KRB5_3DES_MAXTOKSZ) {
2047 *minor = EBADRPC;
2048 return (GSS_S_FAILURE);
2049 }
2050
2051 if (*qop == GSS_C_QOP_REVERSE)
2052 reverse = 1;
2053 *qop = GSS_C_QOP_DEFAULT;
2054
2055 *minor = mbuf_copydata(*mbp, 0, itoken.length, itoken.value);
2056 if (*minor)
2057 return (GSS_S_FAILURE);
2058
2059 hash.length = cctx->digest_size;
2060 hash.value = hashval;
2061 wrap.Sign_Alg[0] = 0x04;
2062 wrap.Sign_Alg[1] = 0x00;
2063 wrap.Seal_Alg[0] = 0x02;
2064 wrap.Seal_Alg[1] = 0x00;
2065
2066 for (cflag = 1; cflag >= 0; cflag--) {
2067 *minor = gss_krb5_3des_token_get(ctx, &itoken, wrap, &hash, &offset, &length, reverse);
2068 if (*minor == 0)
2069 break;
2070 wrap.Seal_Alg[0] = 0xff;
2071 wrap.Seal_Alg[1] = 0xff;
2072 }
2073 if (*minor)
2074 return (GSS_S_FAILURE);
2075
2076 if (conf_state)
2077 *conf_state = cflag;
2078
2079 /*
2080 * Separate off the header
2081 */
2082 *minor = gss_normalize_mbuf(*mbp, offset, &length, &smb, &tmb, 0);
2083 if (*minor)
2084 return (GSS_S_FAILURE);
2085
2086 assert(tmb == NULL);
2087
2088 /* Decrypt the chain if needed */
2089 if (cflag) {
2090 *minor = krb5_crypt_mbuf(cctx, &smb, length, 0, NULL);
2091 if (*minor)
2092 return (GSS_S_FAILURE);
2093 }
2094
2095 /* Verify the mic */
2096 header.length = sizeof(gss_1964_wrap_token_desc);
2097 header.value = &wrap;
2098
2099 *minor = krb5_mic_mbuf(cctx, &header, smb, 0, length, NULL, hashval, &verified, 0, 0);
2100 if (*minor)
2101 return (GSS_S_FAILURE);
2102 if (!verified)
2103 return (GSS_S_BAD_SIG);
2104
2105 /* Get the pad bytes */
2106 *minor = mbuf_copydata(smb, length - 1, 1, &padlen);
2107 if (*minor)
2108 return (GSS_S_FAILURE);
2109
2110 /* Strip the confounder and trailing pad bytes */
2111 gss_strip_mbuf(smb, -padlen);
2112 gss_strip_mbuf(smb, ccmode->block_size);
2113
2114 if (*mbp != smb) {
2115 mbuf_freem(*mbp);
2116 *mbp = smb;
2117 }
2118
2119 return (GSS_S_COMPLETE);
2120 }
2121
2122 static const char *
2123 etype_name(etypes etype)
2124 {
2125 switch (etype) {
2126 case DES3_CBC_SHA1_KD:
2127 return ("des3-cbc-sha1");
2128 case AES128_CTS_HMAC_SHA1_96:
2129 return ("aes128-cts-hmac-sha1-96");
2130 case AES256_CTS_HMAC_SHA1_96:
2131 return ("aes256-cts-hmac-sha1-96");
2132 default:
2133 return ("unknown enctype");
2134 }
2135 }
2136
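/*
 * Check that the negotiated enctype is usable with the lucid protocol:
 * protocol 0 (RFC 1964) requires des3-cbc-sha1, protocol 1 (RFC 4121)
 * requires one of the AES CTS HMAC-SHA1 enctypes. Returns 1 if
 * supported; logs and returns 0 otherwise.
 */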
2137 static int
2138 supported_etype(uint32_t proto, etypes etype)
2139 {
2140 const char *proto_name;
2141
2142 switch(proto) {
2143 case 0:
2144 /* RFC 1964 */
2145 proto_name = "RFC 1964 krb5 gss mech";
2146 switch (etype) {
2147 case DES3_CBC_SHA1_KD:
2148 return (1);
2149 default:
2150 break;
2151 }
2152 break;
2153 case 1:
2154 /* RFC 4121 */
2155 proto_name = "RFC 4121 krb5 gss mech";
2156 switch (etype) {
2157 case AES256_CTS_HMAC_SHA1_96:
2158 case AES128_CTS_HMAC_SHA1_96:
2159 return (1);
2160 default:
2161 break;
2162 }
2163 break;
2164 default:
2165 proto_name = "Unknown krb5 gss mech";
2166 break;
2167 }
2168 printf("%s: Unsupported encryption type %s (%d) for protocol %s (%d)\n",
2169 __func__, etype_name(etype), etype, proto_name, proto);
2170 return (0);
2171 }
2172
2173 /*
2174 * Kerberos gss mech entry points
2175 */
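/*
 * Each entry point validates the lucid context version, rejects
 * enctypes that do not match the context's protocol, and dispatches to
 * the RFC 1964 DES3 or RFC 4121 CFX implementation.
 */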
2176 uint32_t
2177 gss_krb5_get_mic(uint32_t *minor, /* minor_status */
2178 gss_ctx_id_t ctx, /* context_handle */
2179 gss_qop_t qop, /* qop_req */
2180 gss_buffer_t mbp, /* message buffer */
2181 gss_buffer_t mic /* message_token */)
2182 {
2183 uint32_t minor_stat = 0;
2184
2185 if (minor == NULL)
2186 minor = &minor_stat;
2187 *minor = 0;
2188
2189 /* Validate context */
2190 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1)
2191 return (GSS_S_NO_CONTEXT);
2192
2193 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2194 *minor = ENOTSUP;
2195 return (GSS_S_FAILURE);
2196 }
2197
2198 switch(ctx->gss_lucid_ctx.key_data.proto) {
2199 case 0:
2200 /* RFC 1964 DES3 case */
2201 return (gss_krb5_3des_get_mic(minor, ctx, qop, mbp, mic));
2202 case 1:
2203 /* RFC 4121 CFX case */
2204 return (gss_krb5_cfx_get_mic(minor, ctx, qop, mbp, mic));
2205 }
2206
2207 return (GSS_S_COMPLETE);
2208 }
2209
2210 uint32_t
2211 gss_krb5_verify_mic(uint32_t *minor, /* minor_status */
2212 gss_ctx_id_t ctx, /* context_handle */
2213 gss_buffer_t mbp, /* message_buffer */
2214 gss_buffer_t mic, /* message_token */
2215 gss_qop_t *qop /* qop_state */)
2216 {
2217 uint32_t minor_stat = 0;
2218 gss_qop_t qop_val = GSS_C_QOP_DEFAULT;
2219
2220 if (minor == NULL)
2221 minor = &minor_stat;
2222 if (qop == NULL)
2223 qop = &qop_val;
2224
2225 *minor = 0;
2226
2227 /* Validate context */
2228 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1)
2229 return (GSS_S_NO_CONTEXT);
2230
2231 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2232 *minor = ENOTSUP;
2233 return (GSS_S_FAILURE);
2234 }
2235
2236 switch(ctx->gss_lucid_ctx.key_data.proto) {
2237 case 0:
2238 /* RFC 1964 DES3 case */
2239 return (gss_krb5_3des_verify_mic(minor, ctx, mbp, mic, qop));
2240 case 1:
2241 /* RFC 4121 CFX case */
2242 return (gss_krb5_cfx_verify_mic(minor, ctx, mbp, mic, qop));
2243 }
2244 return (GSS_S_COMPLETE);
2245 }
2246
2247 uint32_t
2248 gss_krb5_get_mic_mbuf(uint32_t *minor, /* minor_status */
2249 gss_ctx_id_t ctx, /* context_handle */
2250 gss_qop_t qop, /* qop_req */
2251 mbuf_t mbp, /* message mbuf */
2252 size_t offset, /* offset */
2253 size_t len, /* length */
2254 gss_buffer_t mic /* message_token */)
2255 {
2256 uint32_t minor_stat = 0;
2257
2258 if (minor == NULL)
2259 minor = &minor_stat;
2260 *minor = 0;
2261
2262 if (len == 0)
2263 len = ~(size_t)0;
2264
2265 /* Validate context */
2266 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1)
2267 return (GSS_S_NO_CONTEXT);
2268
2269 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2270 *minor = ENOTSUP;
2271 return (GSS_S_FAILURE);
2272 }
2273
2274 switch(ctx->gss_lucid_ctx.key_data.proto) {
2275 case 0:
2276 /* RFC 1964 DES3 case */
2277 return (gss_krb5_3des_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic));
2278 case 1:
2279 /* RFC 4121 CFX case */
2280 return (gss_krb5_cfx_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic));
2281 }
2282
2283 return (GSS_S_COMPLETE);
2284 }
2285
2286 uint32_t
2287 gss_krb5_verify_mic_mbuf(uint32_t *minor, /* minor_status */
2288 gss_ctx_id_t ctx, /* context_handle */
2289 mbuf_t mbp, /* message_buffer */
2290 size_t offset, /* offset */
2291 size_t len, /* length */
2292 gss_buffer_t mic, /* message_token */
2293 gss_qop_t *qop /* qop_state */)
2294 {
2295 uint32_t minor_stat = 0;
2296 gss_qop_t qop_val = GSS_C_QOP_DEFAULT;
2297
2298 if (minor == NULL)
2299 minor = &minor_stat;
2300 if (qop == NULL)
2301 qop = &qop_val;
2302
2303 *minor = 0;
2304
2305 if (len == 0)
2306 len = ~(size_t)0;
2307
2308 /* Validate context */
2309 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1)
2310 return (GSS_S_NO_CONTEXT);
2311
2312 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2313 *minor = ENOTSUP;
2314 return (GSS_S_FAILURE);
2315 }
2316
2317 switch(ctx->gss_lucid_ctx.key_data.proto) {
2318 case 0:
2319 /* RFC 1964 DES3 case */
2320 return (gss_krb5_3des_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop));
2321 case 1:
2322 /* RFC 4121 CFX case */
2323 return (gss_krb5_cfx_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop));
2324 }
2325
2326 return (GSS_S_COMPLETE);
2327 }
2328
2329 uint32_t
2330 gss_krb5_wrap_mbuf(uint32_t *minor, /* minor_status */
2331 gss_ctx_id_t ctx, /* context_handle */
2332 int conf_flag, /* conf_req_flag */
2333 gss_qop_t qop, /* qop_req */
2334 mbuf_t *mbp, /* input/output message_buffer */
2335 size_t offset, /* offset */
2336 size_t len, /* length */
2337 int *conf_state /* conf state */)
2338 {
2339 uint32_t major, minor_stat = 0;
2340 mbuf_t smb, tmb;
2341 int conf_val = 0;
2342
2343 if (minor == NULL)
2344 minor = &minor_stat;
2345 if (conf_state == NULL)
2346 conf_state = &conf_val;
2347
2348 *minor = 0;
2349
2350 /* Validate context */
2351 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1)
2352 return (GSS_S_NO_CONTEXT);
2353
2354 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2355 *minor = ENOTSUP;
2356 return (GSS_S_FAILURE);
2357 }
2358
2359 gss_normalize_mbuf(*mbp, offset, &len, &smb, &tmb, 0);
2360
2361 switch(ctx->gss_lucid_ctx.key_data.proto) {
2362 case 0:
2363 /* RFC 1964 DES3 case */
2364 major = gss_krb5_3des_wrap_mbuf(minor, ctx, conf_flag, qop, &smb, len, conf_state);
2365 break;
2366 case 1:
2367 /* RFC 4121 CFX case */
2368 major = gss_krb5_cfx_wrap_mbuf(minor, ctx, conf_flag, qop, &smb, len, conf_state);
2369 break;
2370 }
2371
2372 if (offset)
2373 gss_join_mbuf(*mbp, smb, tmb);
2374 else {
2375 *mbp = smb;
2376 gss_join_mbuf(smb, tmb, NULL);
2377 }
2378
2379 return (major);
2380 }
2381
2382 uint32_t
2383 gss_krb5_unwrap_mbuf(uint32_t *minor, /* minor_status */
2384 gss_ctx_id_t ctx, /* context_handle */
2385 mbuf_t *mbp, /* input/output message_buffer */
2386 size_t offset, /* offset */
2387 size_t len, /* length */
2388 int *conf_flag, /* conf_state */
2389 gss_qop_t *qop /* qop state */)
2390 {
2391 uint32_t major, minor_stat = 0;
2392 gss_qop_t qop_val = GSS_C_QOP_DEFAULT;
2393 int conf_val = 0;
2394 mbuf_t smb, tmb;
2395
2396 if (minor == NULL)
2397 minor = &minor_stat;
2398 if (qop == NULL)
2399 qop = &qop_val;
2400 if (conf_flag == NULL)
2401 conf_flag = &conf_val;
2402
*minor = 0;

2403 /* Validate context */
2404 if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1)
2405 return (GSS_S_NO_CONTEXT);
2406
2407 if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) {
2408 *minor = ENOTSUP;
2409 return (GSS_S_FAILURE);
2410 }
2411
2412 gss_normalize_mbuf(*mbp, offset, &len, &smb, &tmb, 0);
2413
2414 switch(ctx->gss_lucid_ctx.key_data.proto) {
2415 case 0:
2416 /* RFC 1964 DES3 case */
2417 major = gss_krb5_3des_unwrap_mbuf(minor, ctx, &smb, len, conf_flag, qop);
2418 break;
2419 case 1:
2420 /* RFC 4121 CFX case */
2421 major = gss_krb5_cfx_unwrap_mbuf(minor, ctx, &smb, len, conf_flag, qop);
2422 break;
2423 }
2424
2425 if (offset)
2426 gss_join_mbuf(*mbp, smb, tmb);
2427 else {
2428 *mbp = smb;
2429 gss_join_mbuf(smb, tmb, NULL);
2430 }
2431
2432 return (major);
2433 }
2434
2435 #include <nfs/xdr_subs.h>
2436
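/*
 * Decode the XDR-encoded lucid security context handed to
 * gss_krb5_make_context(): version (must be 1), initiator flag, end
 * time, send and receive sequence numbers, the per-mechanism protocol
 * data (sign/seal algorithms for RFC 1964, the acceptor_subkey flag for
 * RFC 4121), and finally the context key, whose length must match the
 * declared enctype.
 */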
2437 static int
2438 xdr_lucid_context(void *data, size_t length, lucid_context_t lctx)
2439 {
2440 struct xdrbuf xb;
2441 int error = 0;
2442 uint32_t keylen = 0;
2443
2444 xb_init_buffer(&xb, data, length);
2445 xb_get_32(error, &xb, lctx->vers);
2446 if (!error && lctx->vers != 1) {
2447 error = EINVAL;
2448 printf("%s: invalid version %d\n", __func__, (int)lctx->vers);
2449 goto out;
2450 }
2451 xb_get_32(error, &xb, lctx->initiate);
2452 if (error) {
2453 printf("%s: Could not decode initiate\n", __func__);
2454 goto out;
2455 }
2456 xb_get_32(error, &xb, lctx->endtime);
2457 if (error) {
2458 printf("%s: Could not decode endtime\n", __func__);
2459 goto out;
2460 }
2461 xb_get_64(error, &xb, lctx->send_seq);
2462 if (error) {
2463 printf("%s: Could not decode send_seq\n", __func__);
2464 goto out;
2465 }
2466 xb_get_64(error, &xb, lctx->recv_seq);
2467 if (error) {
2468 printf("%s: Could not decode recv_seq\n", __func__);
2469 goto out;
2470 }
2471 xb_get_32(error, &xb, lctx->key_data.proto);
2472 if (error) {
2473 printf("%s: Could not decode mech protocol\n", __func__);
2474 goto out;
2475 }
2476 switch(lctx->key_data.proto) {
2477 case 0:
2478 xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_1964.sign_alg);
2479 xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_1964.seal_alg);
2480 if (error)
2481 printf("%s: Could not decode rfc1964 sign and seal\n", __func__);
2482 break;
2483 case 1:
2484 xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey);
2485 if (error)
2486 printf("%s: Could not decode rfc4121 acceptor_subkey\n", __func__);
2487 break;
2488 default:
2489 printf("%s: Invalid mech protocol %d\n", __func__, (int)lctx->key_data.proto);
2490 error = EINVAL;
2491 }
2492 if (error)
2493 goto out;
2494 xb_get_32(error, &xb, lctx->ctx_key.etype);
2495 if (error) {
2496 printf("%s: Could not decode key enctype\n", __func__);
2497 goto out;
2498 }
2499 switch(lctx->ctx_key.etype) {
2500 case DES3_CBC_SHA1_KD:
2501 keylen = 24;
2502 break;
2503 case AES128_CTS_HMAC_SHA1_96:
2504 keylen = 16;
2505 break;
2506 case AES256_CTS_HMAC_SHA1_96:
2507 keylen = 32;
2508 break;
2509 default:
2510 error = ENOTSUP;
2511 goto out;
2512 }
2513 xb_get_32(error, &xb, lctx->ctx_key.key.key_len);
2514 if (error) {
2515 printf("%s: could not decode key length\n", __func__);
2516 goto out;
2517 }
2518 if (lctx->ctx_key.key.key_len != keylen) {
2519 error = EINVAL;
2520 printf("%s: etype = %d keylen = %d expected keylen = %d\n", __func__,
2521 lctx->ctx_key.etype, lctx->ctx_key.key.key_len, keylen);
2522 goto out;
2523 }
2524
2525 lctx->ctx_key.key.key_val = xb_malloc(keylen);
2526 if (lctx->ctx_key.key.key_val == NULL) {
2527 printf("%s: could not get memory for key\n", __func__);
2528 error = ENOMEM;
2529 goto out;
2530 }
2531 error = xb_get_bytes(&xb, (char *)lctx->ctx_key.key.key_val, keylen, 1);
2532 if (error) {
2533 printf("%s: could not get key value\n", __func__);
2534 xb_free(lctx->ctx_key.key.key_val);
2535 }
2536 out:
2537 return (error);
2538 }
2539
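/*
 * Construct a kernel GSS context from a lucid context blob. The blob is
 * decoded with xdr_lucid_context(), the enctype is checked against the
 * protocol, and the crypto context is initialized. Returns NULL if
 * corecrypto is unavailable, the blob cannot be decoded, or the enctype
 * is unsupported; the caller's data buffer is freed on both the success
 * and failure paths.
 */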
2540 gss_ctx_id_t
2541 gss_krb5_make_context(void *data, uint32_t datalen)
2542 {
2543 gss_ctx_id_t ctx;
2544
2545 if (!corecrypto_available())
2546 return (NULL);
2547
2548 gss_krb5_mech_init();
2549 MALLOC(ctx, gss_ctx_id_t, sizeof (struct gss_ctx_id_desc), M_TEMP, M_WAITOK | M_ZERO);
2550 if (xdr_lucid_context(data, datalen, &ctx->gss_lucid_ctx) ||
2551 !supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_lucid_ctx.ctx_key.etype)) {
2552 FREE(ctx, M_TEMP);
2553 FREE(data, M_TEMP);
2554 return (NULL);
2555 }
2556
2557 /* Set up crypto context */
2558 gss_crypto_ctx_init(&ctx->gss_cryptor, &ctx->gss_lucid_ctx);
2559 FREE(data, M_TEMP);
2560
2561 return (ctx);
2562 }
2563
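/*
 * Tear down a context created by gss_krb5_make_context(): release the
 * crypto state, free the session key, and clear the lucid context
 * before freeing the context itself.
 */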
2564 void
2565 gss_krb5_destroy_context(gss_ctx_id_t ctx)
2566 {
2567 if (ctx == NULL)
2568 return;
2569 gss_crypto_ctx_free(&ctx->gss_cryptor);
2570 FREE(ctx->gss_lucid_ctx.ctx_key.key.key_val, M_TEMP);
2571 cc_clear(sizeof (ctx->gss_lucid_ctx), &ctx->gss_lucid_ctx);
2572 FREE(ctx, M_TEMP);
2573 }