/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */
#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <kern/thread.h>

#include <mach_host.h>

kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)

typedef struct cmd_queue_entry {
    queue_chain_t    links;
    vm_address_t     data;
    vm_size_t        size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

void
kmod_init()
{
    simple_lock_init(&kmod_lock, ETAP_MISC_Q);
    simple_lock_init(&kmod_queue_lock, ETAP_MISC_Q);
    queue_init(&kmod_cmd_queue);
}

kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}

// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}
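
/*
 * Commands queued above are drained, one per call, by the user-level
 * extension loader through kmod_control() with the KMOD_CNTL_GET_CMD
 * flavor (see below). A minimal sketch of that consumer side, assuming
 * the caller already holds the privileged host port:
 *
 *     kmod_args_t cmd = 0;
 *     mach_msg_type_number_t cmd_size = 0;
 *     kern_return_t kr;
 *
 *     kr = kmod_control(host_priv, 0, KMOD_CNTL_GET_CMD, &cmd, &cmd_size);
 *     // blocks until kmod_queue_cmd() wakes it, or returns KERN_ABORTED
 */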

kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    return kmod_queue_cmd((vm_address_t)data, size);
}

kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    return kmod_queue_cmd((vm_address_t)data, size);
}
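
/*
 * Sketch of a caller (hypothetical bundle names): the dependency list
 * must be a NULL-terminated array of strings, as the counting loop
 * above assumes.
 *
 *     char *deps[] = { "com.apple.driver.FooSupport", 0 };
 *     kmod_load_extension_with_dependencies("com.apple.driver.Foo", deps);
 */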

kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    bcopy(generic_data, data->data, size);  // bcopy(src, dst, len): copy the payload in, not out

    return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
}

kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
                     info->address + info->size, VM_PROT_DEFAULT, FALSE);
    if (rc != KERN_SUCCESS) {
        return rc;
    }

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                           info->address + info->size, FALSE);
        assert(rc == KERN_SUCCESS);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

#if DEBUG
    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
           info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

    return KERN_SUCCESS;
}

kern_return_t
kmod_create(host_priv_t host_priv,
            kmod_info_t *info,
            kmod_t *id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}

kern_return_t
kmod_create_fake(char *name, char *version)
{
    kmod_info_t *info;

    if (!name || !version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {
        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    // make the fake entry
    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));  //NIK fixed this part
    info->reference_count = 1;  // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = info->size = info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        kfree((vm_offset_t)info, sizeof(kmod_info_t));  // don't leak the entry on the duplicate-name path
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}
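
/*
 * A sketch of intended use (hypothetical name): components linked
 * directly into the kernel can register a fake entry so they show up
 * in kmod listings, while the reference_count of 1 set above keeps
 * them from being started, stopped, or unloaded.
 *
 *     kmod_create_fake("com.apple.kernel.mach", "1.0");
 */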

kern_return_t
kmod_destroy_internal(kmod_t id)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (k->reference_count != 0) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {  // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree((vm_offset_t)t, sizeof(struct kmod_reference));
            }

#if DEBUG
            printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                   k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

            rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                               k->address + k->size, FALSE);
            assert(rc == KERN_SUCCESS);

            rc = vm_deallocate(kernel_map, k->address, k->size);
            assert(rc == KERN_SUCCESS);

            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
             kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_destroy_internal(id);
}

kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)();
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_copyout(kernel_map, (vm_offset_t *)&user_data, (vm_map_copy_t)*data);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}

/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;  // reference to
    kmod_info_t *f;  // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree((vm_offset_t)r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}
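
/*
 * The id handed to kmod_retain()/kmod_release() packs two module ids
 * into one kmod_t, unpacked with the KMOD_UNPACK_*_ID macros above.
 * Assuming the matching pack macro from <mach/kmod.h>, a caller
 * recording that module `from` depends on module `to` would look like:
 *
 *     kmod_retain(KMOD_PACK_IDS(from_id, to_id));
 */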

kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;  // reference to
    kmod_info_t *f;  // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t *p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);  // don't leak the lock on the error path
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {  // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree((vm_offset_t)r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            goto finish;
        }
        p = r;
        r = r->next;
    }

    simple_unlock(&kmod_lock);

finish:

    return rc;
}

kern_return_t
kmod_control(host_priv_t host_priv,
             kmod_t id,
             kmod_control_flavor_t flavor,
             kmod_args_t *data,
             mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                                    data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD:
        {
            cmd_queue_entry_t *e;

            /*
             * Throw away any data the user may have sent in error.
             * We must do this because we are likely to return
             * some data for this command; otherwise we would leak
             * whatever data the user sent us in error.
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = 0;
                *dataCount = 0;
            }

            simple_lock(&kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                assert_wait((event_t)&kmod_cmd_queue, THREAD_ABORTSAFE);
                simple_unlock(&kmod_queue_lock);
                thread_block((void(*)(void))0);
                simple_lock(&kmod_queue_lock);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    simple_unlock(&kmod_queue_lock);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            simple_unlock(&kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, e->data, e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                simple_lock(&kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                simple_unlock(&kmod_queue_lock);
                *data = 0;
                *dataCount = 0;
                return rc;
            }
            *dataCount = e->size;

            kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));

            break;
        }

      default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}
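
/*
 * Sketch of a privileged start request with no in-band arguments
 * (host_priv is assumed to be the privileged host port):
 *
 *     kmod_args_t args = 0;
 *     mach_msg_type_number_t args_size = 0;
 *     kern_return_t kr;
 *
 *     kr = kmod_control(host_priv, id, KMOD_CNTL_START, &args, &args_size);
 */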

kern_return_t
kmod_get_info(host_t host,
              kmod_info_array_t *kmods,
              mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // Copy the kmod list into data, retrying if its size has changed (grown).
    // The copied-out data is tweaked so user level can figure out what's what:
    // each copied k->next pointer is changed to point at the kernel kmod_info
    // it was copied from, each k->reference_list is changed into a reference
    // count, and the references themselves are tacked onto the end of the
    // data packet in the order they are found.

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // Note: the last 'k' in the chain has next == 0; since there can
            // only be one like that, that case is handled by the caller.
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
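
/*
 * Sketch of user-level traversal of the buffer returned above, under
 * the layout just described (all kmod_info_t records first, then the
 * flattened reference entries; the last info record has next == 0):
 *
 *     kmod_info_t *ki = (kmod_info_t *)kmods_buf;   // hypothetical name
 *     while (1) {
 *         printf("%s (%s)\n", ki->name, ki->version);
 *         if (!ki->next) break;
 *         ki++;
 *     }
 */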

#include <mach-o/loader.h>

extern void *getsectdatafromheader(struct mach_header *mhp,
                                   const char *segname,
                                   const char *sectname,
                                   int *size);

static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine * routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

kern_return_t
kmod_default_start(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

#define IS_IN_BACKTRACE 0xdeadbeef
#define IS_A_DEPENDENCY 0xbeefdead

void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_info_t *k;
    kmod_reference_t *r;
    int i, found_one = 0;

    // find backtrace addresses that are inside a kmod
    for (i = 0; i < cnt; i++, addr++) {
        k = kmod;
        while (k) {
            // XXX - validate page(s) that k points to
            if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {  /* Exit loop if page not mapped */
                printf("  kmod scan stopped due to missing page: %08X\n", k);
                break;
            }
            if ((*addr >= k->address) && (*addr < (k->address + k->size))) {
                // got one; blast info_version, we don't need it at this point
                k->info_version = IS_IN_BACKTRACE;
                found_one++;
                break;
            }
            k = k->next;
        }
    }
    if (!found_one) return;

    printf("  Kernel loadable modules in backtrace:\n");
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {  /* Exit loop if page not mapped */
            printf("  kmod scan stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_IN_BACKTRACE) {
            printf("     %s(%s)@0x%x\n", k->name, k->version, k->address);
        }
        k = k->next;
    }

    // look for dependencies
    k = kmod; found_one = 0;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {  /* Exit loop if page not mapped */
            printf("  kmod dependency scan stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_IN_BACKTRACE) {
            r = k->reference_list;
            while (r) {
                // XXX - validate page(s) that r and r->info point to
                if (pmap_extract(kernel_pmap, (vm_offset_t)r) == 0) {  /* Exit loop if page not mapped */
                    printf("  kmod validation scan stopped due to missing page: %08X\n", r);
                    break;
                }
                if (r->info->info_version != IS_IN_BACKTRACE) {
                    r->info->info_version = IS_A_DEPENDENCY;
                    found_one++;
                }
                r = r->next;
            }
        }
        k = k->next;
    }
    if (!found_one) goto cleanup;

    printf("  Kernel loadable module dependencies:\n");
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {  /* Exit loop if page not mapped */
            printf("  kmod dependency print stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_A_DEPENDENCY) {
            printf("     %s(%s)@0x%x\n", k->name, k->version, k->address);
        }
        k = k->next;
    }

cleanup:
    // restore info_version fields in case we double panic
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {  /* Exit loop if page not mapped */
            printf("  kmod dump cleanup stopped due to missing page: %08X\n", k);
            break;
        }
        k->info_version = KMOD_INFO_VERSION;
        k = k->next;
    }
}