/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <kern/thread.h>
#include <mach-o/mach_header.h>

#include <mach_host.h>

#define WRITE_PROTECT_MODULE_TEXT (0)

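/*
 * The loaded-module list is a singly linked list headed by 'kmod' and
 * protected by kmod_lock; ids are handed out from kmod_index and are
 * never reused.  kmod_queue_lock protects the command queue used to
 * pass requests up to user space (see kmod_queue_cmd below).
 */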
kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)

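/*
 * One queued request for user space.  Each entry holds a private copy
 * of a command packet; a consumer (in practice the user-level kext
 * loader) collects and frees it through the KMOD_CNTL_GET_CMD case of
 * kmod_control() below.
 */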
typedef struct cmd_queue_entry {
    queue_chain_t links;
    vm_address_t data;
    vm_size_t size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

void
kmod_init()
{
    simple_lock_init(&kmod_lock, ETAP_MISC_Q);
    simple_lock_init(&kmod_queue_lock, ETAP_MISC_Q);
    queue_init(&kmod_cmd_queue);
}

kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}

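/*
 * The *_locked variants return a kalloc'ed snapshot of the matching
 * kmod_info rather than a pointer into the live list; the caller owns
 * the copy and must kfree() it.  Note that they serialize on
 * kmod_queue_lock, not kmod_lock, while walking the list.
 */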
kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree((vm_offset_t)kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree((vm_offset_t)kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

// XXX add a nocopy flag??

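/*
 * Copy a command packet into kernel-owned memory, enqueue it, and wake
 * one thread blocked in the KMOD_CNTL_GET_CMD case of kmod_control().
 * The caller keeps ownership of 'data'; the queue entry holds its own
 * copy.
 */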
kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}

kern_return_t
kmod_load_extension(char *name)
{
    kern_return_t rc;
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    rc = kmod_queue_cmd((vm_address_t)data, size);
    kfree((vm_offset_t)data, size); // kmod_queue_cmd() keeps its own copy
    return rc;
}

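/*
 * The dependencies packet is variable length: an int type, the module
 * name, 'count' dependency names of KMOD_MAX_NAME bytes each, and a
 * single zero byte that terminates the list with an empty name.
 */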
kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kern_return_t rc;
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    rc = kmod_queue_cmd((vm_address_t)data, size);
    kfree((vm_offset_t)data, size); // kmod_queue_cmd() keeps its own copy
    return rc;
}

kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kern_return_t rc;
    kmod_generic_cmd_t *data;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    bcopy(generic_data, data->data, size); // copy the caller's payload into the packet

    rc = kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
    kfree((vm_offset_t)data, size + sizeof(int)); // kmod_queue_cmd() keeps its own copy
    return rc;
}

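/*
 * sectPRELINKB/sectSizePRELINK bound the prelinked-kext segment.
 * Modules inside it are backed by static kernel memory, so they are
 * neither wired at load time nor unwired/deallocated at unload.
 */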
extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;

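/*
 * Register a module that has already been mapped into the kernel:
 * validate page alignment, wire everything past the header (unless it
 * is prelinked), optionally write-protect its text, and link the
 * kmod_info into the list under kmod_lock with a fresh id.
 */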
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
    if (!isPrelink) {
        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
                         info->address + info->size, VM_PROT_DEFAULT, FALSE);
        if (rc != KERN_SUCCESS) {
            return rc;
        }
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header*) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr),
                                  trunc_page(sect->addr + sect->size),
                                  VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        if (!isPrelink) {
            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                               info->address + info->size, FALSE);
            assert(rc == KERN_SUCCESS);
        }
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

#if DEBUG
    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

    return KERN_SUCCESS;
}

kern_return_t
kmod_create(host_priv_t host_priv,
            kmod_info_t *info,
            kmod_t *id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}

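/*
 * Fake entries describe built-in kernel components: name and version
 * only, with no backing code or data.  reference_count starts at 1 so
 * they can never be unloaded, started, or stopped.
 */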
kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    kmod_info_t *info;

    if (!name || !version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {

        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    // make the fake entry
    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));
    info->reference_count = 1; // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = info->size = info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        kfree((vm_offset_t)info, sizeof(kmod_info_t));
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}

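/*
 * Unlink a module and free its memory.  Fails if anything still holds
 * a reference to it.  Its own outgoing references are dropped
 * (decrementing each dependency's count), then its pages are either
 * returned to the static pool (prelinked) or unwired and deallocated.
 */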
kern_return_t
kmod_destroy_internal(kmod_t id)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (k->reference_count != 0) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) { // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree((vm_offset_t)t, sizeof(struct kmod_reference));
            }

#if DEBUG
            printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

            if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK))) {
                vm_offset_t virt = ml_static_ptovirt(k->address);
                if (virt) {
                    ml_static_mfree(virt, k->size);
                }
            } else {
                rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                                   k->address + k->size, FALSE);
                assert(rc == KERN_SUCCESS);

                rc = vm_deallocate(kernel_map, k->address, k->size);
                assert(rc == KERN_SUCCESS);
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
             kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_destroy_internal(id);
}

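/*
 * Run a module's start or stop routine.  Refused while the module is
 * referenced by others.  Any argument data sent by the caller arrives
 * as a vm_map_copy_t and is copied out, handed to the routine, then
 * deallocated; *data and *dataCount are consumed and zeroed either way.
 */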
kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)();
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_copyout(kernel_map, (vm_offset_t *)&user_data, (vm_map_copy_t)*data);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}

/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
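/*
 * For retain/release the kmod_t argument packs two ids: the dependent
 * ("from") module and its dependency ("to"), recovered with
 * KMOD_UNPACK_FROM_ID()/KMOD_UNPACK_TO_ID().  A retain records the
 * edge on the "from" module's reference_list and bumps the "to"
 * module's reference_count, which keeps the dependency loaded.
 */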
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t; // reference to
    kmod_info_t *f; // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree((vm_offset_t)r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}

kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t; // reference to
    kmod_info_t *f; // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t *p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) { // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree((vm_offset_t)r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            goto finish;
        }
        p = r;
        r = r->next;
    }

    simple_unlock(&kmod_lock);

finish:

    return rc;
}

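/*
 * Host-privileged control entry point, dispatching to the routines
 * above.  KMOD_CNTL_GET_CMD blocks (ABORTSAFE) until a command is
 * queued, then transfers the packet to the caller as a vm_map_copy_t,
 * requeueing it if the copyin fails.
 */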
kern_return_t
kmod_control(host_priv_t host_priv,
             kmod_t id,
             kmod_control_flavor_t flavor,
             kmod_args_t *data,
             mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD:
        {
            cmd_queue_entry_t *e;

            /*
             * Throw away any data the user may have sent in error.
             * We must do this because we are likely to return data
             * for these commands; keeping it would leak whatever
             * the user sent us.
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = 0;
                *dataCount = 0;
            }

            simple_lock(&kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                wait_result_t res;

                res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
                                               &kmod_queue_lock,
                                               THREAD_ABORTSAFE);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    simple_unlock(&kmod_queue_lock);
                    assert(res == THREAD_INTERRUPTED);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            simple_unlock(&kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, e->data, e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                simple_lock(&kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                simple_unlock(&kmod_queue_lock);
                *data = 0;
                *dataCount = 0;
                return rc;
            }
            *dataCount = e->size;

            kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));

            break;
        }

      default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}

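/*
 * Snapshot the module list for user space.  Two passes under
 * kmod_lock: size everything, then copy.  In the copy, each entry's
 * 'next' pointer is overwritten with the module's own kernel address
 * (a stable identifier for user level), reference_list is replaced by
 * a count, and the reference entries themselves are packed after the
 * kmod_info array.  If the list grew between passes, the buffer is
 * freed and the whole operation retried.
 */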
kern_return_t
kmod_get_info(host_t host,
              kmod_info_array_t *kmods,
              mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data; retry if kmod's size has changed (grown).
    // the copied-out data is tweaked so user level can tell what's what:
    // the copied-out k->next pointers are changed to point to themselves,
    // and each k->reference_list is changed into a count, with the
    // references tacked onto the end of the data packet in the order
    // they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note: the last 'k' in the chain has its next == 0;
            // since there can only be one like that, the caller
            // handles this case
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}

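/*
 * Invoke every function pointer recorded in the named __TEXT section.
 * This is how a module's C++ static constructors ("__constructor")
 * and destructors ("__destructor") get run at load and unload.
 */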
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine * routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, (char *) sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

kern_return_t
kmod_default_start(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

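/*
 * Debugger/panic-path helper: report which loaded modules (and their
 * dependencies) cover the addresses in a backtrace.  Prints via
 * kdb_printf and probes each structure's page with pmap_find_phys()
 * before touching it, since memory may be inconsistent at this point.
 */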
void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    vm_offset_t * kscan_addr = 0;
    kmod_info_t * k;
    kmod_reference_t * r;
    int i;
    int found_kmod = 0;

    for (k = kmod; k; k = k->next) {
        if (!k->address) {
            continue; // skip fake entries for built-in kernel components
        }
        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
            kdb_printf(" kmod scan stopped due to missing "
                "kmod page: %08x\n", k);
            break;
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    kdb_printf(" Kernel loadable modules in backtrace "
                        "(with dependencies):\n");
                }
                found_kmod = 1;
                kdb_printf(" %s(%s)@0x%x\n",
                    k->name, k->version, k->address);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
                        kdb_printf(" kmod dependency scan stopped "
                            "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (!rinfo->address) {
                        continue; // skip fake entries for built-ins
                    }

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
                        kdb_printf(" kmod dependency scan stopped "
                            "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    kdb_printf(" dependency: %s(%s)@0x%x\n",
                        rinfo->name, rinfo->version, rinfo->address);
                }

                break; // only report this kmod for one backtrace address
            }
        }
    }
}