/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <kern/thread.h>
#include <mach-o/mach_header.h>

#include <mach_host.h>

#define WRITE_PROTECT_MODULE_TEXT   (0)

kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)

typedef struct cmd_queue_entry {
    queue_chain_t   links;
    vm_address_t    data;
    vm_size_t       size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

void
kmod_init()
{
    simple_lock_init(&kmod_lock, ETAP_MISC_Q);
    simple_lock_init(&kmod_queue_lock, ETAP_MISC_Q);
    queue_init(&kmod_cmd_queue);
}

kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree((vm_offset_t)kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree((vm_offset_t)kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}
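
/*
 * Illustrative sketch (editor's addition, not original code): the queue
 * filled by kmod_queue_cmd() is drained from user space through the
 * KMOD_CNTL_GET_CMD flavor of kmod_control() below. Assuming the MIG
 * user stub of the same name and a host_priv port obtained elsewhere,
 * a daemon could poll it roughly like this:
 *
 *  kmod_args_t data = 0;
 *  mach_msg_type_number_t dataCount = 0;
 *
 *  if (kmod_control(host_priv, 0, KMOD_CNTL_GET_CMD,
 *                   &data, &dataCount) == KERN_SUCCESS) {
 *      int type = *(int *)data;    // first field of every packet
 *      // ...dispatch on KMOD_LOAD_EXTENSION_PACKET etc., then free:
 *      vm_deallocate(mach_task_self(), (vm_address_t)data, dataCount);
 *  }
 */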

kern_return_t
kmod_load_extension(char *name)
{
    kern_return_t rc;
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    // kmod_queue_cmd() copies the packet, so free our temporary copy
    rc = kmod_queue_cmd((vm_address_t)data, size);
    kfree((vm_offset_t)data, size);
    return rc;
}

kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kern_return_t rc;
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    // as above, kmod_queue_cmd() copies the packet
    rc = kmod_queue_cmd((vm_address_t)data, size);
    kfree((vm_offset_t)data, size);
    return rc;
}
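
/*
 * For reference (editor's sketch of the layout implied by the size
 * computation above), the variable-length packet looks like:
 *
 *  int  type;                           // KMOD_LOAD_WITH_DEPENDENCIES_PACKET
 *  char name[KMOD_MAX_NAME];            // requested extension
 *  char dependencies[count][KMOD_MAX_NAME];
 *  char terminator;                     // dependencies[count][0] == 0
 *
 * hence sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1 bytes in all.
 */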

kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kern_return_t rc;
    kmod_generic_cmd_t *data;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    bcopy(generic_data, data->data, size);  // copy caller's payload into the packet

    rc = kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
    kfree((vm_offset_t)data, size + sizeof(int));
    return rc;
}

kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
        info->address + info->size, VM_PROT_DEFAULT, FALSE);
    if (rc != KERN_SUCCESS) {
        return rc;
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header *) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map,
                round_page(sect->addr), trunc_page(sect->addr + sect->size),
                VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
            info->address + info->size, FALSE);
        assert(rc == KERN_SUCCESS);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

#if DEBUG
    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

    return KERN_SUCCESS;
}


kern_return_t
kmod_create(host_priv_t host_priv,
    kmod_info_t *info,
    kmod_t *id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}

kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    kmod_info_t *info;

    if (!name || !version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {

        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    // make up the fake entry
    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));
    info->reference_count = 1;  // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = info->size = info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        kfree((vm_offset_t)info, sizeof(kmod_info_t));  // don't leak the entry
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}
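
/*
 * Illustrative usage (editor's addition; the names are hypothetical):
 * built-in kernel components can be given catalog entries without any
 * backing object file, e.g.
 *
 *  (void) kmod_create_fake("com.apple.kernel.mach", "1.0");
 *
 * Such entries have address == 0, which is why kmod_dump() and the
 * dependency scan at the end of this file skip them.
 */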

kern_return_t
kmod_destroy_internal(kmod_t id)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (k->reference_count != 0) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {   // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree((vm_offset_t)t, sizeof(struct kmod_reference));
            }

#if DEBUG
            printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

            rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                k->address + k->size, FALSE);
            assert(rc == KERN_SUCCESS);

            rc = vm_deallocate(kernel_map, k->address, k->size);
            assert(rc == KERN_SUCCESS);

            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}


kern_return_t
kmod_destroy(host_priv_t host_priv,
    kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_destroy_internal(id);
}


kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)();
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        rc = vm_map_copyout(kernel_map, (vm_offset_t *)&user_data,
            (vm_map_copy_t)*data);
        if (rc != KERN_SUCCESS) goto finish;
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}


/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;     // reference to
    kmod_info_t *f;     // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        kfree((vm_offset_t)r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}


kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;     // reference to
    kmod_info_t *f;     // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t * p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);  // don't return with the lock held
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {   // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree((vm_offset_t)r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            goto finish;
        }
        p = r;
        r = r->next;
    }

    simple_unlock(&kmod_lock);

finish:

    return rc;
}
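
/*
 * Editor's note (illustrative, based on the KMOD_UNPACK_*_ID macros
 * used above): a single kmod_t argument carries both halves of a
 * dependency edge. Assuming the matching KMOD_PACK_IDS() macro from
 * mach/kmod.h, recording that module `from' depends on module `to'
 * would look roughly like:
 *
 *  kmod_retain(KMOD_PACK_IDS(from_id, to_id));
 *  // ...and later, to drop the dependency:
 *  kmod_release(KMOD_PACK_IDS(from_id, to_id));
 */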


kern_return_t
kmod_control(host_priv_t host_priv,
    kmod_t id,
    kmod_control_flavor_t flavor,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

    case KMOD_CNTL_START:
    case KMOD_CNTL_STOP:
    {
        rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
            data, dataCount);
        break;
    }

    case KMOD_CNTL_RETAIN:
    {
        rc = kmod_retain(id);
        break;
    }

    case KMOD_CNTL_RELEASE:
    {
        rc = kmod_release(id);
        break;
    }

    case KMOD_CNTL_GET_CMD:
    {
        cmd_queue_entry_t *e;

        /*
         * Throw away any data the user may have sent in error.
         * We must do this because we are about to return data for
         * this command; otherwise whatever the user sent us in
         * error would be leaked.
         */
        if (*data && *dataCount) {
            vm_map_copy_discard(*data);
            *data = 0;
            *dataCount = 0;
        }

        simple_lock(&kmod_queue_lock);

        if (queue_empty(&kmod_cmd_queue)) {
            wait_result_t res;

            res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
                &kmod_queue_lock,
                THREAD_ABORTSAFE);
            if (queue_empty(&kmod_cmd_queue)) {
                // we must have been interrupted!
                simple_unlock(&kmod_queue_lock);
                assert(res == THREAD_INTERRUPTED);
                return KERN_ABORTED;
            }
        }
        e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

        simple_unlock(&kmod_queue_lock);

        rc = vm_map_copyin(kernel_map, e->data, e->size, TRUE, (vm_map_copy_t *)data);
        if (rc) {
            simple_lock(&kmod_queue_lock);
            enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
            simple_unlock(&kmod_queue_lock);
            *data = 0;
            *dataCount = 0;
            return rc;
        }
        *dataCount = e->size;

        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));

        break;
    }

    default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}


kern_return_t
kmod_get_info(host_t host,
    kmod_info_array_t *kmods,
    mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // Copy the kmod list into data, retrying if it has grown in the
    // meantime.  The copied-out data is tweaked so user level can tell
    // what's what: each copied k->next pointer is changed to point back
    // at the kmod entry it was copied from, each reference_list field
    // is changed into a reference count, and the references themselves
    // are tacked onto the end of the data packet in the order they are
    // found.

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0;
            // since there can only be one like that, that case is
            // handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
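
/*
 * Editor's sketch (an assumption about the consumer, not code from this
 * file): given the layout produced by kmod_get_info(), a user-level
 * caller could walk the returned buffer roughly like this.
 *
 *  kmod_info_t *ki = (kmod_info_t *)kmods;
 *  kmod_reference_t *ref;
 *
 *  while (ki->next) {          // nonzero next means another record follows
 *      ki++;
 *  }
 *  ref = (kmod_reference_t *)(ki + 1); // references follow the last record
 *  // Each record's reference_list field now holds its reference *count*,
 *  // and the kmod_reference_t entries appear in the same order as the
 *  // records they belong to.
 */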

static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine * routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, (char *) sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

kern_return_t
kmod_default_start(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    vm_offset_t * kscan_addr = 0;
    kmod_info_t * k;
    kmod_reference_t * r;
    int i;
    int found_kmod = 0;

    for (k = kmod; k; k = k->next) {
        if (!k->address) {
            continue; // skip fake entries for built-in kernel components
        }
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {
            kdb_printf("  kmod scan stopped due to missing "
                "kmod page: %08x\n", k);
            break;
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    kdb_printf("  Kernel loadable modules in backtrace "
                        "(with dependencies):\n");
                }
                found_kmod = 1;
                kdb_printf("    %s(%s)@0x%x\n",
                    k->name, k->version, k->address);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_extract(kernel_pmap, (vm_offset_t)r) == 0) {
                        kdb_printf("      kmod dependency scan stopped "
                            "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (!rinfo->address) {
                        continue; // skip fake entries for built-ins
                    }

                    if (pmap_extract(kernel_pmap, (vm_offset_t)rinfo) == 0) {
                        kdb_printf("      kmod dependency scan stopped "
                            "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    kdb_printf("      dependency: %s(%s)@0x%x\n",
                        rinfo->name, rinfo->version, rinfo->address);
                }

                break; // only report this kmod for one backtrace address
            }
        }
    }

    return;
}