/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>

#include <mach-o/mach_header.h>

#include <mach_host.h>

/*
 * XXX headers for which prototypes should be in a common include file;
 * XXX see libsa/kext.cpp for why.
 */
kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
kern_return_t kmod_destroy_internal(kmod_t id);
kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
                                 mach_msg_type_number_t *dataCount);
kern_return_t kmod_retain(kmod_t id);
kern_return_t kmod_release(kmod_t id);
kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
                            mach_msg_type_number_t *kmodCount);
extern void kdb_printf(const char *fmt, ...);


#define WRITE_PROTECT_MODULE_TEXT (0)

kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)

typedef struct cmd_queue_entry {
    queue_chain_t links;
    vm_address_t data;
    vm_size_t size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

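/*
 * One-time initialization of the module bookkeeping locks and of the
 * command queue that is drained by kmod_control(KMOD_CNTL_GET_CMD).
 */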
void
kmod_init(void)
{
    simple_lock_init(&kmod_lock, 0);
    simple_lock_init(&kmod_queue_lock, 0);
    queue_init(&kmod_cmd_queue);
}

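/*
 * Linear searches of the global kmod list by id or by name.  The
 * returned pointer aliases the live list entry, so callers must
 * serialize against list updates themselves (see the *_locked
 * copying variants below).
 */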
kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}

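/*
 * Copying variants of the lookups: each returns a kalloc()ed snapshot
 * of the matching kmod_info_t (taken under kmod_queue_lock), or NULL
 * if there is no match.  The caller owns the copy and must kfree() it.
 */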
kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }

    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }

    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

// XXX add a nocopy flag??

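/*
 * Copy a command packet into wired kernel memory, append it to
 * kmod_cmd_queue, and wake one thread blocked in kmod_control()
 * on a KMOD_CNTL_GET_CMD request.
 */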
kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree(e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}

kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    return kmod_queue_cmd((vm_address_t)data, size);
}

kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    return kmod_queue_cmd((vm_address_t)data, size);
}

kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    /* copy the caller's payload into the packet (bcopy takes src, dst) */
    bcopy(generic_data, data->data, size);

    return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
}

extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;

/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
 * if WRITE_PROTECT_MODULE_TEXT is defined.
 */
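/*
 * Register a newly loaded module: validate page alignment, wire the
 * module's memory (unless it lives in the prelinked segment), optionally
 * write-protect its __TEXT,__text section, and link the kmod_info onto
 * the global list under a freshly assigned id.
 */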
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
    if (!isPrelink) {
        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
                         info->address + info->size, VM_PROT_DEFAULT, FALSE);
        if (rc != KERN_SUCCESS) {
            return rc;
        }
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header*) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr), trunc_page(sect->addr + sect->size),
                                  VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        if (!isPrelink) {
            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                               info->address + info->size, FALSE);
            assert(rc == KERN_SUCCESS);
        }
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

#if DEBUG
    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
           info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

    return KERN_SUCCESS;
}


kern_return_t
kmod_create(host_priv_t host_priv,
            vm_address_t addr,
            kmod_t *id)
{
    kmod_info_t *info = (kmod_info_t *)addr;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}

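/*
 * Create a placeholder kmod entry for a component that is built into
 * the kernel itself (there is no separately loaded image).  The entry
 * gets a reference count of 1 so that it can never be started, stopped,
 * or unloaded.
 */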
kern_return_t
kmod_create_fake_with_address(const char *name, const char *version,
                              vm_address_t address, vm_size_t size,
                              int * return_id)
{
    kmod_info_t *info;

    if (!name || !version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {

        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    // make the fake entry
    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));  //NIK fixed this part
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = address;
    info->size = size;
    info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    if (return_id)
        *return_id = info->id;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}

kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    return kmod_create_fake_with_address(name, version, 0, 0, NULL);
}

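/*
 * Common unload path: unlink the module from the global list, drop the
 * references it held on other modules, and (unless it is a fake entry)
 * release its memory.  Fails with KERN_INVALID_ARGUMENT if the module is
 * unknown or is still referenced.
 */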
static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (!fake && (k->reference_count != 0)) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree(t, sizeof(struct kmod_reference));
            }

            if (!fake)
            {
#if DEBUG
                printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                       k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

                if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK)))
                {
                    vm_offset_t virt = ml_static_ptovirt(k->address);
                    if (virt) {
                        ml_static_mfree(virt, k->size);
                    }
                }
                else
                {
                    rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                                       k->address + k->size, FALSE);
                    assert(rc == KERN_SUCCESS);

                    rc = vm_deallocate(kernel_map, k->address, k->size);
                    assert(rc == KERN_SUCCESS);
                }
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}

kern_return_t
kmod_destroy_internal(kmod_t id)
{
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
             kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy_fake(kmod_t id)
{
    return _kmod_destroy_internal(id, TRUE);
}

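/*
 * Invoke a module's start or stop entry point.  Fails if the module is
 * unknown or still referenced.  Any argument data that arrived with the
 * MIG request is copied out of its vm_map_copy_t, handed to the routine,
 * and deallocated before returning.
 */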
kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)(kmod_info_t *, void *);
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_offset_t map_addr;
        vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
        user_data = CAST_DOWN(void *, map_addr);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}


/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
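/*
 * For retain/release the kmod_t argument packs two module ids: the
 * module being depended upon ("to") and the module holding the
 * dependency ("from"), extracted below with KMOD_UNPACK_TO_ID and
 * KMOD_UNPACK_FROM_ID.
 */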
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree(r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}


kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t * p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        // drop the lock before bailing out; the finish label does not unlock
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {    // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree(r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            goto finish;
        }
        p = r;
        r = r->next;
    }

    simple_unlock(&kmod_lock);

finish:

    return rc;
}

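/*
 * Host-privileged MIG entry point: dispatches start/stop, retain,
 * release, and "get command" requests.  KMOD_CNTL_GET_CMD blocks until
 * a queued command (e.g. a load-extension request) is available and
 * copies it out to the caller.
 */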
kern_return_t
kmod_control(host_priv_t host_priv,
             kmod_t id,
             kmod_control_flavor_t flavor,
             kmod_args_t *data,
             mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

    case KMOD_CNTL_START:
    case KMOD_CNTL_STOP:
    {
        rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                                data, dataCount);
        break;
    }

    case KMOD_CNTL_RETAIN:
    {
        rc = kmod_retain(id);
        break;
    }

    case KMOD_CNTL_RELEASE:
    {
        rc = kmod_release(id);
        break;
    }

    case KMOD_CNTL_GET_CMD:
    {

        cmd_queue_entry_t *e;

        /*
         * Throw away any data the user may have sent in error.
         * We must do this because we are about to return data of
         * our own in *data for these commands, which would otherwise
         * leak whatever the user sent us.
         */
        if (*data && *dataCount) {
            vm_map_copy_discard(*data);
            *data = 0;
            *dataCount = 0;
        }

        simple_lock(&kmod_queue_lock);

        if (queue_empty(&kmod_cmd_queue)) {
            wait_result_t res;

            res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
                                           &kmod_queue_lock,
                                           THREAD_ABORTSAFE);
            if (queue_empty(&kmod_cmd_queue)) {
                // we must have been interrupted!
                simple_unlock(&kmod_queue_lock);
                assert(res == THREAD_INTERRUPTED);
                return KERN_ABORTED;
            }
        }
        e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

        simple_unlock(&kmod_queue_lock);

        rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
                           (vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
        if (rc) {
            simple_lock(&kmod_queue_lock);
            enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
            simple_unlock(&kmod_queue_lock);
            *data = 0;
            *dataCount = 0;
            return rc;
        }
        *dataCount = e->size;

        kfree(e, sizeof(struct cmd_queue_entry));

        break;
    }

    default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}

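/*
 * Return a flattened snapshot of the kmod list to the caller.  The
 * buffer holds every kmod_info_t followed by all of the reference
 * entries; each copied entry's reference_list field is overwritten with
 * its reference count.  If the list grows while it is being copied, the
 * buffer is released and the copy is retried.
 */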
kern_return_t
kmod_get_info(__unused host_t host,
              kmod_info_array_t *kmods,
              mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data; retry if the list's total size has grown
    // the copied out data is tweaked to figure out what's what at user level
    // change the copied out k->next pointers to point to themselves
    // change the k->reference into a count, tack the references on
    // the end of the data packet in the order they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}

/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
 */
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine * routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, /*(char *)*/ sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
 */
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
 */
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

kern_return_t
kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

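/*
 * Walk the kmod list and report any module whose address range contains
 * one of the given backtrace addresses, along with that module's
 * dependencies.  Every kmod_info and reference page is checked with
 * pmap_find_phys() before it is touched, since this can run from the
 * debugger path (kdb_printf) where the lists may be damaged.
 */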
static void
kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
             void (*printf_func)(const char *fmt, ...))
{
    vm_offset_t * kscan_addr = 0;
    kmod_info_t * k;
    kmod_reference_t * r;
    unsigned int i;
    int found_kmod = 0;
    kmod_info_t * stop_kmod = 0;

    for (k = kmod; k; k = k->next) {
        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
            (*printf_func)(" kmod scan stopped due to missing "
                           "kmod page: %08x\n", stop_kmod);
            break;
        }
        if (!k->address) {
            continue;    // skip fake entries for built-in kernel components
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    (*printf_func)(" Kernel loadable modules in backtrace "
                                   "(with dependencies):\n");
                }
                found_kmod = 1;
                (*printf_func)(" %s(%s)@0x%x\n",
                               k->name, k->version, k->address);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
                        (*printf_func)(" kmod dependency scan stopped "
                                       "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
                        (*printf_func)(" kmod dependency scan stopped "
                                       "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    if (!rinfo->address) {
                        continue;    // skip fake entries for built-ins
                    }

                    (*printf_func)(" dependency: %s(%s)@0x%x\n",
                                   rinfo->name, rinfo->version, rinfo->address);
                }

                break;    // only report this kmod for one backtrace address
            }
        }
    }

    return;
}

void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &kdb_printf);
}

void
kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &printf);
}