/*
 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>

#include <mach-o/mach_header.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>

/*
 * XXX headers for which prototypes should be in a common include file;
 * XXX see libsa/kext.cpp for why.
 */
kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
kern_return_t kmod_destroy_internal(kmod_t id);
kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
    mach_msg_type_number_t *dataCount);
kern_return_t kmod_retain(kmod_t id);
kern_return_t kmod_release(kmod_t id);
kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
    mach_msg_type_number_t *kmodCount);

static kern_return_t kmod_get_symbol_data(kmod_args_t * data,
    mach_msg_type_number_t * dataCount);
static kern_return_t kmod_free_linkedit_data(void);
static kern_return_t kmod_get_kext_uuid(
    const char * kext_id,
    kmod_args_t * data,
    mach_msg_type_number_t * dataCount);

extern int IODTGetLoaderInfo(const char * key, void ** infoAddr, vm_size_t * infoSize);
extern void IODTFreeLoaderInfo(const char * key, void * infoAddr, vm_size_t infoSize);
/* operates on 32 bit segments */
extern void OSRuntimeUnloadCPPForSegment(struct segment_command * segment);

#define WRITE_PROTECT_MODULE_TEXT (0)

kmod_info_t *kmod;
static int kmod_index = 1;
static int kmod_load_disabled = 0;

mutex_t * kmod_lock = 0;
static mutex_t * kmod_queue_lock = 0;

typedef struct cmd_queue_entry {
    queue_chain_t links;
    vm_address_t data;
    vm_size_t size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

void
kmod_init(void)
{
    kmod_lock = mutex_alloc(0);
    kmod_queue_lock = mutex_alloc(0);
    queue_init(&kmod_cmd_queue);
}

kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = NULL;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = NULL;

    k = kmod;
    while (k) {
        if (!strncmp(k->name, name, sizeof(k->name)))
            break;
        k = k->next;
    }

    return k;
}
// Get the id of the kmod whose range contains the given address;
// returns -1 if the address is not within any loaded kmod.
int
kmod_lookupidbyaddress_locked(vm_address_t addr)
{
    kmod_info_t *k = NULL;

    mutex_lock(kmod_lock);    // the kmod list is protected by kmod_lock
    for (k = kmod; k; k = k->next) {
        if ((k->address <= addr) && ((k->address + k->size) > addr)) {
            break;
        }
    }
    mutex_unlock(kmod_lock);

    return k ? k->id : -1;
}

kmod_info_t *
kmod_lookupbyaddress(vm_address_t addr)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if ((k->address <= addr) && ((k->address + k->size) > addr)) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = NULL;
    kmod_info_t *kc = NULL;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    mutex_lock(kmod_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }

    mutex_unlock(kmod_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = NULL;
    }
    return kc;
}

kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = NULL;
    kmod_info_t *kc = NULL;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    mutex_lock(kmod_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }

    mutex_unlock(kmod_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = NULL;
    }
    return kc;
}
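
/*
 * Example usage of the *_locked lookup variants (an illustrative sketch,
 * not part of the original source; the bundle id below is hypothetical).
 * They return a kalloc()ed snapshot of the kmod_info_t, so the caller
 * owns the copy and must kfree() it:
 *
 *    kmod_info_t *ki = kmod_lookupbyname_locked("com.example.driver");
 *    if (ki) {
 *        // ... use the stable snapshot without holding kmod_lock ...
 *        kfree(ki, sizeof(kmod_info_t));
 *    }
 */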

// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree(e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    mutex_lock(kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    mutex_unlock(kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}
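
/*
 * The queue above is a producer/consumer channel: the kernel enqueues
 * load requests, and the user-space kext loader drains them via
 * kmod_control(..., KMOD_CNTL_GET_CMD, ...), sleeping in that call while
 * the queue is empty. A hedged sketch of the consumer loop (an assumption
 * about the loader, not a verbatim excerpt from it):
 *
 *    kmod_args_t data = NULL;
 *    mach_msg_type_number_t count = 0;
 *    while (kmod_control(host_priv, 0, KMOD_CNTL_GET_CMD,
 *            &data, &count) == KERN_SUCCESS) {
 *        int type = *(int *)data;    // every packet starts with its type
 *        // ... dispatch on KMOD_LOAD_EXTENSION_PACKET, etc. ...
 *    }
 */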

kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t data;

    if (kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    data.type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data.name, name, sizeof(data.name));

    return kmod_queue_cmd((vm_address_t)&data, sizeof(data));
}

kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kern_return_t result;
    kmod_load_with_dependencies_cmd_t * data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    if (kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    result = kmod_queue_cmd((vm_address_t)data, size);
    kfree(data, size);
    return result;
}

kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kern_return_t result;
    kmod_generic_cmd_t * data;
    vm_size_t cmd_size;

    // add sizeof(int) for the type field
    cmd_size = size + sizeof(int);
    data = (kmod_generic_cmd_t *)kalloc(cmd_size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    bcopy(generic_data, data->data, size);    // bcopy(src, dst, len)

    result = kmod_queue_cmd((vm_address_t)data, cmd_size);
    kfree(data, cmd_size);
    return result;
}
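
/*
 * Packet layout note (inferred from the code above, which uses data->type
 * and data->data; stated here for clarity rather than taken from
 * <mach/kmod.h>): kmod_generic_cmd_t begins with an int 'type' followed
 * by a trailing byte buffer, so a caller round-trips a payload like this:
 *
 *    char payload[] = "hello";
 *    kmod_send_generic(SOME_PRIVATE_PACKET_TYPE,    // hypothetical type
 *        payload, sizeof(payload));
 */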

extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;
extern int kth_started;

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module
 * loader; write-protects module text if WRITE_PROTECT_MODULE_TEXT is
 * defined.
 */
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
    if (!isPrelink && kth_started) {
        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
            info->address + info->size, VM_PROT_DEFAULT, FALSE);
        if (rc != KERN_SUCCESS) {
            return rc;
        }
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header *) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr),
                trunc_page(sect->addr + sect->size),
                VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    mutex_lock(kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        mutex_unlock(kmod_lock);
        if (!isPrelink) {
            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                info->address + info->size, FALSE);
            assert(rc == KERN_SUCCESS);
        }
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    mutex_unlock(kmod_lock);

#if DEBUG
    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

    return KERN_SUCCESS;
}


kern_return_t
kmod_create(host_priv_t host_priv,
    vm_address_t addr,
    kmod_t *id)
{
#ifdef SECURE_KERNEL
    return KERN_NOT_SUPPORTED;
#else
    kmod_info_t *info;

    if (kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    info = (kmod_info_t *)addr;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
#endif
}
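
/*
 * Expected caller sequence for kmod_create() (a sketch of what the
 * user-level loader is assumed to do; this file does not enforce it):
 * allocate space in the kernel map, copy the linked module image in, then
 * hand the kernel address of the image's kmod_info_t to kmod_create.
 * kmod_create_internal then checks that the recorded load address and
 * header size are page-aligned:
 *
 *    kmod_t id;
 *    kern_return_t kr = kmod_create(host_priv, info_addr, &id);
 */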

kern_return_t
kmod_create_fake_with_address(const char *name, const char *version,
    vm_address_t address, vm_size_t size,
    int * return_id)
{
    kmod_info_t *info;

    if (!name || !version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {

        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    // make the fake entry
    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = NULL;
    info->address = address;
    info->size = size;
    info->hdr_size = 0;
    info->start = info->stop = NULL;

    mutex_lock(kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        mutex_unlock(kmod_lock);
        kfree(info, sizeof(kmod_info_t));
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    if (return_id)
        *return_id = info->id;

    info->next = kmod;
    kmod = info;

    mutex_unlock(kmod_lock);

    return KERN_SUCCESS;
}

kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    return kmod_create_fake_with_address(name, version, 0, 0, NULL);
}
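
/*
 * Fake entries let built-in kernel components show up in the kmod list
 * (e.g. in kextstat output) without any loadable image behind them. An
 * illustrative registration (the bundle id and version here are
 * hypothetical, not taken from this file):
 *
 *    kmod_create_fake("com.example.builtin", "1.0.0");
 *
 * A fake entry has an address/size of 0, no start/stop routines, and a
 * reference count of 1 so it can never be started, stopped, or unloaded.
 */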

static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    mutex_lock(kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (!fake && (k->reference_count != 0)) {
                mutex_unlock(kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            mutex_unlock(kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree(t, sizeof(struct kmod_reference));
            }

            if (!fake)
            {
#if DEBUG
                printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                    k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

                if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK)))
                {
                    vm_offset_t virt = ml_static_ptovirt(k->address);
                    if (virt) {
                        ml_static_mfree(virt, k->size);
                    }
                }
                else
                {
                    rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                        k->address + k->size, FALSE);
                    assert(rc == KERN_SUCCESS);

                    rc = vm_deallocate(kernel_map, k->address, k->size);
                    assert(rc == KERN_SUCCESS);
                }
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    mutex_unlock(kmod_lock);

    return KERN_INVALID_ARGUMENT;
}

kern_return_t
kmod_destroy_internal(kmod_t id)
{
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
    kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy_fake(kmod_t id)
{
    return _kmod_destroy_internal(id, TRUE);
}

kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = NULL;
    kern_return_t (*func)(kmod_info_t *, void *);
    kmod_info_t *k;

    if (start && kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    mutex_lock(kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        mutex_unlock(kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    mutex_unlock(kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_offset_t map_addr;
        vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
        user_data = CAST_DOWN(void *, map_addr);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = NULL;
    if (dataCount) *dataCount = 0;

    return rc;
}

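/*
 * The id passed to kmod_retain()/kmod_release() packs two kmod ids into
 * one value. The unpack macros used below come from <mach/kmod.h>; they
 * are assumed to split the value roughly like this (a sketch, verify
 * against the header):
 *
 *    id = KMOD_PACK_IDS(from_id, to_id);    // "from" holds a reference
 *                                           // on "to"
 *    from_id == KMOD_UNPACK_FROM_ID(id);    // high half
 *    to_id   == KMOD_UNPACK_TO_ID(id);      // low half
 */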
/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = NULL;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    mutex_lock(kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        mutex_unlock(kmod_lock);
        kfree(r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    mutex_unlock(kmod_lock);

finish:

    return rc;
}


kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = NULL;
    kmod_reference_t *p;

    mutex_lock(kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        mutex_unlock(kmod_lock);    // don't leak the lock on bad ids
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {    // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            mutex_unlock(kmod_lock);
            kfree(r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            goto finish;
        }
        p = r;
        r = r->next;
    }

    mutex_unlock(kmod_lock);

finish:

    return rc;
}


kern_return_t
kmod_control(host_priv_t host_priv,
    kmod_t id,
    kmod_control_flavor_t flavor,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    /* Only allow non-root access to retrieve kernel symbols or UUID.
     */
    if (flavor != KMOD_CNTL_GET_KERNEL_SYMBOLS &&
        flavor != KMOD_CNTL_GET_UUID) {

        if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    }

    switch (flavor) {

    case KMOD_CNTL_START:
    case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                data, dataCount);
            break;
        }

    case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

    case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

    case KMOD_CNTL_GET_CMD:
        {
            cmd_queue_entry_t *e;

            /* Throw away any data the user may have sent in error.
             * We must do this because we are about to return new data
             * for these commands; anything already in *data would
             * otherwise leak.
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = NULL;
                *dataCount = 0;
            }

            mutex_lock(kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                wait_result_t res;

                res = thread_sleep_mutex((event_t)&kmod_cmd_queue,
                    kmod_queue_lock,
                    THREAD_ABORTSAFE);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    mutex_unlock(kmod_queue_lock);
                    assert(res == THREAD_INTERRUPTED);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            mutex_unlock(kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
                (vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                mutex_lock(kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                mutex_unlock(kmod_queue_lock);
                *data = NULL;
                *dataCount = 0;
                return rc;
            }
            *dataCount = e->size;

            kfree(e, sizeof(struct cmd_queue_entry));

            break;
        }

    case KMOD_CNTL_GET_KERNEL_SYMBOLS:
        {
            /* Throw away any data the user may have sent in error,
             * for the same reason as above.
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = NULL;
                *dataCount = 0;
            }

            return kmod_get_symbol_data(data, dataCount);
        }

    case KMOD_CNTL_FREE_LINKEDIT_DATA:
        {
            return kmod_free_linkedit_data();
        }

    case KMOD_CNTL_GET_UUID:
        {
            uint32_t id_length = *dataCount;
            char * kext_id = NULL;
            vm_map_offset_t map_addr;
            void * user_data;
            kern_return_t result;

            /* Get the bundle id, if provided, and discard the buffer sent down.
             */
            if (*data && *dataCount) {
                if (kmem_alloc(kernel_map, (vm_offset_t *)&kext_id,
                    id_length) != KERN_SUCCESS) {

                    return KERN_FAILURE;
                }

                vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
                user_data = CAST_DOWN(void *, map_addr);

                memcpy(kext_id, user_data, id_length);
                kext_id[id_length - 1] = '\0';
                if (user_data) {
                    (void)vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
                }
                *data = NULL;
                *dataCount = 0;
            }

            result = kmod_get_kext_uuid(kext_id, data, dataCount);
            if (kext_id) {
                kmem_free(kernel_map, (vm_offset_t)kext_id, id_length);
            }
            return result;
        }

    case KMOD_CNTL_DISABLE_LOAD:
        {
            kmod_load_disabled = 1;
            rc = KERN_SUCCESS;
            break;
        }

    default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}
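
/*
 * Illustrative user-side call (a sketch of an assumed caller, not code
 * from this file): fetching the running kernel's UUID through the
 * unprivileged UUID flavor. Passing no input buffer selects the kernel
 * itself rather than a named kext.
 *
 *    kmod_args_t data = NULL;
 *    mach_msg_type_number_t count = 0;
 *    if (kmod_control(host_priv, 0, KMOD_CNTL_GET_UUID,
 *            &data, &count) == KERN_SUCCESS) {
 *        // data now holds the 16-byte LC_UUID payload; count == 16
 *    }
 */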

/*******************************************************************************
* This function creates a dummy symbol file for the running kernel based on data
* in the run-time image. This allows us to correctly link other executables
* (drivers, etc.) against the kernel when the kernel image on the root filesystem
* does not match the live kernel, as can occur during net-booting where the
* actual kernel image is obtained from the network via tftp rather than the root
* device.
*
* If a symbol table is available, then a link-suitable Mach-O file image is
* created containing a Mach Header and an LC_SYMTAB load command followed by
* the symbol table data for mach_kernel. A UUID load command is also present for
* identification, so we don't link against the wrong kernel.
*
* NOTE: This file supports only 32 bit kernels; adding support for 64 bit
* kernels is possible, but is not necessary yet.
*******************************************************************************/
extern struct mach_header _mh_execute_header;
static int _linkedit_segment_freed = 0;

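/*
 * Layout of the constructed image, as implied by the size and offset
 * arithmetic below (a sketch for orientation, not a normative format):
 *
 *    +------------------------------+  file offset 0
 *    | mach_header + 4 load cmds    |  (TEXT, DATA, UUID, SYMTAB)
 *    +------------------------------+  round_page(header_size)
 *    | __TEXT,__const contents      |  (kernel vtables live here)
 *    +------------------------------+  rounded up again
 *    | nlist entries | string table |  (copied from __LINKEDIT)
 *    +------------------------------+  total_size
 */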
static kern_return_t
kmod_get_symbol_data(
    kmod_args_t * symbol_data,
    mach_msg_type_number_t * data_size)
{
    kern_return_t result = KERN_FAILURE;

    struct load_command * load_cmd;
    struct mach_header * orig_header = &_mh_execute_header;
    struct segment_command * orig_text = NULL;
    struct segment_command * orig_data = NULL;
    struct segment_command * orig_linkedit = NULL;
    struct uuid_command * orig_uuid = NULL;
    struct symtab_command * orig_symtab = NULL;
    struct section * sect;
    struct section * const_text = NULL;

    vm_size_t header_size = 0;
    vm_offset_t symtab_size;
    vm_offset_t total_size;    // copied out to 'data_size'
    char * buffer = 0;         // copied out to 'symbol_data'

    struct mach_header * header;
    struct segment_command * seg_cmd = NULL;
    struct symtab_command * symtab;

    unsigned int i;
    caddr_t addr;
    vm_offset_t offset;

    // only want to do this on the first call
    static int syms_marked = 0;

    mutex_lock(kmod_lock);

    /*****
     * Check for empty out parameter pointers, and zero them if ok.
     */
    if (!symbol_data || !data_size) {
        result = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    *symbol_data = NULL;
    *data_size = 0;

    if (_linkedit_segment_freed) {
        result = KERN_MEMORY_FAILURE;
        goto finish;
    }

    /*****
     * Scan the in-memory kernel's mach header for the parts we need to copy:
     * TEXT (for basic file info + const section), DATA (for basic file info),
     * LINKEDIT (for the symbol table entries), SYMTAB (for the symbol table
     * overall).
     */
    load_cmd = (struct load_command *)&orig_header[1];
    for (i = 0; i < orig_header->ncmds; i++) {
        if (load_cmd->cmd == LC_SEGMENT) {
            struct segment_command * orig_seg_cmd =
                (struct segment_command *)load_cmd;

            if (!strncmp(SEG_TEXT, orig_seg_cmd->segname, strlen(SEG_TEXT))) {
                orig_text = orig_seg_cmd;
            } else if (!strncmp(SEG_DATA, orig_seg_cmd->segname,
                strlen(SEG_DATA))) {

                orig_data = orig_seg_cmd;
            } else if (!strncmp(SEG_LINKEDIT, orig_seg_cmd->segname,
                strlen(SEG_LINKEDIT))) {

                orig_linkedit = orig_seg_cmd;
            }
        } else if (load_cmd->cmd == LC_UUID) {
            orig_uuid = (struct uuid_command *)load_cmd;
        } else if (load_cmd->cmd == LC_SYMTAB) {
            orig_symtab = (struct symtab_command *)load_cmd;
        }

        load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize);
    }

    /* Bail if any wasn't found.
     */
    if (!orig_text || !orig_data || !orig_linkedit || !orig_uuid || !orig_symtab) {
        goto finish;
    }

    /* Now seek out the const section of the TEXT segment, bailing if not found.
     */
    sect = (struct section *)&orig_text[1];
    for (i = 0; i < orig_text->nsects; i++, sect++) {
        if (!strncmp("__const", sect->sectname, sizeof("__const"))) {
            const_text = sect;
            break;
        }
    }
    if (!const_text) {
        goto finish;
    }

    /*****
     * Calculate the total size needed and allocate the buffer. In summing the
     * total size, every size before the last must be rounded to a
     * page-size increment.
     */
    header_size = sizeof(struct mach_header) +
        orig_text->cmdsize + orig_data->cmdsize +
        orig_uuid->cmdsize + orig_symtab->cmdsize;
    symtab_size = (orig_symtab->nsyms * sizeof(struct nlist)) +
        orig_symtab->strsize;
    total_size = round_page(header_size) + round_page(const_text->size) +
        symtab_size;

    if (kmem_alloc(kernel_map, (vm_offset_t *)&buffer, total_size)
        != KERN_SUCCESS) {

        goto finish;
    }
    bzero((void *)buffer, total_size);

    /*****
     * Set up the Mach-O header in the buffer.
     */
    header = (struct mach_header *)buffer;
    header->magic = orig_header->magic;
    header->cputype = orig_header->cputype;
    header->cpusubtype = orig_header->cpusubtype;
    header->filetype = orig_header->filetype;
    header->ncmds = 4;    // TEXT, DATA, UUID, SYMTAB
    header->sizeofcmds = header_size - sizeof(struct mach_header);
    header->flags = orig_header->flags;

    /*****
     * Initialize the current file offset and addr; updated as we go through,
     * but only for fields that need proper info.
     */
    offset = round_page(header_size);
    addr = (caddr_t)const_text->addr;

    /*****
     * Construct a TEXT segment load command. The only content of the TEXT
     * segment that we actually copy is the __TEXT,__const section, which
     * contains the kernel vtables. The other sections are just filled with
     * unincremented addr/offset and zero size and number fields.
     */
    seg_cmd = (struct segment_command *)&header[1];    // just past mach header
    memcpy(seg_cmd, orig_text, orig_text->cmdsize);
    seg_cmd->vmaddr = (unsigned long)addr;
    seg_cmd->vmsize = const_text->size;
    seg_cmd->fileoff = 0;
    seg_cmd->filesize = const_text->size + round_page(header_size);
    seg_cmd->maxprot = 0;
    seg_cmd->initprot = 0;
    seg_cmd->flags = 0;
    sect = (struct section *)(seg_cmd + 1);
    for (i = 0; i < seg_cmd->nsects; i++, sect++) {
        sect->addr = (unsigned long)addr;    // only valid for __TEXT,__const
        sect->size = 0;
        sect->offset = offset;
        sect->nreloc = 0;
        if (0 == strncmp("__const", sect->sectname, sizeof("__const"))) {
            sect->size = const_text->size;
            addr += const_text->size;
            offset += const_text->size;
            const_text = sect;    // retarget to constructed section
        }
    }
    offset = round_page(offset);

    /*****
     * Now copy the __DATA segment load command, but none of its content.
     */
    seg_cmd = (struct segment_command *)((caddr_t)seg_cmd + seg_cmd->cmdsize);
    memcpy(seg_cmd, orig_data, orig_data->cmdsize);

    seg_cmd->vmaddr = (unsigned long)addr;
    seg_cmd->vmsize = 0x1000;    // Why not just zero? DATA seg is empty.
    seg_cmd->fileoff = offset;
    seg_cmd->filesize = 0;
    seg_cmd->maxprot = 0;
    seg_cmd->initprot = 0;
    seg_cmd->flags = 0;
    sect = (struct section *)(seg_cmd + 1);
    for (i = 0; i < seg_cmd->nsects; i++, sect++) {
        sect->addr = (unsigned long)addr;
        sect->size = 0;
        sect->offset = offset;
        sect->nreloc = 0;
    }
    offset = round_page(offset);

    /* Set up LC_UUID command
     */
    seg_cmd = (struct segment_command *)((caddr_t)seg_cmd + seg_cmd->cmdsize);
    memcpy(seg_cmd, orig_uuid, orig_uuid->cmdsize);

    /* Set up LC_SYMTAB command
     */
    symtab = (struct symtab_command *)((caddr_t)seg_cmd + seg_cmd->cmdsize);
    symtab->cmd = LC_SYMTAB;
    symtab->cmdsize = sizeof(struct symtab_command);
    symtab->symoff = offset;
    symtab->nsyms = orig_symtab->nsyms;
    symtab->strsize = orig_symtab->strsize;
    symtab->stroff = offset + symtab->nsyms * sizeof(struct nlist);

    /* Convert the symbol table in place (yes, in the running kernel)
     * from section references to absolute references.
     */
    if (!syms_marked) {
        struct nlist * sym = (struct nlist *)orig_linkedit->vmaddr;
        for (i = 0; i < orig_symtab->nsyms; i++, sym++) {
            if ((sym->n_type & N_TYPE) == N_SECT) {
                sym->n_sect = NO_SECT;
                sym->n_type = (sym->n_type & ~N_TYPE) | N_ABS;
            }
        }
        syms_marked = 1;
    }

    /*****
     * Copy the contents of the __TEXT,__const section and the linkedit symbol
     * data into the constructed object file buffer. The header has already been
     * filled in.
     */
    memcpy(buffer + const_text->offset, (void *)const_text->addr, const_text->size);
    memcpy(buffer + symtab->symoff, (void *)orig_linkedit->vmaddr, symtab_size);

    result = vm_map_copyin(kernel_map,
        (vm_offset_t)buffer,
        (vm_map_size_t)total_size,
        /* src_destroy */ TRUE,
        (vm_map_copy_t *)symbol_data);
    if (result != KERN_SUCCESS) {
        kmem_free(kernel_map, (vm_offset_t)buffer, total_size);
        *symbol_data = NULL;
        *data_size = 0;
        goto finish;
    } else {
        *data_size = total_size;
    }

finish:
    mutex_unlock(kmod_lock);
    return result;
}

/*******************************************************************************
* Drop the LINKEDIT segment from the running kernel to recover wired memory.
* This is invoked by kextd after it has successfully determined a file is
* available in the root filesystem to link against (either a symbol file it
* wrote, or /mach_kernel).
*******************************************************************************/
// in IOCatalogue.cpp
extern int kernelLinkerPresent;

static kern_return_t
kmod_free_linkedit_data(void)
{
    kern_return_t result = KERN_FAILURE;

    const char * dt_kernel_header_name = "Kernel-__HEADER";
    const char * dt_kernel_symtab_name = "Kernel-__SYMTAB";
    struct mach_header * dt_mach_header = NULL;
    vm_size_t dt_mach_header_size = 0;
    struct symtab_command * dt_symtab = NULL;
    vm_size_t dt_symtab_size = 0;
    int dt_result;

    struct segment_command * segmentLE;
    boolean_t keepsyms = FALSE;
    const char * segment_name = "__LINKEDIT";
#if __ppc__ || __arm__
    const char * devtree_segment_name = "Kernel-__LINKEDIT";
    void * segment_paddress;
    vm_size_t segment_size;
#endif

    mutex_lock(kmod_lock);

    /* The semantic is "make sure the linkedit segment is freed", so if we
     * previously did it, it's a success.
     */
    if (_linkedit_segment_freed) {
        result = KERN_SUCCESS;
        goto finish;
    } else if (kernelLinkerPresent) {
        // The in-kernel linker requires the linkedit segment to function.
        // Refuse to free it while the linker is still around.
        // XXX: We need a dedicated error return code for this.
        printf("can't remove kernel __LINKEDIT segment - in-kernel linker needs it\n");
        result = KERN_MEMORY_FAILURE;
        goto finish;
    }

    /* Dispose of unnecessary stuff that the booter didn't need to load.
     */
    dt_result = IODTGetLoaderInfo(dt_kernel_header_name,
        (void **)&dt_mach_header, &dt_mach_header_size);
    if (dt_result == 0 && dt_mach_header) {
        IODTFreeLoaderInfo(dt_kernel_header_name, (void *)dt_mach_header,
            round_page_32(dt_mach_header_size));
    }
    dt_result = IODTGetLoaderInfo(dt_kernel_symtab_name,
        (void **)&dt_symtab, &dt_symtab_size);
    if (dt_result == 0 && dt_symtab) {
        IODTFreeLoaderInfo(dt_kernel_symtab_name, (void *)dt_symtab,
            round_page_32(dt_symtab_size));
    }

    PE_parse_boot_arg("keepsyms", &keepsyms);

    segmentLE = getsegbyname(segment_name);
    if (!segmentLE) {
        printf("error removing kernel __LINKEDIT segment\n");
        goto finish;
    }
    OSRuntimeUnloadCPPForSegment(segmentLE);
#if __ppc__ || __arm__
    if (!keepsyms && 0 == IODTGetLoaderInfo(devtree_segment_name,
        &segment_paddress, &segment_size)) {

        IODTFreeLoaderInfo(devtree_segment_name, (void *)segment_paddress,
            (int)segment_size);
    }
#elif __i386__
    if (!keepsyms && segmentLE->vmaddr && segmentLE->vmsize) {
        ml_static_mfree(segmentLE->vmaddr, segmentLE->vmsize);
    }
#else
#error arch
#endif
    result = KERN_SUCCESS;

finish:
    if (!keepsyms && result == KERN_SUCCESS) {
        _linkedit_segment_freed = 1;
    }
    mutex_unlock(kmod_lock);
    return result;
}
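
/*
 * Note (derived from the logic above): booting with keepsyms=1 leaves the
 * symbol data resident; the segment's C++ teardown still runs, but the
 * memory is not reclaimed and _linkedit_segment_freed stays clear, so a
 * later KMOD_CNTL_GET_KERNEL_SYMBOLS request can still succeed.
 */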

/*******************************************************************************
* Retrieve the UUID load command payload from the running kernel or a
* loaded kext.
*******************************************************************************/
static kern_return_t
kmod_get_kext_uuid(
    const char * kext_id,
    kmod_args_t * data,
    mach_msg_type_number_t * dataCount)
{
    kern_return_t result = KERN_FAILURE;
    kmod_info_t * kmod_info = NULL;
    unsigned int i;
    char * uuid_data = 0;
    struct mach_header * header = &_mh_execute_header;
    struct load_command * load_cmd = NULL;
    struct uuid_command * uuid_cmd;

    /* If given no kext ID, retrieve the kernel UUID.
     */
    if (!kext_id) {
        header = &_mh_execute_header;
    } else {
        kmod_info = kmod_lookupbyname_locked(kext_id);
        if (!kmod_info) {
            result = KERN_INVALID_ARGUMENT;
            goto finish;
        }

        /* If the kmod is built-in, it's part of the kernel, so retrieve the
         * kernel UUID.
         */
        if (!kmod_info->address) {
            header = &_mh_execute_header;
        } else {
            header = (struct mach_header *)kmod_info->address;
        }
    }

    load_cmd = (struct load_command *)&header[1];

    for (i = 0; i < header->ncmds; i++) {
        if (load_cmd->cmd == LC_UUID) {
            uuid_cmd = (struct uuid_command *)load_cmd;

            /* kmem_alloc() a local buffer that's on a boundary known to work
             * with vm_map_copyin().
             */
            result = kmem_alloc(kernel_map, (vm_offset_t *)&uuid_data,
                sizeof(uuid_cmd->uuid));
            if (result != KERN_SUCCESS) {
                result = KERN_RESOURCE_SHORTAGE;
                goto finish;
            }

            memcpy(uuid_data, uuid_cmd->uuid, sizeof(uuid_cmd->uuid));

            result = vm_map_copyin(kernel_map, (vm_offset_t)uuid_data,
                sizeof(uuid_cmd->uuid), /* src_destroy */ TRUE,
                (vm_map_copy_t *)data);
            if (result == KERN_SUCCESS) {
                *dataCount = sizeof(uuid_cmd->uuid);
            } else {
                result = KERN_RESOURCE_SHORTAGE;
                kmem_free(kernel_map, (vm_offset_t)uuid_data,
                    sizeof(uuid_cmd->uuid));
            }
            goto finish;
        }

        load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize);
    }

finish:
    if (kmod_info) {
        // free the snapshot returned by kmod_lookupbyname_locked()
        kfree(kmod_info, sizeof(kmod_info_t));
    }
    return result;
}

kern_return_t
kmod_get_info(__unused host_t host,
    kmod_info_array_t *kmods,
    mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    mutex_lock(kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    mutex_unlock(kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // Copy the kmod list into data, retrying if its size has changed
    // (grown) since the allocation. The copied-out data is tweaked so
    // user level can figure out what's what:
    //   - each copied k->next pointer is made non-NULL (it points back
    //     at the original entry) except for the last entry in the chain;
    //   - each k->reference_list field is replaced with a count, and the
    //     references themselves are tacked onto the end of the data
    //     packet in the order they are found.

    mutex_lock(kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            mutex_unlock(kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                mutex_unlock(kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note: the last 'k' in the chain has its next == 0;
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    mutex_unlock(kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = NULL;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
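
/*
 * How user level is assumed to unpack the buffer returned above (an
 * illustrative sketch; see the comments in kmod_get_info() for the
 * format): kmod_info_t records come first, with the end of the array
 * marked by the one record whose 'next' field is 0. Each record's
 * 'reference_list' field actually holds its reference count, and the
 * kmod_reference_t records follow the array in the same order.
 *
 *    kmod_info_t *ki = (kmod_info_t *)kmods;
 *    unsigned int n = 0;
 *    while (ki[n++].next != 0) {
 *        // keep scanning; ki[n-1] is a valid record
 *    }
 *    // n records total; reference blocks start at &ki[n]
 *    kmod_reference_t *refs = (kmod_reference_t *)&ki[n];
 */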

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader.
 */
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine * routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader.
 */
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader.
 */
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

kern_return_t
kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

static void
kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
    int (*printf_func)(const char *fmt, ...))
{
    vm_offset_t * kscan_addr = NULL;
    kmod_info_t * k;
    kmod_reference_t * r;
    unsigned int i;
    int found_kmod = 0;

    for (k = kmod; k; k = k->next) {
        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
            (*printf_func)("    kmod scan stopped due to missing "
                "kmod page: %08x\n", k);
            break;
        }
        if (!k->address) {
            continue;    // skip fake entries for built-in kernel components
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    (*printf_func)("    Kernel loadable modules in backtrace "
                        "(with dependencies):\n");
                }
                found_kmod = 1;
                (*printf_func)("      %s(%s)@0x%x->0x%x\n",
                    k->name, k->version, k->address, k->address + k->size - 1);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
                        (*printf_func)("        kmod dependency scan stopped "
                            "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
                        (*printf_func)("        kmod dependency scan stopped "
                            "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    if (!rinfo->address) {
                        continue;    // skip fake entries for built-ins
                    }

                    (*printf_func)("        dependency: %s(%s)@0x%x\n",
                        rinfo->name, rinfo->version, rinfo->address);
                }

                break;    // only report this kmod for one backtrace address
            }
        }
    }

    return;
}

void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &kdb_printf);
}

void kmod_dump_log(vm_offset_t *, unsigned); /* gcc 4 warn fix */

void
kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &printf);
}
1539 }