/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */
35
36 #include <mach/mach_types.h>
37 #include <mach/vm_types.h>
38 #include <mach/kern_return.h>
39 #include <mach/host_priv_server.h>
40 #include <mach/vm_map.h>
41
42 #include <kern/kalloc.h>
43 #include <kern/kern_types.h>
44 #include <kern/thread.h>
45
46 #include <vm/vm_kern.h>
47
48 #include <mach-o/mach_header.h>
49
50 #include <mach_host.h>
51
52 /*
53 * XXX headers for which prototypes should be in a common include file;
54 * XXX see libsa/kext.cpp for why.
55 */
56 kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
57 kern_return_t kmod_destroy_internal(kmod_t id);
58 kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
59 mach_msg_type_number_t *dataCount);
60 kern_return_t kmod_retain(kmod_t id);
61 kern_return_t kmod_release(kmod_t id);
62 kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
63 kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
64 mach_msg_type_number_t *kmodCount);
65 extern void kdb_printf(const char *fmt, ...);
66
67
68
#define WRITE_PROTECT_MODULE_TEXT (0)

kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)

typedef struct cmd_queue_entry {
    queue_chain_t links;
    vm_address_t data;
    vm_size_t size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

void
kmod_init(void)
{
    simple_lock_init(&kmod_lock, 0);
    simple_lock_init(&kmod_queue_lock, 0);
    queue_init(&kmod_cmd_queue);
}

kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}

/*
 * Returns a kalloc()ed snapshot of the matching kmod_info entry,
 * or 0 if none is found; the caller must kfree() the copy.
 */
kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }

    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

/*
 * As above, but the lookup is by name; the caller owns (and must
 * kfree()) the returned copy.
 */
kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }

    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}
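
/*
 * Illustrative usage sketch (not from the original source): the
 * *_locked lookups above hand back a heap snapshot rather than a
 * pointer into the live list, so the caller can inspect it without
 * holding a kmod lock but must kfree() it afterwards. The bundle
 * name and function below are hypothetical.
 */
#if 0
static void
example_query_kmod(void)
{
    kmod_info_t *snap = kmod_lookupbyname_locked("com.example.driver");

    if (snap) {
        printf("found kmod %s (id %d) at 0x%x\n",
            snap->name, snap->id, snap->address);
        kfree(snap, sizeof(kmod_info_t));   // caller owns the copy
    }
}
#endif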

// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree(e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}

kern_return_t
kmod_load_extension(char *name)
{
    kern_return_t rc;
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    rc = kmod_queue_cmd((vm_address_t)data, size);
    kfree(data, size);  // kmod_queue_cmd made its own copy of the packet
    return rc;
}

kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kern_return_t rc;
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    rc = kmod_queue_cmd((vm_address_t)data, size);
    kfree(data, size);  // kmod_queue_cmd made its own copy of the packet
    return rc;
}
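
/*
 * Illustrative usage sketch (not from the original source): the
 * dependency list is a NULL-terminated array of bundle names, each
 * at most KMOD_MAX_NAME bytes including the terminator. All names
 * and the function below are hypothetical.
 */
#if 0
static kern_return_t
example_request_load(void)
{
    char *deps[] = { "com.example.lib", "com.example.family", NULL };

    return kmod_load_extension_with_dependencies("com.example.driver", deps);
}
#endif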

kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kern_return_t rc;
    kmod_generic_cmd_t *data;
    vm_size_t cmd_size = size + sizeof(int);

    data = (kmod_generic_cmd_t *)kalloc(cmd_size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    bcopy(generic_data, data->data, size);  // bcopy(src, dst): fill the packet from the caller's buffer

    rc = kmod_queue_cmd((vm_address_t)data, cmd_size);
    kfree(data, cmd_size);  // kmod_queue_cmd made its own copy of the packet
    return rc;
}

extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel
 * module loader if WRITE_PROTECT_MODULE_TEXT is defined.
 */
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // Double-check for page alignment: OR-combining the two values
    // lets a single mask test catch a misalignment in either one.
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
    if (!isPrelink) {
        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
            info->address + info->size, VM_PROT_DEFAULT, FALSE);
        if (rc != KERN_SUCCESS) {
            return rc;
        }
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header*) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr), trunc_page(sect->addr + sect->size),
                VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        if (!isPrelink) {
            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                info->address + info->size, FALSE);
            assert(rc == KERN_SUCCESS);
        }
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

#if DEBUG
    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

    return KERN_SUCCESS;
}

kern_return_t
kmod_create(host_priv_t host_priv,
    vm_address_t addr,
    kmod_t *id)
{
    kmod_info_t *info = (kmod_info_t *)addr;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}

kern_return_t
kmod_create_fake_with_address(const char *name, const char *version,
    vm_address_t address, vm_size_t size,
    int * return_id)
{
    kmod_info_t *info;

    if (!name || !version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {

        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    // fill in the fake entry
    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));
    info->reference_count = 1; // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = address;
    info->size = size;
    info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        kfree(info, sizeof(kmod_info_t));   // don't leak the unused entry
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    if (return_id)
        *return_id = info->id;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}

kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    return kmod_create_fake_with_address(name, version, 0, 0, NULL);
}
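
/*
 * Illustrative usage sketch (not from the original source): a fake
 * entry advertises a built-in component so that it appears in kmod
 * listings and can be depended upon. It carries no code or data of
 * its own (address, size, and entry points are 0), and its
 * reference_count starts at 1 so it can never be unloaded, started,
 * or stopped. The component name and function are hypothetical.
 */
#if 0
static void
example_register_builtin(void)
{
    (void) kmod_create_fake("com.example.builtin", "1.0");
}
#endif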

static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (!fake && (k->reference_count != 0)) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) { // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree(t, sizeof(struct kmod_reference));
            }

            if (!fake)
            {
#if DEBUG
                printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                    k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

                if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK)))
                {
                    vm_offset_t virt = ml_static_ptovirt(k->address);
                    if (virt) {
                        ml_static_mfree(virt, k->size);
                    }
                }
                else
                {
                    rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                        k->address + k->size, FALSE);
                    assert(rc == KERN_SUCCESS);

                    rc = vm_deallocate(kernel_map, k->address, k->size);
                    assert(rc == KERN_SUCCESS);
                }
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}

kern_return_t
kmod_destroy_internal(kmod_t id)
{
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
    kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy_fake(kmod_t id)
{
    return _kmod_destroy_internal(id, TRUE);
}

kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)(kmod_info_t *, void *);
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_offset_t map_addr;
        vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
        user_data = CAST_DOWN(void *, map_addr);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}
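
/*
 * Illustrative sketch (not from the original source): a privileged
 * client drives start/stop through kmod_control() (defined below);
 * data and dataCount may carry an argument blob, which the kernel
 * copies out and hands to the kmod's start or stop routine. The
 * function and variable names here are hypothetical.
 */
#if 0
static kern_return_t
example_start_kmod(host_priv_t hp, kmod_t id)
{
    kmod_args_t args = 0;               // no argument blob
    mach_msg_type_number_t argc = 0;

    return kmod_control(hp, id, KMOD_CNTL_START, &args, &argc);
}
#endif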

/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;     // reference to
    kmod_info_t *f;     // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree(r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}

kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;     // reference to
    kmod_info_t *f;     // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t * p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);  // don't leak the lock on the error path
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) { // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree(r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            goto finish;
        }
        p = r;
        r = r->next;
    }

    simple_unlock(&kmod_lock);

finish:

    return rc;
}
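
/*
 * Illustrative sketch (not from the original source): the kmod_t
 * passed to kmod_retain()/kmod_release() packs both endpoints of
 * the dependency edge, with the "from" id in the high 16 bits and
 * the "to" id in the low 16 bits; KMOD_UNPACK_FROM_ID() and
 * KMOD_UNPACK_TO_ID() above recover them, and mach/kmod.h provides
 * KMOD_PACK_IDS() for building such an id. The function name is
 * hypothetical.
 */
#if 0
static kern_return_t
example_add_dependency(kmod_t from_id, kmod_t to_id)
{
    // record that `from' depends on (and thus pins) `to'
    return kmod_retain(KMOD_PACK_IDS(from_id, to_id));
}
#endif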

kern_return_t
kmod_control(host_priv_t host_priv,
    kmod_t id,
    kmod_control_flavor_t flavor,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

    case KMOD_CNTL_START:
    case KMOD_CNTL_STOP:
    {
        rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
            data, dataCount);
        break;
    }

    case KMOD_CNTL_RETAIN:
    {
        rc = kmod_retain(id);
        break;
    }

    case KMOD_CNTL_RELEASE:
    {
        rc = kmod_release(id);
        break;
    }

    case KMOD_CNTL_GET_CMD:
    {
        cmd_queue_entry_t *e;

        /*
         * Throw away any data the user may have sent in error.
         * We must do this because we are likely to return data
         * for this command; otherwise whatever the user sent us
         * in error would be leaked.
         */
        if (*data && *dataCount) {
            vm_map_copy_discard(*data);
            *data = 0;
            *dataCount = 0;
        }

        simple_lock(&kmod_queue_lock);

        if (queue_empty(&kmod_cmd_queue)) {
            wait_result_t res;

            res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
                &kmod_queue_lock,
                THREAD_ABORTSAFE);
            if (queue_empty(&kmod_cmd_queue)) {
                // we must have been interrupted!
                simple_unlock(&kmod_queue_lock);
                assert(res == THREAD_INTERRUPTED);
                return KERN_ABORTED;
            }
        }
        e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

        simple_unlock(&kmod_queue_lock);

        rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
            (vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
        if (rc) {
            simple_lock(&kmod_queue_lock);
            enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
            simple_unlock(&kmod_queue_lock);
            *data = 0;
            *dataCount = 0;
            return rc;
        }
        *dataCount = e->size;

        kfree(e, sizeof(struct cmd_queue_entry));

        break;
    }

    default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}

kern_return_t
kmod_get_info(__unused host_t host,
    kmod_info_array_t *kmods,
    mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // Copy the kmod list into data, retrying if its size has changed
    // (grown) in the meantime. The copied-out data is tweaked so that
    // user level can figure out what's what: each copied k->next is
    // made to point at the source kmod (any non-zero value marks
    // "another entry follows"), each k->reference_list is replaced by
    // a reference count, and the reference records themselves are
    // tacked onto the end of the data packet in the order they are
    // found. (See the illustrative walker after this function.)

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0;
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
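
/*
 * Illustrative sketch (not from the original source): how user level
 * might walk the packed reply described above. Any non-zero `next'
 * marks "another kmod_info_t follows"; `reference_list' has been
 * replaced by the count of kmod_reference_t records for that entry,
 * all of which sit after the kmod_info_t array in the same order.
 * example_walk_kmod_info() is a hypothetical name.
 */
#if 0
static void
example_walk_kmod_info(kmod_info_t *ki)
{
    kmod_info_t *k = ki;
    kmod_reference_t *refs;
    unsigned int nkmods = 1;

    while (k->next) {                   // non-zero next == more entries follow
        nkmods++;
        k++;
    }
    refs = (kmod_reference_t *)(k + 1); // reference records trail the array

    for (k = ki; nkmods > 0; k++, nkmods--) {
        unsigned int nrefs = (unsigned int)k->reference_list;

        printf("%s (%s): %u references\n", k->name, k->version, nrefs);
        refs += nrefs;                  // skip this entry's reference records
    }
}
#endif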

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader.
 */
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine * routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, /*(char *)*/ sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader.
 */
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader.
 */
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

kern_return_t
kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

static void
kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
    void (*printf_func)(const char *fmt, ...))
{
    vm_offset_t * kscan_addr = 0;
    kmod_info_t * k;
    kmod_reference_t * r;
    unsigned int i;
    int found_kmod = 0;

    for (k = kmod; k; k = k->next) {
        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
            // report the unmapped entry that stopped the scan
            (*printf_func)("  kmod scan stopped due to missing "
                "kmod page: %08x\n", k);
            break;
        }
        if (!k->address) {
            continue; // skip fake entries for built-in kernel components
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    (*printf_func)("  Kernel loadable modules in backtrace "
                        "(with dependencies):\n");
                }
                found_kmod = 1;
                (*printf_func)("    %s(%s)@0x%x\n",
                    k->name, k->version, k->address);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
                        (*printf_func)("      kmod dependency scan stopped "
                            "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
                        (*printf_func)("      kmod dependency scan stopped "
                            "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    if (!rinfo->address) {
                        continue; // skip fake entries for built-ins
                    }

                    (*printf_func)("      dependency: %s(%s)@0x%x\n",
                        rinfo->name, rinfo->version, rinfo->address);
                }

                break; // only report this kmod for one backtrace address
            }
        }
    }

    return;
}

void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &kdb_printf);
}

void
kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &printf);
}