/* apple/xnu.git (xnu-517.9.4): osfmk/kern/kmod.c */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <kern/thread.h>
#include <mach-o/mach_header.h>

#include <mach_host.h>

#define WRITE_PROTECT_MODULE_TEXT (0)

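/*
 * Global module bookkeeping: registered kmods live on a singly linked list
 * headed by 'kmod' and guarded by kmod_lock throughout this file.  Commands
 * destined for user space are staged on kmod_cmd_queue under
 * kmod_queue_lock.
 */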
kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)

typedef struct cmd_queue_entry {
    queue_chain_t    links;
    vm_address_t     data;
    vm_size_t        size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

void
kmod_init()
{
    simple_lock_init(&kmod_lock, ETAP_MISC_Q);
    simple_lock_init(&kmod_queue_lock, ETAP_MISC_Q);
    queue_init(&kmod_cmd_queue);
}

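/*
 * Unlocked list walks: kmod_lookupbyid() and kmod_lookupbyname() return a
 * pointer into the live list and do no locking themselves; callers provide
 * whatever synchronization is required.
 */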
kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}

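/*
 * The *_locked variants return a kalloc()ed snapshot of the matching
 * kmod_info_t rather than a pointer into the live list; the caller owns the
 * copy and must kfree() it (sizeof(kmod_info_t)) when done.  They return 0
 * if no module matches or the allocation fails.
 */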
kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
finish:
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree((vm_offset_t)kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
finish:
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree((vm_offset_t)kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

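/*
 * kmod_queue_cmd() copies 'size' bytes at 'data' into a fresh kernel
 * allocation, appends the entry to kmod_cmd_queue, and wakes one thread
 * sleeping on the queue (typically a user-level loader blocked in
 * kmod_control(KMOD_CNTL_GET_CMD)).
 */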
// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}

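/*
 * The load-request helpers below build KMOD_LOAD_* packets and hand them to
 * kmod_queue_cmd() for pickup by user space; they do not load anything
 * themselves.
 */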
kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    return kmod_queue_cmd((vm_address_t)data, size);
}

kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    return kmod_queue_cmd((vm_address_t)data, size);
}

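/*
 * kmod_send_generic() wraps an arbitrary payload in a kmod_generic_cmd_t
 * (an int type followed by the data bytes) and queues it like the helpers
 * above.
 */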
kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    bcopy(generic_data, data->data, size);    // copy the caller's payload into the packet body

    return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
}

extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;

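/*
 * kmod_create_internal() registers an already-loaded module: it validates
 * page alignment, wires the module's non-header pages (unless the image
 * lies in the prelinked-kext range delimited by sectPRELINKB and
 * sectSizePRELINK, which is left as is), assigns an id, and links the
 * kmod_info_t onto the global list.  A duplicate name fails with
 * KERN_INVALID_ARGUMENT.
 */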
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
    if (!isPrelink) {
        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
            info->address + info->size, VM_PROT_DEFAULT, FALSE);
        if (rc != KERN_SUCCESS) {
            return rc;
        }
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header *) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr), trunc_page(sect->addr + sect->size),
                VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        if (!isPrelink) {
            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                info->address + info->size, FALSE);
            assert(rc == KERN_SUCCESS);
        }
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

#if DEBUG
    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

    return KERN_SUCCESS;
}

kern_return_t
kmod_create(host_priv_t host_priv,
        kmod_info_t *info,
        kmod_t *id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}

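/*
 * kmod_create_fake() registers a placeholder entry (address/size of 0) so
 * that built-in kernel components show up in the kmod list; the reference
 * count is preset to 1 so the entry can never be started, stopped, or
 * unloaded.
 */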
kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    kmod_info_t *info;

    if (!name || !version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {

        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    // fill in the fake entry
    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));  //NIK fixed this part
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = info->size = info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        kfree((vm_offset_t)info, sizeof(kmod_info_t));    // don't leak the unused entry
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}

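/*
 * kmod_destroy_internal() unlinks a module with no outstanding references,
 * drops the references it holds on other modules, and releases its memory:
 * prelinked images are returned via ml_static_mfree(), anything else is
 * unwired and vm_deallocate()d from kernel_map.
 */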
kern_return_t
kmod_destroy_internal(kmod_t id)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (k->reference_count != 0) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree((vm_offset_t)t, sizeof(struct kmod_reference));
            }

#if DEBUG
            printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

            if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK)))
            {
                vm_offset_t virt = ml_static_ptovirt(k->address);
                if (virt) {
                    ml_static_mfree(virt, k->size);
                }
            }
            else
            {
                rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                        k->address + k->size, FALSE);
                assert(rc == KERN_SUCCESS);

                rc = vm_deallocate(kernel_map, k->address, k->size);
                assert(rc == KERN_SUCCESS);
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
        kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_destroy_internal(id);
}

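/*
 * kmod_start_or_stop() calls a module's start or stop routine, refusing
 * modules that still have outstanding references.  Any user-supplied
 * argument data arrives as a vm_map_copy_t, is copied out into kernel_map
 * for the call, and is deallocated afterwards; the caller's *data and
 * *dataCount are zeroed on return, so nothing is passed back.
 */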
kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)();
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_copyout(kernel_map, (vm_offset_t *)&user_data, (vm_map_copy_t)*data);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}

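/*
 * For the retain and release calls the kmod_t argument is not a single
 * module id: it packs a "from" id and a "to" id (see KMOD_UNPACK_FROM_ID
 * and KMOD_UNPACK_TO_ID below), recording that the "from" module holds a
 * reference on the "to" module.
 */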
/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree((vm_offset_t)r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}

kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t * p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);    // drop the lock on this error path as well
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {    // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree((vm_offset_t)r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            goto finish;
        }
        p = r;
        r = r->next;
    }

    simple_unlock(&kmod_lock);

finish:

    return rc;
}

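/*
 * kmod_control() is the privileged-host entry point that multiplexes the
 * operations above: START/STOP, RETAIN/RELEASE, and GET_CMD, which blocks
 * (ABORTSAFE) until a queued command is available and copies it out to the
 * caller.
 */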
kern_return_t
kmod_control(host_priv_t host_priv,
        kmod_t id,
        kmod_control_flavor_t flavor,
        kmod_args_t *data,
        mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD:
        {

            cmd_queue_entry_t *e;

            /*
             * Throw away any data the user may have sent in error.
             * We must do this because we are likely to return data
             * for this command; otherwise whatever the user sent us
             * in error would be leaked.
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = 0;
                *dataCount = 0;
            }

            simple_lock(&kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                wait_result_t res;

                res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
                        &kmod_queue_lock,
                        THREAD_ABORTSAFE);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    simple_unlock(&kmod_queue_lock);
                    assert(res == THREAD_INTERRUPTED);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            simple_unlock(&kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, e->data, e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                simple_lock(&kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                simple_unlock(&kmod_queue_lock);
                *data = 0;
                *dataCount = 0;
                return rc;
            }
            *dataCount = e->size;

            kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));

            break;
        }

      default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}

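/*
 * kmod_get_info() returns a flattened snapshot of the kmod list in a single
 * out-of-line buffer: all kmod_info_t records first, followed by the
 * reference entries, with each record's reference_list field overwritten by
 * its reference count.  *kmodCount is the byte size of the buffer, not the
 * number of modules.
 */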
kern_return_t
kmod_get_info(host_t host,
        kmod_info_array_t *kmods,
        mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy the kmod list into data, retrying if the list has grown meanwhile;
    // the copied-out data is tweaked so user level can tell what's what:
    // each copied k->next pointer is changed to point at the kmod it was
    // copied from, each k->reference_list is changed into a count, and the
    // references are tacked onto the end of the data packet in the order
    // they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}

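/*
 * C++ support: kmod_call_funcs_in_section() treats a (__TEXT, sectName)
 * section of the module's Mach-O image as an array of function pointers and
 * calls each one; kmod_initialize_cpp()/kmod_finalize_cpp() use it for the
 * "__constructor" and "__destructor" sections.
 */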
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine * routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, (char *) sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

kern_return_t
kmod_default_start(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

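/*
 * kmod_dump() is a debugger-context helper (note the kdb_printf output):
 * for each backtrace address in addr[0..cnt-1] it reports the loaded module
 * containing that address along with the module's dependencies, probing
 * each structure's page with pmap_find_phys() first so an unmapped page
 * stops the scan instead of faulting.
 */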
1c79356b
A
792void
793kmod_dump(vm_offset_t *addr, unsigned int cnt)
794{
9bccf70c
A
795 vm_offset_t * kscan_addr = 0;
796 vm_offset_t * rscan_addr = 0;
797 kmod_info_t * k;
798 kmod_reference_t * r;
799 int i, j;
800 int found_kmod = 0;
801 int kmod_scan_stopped = 0;
802 kmod_info_t * stop_kmod = 0;
803 int ref_scan_stopped = 0;
804 kmod_reference_t * stop_ref = 0;
805
806 for (k = kmod; k; k = k->next) {
807 if (!k->address) {
808 continue; // skip fake entries for built-in kernel components
809 }
55e303ae 810 if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
9bccf70c
A
811 kdb_printf(" kmod scan stopped due to missing "
812 "kmod page: %08x\n", stop_kmod);
813 break;
814 }
815 for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
816 if ((*kscan_addr >= k->address) &&
817 (*kscan_addr < (k->address + k->size))) {
818
819 if (!found_kmod) {
820 kdb_printf(" Kernel loadable modules in backtrace "
821 "(with dependencies):\n");
822 }
823 found_kmod = 1;
824 kdb_printf(" %s(%s)@0x%x\n",
825 k->name, k->version, k->address);
826
827 for (r = k->reference_list; r; r = r->next) {
828 kmod_info_t * rinfo;
829
55e303ae 830 if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
9bccf70c
A
831 kdb_printf(" kmod dependency scan stopped "
832 "due to missing dependency page: %08x\n", r);
833 break;
834 }
835
836 rinfo = r->info;
837
838 if (!rinfo->address) {
839 continue; // skip fake entries for built-ins
840 }
841
55e303ae 842 if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
9bccf70c
A
843 kdb_printf(" kmod dependency scan stopped "
844 "due to missing kmod page: %08x\n", rinfo);
845 break;
846 }
847
848 kdb_printf(" dependency: %s(%s)@0x%x\n",
849 rinfo->name, rinfo->version, rinfo->address);
850 }
851
852 break; // only report this kmod for one backtrace address
853 }
854 }
1c79356b 855 }
1c79356b 856
9bccf70c 857 return;
1c79356b 858}