]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/kmod.c
xnu-792.6.56.tar.gz
[apple/xnu.git] / osfmk / kern / kmod.c
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
1c79356b
A
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
ff6e181a
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
1c79356b 12 *
ff6e181a
A
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
ff6e181a
A
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
1c79356b
A
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
24 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
25 *
26 * HISTORY
27 *
28 * 1999 Mar 29 rsulack created.
29 */
30
31#include <mach/mach_types.h>
32#include <mach/vm_types.h>
33#include <mach/kern_return.h>
91447636
A
34#include <mach/host_priv_server.h>
35#include <mach/vm_map.h>
36
37#include <kern/kalloc.h>
1c79356b 38#include <kern/kern_types.h>
1c79356b 39#include <kern/thread.h>
91447636
A
40
41#include <vm/vm_kern.h>
42
9bccf70c 43#include <mach-o/mach_header.h>
1c79356b
A
44
45#include <mach_host.h>
46
91447636
A
/*
 * XXX headers for which prototypes should be in a common include file;
 * XXX see libsa/kext.cpp for why.
 */
/* Register / unregister a loaded module with the kernel's kmod list. */
kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
kern_return_t kmod_destroy_internal(kmod_t id);
/* Invoke a module's start or stop entry point (data is optional MIG payload). */
kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
	mach_msg_type_number_t *dataCount);
/* Inter-module reference counting; id packs both "to" and "from" ids. */
kern_return_t kmod_retain(kmod_t id);
kern_return_t kmod_release(kmod_t id);
/* Queue a command packet for the user-level kext loader to pick up. */
kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
	mach_msg_type_number_t *kmodCount);
extern void kdb_printf(const char *fmt, ...);
61
62
63
9bccf70c
A
/* When non-zero, module __TEXT,__text is write-protected after load. */
#define WRITE_PROTECT_MODULE_TEXT	(0)

/* Head of the singly-linked list of all loaded kmods; protected by kmod_lock. */
kmod_info_t *kmod = 0;
/* Next module id to hand out; monotonically increasing, never reused. */
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)       /* guards the kmod list and kmod_index */
decl_simple_lock_data(,kmod_queue_lock) /* guards kmod_cmd_queue */

/* One queued command packet destined for the user-level kext loader. */
typedef struct cmd_queue_entry {
	queue_chain_t	links;  /* linkage on kmod_cmd_queue */
	vm_address_t	data;   /* kmem_alloc'd copy of the packet */
	vm_size_t	size;   /* size of the packet in bytes */
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;
1c79356b
A
79
80void
91447636 81kmod_init(void)
1c79356b 82{
91447636
A
83 simple_lock_init(&kmod_lock, 0);
84 simple_lock_init(&kmod_queue_lock, 0);
9bccf70c 85 queue_init(&kmod_cmd_queue);
1c79356b
A
86}
87
88kmod_info_t *
89kmod_lookupbyid(kmod_t id)
90{
9bccf70c 91 kmod_info_t *k = 0;
1c79356b 92
9bccf70c
A
93 k = kmod;
94 while (k) {
95 if (k->id == id) break;
96 k = k->next;
97 }
1c79356b 98
9bccf70c 99 return k;
1c79356b
A
100}
101
102kmod_info_t *
0b4e3aa0 103kmod_lookupbyname(const char * name)
1c79356b 104{
9bccf70c 105 kmod_info_t *k = 0;
1c79356b 106
9bccf70c
A
107 k = kmod;
108 while (k) {
109 if (!strcmp(k->name, name)) break;
110 k = k->next;
111 }
1c79356b 112
9bccf70c
A
113 return k;
114}
115
116kmod_info_t *
117kmod_lookupbyid_locked(kmod_t id)
118{
119 kmod_info_t *k = 0;
120 kmod_info_t *kc = 0;
121
122 kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
123 if (!kc) return kc;
124
125 simple_lock(&kmod_queue_lock);
126 k = kmod_lookupbyid(id);
127 if (k) {
128 bcopy((char*)k, (char *)kc, sizeof(kmod_info_t));
129 }
91447636 130
9bccf70c
A
131 simple_unlock(&kmod_queue_lock);
132
133 if (k == 0) {
91447636 134 kfree(kc, sizeof(kmod_info_t));
9bccf70c
A
135 kc = 0;
136 }
137 return kc;
138}
139
140kmod_info_t *
141kmod_lookupbyname_locked(const char * name)
142{
143 kmod_info_t *k = 0;
144 kmod_info_t *kc = 0;
145
146 kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
147 if (!kc) return kc;
148
149 simple_lock(&kmod_queue_lock);
150 k = kmod_lookupbyname(name);
151 if (k) {
152 bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
153 }
91447636 154
9bccf70c
A
155 simple_unlock(&kmod_queue_lock);
156
157 if (k == 0) {
91447636 158 kfree(kc, sizeof(kmod_info_t));
9bccf70c
A
159 kc = 0;
160 }
161 return kc;
1c79356b
A
162}
163
164// XXX add a nocopy flag??
165
166kern_return_t
167kmod_queue_cmd(vm_address_t data, vm_size_t size)
168{
9bccf70c
A
169 kern_return_t rc;
170 cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
171 if (!e) return KERN_RESOURCE_SHORTAGE;
172
173 rc = kmem_alloc(kernel_map, &e->data, size);
174 if (rc != KERN_SUCCESS) {
91447636 175 kfree(e, sizeof(struct cmd_queue_entry));
9bccf70c
A
176 return rc;
177 }
178 e->size = size;
179 bcopy((void *)data, (void *)e->data, size);
1c79356b 180
9bccf70c
A
181 simple_lock(&kmod_queue_lock);
182 enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
183 simple_unlock(&kmod_queue_lock);
1c79356b 184
9bccf70c
A
185 thread_wakeup_one((event_t)&kmod_cmd_queue);
186
187 return KERN_SUCCESS;
1c79356b
A
188}
189
190kern_return_t
191kmod_load_extension(char *name)
192{
9bccf70c
A
193 kmod_load_extension_cmd_t *data;
194 vm_size_t size;
1c79356b 195
9bccf70c
A
196 size = sizeof(kmod_load_extension_cmd_t);
197 data = (kmod_load_extension_cmd_t *)kalloc(size);
198 if (!data) return KERN_RESOURCE_SHORTAGE;
1c79356b 199
9bccf70c
A
200 data->type = KMOD_LOAD_EXTENSION_PACKET;
201 strncpy(data->name, name, KMOD_MAX_NAME);
1c79356b 202
9bccf70c 203 return kmod_queue_cmd((vm_address_t)data, size);
1c79356b
A
204}
205
206kern_return_t
207kmod_load_extension_with_dependencies(char *name, char **dependencies)
208{
9bccf70c
A
209 kmod_load_with_dependencies_cmd_t *data;
210 vm_size_t size;
211 char **c;
212 int i, count = 0;
213
214 c = dependencies;
215 if (c) {
216 while (*c) {
217 count++; c++;
218 }
219 }
220 size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
221 data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
222 if (!data) return KERN_RESOURCE_SHORTAGE;
1c79356b 223
9bccf70c
A
224 data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
225 strncpy(data->name, name, KMOD_MAX_NAME);
1c79356b 226
9bccf70c
A
227 c = dependencies;
228 for (i=0; i < count; i++) {
229 strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
230 c++;
231 }
232 data->dependencies[count][0] = 0;
1c79356b 233
9bccf70c 234 return kmod_queue_cmd((vm_address_t)data, size);
1c79356b
A
235}
236kern_return_t
237kmod_send_generic(int type, void *generic_data, int size)
238{
9bccf70c 239 kmod_generic_cmd_t *data;
1c79356b 240
9bccf70c
A
241 data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
242 if (!data) return KERN_RESOURCE_SHORTAGE;
1c79356b 243
9bccf70c
A
244 data->type = type;
245 bcopy(data->data, generic_data, size);
1c79356b 246
9bccf70c 247 return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
1c79356b
A
248}
249
55e303ae
A
250extern vm_offset_t sectPRELINKB;
251extern int sectSizePRELINK;
252
91447636
A
/*
 * Operates only on 32-bit Mach headers on behalf of the kernel module
 * loader if WRITE_PROTECT_MODULE_TEXT is defined.
 */
1c79356b
A
/*
 * Register an already-in-memory module with the kernel's kmod list.
 * info must be page-aligned (address and hdr_size).  Non-prelinked
 * module memory is wired here; prelinked modules (inside the kernel's
 * __PRELINK segment) are already resident and are left alone.
 * On success, assigns a fresh id, links the module onto the head of
 * the kmod list, and returns the id through *id.
 * Fails with KERN_INVALID_ARGUMENT if a module of the same name is
 * already loaded (undoing the wire first).
 */
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
	kern_return_t rc;
	boolean_t isPrelink;

	if (!info) return KERN_INVALID_ADDRESS;

	// double check for page alignment
	if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
		return KERN_INVALID_ADDRESS;
	}

	/* prelinked modules live inside [sectPRELINKB, sectPRELINKB+sectSizePRELINK) */
	isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
	if (!isPrelink) {
		/* wire the module body (everything past the mach header) */
		rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
			info->address + info->size, VM_PROT_DEFAULT, FALSE);
		if (rc != KERN_SUCCESS) {
			return rc;
		}
	}
#if WRITE_PROTECT_MODULE_TEXT
	{
		/* make the module's __TEXT,__text section read/execute only */
		struct section * sect = getsectbynamefromheader(
			(struct mach_header*) info->address, "__TEXT", "__text");

		if(sect) {
			(void) vm_map_protect(kernel_map, round_page(sect->addr), trunc_page(sect->addr + sect->size),
				VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
		}
	}
#endif /* WRITE_PROTECT_MODULE_TEXT */

	simple_lock(&kmod_lock);

	// check to see if already loaded
	if (kmod_lookupbyname(info->name)) {
		simple_unlock(&kmod_lock);
		/* undo the wire performed above before failing */
		if (!isPrelink) {
			rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
				info->address + info->size, FALSE);
			assert(rc == KERN_SUCCESS);
		}
		return KERN_INVALID_ARGUMENT;
	}

	info->id = kmod_index++;
	info->reference_count = 0;

	/* push onto the head of the global kmod list */
	info->next = kmod;
	kmod = info;

	*id = info->id;

	simple_unlock(&kmod_lock);

#if DEBUG
	printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
		info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

	return KERN_SUCCESS;
}
320
321
322kern_return_t
323kmod_create(host_priv_t host_priv,
91447636 324 vm_address_t addr,
9bccf70c 325 kmod_t *id)
1c79356b 326{
91447636
A
327 kmod_info_t *info = (kmod_info_t *)addr;
328
9bccf70c
A
329 if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
330 return kmod_create_internal(info, id);
1c79356b
A
331}
332
333kern_return_t
91447636
A
334kmod_create_fake_with_address(const char *name, const char *version,
335 vm_address_t address, vm_size_t size,
336 int * return_id)
1c79356b 337{
9bccf70c 338 kmod_info_t *info;
1c79356b 339
9bccf70c
A
340 if (!name || ! version ||
341 (1 + strlen(name) > KMOD_MAX_NAME) ||
342 (1 + strlen(version) > KMOD_MAX_NAME)) {
0b4e3aa0 343
9bccf70c
A
344 return KERN_INVALID_ARGUMENT;
345 }
0b4e3aa0 346
9bccf70c
A
347 info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
348 if (!info) {
349 return KERN_RESOURCE_SHORTAGE;
350 }
1c79356b 351
9bccf70c
A
352 // make de fake
353 info->info_version = KMOD_INFO_VERSION;
354 bcopy(name, info->name, 1 + strlen(name));
355 bcopy(version, info->version, 1 + strlen(version)); //NIK fixed this part
356 info->reference_count = 1; // keep it from unloading, starting, stopping
357 info->reference_list = 0;
91447636
A
358 info->address = address;
359 info->size = size;
360 info->hdr_size = 0;
9bccf70c
A
361 info->start = info->stop = 0;
362
363 simple_lock(&kmod_lock);
1c79356b 364
9bccf70c
A
365 // check to see if already "loaded"
366 if (kmod_lookupbyname(info->name)) {
367 simple_unlock(&kmod_lock);
368 return KERN_INVALID_ARGUMENT;
369 }
1c79356b 370
9bccf70c 371 info->id = kmod_index++;
91447636
A
372 if (return_id)
373 *return_id = info->id;
1c79356b 374
9bccf70c
A
375 info->next = kmod;
376 kmod = info;
1c79356b 377
9bccf70c
A
378 simple_unlock(&kmod_lock);
379
380 return KERN_SUCCESS;
1c79356b
A
381}
382
/*
 * Register a fake kmod entry with no backing address/size and no
 * returned id.  Convenience wrapper for built-in components.
 */
kern_return_t
kmod_create_fake(const char *name, const char *version)
{
	return kmod_create_fake_with_address(name, version, 0, 0, NULL);
}
388
389
/*
 * Unlink module `id` from the global kmod list and release its
 * resources.  With fake == TRUE (entries made by kmod_create_fake*)
 * the reference count is ignored and no module memory is freed.
 * Returns KERN_INVALID_ARGUMENT if the id is unknown or (for real
 * modules) the module is still referenced by others.
 */
static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
	kern_return_t rc;
	kmod_info_t *k;
	kmod_info_t *p;

	simple_lock(&kmod_lock);

	/* walk the list keeping a trailing pointer p for unlinking */
	k = p = kmod;
	while (k) {
		if (k->id == id) {
			kmod_reference_t *r, *t;

			/* real modules may not be destroyed while referenced */
			if (!fake && (k->reference_count != 0)) {
				simple_unlock(&kmod_lock);
				return KERN_INVALID_ARGUMENT;
			}

			if (k == p) {	// first element
				kmod = k->next;
			} else {
				p->next = k->next;
			}
			simple_unlock(&kmod_lock);

			/* drop this module's references to its dependencies */
			r = k->reference_list;
			while (r) {
				r->info->reference_count--;
				t = r;
				r = r->next;
				kfree(t, sizeof(struct kmod_reference));
			}

			if (!fake)
			{
#if DEBUG
				printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
					k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

				/* prelinked modules are part of the kernel image:
				 * give their pages back with ml_static_mfree
				 * instead of unwire + deallocate */
				if( (k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK)))
				{
					vm_offset_t
					virt = ml_static_ptovirt(k->address);
					if( virt) {
						ml_static_mfree( virt, k->size);
					}
				}
				else
				{
					rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
						k->address + k->size, FALSE);
					assert(rc == KERN_SUCCESS);

					rc = vm_deallocate(kernel_map, k->address, k->size);
					assert(rc == KERN_SUCCESS);
				}
			}
			return KERN_SUCCESS;
		}
		p = k;
		k = k->next;
	}

	simple_unlock(&kmod_lock);

	return KERN_INVALID_ARGUMENT;
}
459
91447636
A
/* Destroy a real (non-fake) module; see _kmod_destroy_internal. */
kern_return_t
kmod_destroy_internal(kmod_t id)
{
	return _kmod_destroy_internal(id, FALSE);
}
1c79356b
A
465
/* MIG entry point: privileged destroy of a real (non-fake) module. */
kern_return_t
kmod_destroy(host_priv_t host_priv,
		kmod_t id)
{
	if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
	return _kmod_destroy_internal(id, FALSE);
}
473
91447636
A
/* Destroy a fake entry (ignores its pinned reference count). */
kern_return_t
kmod_destroy_fake(kmod_t id)
{
	return _kmod_destroy_internal(id, TRUE);
}
1c79356b
A
479
/*
 * Invoke a module's start (start != 0) or stop entry point, passing any
 * MIG out-of-line payload in *data as the routine's user data.  The
 * payload is consumed: it is copied out of the message, handed to the
 * routine, then deallocated, and *data/*dataCount are zeroed for the
 * caller.  Fails if the id is unknown or the module is still referenced.
 */
kern_return_t
kmod_start_or_stop(
	kmod_t id,
	int start,
	kmod_args_t *data,
	mach_msg_type_number_t *dataCount)
{
	kern_return_t rc = KERN_SUCCESS;
	void * user_data = 0;
	kern_return_t (*func)(kmod_info_t *, void *);
	kmod_info_t *k;

	simple_lock(&kmod_lock);

	k = kmod_lookupbyid(id);
	/* refuse if unknown, or if other modules still reference it */
	if (!k || k->reference_count) {
		simple_unlock(&kmod_lock);
		rc = KERN_INVALID_ARGUMENT;
		goto finish;
	}

	if (start) {
		func = (void *)k->start;
	} else {
		func = (void *)k->stop;
	}

	simple_unlock(&kmod_lock);

	//
	// call kmod entry point
	//
	if (data && dataCount && *data && *dataCount) {
		vm_map_offset_t map_addr;
		/* NOTE(review): vm_map_copyout's result is not checked — on
		 * failure user_data stays 0 and the stale *data copy would
		 * leak; confirm this is intended. */
		vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
		user_data = CAST_DOWN(void *, map_addr);
	}

	rc = (*func)(k, user_data);

finish:

	if (user_data) {
		(void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
	}
	/* the payload (if any) has been consumed; clear caller's handles */
	if (data) *data = 0;
	if (dataCount) *dataCount = 0;

	return rc;
}
530
531
532/*
533 * The retain and release calls take no user data, but the caller
534 * may have sent some in error (the MIG definition allows it).
535 * If this is the case, they will just return that same data
536 * right back to the caller (since they never touch the *data and
537 * *dataCount fields).
538 */
539kern_return_t
540kmod_retain(kmod_t id)
541{
542 kern_return_t rc = KERN_SUCCESS;
543
544 kmod_info_t *t; // reference to
545 kmod_info_t *f; // reference from
546 kmod_reference_t *r = 0;
547
548 r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
549 if (!r) {
550 rc = KERN_RESOURCE_SHORTAGE;
551 goto finish;
552 }
553
554 simple_lock(&kmod_lock);
555
556 t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
557 f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
558 if (!t || !f) {
559 simple_unlock(&kmod_lock);
91447636 560 if (r) kfree(r, sizeof(struct kmod_reference));
1c79356b
A
561 rc = KERN_INVALID_ARGUMENT;
562 goto finish;
563 }
564
565 r->next = f->reference_list;
566 r->info = t;
567 f->reference_list = r;
568 t->reference_count++;
569
570 simple_unlock(&kmod_lock);
571
572finish:
573
574 return rc;
575}
576
577
578kern_return_t
579kmod_release(kmod_t id)
580{
581 kern_return_t rc = KERN_INVALID_ARGUMENT;
582
583 kmod_info_t *t; // reference to
584 kmod_info_t *f; // reference from
585 kmod_reference_t *r = 0;
586 kmod_reference_t * p;
587
588 simple_lock(&kmod_lock);
589
590 t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
591 f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
592 if (!t || !f) {
593 rc = KERN_INVALID_ARGUMENT;
594 goto finish;
595 }
596
597 p = r = f->reference_list;
598 while (r) {
599 if (r->info == t) {
9bccf70c 600 if (p == r) { // first element
1c79356b
A
601 f->reference_list = r->next;
602 } else {
603 p->next = r->next;
604 }
605 r->info->reference_count--;
606
9bccf70c 607 simple_unlock(&kmod_lock);
91447636 608 kfree(r, sizeof(struct kmod_reference));
9bccf70c 609 rc = KERN_SUCCESS;
1c79356b
A
610 goto finish;
611 }
612 p = r;
613 r = r->next;
614 }
615
616 simple_unlock(&kmod_lock);
617
618finish:
619
620 return rc;
621}
622
623
/*
 * MIG-exposed privileged control interface for kernel modules.
 * Dispatches on flavor: START/STOP call the module's entry points,
 * RETAIN/RELEASE manage inter-module references, and GET_CMD blocks
 * (interruptibly) until a queued loader command is available and
 * copies it out to the caller.
 */
kern_return_t
kmod_control(host_priv_t host_priv,
		kmod_t id,
		kmod_control_flavor_t flavor,
		kmod_args_t *data,
		mach_msg_type_number_t *dataCount)
{
	kern_return_t rc = KERN_SUCCESS;

	if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

	switch (flavor) {

	  case KMOD_CNTL_START:
	  case KMOD_CNTL_STOP:
	    {
		rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
		    data, dataCount);
		break;
	    }

	  case KMOD_CNTL_RETAIN:
	    {
		rc = kmod_retain(id);
		break;
	    }

	  case KMOD_CNTL_RELEASE:
	    {
		rc = kmod_release(id);
		break;
	    }

	  case KMOD_CNTL_GET_CMD:
	    {

		cmd_queue_entry_t *e;

		/*
		 * Throw away any data the user may have sent in error.
		 * We must do this, because we are likely to return to
		 * some data for these commands (thus causing a leak of
		 * whatever data the user sent us in error).
		 */
		if (*data && *dataCount) {
		    vm_map_copy_discard(*data);
		    *data = 0;
		    *dataCount = 0;
		}

		simple_lock(&kmod_queue_lock);

		if (queue_empty(&kmod_cmd_queue)) {
		    wait_result_t res;

		    /* block (abort-safe) until kmod_queue_cmd wakes us */
		    res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
			    &kmod_queue_lock,
			    THREAD_ABORTSAFE);
		    if (queue_empty(&kmod_cmd_queue)) {
			// we must have been interrupted!
			simple_unlock(&kmod_queue_lock);
			assert(res == THREAD_INTERRUPTED);
			return KERN_ABORTED;
		    }
		}
		e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

		simple_unlock(&kmod_queue_lock);

		/* hand the command's buffer to the caller as a vm copy object */
		rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
		    (vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
		if (rc) {
		    /* couldn't copy it out; put the command back for the
		     * next caller rather than dropping it */
		    simple_lock(&kmod_queue_lock);
		    enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
		    simple_unlock(&kmod_queue_lock);
		    *data = 0;
		    *dataCount = 0;
		    return rc;
		}
		*dataCount = e->size;

		/* buffer ownership moved into the copy object; free the entry */
		kfree(e, sizeof(struct cmd_queue_entry));

		break;
	    }

	  default:
		rc = KERN_INVALID_ARGUMENT;
	}

	return rc;
};
716
717
/*
 * Snapshot the whole kmod list (plus all reference lists) into one
 * flat buffer and copy it out to the caller as a vm copy object.
 * Two-pass scheme: pass 1 (locked) measures, then the buffer is
 * allocated unlocked, then pass 2 (locked) fills it — restarting from
 * scratch if the list grew in between.  The copied-out records are
 * tweaked for user-level consumption (see comments below).
 * Returns KERN_SUCCESS with *kmodCount == 0 if no modules are loaded.
 */
kern_return_t
kmod_get_info(__unused host_t host,
	      kmod_info_array_t *kmods,
	      mach_msg_type_number_t *kmodCount)
{
	vm_offset_t data;
	kmod_info_t *k, *p1;
	kmod_reference_t *r, *p2;
	int ref_count;
	unsigned size = 0;
	kern_return_t rc = KERN_SUCCESS;

	*kmods = (void *)0;
	*kmodCount = 0;

retry:
	/* pass 1 (locked): measure how much space the snapshot needs */
	simple_lock(&kmod_lock);
	size = 0;
	k = kmod;
	while (k) {
		size += sizeof(kmod_info_t);
		r = k->reference_list;
		while (r) {
			size +=sizeof(kmod_reference_t);
			r = r->next;
		}
		k = k->next;
	}
	simple_unlock(&kmod_lock);
	if (!size) return KERN_SUCCESS;

	rc = kmem_alloc(kernel_map, &data, size);
	if (rc) return rc;

	// copy kmod into data, retry if kmod's size has changed (grown)
	// the copied out data is tweeked to figure what's what at user level
	// change the copied out k->next pointers to point to themselves
	// change the k->reference into a count, tack the references on
	// the end of the data packet in the order they are found

	/* pass 2 (locked): copy the kmod_info entries */
	simple_lock(&kmod_lock);
	k = kmod; p1 = (kmod_info_t *)data;
	while (k) {
		/* list grew since pass 1: drop the buffer and re-measure */
		if ((p1 + 1) > (kmod_info_t *)(data + size)) {
			simple_unlock(&kmod_lock);
			kmem_free(kernel_map, data, size);
			goto retry;
		}

		*p1 = *k;
		if (k->next) p1->next = k;
		p1++; k = k->next;
	}

	/* references are packed immediately after the kmod_info entries */
	p2 = (kmod_reference_t *)p1;
	k = kmod; p1 = (kmod_info_t *)data;
	while (k) {
		r = k->reference_list; ref_count = 0;
		while (r) {
			if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
				simple_unlock(&kmod_lock);
				kmem_free(kernel_map, data, size);
				goto retry;
			}
			// note the last 'k' in the chain has its next == 0
			// since there can only be one like that,
			// this case is handled by the caller
			*p2 = *r;
			p2++; r = r->next; ref_count++;
		}
		/* smuggle this module's reference count through the pointer field */
		p1->reference_list = (kmod_reference_t *)ref_count;
		p1++; k = k->next;
	}
	simple_unlock(&kmod_lock);

	rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
	if (rc) {
		kmem_free(kernel_map, data, size);
		*kmods = 0;
		*kmodCount = 0;
		return rc;
	}
	*kmodCount = size;

	return KERN_SUCCESS;
}
1c79356b 804
91447636
A
/*
 * Operates only on 32-bit Mach headers on behalf of the kernel module loader.
 */
1c79356b
A
808static kern_return_t
809kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
810{
9bccf70c
A
811 typedef void (*Routine)(void);
812 Routine * routines;
813 int size, i;
1c79356b 814
9bccf70c
A
815 if (header->magic != MH_MAGIC) {
816 return KERN_INVALID_ARGUMENT;
817 }
1c79356b 818
91447636 819 routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, /*(char *)*/ sectName, &size);
9bccf70c 820 if (!routines) return KERN_SUCCESS;
1c79356b 821
9bccf70c
A
822 size /= sizeof(Routine);
823 for (i = 0; i < size; i++) {
824 (*routines[i])();
825 }
1c79356b 826
9bccf70c 827 return KERN_SUCCESS;
1c79356b
A
828}
829
91447636
A
/*
 * Operates only on 32-bit Mach headers on behalf of the kernel module loader.
 */
1c79356b
A
/*
 * Run the module's C++ static constructors (its __constructor section).
 * Operates on 32-bit mach headers only.
 */
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
	return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}
838
91447636
A
/*
 * Operates only on 32-bit Mach headers on behalf of the kernel module loader.
 */
1c79356b
A
/*
 * Run the module's C++ static destructors (its __destructor section).
 * Operates on 32-bit mach headers only.
 */
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
	return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}
847
/* Default start routine for modules that supply none: always succeeds. */
kern_return_t
kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
{
	return KMOD_RETURN_SUCCESS;
}
853
/* Default stop routine for modules that supply none: always succeeds. */
kern_return_t
kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
{
	return KMOD_RETURN_SUCCESS;
}
859
91447636
A
/*
 * For each backtrace address in addr[0..cnt), report which loaded
 * kmod (and its dependency chain) contains it, via printf_func.
 * Intended for panic/debugger context: every kmod and reference node
 * is probed with pmap_find_phys before being dereferenced so a
 * missing page cannot cause a recursive fault.
 */
static void
kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
	     void (*printf_func)(const char *fmt, ...))
{
	vm_offset_t * kscan_addr = 0;
	kmod_info_t * k;
	kmod_reference_t * r;
	unsigned int i;
	int found_kmod = 0;
	kmod_info_t * stop_kmod = 0;

	for (k = kmod; k; k = k->next) {
		/* don't fault in panic context: check page residency first */
		if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
			(*printf_func)(" kmod scan stopped due to missing "
				"kmod page: %08x\n", stop_kmod);
			break;
		}
		if (!k->address) {
			continue; // skip fake entries for built-in kernel components
		}
		/* does any backtrace address fall inside this module? */
		for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
			if ((*kscan_addr >= k->address) &&
			    (*kscan_addr < (k->address + k->size))) {

				if (!found_kmod) {
					(*printf_func)(" Kernel loadable modules in backtrace "
						"(with dependencies):\n");
				}
				found_kmod = 1;
				(*printf_func)(" %s(%s)@0x%x\n",
					k->name, k->version, k->address);

				/* list this module's dependencies, with the
				 * same page-residency caution */
				for (r = k->reference_list; r; r = r->next) {
					kmod_info_t * rinfo;

					if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
						(*printf_func)(" kmod dependency scan stopped "
							"due to missing dependency page: %08x\n", r);
						break;
					}

					rinfo = r->info;

					if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
						(*printf_func)(" kmod dependency scan stopped "
							"due to missing kmod page: %08x\n", rinfo);
						break;
					}

					if (!rinfo->address) {
						continue; // skip fake entries for built-ins
					}

					(*printf_func)(" dependency: %s(%s)@0x%x\n",
						rinfo->name, rinfo->version, rinfo->address);
				}

				break; // only report this kmod for one backtrace address
			}
		}
	}

	return;
}
91447636
A
924
/* Dump backtrace kmod info to the debugger console (kdb_printf). */
void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
	kmod_dump_to(addr, cnt, &kdb_printf);
}
930
/* Dump backtrace kmod info to the system log (printf). */
void
kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
{
	kmod_dump_to(addr, cnt, &printf);
}