]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/kmod.c
xnu-792.22.5.tar.gz
[apple/xnu.git] / osfmk / kern / kmod.c
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
8f6c56a5
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
8f6c56a5
A
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 * 1999 Mar 29 rsulack created.
34 */
35
36#include <mach/mach_types.h>
37#include <mach/vm_types.h>
38#include <mach/kern_return.h>
91447636
A
39#include <mach/host_priv_server.h>
40#include <mach/vm_map.h>
41
42#include <kern/kalloc.h>
1c79356b 43#include <kern/kern_types.h>
1c79356b 44#include <kern/thread.h>
91447636
A
45
46#include <vm/vm_kern.h>
47
9bccf70c 48#include <mach-o/mach_header.h>
1c79356b
A
49
50#include <mach_host.h>
51
91447636
A
/*
 * XXX headers for which prototypes should be in a common include file;
 * XXX see libsa/kext.cpp for why.
 */
/* MIG-visible entry points and internal helpers implemented below. */
kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
kern_return_t kmod_destroy_internal(kmod_t id);
kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
	mach_msg_type_number_t *dataCount);
kern_return_t kmod_retain(kmod_t id);
kern_return_t kmod_release(kmod_t id);
kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
	mach_msg_type_number_t *kmodCount);
/* Debugger-safe printf used by kmod_dump(); lives in the kdb/kdp code. */
extern void kdb_printf(const char *fmt, ...);
9bccf70c
A
/* When non-zero, kmod_create_internal write-protects a module's __text. */
#define WRITE_PROTECT_MODULE_TEXT (0)

/* Head of the singly-linked list of loaded modules (kmod_info.next).
 * List insert/remove is serialized by kmod_lock. */
kmod_info_t *kmod = 0;
/* Next module id to hand out; only touched under kmod_lock. */
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)        /* guards kmod list + kmod_index */
decl_simple_lock_data(,kmod_queue_lock)  /* guards kmod_cmd_queue */

/* One queued command for the user-space kext loader; `data` is a
 * kmem_alloc'ed copy of the packet, `size` its length in bytes. */
typedef struct cmd_queue_entry {
	queue_chain_t links;
	vm_address_t data;
	vm_size_t size;
} cmd_queue_entry_t;

/* Commands waiting to be picked up via kmod_control(KMOD_CNTL_GET_CMD). */
queue_head_t kmod_cmd_queue;
1c79356b
A
84
85void
91447636 86kmod_init(void)
1c79356b 87{
91447636
A
88 simple_lock_init(&kmod_lock, 0);
89 simple_lock_init(&kmod_queue_lock, 0);
9bccf70c 90 queue_init(&kmod_cmd_queue);
1c79356b
A
91}
92
93kmod_info_t *
94kmod_lookupbyid(kmod_t id)
95{
9bccf70c 96 kmod_info_t *k = 0;
1c79356b 97
9bccf70c
A
98 k = kmod;
99 while (k) {
100 if (k->id == id) break;
101 k = k->next;
102 }
1c79356b 103
9bccf70c 104 return k;
1c79356b
A
105}
106
107kmod_info_t *
0b4e3aa0 108kmod_lookupbyname(const char * name)
1c79356b 109{
9bccf70c 110 kmod_info_t *k = 0;
1c79356b 111
9bccf70c
A
112 k = kmod;
113 while (k) {
114 if (!strcmp(k->name, name)) break;
115 k = k->next;
116 }
1c79356b 117
9bccf70c
A
118 return k;
119}
120
121kmod_info_t *
122kmod_lookupbyid_locked(kmod_t id)
123{
124 kmod_info_t *k = 0;
125 kmod_info_t *kc = 0;
126
127 kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
128 if (!kc) return kc;
129
130 simple_lock(&kmod_queue_lock);
131 k = kmod_lookupbyid(id);
132 if (k) {
133 bcopy((char*)k, (char *)kc, sizeof(kmod_info_t));
134 }
91447636 135
9bccf70c
A
136 simple_unlock(&kmod_queue_lock);
137
138 if (k == 0) {
91447636 139 kfree(kc, sizeof(kmod_info_t));
9bccf70c
A
140 kc = 0;
141 }
142 return kc;
143}
144
145kmod_info_t *
146kmod_lookupbyname_locked(const char * name)
147{
148 kmod_info_t *k = 0;
149 kmod_info_t *kc = 0;
150
151 kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
152 if (!kc) return kc;
153
154 simple_lock(&kmod_queue_lock);
155 k = kmod_lookupbyname(name);
156 if (k) {
157 bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
158 }
91447636 159
9bccf70c
A
160 simple_unlock(&kmod_queue_lock);
161
162 if (k == 0) {
91447636 163 kfree(kc, sizeof(kmod_info_t));
9bccf70c
A
164 kc = 0;
165 }
166 return kc;
1c79356b
A
167}
168
169// XXX add a nocopy flag??
170
171kern_return_t
172kmod_queue_cmd(vm_address_t data, vm_size_t size)
173{
9bccf70c
A
174 kern_return_t rc;
175 cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
176 if (!e) return KERN_RESOURCE_SHORTAGE;
177
178 rc = kmem_alloc(kernel_map, &e->data, size);
179 if (rc != KERN_SUCCESS) {
91447636 180 kfree(e, sizeof(struct cmd_queue_entry));
9bccf70c
A
181 return rc;
182 }
183 e->size = size;
184 bcopy((void *)data, (void *)e->data, size);
1c79356b 185
9bccf70c
A
186 simple_lock(&kmod_queue_lock);
187 enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
188 simple_unlock(&kmod_queue_lock);
1c79356b 189
9bccf70c
A
190 thread_wakeup_one((event_t)&kmod_cmd_queue);
191
192 return KERN_SUCCESS;
1c79356b
A
193}
194
195kern_return_t
196kmod_load_extension(char *name)
197{
9bccf70c
A
198 kmod_load_extension_cmd_t *data;
199 vm_size_t size;
1c79356b 200
9bccf70c
A
201 size = sizeof(kmod_load_extension_cmd_t);
202 data = (kmod_load_extension_cmd_t *)kalloc(size);
203 if (!data) return KERN_RESOURCE_SHORTAGE;
1c79356b 204
9bccf70c
A
205 data->type = KMOD_LOAD_EXTENSION_PACKET;
206 strncpy(data->name, name, KMOD_MAX_NAME);
1c79356b 207
9bccf70c 208 return kmod_queue_cmd((vm_address_t)data, size);
1c79356b
A
209}
210
/*
 * Ask the user-space loader to load `name` together with its listed
 * dependencies.  `dependencies` is a NULL-terminated array of C-string
 * names (may itself be NULL for none).
 */
kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
	kmod_load_with_dependencies_cmd_t *data;
	vm_size_t size;
	char **c;
	int i, count = 0;

	// count the dependency names
	c = dependencies;
	if (c) {
		while (*c) {
			count++; c++;
		}
	}
	// packet layout: int type + name slot + `count` dependency slots
	// + 1 byte for the empty-string terminator slot written below
	size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
	data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
	if (!data) return KERN_RESOURCE_SHORTAGE;

	data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
	// NOTE(review): strncpy leaves the field unterminated if the name is
	// >= KMOD_MAX_NAME chars — presumably callers validate; confirm.
	strncpy(data->name, name, KMOD_MAX_NAME);

	c = dependencies;
	for (i=0; i < count; i++) {
		strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
		c++;
	}
	// empty string marks the end of the dependency list for the receiver
	data->dependencies[count][0] = 0;

	// kmod_queue_cmd copies the packet; the kalloc'ed `data` is handed
	// to the queue path
	return kmod_queue_cmd((vm_address_t)data, size);
}
241kern_return_t
242kmod_send_generic(int type, void *generic_data, int size)
243{
9bccf70c 244 kmod_generic_cmd_t *data;
1c79356b 245
9bccf70c
A
246 data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
247 if (!data) return KERN_RESOURCE_SHORTAGE;
1c79356b 248
9bccf70c
A
249 data->type = type;
250 bcopy(data->data, generic_data, size);
1c79356b 251
9bccf70c 252 return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
1c79356b
A
253}
254
55e303ae
A
/* Bounds of the prelinked-kext region; modules inside it are not wired
 * here (they are part of the static kernel image). */
extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;

/*
 * Register a freshly loaded module in the kmod list and assign it an id.
 * Operates only on 32 bit mach headers on behalf of the kernel module
 * loader if WRITE_PROTECT_MODULE_TEXT is defined.
 * Returns KERN_INVALID_ADDRESS for a NULL/misaligned info,
 * KERN_INVALID_ARGUMENT for a duplicate name, or a vm_map_wire error.
 */
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
	kern_return_t rc;
	boolean_t isPrelink;

	if (!info) return KERN_INVALID_ADDRESS;

	// double check for page alignment
	if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
		return KERN_INVALID_ADDRESS;
	}

	// prelinked modules live in the kernel image and need no wiring
	isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
	if (!isPrelink) {
		// wire everything after the mach header so the module can
		// never be paged out while loaded
		rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
			info->address + info->size, VM_PROT_DEFAULT, FALSE);
		if (rc != KERN_SUCCESS) {
			return rc;
		}
	}
#if WRITE_PROTECT_MODULE_TEXT
	{
		struct section * sect = getsectbynamefromheader(
			(struct mach_header*) info->address, "__TEXT", "__text");

		if(sect) {
			(void) vm_map_protect(kernel_map, round_page(sect->addr), trunc_page(sect->addr + sect->size),
				VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
		}
	}
#endif /* WRITE_PROTECT_MODULE_TEXT */

	simple_lock(&kmod_lock);

	// check to see if already loaded
	if (kmod_lookupbyname(info->name)) {
		simple_unlock(&kmod_lock);
		// undo the wiring done above before failing
		if (!isPrelink) {
			rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
				info->address + info->size, FALSE);
			assert(rc == KERN_SUCCESS);
		}
		return KERN_INVALID_ARGUMENT;
	}

	info->id = kmod_index++;
	info->reference_count = 0;

	// push onto the head of the global module list
	info->next = kmod;
	kmod = info;

	*id = info->id;

	simple_unlock(&kmod_lock);

#if DEBUG
	printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
		info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

	return KERN_SUCCESS;
}
325
326
327kern_return_t
328kmod_create(host_priv_t host_priv,
91447636 329 vm_address_t addr,
9bccf70c 330 kmod_t *id)
1c79356b 331{
91447636
A
332 kmod_info_t *info = (kmod_info_t *)addr;
333
9bccf70c
A
334 if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
335 return kmod_create_internal(info, id);
1c79356b
A
336}
337
338kern_return_t
91447636
A
339kmod_create_fake_with_address(const char *name, const char *version,
340 vm_address_t address, vm_size_t size,
341 int * return_id)
1c79356b 342{
9bccf70c 343 kmod_info_t *info;
1c79356b 344
9bccf70c
A
345 if (!name || ! version ||
346 (1 + strlen(name) > KMOD_MAX_NAME) ||
347 (1 + strlen(version) > KMOD_MAX_NAME)) {
0b4e3aa0 348
9bccf70c
A
349 return KERN_INVALID_ARGUMENT;
350 }
0b4e3aa0 351
9bccf70c
A
352 info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
353 if (!info) {
354 return KERN_RESOURCE_SHORTAGE;
355 }
1c79356b 356
9bccf70c
A
357 // make de fake
358 info->info_version = KMOD_INFO_VERSION;
359 bcopy(name, info->name, 1 + strlen(name));
360 bcopy(version, info->version, 1 + strlen(version)); //NIK fixed this part
361 info->reference_count = 1; // keep it from unloading, starting, stopping
362 info->reference_list = 0;
91447636
A
363 info->address = address;
364 info->size = size;
365 info->hdr_size = 0;
9bccf70c
A
366 info->start = info->stop = 0;
367
368 simple_lock(&kmod_lock);
1c79356b 369
9bccf70c
A
370 // check to see if already "loaded"
371 if (kmod_lookupbyname(info->name)) {
372 simple_unlock(&kmod_lock);
373 return KERN_INVALID_ARGUMENT;
374 }
1c79356b 375
9bccf70c 376 info->id = kmod_index++;
91447636
A
377 if (return_id)
378 *return_id = info->id;
1c79356b 379
9bccf70c
A
380 info->next = kmod;
381 kmod = info;
1c79356b 382
9bccf70c
A
383 simple_unlock(&kmod_lock);
384
385 return KERN_SUCCESS;
1c79356b
A
386}
387
388kern_return_t
91447636
A
389kmod_create_fake(const char *name, const char *version)
390{
391 return kmod_create_fake_with_address(name, version, 0, 0, NULL);
392}
393
394
/*
 * Unlink the module with the given id from the kmod list and tear it
 * down.  `fake` entries (built-ins registered via kmod_create_fake*)
 * skip the reference_count check and the memory teardown.
 * Returns KERN_INVALID_ARGUMENT if the id is unknown or the module is
 * still referenced by another module.
 */
static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
	kern_return_t rc;
	kmod_info_t *k;
	kmod_info_t *p;

	simple_lock(&kmod_lock);

	// walk with a trailing pointer `p` so the node can be unlinked
	k = p = kmod;
	while (k) {
		if (k->id == id) {
			kmod_reference_t *r, *t;

			// a real module may not go away while others depend on it
			if (!fake && (k->reference_count != 0)) {
				simple_unlock(&kmod_lock);
				return KERN_INVALID_ARGUMENT;
			}

			if (k == p) {	// first element
				kmod = k->next;
			} else {
				p->next = k->next;
			}
			// node is off the list; the rest needs no lock
			simple_unlock(&kmod_lock);

			// drop this module's references to its dependencies
			r = k->reference_list;
			while (r) {
				r->info->reference_count--;
				t = r;
				r = r->next;
				kfree(t, sizeof(struct kmod_reference));
			}

			if (!fake)
			{
#if DEBUG
				printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
					k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

				// prelinked modules were never wired; return their
				// pages to the VM system via ml_static_mfree
				if( (k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK)))
				{
					vm_offset_t
					virt = ml_static_ptovirt(k->address);
					if( virt) {
						ml_static_mfree( virt, k->size);
					}
				}
				else
				{
					// undo the vm_map_wire done at create time,
					// then free the module's mapping
					rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
						k->address + k->size, FALSE);
					assert(rc == KERN_SUCCESS);

					rc = vm_deallocate(kernel_map, k->address, k->size);
					assert(rc == KERN_SUCCESS);
				}
			}
			return KERN_SUCCESS;
		}
		p = k;
		k = k->next;
	}

	simple_unlock(&kmod_lock);

	return KERN_INVALID_ARGUMENT;
}
464
91447636
A
465kern_return_t
466kmod_destroy_internal(kmod_t id)
467{
468 return _kmod_destroy_internal(id, FALSE);
469}
1c79356b
A
470
471kern_return_t
472kmod_destroy(host_priv_t host_priv,
9bccf70c 473 kmod_t id)
1c79356b 474{
9bccf70c 475 if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
91447636 476 return _kmod_destroy_internal(id, FALSE);
1c79356b
A
477}
478
91447636
A
479kern_return_t
480kmod_destroy_fake(kmod_t id)
481{
482 return _kmod_destroy_internal(id, TRUE);
483}
1c79356b
A
484
/*
 * Invoke a module's start or stop routine, passing through any user
 * data sent with the request.  Refused (KERN_INVALID_ARGUMENT) when the
 * id is unknown or the module is referenced by another module.
 * The incoming *data/*dataCount are always consumed: zeroed on return.
 */
kern_return_t
kmod_start_or_stop(
	kmod_t id,
	int start,
	kmod_args_t *data,
	mach_msg_type_number_t *dataCount)
{
	kern_return_t rc = KERN_SUCCESS;
	void * user_data = 0;
	kern_return_t (*func)(kmod_info_t *, void *);
	kmod_info_t *k;

	simple_lock(&kmod_lock);

	k = kmod_lookupbyid(id);
	if (!k || k->reference_count) {
		simple_unlock(&kmod_lock);
		rc = KERN_INVALID_ARGUMENT;
		goto finish;
	}

	// NOTE(review): func is not checked for NULL before the call below
	// — presumably the loader always fills in start/stop; confirm.
	if (start) {
		func = (void *)k->start;
	} else {
		func = (void *)k->stop;
	}

	// the entry point is called without the lock; `k` stays valid
	// because a referenced/unloading module was rejected above
	simple_unlock(&kmod_lock);

	//
	// call kmod entry point
	//
	if (data && dataCount && *data && *dataCount) {
		vm_map_offset_t map_addr;
		// NOTE(review): vm_map_copyout's return code is ignored;
		// on failure map_addr would be used uninitialized — confirm
		// upstream guarantees, or check the result.
		vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
		user_data = CAST_DOWN(void *, map_addr);
	}

	rc = (*func)(k, user_data);

finish:

	if (user_data) {
		(void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
	}
	// the copy object (if any) was consumed above; clear the caller's view
	if (data) *data = 0;
	if (dataCount) *dataCount = 0;

	return rc;
}
535
536
/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
/*
 * Record that module KMOD_UNPACK_FROM_ID(id) depends on module
 * KMOD_UNPACK_TO_ID(id): push a reference node onto the "from" module's
 * reference_list and bump the "to" module's reference_count so it
 * cannot be unloaded while depended upon.
 */
kern_return_t
kmod_retain(kmod_t id)
{
	kern_return_t rc = KERN_SUCCESS;

	kmod_info_t *t;	// reference to
	kmod_info_t *f;	// reference from
	kmod_reference_t *r = 0;

	// allocate up front so no kalloc happens while holding kmod_lock
	r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
	if (!r) {
		rc = KERN_RESOURCE_SHORTAGE;
		goto finish;
	}

	simple_lock(&kmod_lock);

	t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
	f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
	if (!t || !f) {
		simple_unlock(&kmod_lock);
		// r is always non-NULL here; the guard is redundant but harmless
		if (r) kfree(r, sizeof(struct kmod_reference));
		rc = KERN_INVALID_ARGUMENT;
		goto finish;
	}

	// link the new reference at the head of the "from" module's list
	r->next = f->reference_list;
	r->info = t;
	f->reference_list = r;
	t->reference_count++;

	simple_unlock(&kmod_lock);

finish:

	return rc;
}
581
582
583kern_return_t
584kmod_release(kmod_t id)
585{
586 kern_return_t rc = KERN_INVALID_ARGUMENT;
587
588 kmod_info_t *t; // reference to
589 kmod_info_t *f; // reference from
590 kmod_reference_t *r = 0;
591 kmod_reference_t * p;
592
593 simple_lock(&kmod_lock);
594
595 t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
596 f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
597 if (!t || !f) {
598 rc = KERN_INVALID_ARGUMENT;
599 goto finish;
600 }
601
602 p = r = f->reference_list;
603 while (r) {
604 if (r->info == t) {
9bccf70c 605 if (p == r) { // first element
1c79356b
A
606 f->reference_list = r->next;
607 } else {
608 p->next = r->next;
609 }
610 r->info->reference_count--;
611
9bccf70c 612 simple_unlock(&kmod_lock);
91447636 613 kfree(r, sizeof(struct kmod_reference));
9bccf70c 614 rc = KERN_SUCCESS;
1c79356b
A
615 goto finish;
616 }
617 p = r;
618 r = r->next;
619 }
620
621 simple_unlock(&kmod_lock);
622
623finish:
624
625 return rc;
626}
627
628
/*
 * Privileged MIG dispatcher for kmod operations: start/stop a module,
 * retain/release a dependency, or (GET_CMD) block until a command packet
 * is available on kmod_cmd_queue and copy it out to the caller.
 */
kern_return_t
kmod_control(host_priv_t host_priv,
		kmod_t id,
		kmod_control_flavor_t flavor,
		kmod_args_t *data,
		mach_msg_type_number_t *dataCount)
{
	kern_return_t rc = KERN_SUCCESS;

	if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

	switch (flavor) {

	  case KMOD_CNTL_START:
	  case KMOD_CNTL_STOP:
	    {
		rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
			data, dataCount);
		break;
	    }

	  case KMOD_CNTL_RETAIN:
	    {
		rc = kmod_retain(id);
		break;
	    }

	  case KMOD_CNTL_RELEASE:
	    {
		rc = kmod_release(id);
		break;
	    }

	  case KMOD_CNTL_GET_CMD:
	    {

		cmd_queue_entry_t *e;

		/*
		 * Throw away any data the user may have sent in error.
		 * We must do this, because we are likely to return to
		 * some data for these commands (thus causing a leak of
		 * whatever data the user sent us in error).
		 */
		if (*data && *dataCount) {
			vm_map_copy_discard(*data);
			*data = 0;
			*dataCount = 0;
		}

		simple_lock(&kmod_queue_lock);

		// block until a producer (kmod_queue_cmd) wakes us
		if (queue_empty(&kmod_cmd_queue)) {
			wait_result_t res;

			res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
							&kmod_queue_lock,
							THREAD_ABORTSAFE);
			if (queue_empty(&kmod_cmd_queue)) {
				// we must have been interrupted!
				simple_unlock(&kmod_queue_lock);
				assert(res == THREAD_INTERRUPTED);
				return KERN_ABORTED;
			}
		}
		e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

		simple_unlock(&kmod_queue_lock);

		// hand the packet buffer to the caller as a vm copy object;
		// consume-on-success (TRUE) so e->data is released for us
		rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
			(vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
		if (rc) {
			// put the packet back so it is not lost
			simple_lock(&kmod_queue_lock);
			enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
			simple_unlock(&kmod_queue_lock);
			*data = 0;
			*dataCount = 0;
			return rc;
		}
		*dataCount = e->size;

		kfree(e, sizeof(struct cmd_queue_entry));

		break;
	    }

	  default:
		rc = KERN_INVALID_ARGUMENT;
	}

	return rc;
};
721
722
/*
 * Produce a flat snapshot of all loaded modules for user space.
 * Layout of the returned buffer: all kmod_info records first, then all
 * reference records in module order.  Each copied record's next pointer
 * is repurposed (points at the kernel's own node, last one NULL) and
 * each reference_list field is overwritten with the reference COUNT;
 * user space reconstructs the lists from these hints.
 * Retries from scratch if the module list grows between the sizing pass
 * and the copy pass.
 */
kern_return_t
kmod_get_info(__unused host_t host,
	      kmod_info_array_t *kmods,
	      mach_msg_type_number_t *kmodCount)
{
	vm_offset_t data;
	kmod_info_t *k, *p1;
	kmod_reference_t *r, *p2;
	int ref_count;
	unsigned size = 0;
	kern_return_t rc = KERN_SUCCESS;

	*kmods = (void *)0;
	*kmodCount = 0;

retry:
	// pass 1: measure how much space the snapshot needs
	simple_lock(&kmod_lock);
	size = 0;
	k = kmod;
	while (k) {
		size += sizeof(kmod_info_t);
		r = k->reference_list;
		while (r) {
			size +=sizeof(kmod_reference_t);
			r = r->next;
		}
		k = k->next;
	}
	simple_unlock(&kmod_lock);
	if (!size) return KERN_SUCCESS;

	// allocate with the lock dropped; the list may change meanwhile,
	// which the bounds checks below detect and turn into a retry
	rc = kmem_alloc(kernel_map, &data, size);
	if (rc) return rc;

	// copy kmod into data, retry if kmod's size has changed (grown)
	// the copied out data is tweeked to figure what's what at user level
	// change the copied out k->next pointers to point to themselves
	// change the k->reference into a count, tack the references on
	// the end of the data packet in the order they are found

	simple_lock(&kmod_lock);
	k = kmod; p1 = (kmod_info_t *)data;
	while (k) {
		if ((p1 + 1) > (kmod_info_t *)(data + size)) {
			simple_unlock(&kmod_lock);
			kmem_free(kernel_map, data, size);
			goto retry;
		}

		*p1 = *k;
		if (k->next) p1->next = k;
		p1++; k = k->next;
	}

	// pass 2b: append the reference records after the info records
	p2 = (kmod_reference_t *)p1;
	k = kmod; p1 = (kmod_info_t *)data;
	while (k) {
		r = k->reference_list; ref_count = 0;
		while (r) {
			if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
				simple_unlock(&kmod_lock);
				kmem_free(kernel_map, data, size);
				goto retry;
			}
			// note the last 'k' in the chain has its next == 0
			// since there can only be one like that,
			// this case is handled by the caller
			*p2 = *r;
			p2++; r = r->next; ref_count++;
		}
		// smuggle the count through the pointer-sized field
		p1->reference_list = (kmod_reference_t *)ref_count;
		p1++; k = k->next;
	}
	simple_unlock(&kmod_lock);

	// hand the snapshot to the caller as a consumed vm copy object
	rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
	if (rc) {
		kmem_free(kernel_map, data, size);
		*kmods = 0;
		*kmodCount = 0;
		return rc;
	}
	*kmodCount = size;

	return KERN_SUCCESS;
}
1c79356b 809
91447636
A
810/*
811 * Operates only on 32 bit mach keaders on behalf of kernel module loader
812 */
1c79356b
A
813static kern_return_t
814kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
815{
9bccf70c
A
816 typedef void (*Routine)(void);
817 Routine * routines;
818 int size, i;
1c79356b 819
9bccf70c
A
820 if (header->magic != MH_MAGIC) {
821 return KERN_INVALID_ARGUMENT;
822 }
1c79356b 823
91447636 824 routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, /*(char *)*/ sectName, &size);
9bccf70c 825 if (!routines) return KERN_SUCCESS;
1c79356b 826
9bccf70c
A
827 size /= sizeof(Routine);
828 for (i = 0; i < size; i++) {
829 (*routines[i])();
830 }
1c79356b 831
9bccf70c 832 return KERN_SUCCESS;
1c79356b
A
833}
834
91447636
A
835/*
836 * Operates only on 32 bit mach keaders on behalf of kernel module loader
837 */
1c79356b
A
838kern_return_t
839kmod_initialize_cpp(kmod_info_t *info)
840{
9bccf70c 841 return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
1c79356b
A
842}
843
91447636
A
844/*
845 * Operates only on 32 bit mach keaders on behalf of kernel module loader
846 */
1c79356b
A
847kern_return_t
848kmod_finalize_cpp(kmod_info_t *info)
849{
9bccf70c 850 return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
1c79356b
A
851}
852
853kern_return_t
91447636 854kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
1c79356b 855{
9bccf70c 856 return KMOD_RETURN_SUCCESS;
1c79356b
A
857}
858
859kern_return_t
91447636 860kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
1c79356b 861{
9bccf70c 862 return KMOD_RETURN_SUCCESS;
1c79356b
A
863}
864
91447636
A
/*
 * Panic/debug helper: for each backtrace address in addr[0..cnt), report
 * which loaded module contains it, plus that module's dependencies.
 * Runs in panic context, so every pointer is probed with pmap_find_phys
 * before being dereferenced (the crash may have corrupted the list).
 */
static void
kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
	void (*printf_func)(const char *fmt, ...))
{
	vm_offset_t * kscan_addr = 0;
	kmod_info_t * k;
	kmod_reference_t * r;
	unsigned int i;
	int found_kmod = 0;
	// NOTE(review): stop_kmod is never assigned, so the message below
	// always prints 0 rather than the unmapped node — looks like it
	// was meant to be `k`; confirm before changing panic output.
	kmod_info_t * stop_kmod = 0;

	for (k = kmod; k; k = k->next) {
		// bail out if the next list node's page is not mapped
		if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
			(*printf_func)(" kmod scan stopped due to missing "
				"kmod page: %08x\n", stop_kmod);
			break;
		}
		if (!k->address) {
			continue; // skip fake entries for built-in kernel components
		}
		for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
			if ((*kscan_addr >= k->address) &&
				(*kscan_addr < (k->address + k->size))) {

				// print the section header once, lazily
				if (!found_kmod) {
					(*printf_func)(" Kernel loadable modules in backtrace "
						"(with dependencies):\n");
				}
				found_kmod = 1;
				(*printf_func)(" %s(%s)@0x%x\n",
					k->name, k->version, k->address);

				for (r = k->reference_list; r; r = r->next) {
					kmod_info_t * rinfo;

					// probe the reference node's page first
					if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
						(*printf_func)(" kmod dependency scan stopped "
							"due to missing dependency page: %08x\n", r);
						break;
					}

					rinfo = r->info;

					// ... then the referenced module's page
					if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
						(*printf_func)(" kmod dependency scan stopped "
							"due to missing kmod page: %08x\n", rinfo);
						break;
					}

					if (!rinfo->address) {
						continue; // skip fake entries for built-ins
					}

					(*printf_func)(" dependency: %s(%s)@0x%x\n",
						rinfo->name, rinfo->version, rinfo->address);
				}

				break; // only report this kmod for one backtrace address
			}
		}
	}

	return;
}
91447636
A
929
930void
931kmod_dump(vm_offset_t *addr, unsigned int cnt)
932{
933 kmod_dump_to(addr, cnt, &kdb_printf);
934}
935
936void
937kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
938{
939 kmod_dump_to(addr, cnt, &printf);
940}