/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */
#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <kern/thread.h>

#include <mach_host.h>
kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)

typedef struct cmd_queue_entry {
    queue_chain_t   links;      // must come first: entries are cast to queue_entry_t
    vm_size_t       size;       // length of the command packet
    vm_address_t    data;       // kernel buffer holding the packet
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;
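
/*
 * Locking: kmod_lock protects the list of loaded kmods (headed by
 * `kmod`) and kmod_index; kmod_queue_lock protects kmod_cmd_queue,
 * the queue of command packets handed out through kmod_control().
 */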
void
kmod_init()
{
    simple_lock_init(&kmod_lock, ETAP_MISC_Q);
    simple_lock_init(&kmod_queue_lock, ETAP_MISC_Q);
    queue_init(&kmod_cmd_queue);
}
kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = kmod;

    while (k) {
        if (k->id == id) break;
        k = k->next;
    }
    return k;
}
kmod_info_t *
kmod_lookupbyname(char * name)
{
    kmod_info_t *k = kmod;

    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }
    return k;
}
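
/*
 * Note that neither lookup routine takes kmod_lock; callers are
 * expected to hold it, as kmod_create_internal() and
 * kmod_start_or_stop() below do.
 */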
// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}
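
/*
 * kmod_queue_cmd() copies the packet into newly allocated kernel
 * memory, so the caller keeps ownership of the buffer it passes in.
 * The wakeup pairs with the assert_wait() in the KMOD_CNTL_GET_CMD
 * branch of kmod_control().
 */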
kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    return kmod_queue_cmd((vm_address_t)data, size);
}
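
/*
 * Illustrative sketch only (the bundle name below is hypothetical):
 * a kernel subsystem that needs a driver loaded could queue a request
 * by name, to be serviced by whoever is blocked in
 * kmod_control(KMOD_CNTL_GET_CMD):
 *
 *    rc = kmod_load_extension("com.example.driver.Foo");
 */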
kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c = dependencies;
    int i, count = 0;

    if (c) {
        while (*c) {
            count++; c++;
        }
    }

    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    return kmod_queue_cmd((vm_address_t)data, size);
}
kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    // bcopy(src, dst, len): copy the caller's payload into the packet
    bcopy(generic_data, data->data, size);

    return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
}
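
/*
 * A generic packet is an int `type` followed by `size` bytes of
 * payload, which is why sizeof(int) is added to both the allocation
 * and the queued length above.
 */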
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
                     info->address + info->size, VM_PROT_DEFAULT, FALSE);
    if (rc != KERN_SUCCESS) {
        return rc;
    }

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                           info->address + info->size, FALSE);
        assert(rc == KERN_SUCCESS);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
           info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);

    return KERN_SUCCESS;
}
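
/*
 * MIG entry point: the host_priv port gates module creation; the real
 * work happens in kmod_create_internal() above.
 */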
kern_return_t
kmod_create(host_priv_t host_priv,
            kmod_info_t *info,
            kmod_t *id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}
kern_return_t
kmod_create_fake(char *name, char *version)
{
    kmod_info_t *info;

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, KMOD_MAX_NAME);
    bcopy(version, info->version, KMOD_MAX_NAME);
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = info->size = info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}
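
/*
 * A kmod may only be destroyed once its reference count has dropped
 * to zero: its reference list is freed, its pages are unwired, and
 * its memory is returned to the kernel map.
 */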
kern_return_t
kmod_destroy_internal(kmod_t id)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (k->reference_count != 0) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree((vm_offset_t)t, sizeof(struct kmod_reference));
            }

            printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                   k->name, k->id, k->size / PAGE_SIZE, k->address);

            rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                               k->address + k->size, FALSE);
            assert(rc == KERN_SUCCESS);

            rc = vm_deallocate(kernel_map, k->address, k->size);
            assert(rc == KERN_SUCCESS);

            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}
kern_return_t
kmod_destroy(host_priv_t host_priv,
             kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_destroy_internal(id);
}
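
/*
 * Starting or stopping a kmod calls its start or stop entry point
 * with any user-supplied argument data copied out of the incoming
 * vm_map_copy.  A kmod that still has references cannot be started
 * or stopped.
 */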
kern_return_t
kmod_start_or_stop(kmod_t id,
                   int start,
                   kmod_args_t *data,
                   mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)();
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    // call kmod entry point

    if (data && dataCount && *data && *dataCount) {
        vm_map_copyout(kernel_map, (vm_offset_t *)&user_data, (vm_map_copy_t)*data);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}
/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
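
/*
 * For retain and release the single kmod_t argument packs two module
 * ids; KMOD_UNPACK_TO_ID() and KMOD_UNPACK_FROM_ID() recover the
 * module being referenced ("to") and the module holding the
 * reference ("from").
 */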
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree((vm_offset_t)r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}
kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t * p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        rc = KERN_INVALID_ARGUMENT;
        simple_unlock(&kmod_lock);
        return rc;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {    // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree((vm_offset_t)r, sizeof(struct kmod_reference));
            return KERN_SUCCESS;
        }
        p = r;
        r = r->next;
    }

    simple_unlock(&kmod_lock);

    return rc;
}
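
/*
 * kmod_control() is the user-visible multiplexer: START/STOP dispatch
 * to kmod_start_or_stop(), RETAIN/RELEASE manage inter-kmod
 * references, and GET_CMD blocks until a queued command packet can be
 * handed out.
 */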
kern_return_t
kmod_control(host_priv_t host_priv,
             kmod_t id,
             kmod_control_flavor_t flavor,
             kmod_args_t *data,
             mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                                    data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD:
        {
            cmd_queue_entry_t *e;

            /*
             * Throw away any data the user may have sent in error.
             * We must do this, because we are likely to return
             * some data for these commands (thus causing a leak of
             * whatever data the user sent us in error).
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = 0;
                *dataCount = 0;
            }

            simple_lock(&kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                assert_wait((event_t)&kmod_cmd_queue, THREAD_ABORTSAFE);
                simple_unlock(&kmod_queue_lock);
                thread_block((void(*)(void))0);
                simple_lock(&kmod_queue_lock);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    simple_unlock(&kmod_queue_lock);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            simple_unlock(&kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, e->data, e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                simple_lock(&kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                simple_unlock(&kmod_queue_lock);
                *dataCount = 0;
                return rc;
            }

            *dataCount = e->size;

            kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));

            break;
        }

      default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}
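
/*
 * kmod_get_info() returns a flattened snapshot: an array of
 * kmod_info_t records (a non-zero `next` field means another record
 * follows), each with its reference_list field replaced by a count,
 * followed by all of the kmod_reference_t records in the same order.
 * User level re-links the structures from this layout.
 */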
kern_return_t
kmod_get_info(host_t host,
              kmod_info_array_t *kmods,
              mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data = 0;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data, retry if kmod's size has changed (grown)
    // the copied out data is tweaked so user level can figure out what's what
    // change the copied out k->next pointers to point to themselves
    // change the k->reference into a count, tack the references on
    // the end of the data packet in the order they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
#include <mach-o/loader.h>

extern void *getsectdatafromheader(struct mach_header *mhp,
                                   const char *segname,
                                   const char *sectname,
                                   int *size);
kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine *routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}
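
/*
 * C++ support: a kmod's static constructors and destructors are
 * collected in the __constructor and __destructor sections of its
 * __TEXT segment; the two wrappers below run them at load and unload
 * time.
 */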
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}
kern_return_t
kmod_default_start(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}
kern_return_t
kmod_default_stop(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}
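
/*
 * kmod_dump() runs on the panic path.  It tags (by overwriting
 * info_version) every kmod that shows up in the backtrace, then every
 * kmod those reference, prints both sets, and finally restores the
 * info_version fields.  Every pointer is probed with pmap_extract()
 * before use, since the lists may be damaged by the time we panic.
 */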
#define IS_IN_BACKTRACE 0xdeadbeef
#define IS_A_DEPENDENCY 0xbeefdead

void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_info_t *k;
    kmod_reference_t *r;
    int i, found_one = 0;

    // find backtrace addresses that are inside a kmod
    for (i = 0; i < cnt; i++, addr++) {
        k = kmod;
        while (k) {
            // XXX - validate page(s) that k points to
            if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
                printf("kmod scan stopped due to missing page: %08X\n", k);
                break;
            }
            if ((*addr >= k->address) && (*addr < (k->address + k->size))) {
                // got one, blast info_version, we don't need it at this point
                k->info_version = IS_IN_BACKTRACE;
                found_one++;
                break;
            }
            k = k->next;
        }
    }
    if (!found_one) return;

    printf("kernel modules in backtrace: ");
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf("kmod scan stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_IN_BACKTRACE) {
            printf("%s(%s)@0x%x ", k->name, k->version, k->address);
        }
        k = k->next;
    }
    printf("\n");

    // look for dependencies
    k = kmod; found_one = 0;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf("kmod dependency scan stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_IN_BACKTRACE) {
            r = k->reference_list;
            while (r) {
                // XXX - validate page(s) that r and r->info point to
                if (pmap_extract(kernel_pmap, (vm_offset_t)r) == 0) {    /* Exit loop if page not mapped */
                    printf("kmod validation scan stopped due to missing page: %08X\n", r);
                    break;
                }
                if (r->info->info_version != IS_IN_BACKTRACE) {
                    r->info->info_version = IS_A_DEPENDENCY;
                    found_one++;
                }
                r = r->next;
            }
        }
        k = k->next;
    }
    if (!found_one) goto cleanup;

    printf("kernel module dependencies: ");
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf("kmod dependency print stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_A_DEPENDENCY) {
            printf("%s(%s)@0x%x ", k->name, k->version, k->address);
        }
        k = k->next;
    }
    printf("\n");

cleanup:
    // in case we double panic
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf("kmod dump cleanup stopped due to missing page: %08X\n", k);
            break;
        }
        k->info_version = KMOD_INFO_VERSION;
        k = k->next;
    }
}