/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */
#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <kern/thread.h>

#include <mach_host.h>
kmod_info_t *kmod = 0;
static int kmod_index = 1;
decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)
typedef struct cmd_queue_entry {
    queue_chain_t    links;    // queue linkage; entries are cast to queue_entry_t below
    vm_address_t     data;
    vm_size_t        size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;
void
kmod_init()
{
    simple_lock_init(&kmod_lock, ETAP_MISC_Q);
    simple_lock_init(&kmod_queue_lock, ETAP_MISC_Q);
    queue_init(&kmod_cmd_queue);
}
kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}
kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}
// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}
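/*
 * A rough sketch of the intended round trip (user-level side is a
 * hypothetical illustration, not code from this file): kmod_queue_cmd()
 * is the producer, and a user-level loader daemon is the consumer,
 * draining the queue through kmod_control() with KMOD_CNTL_GET_CMD,
 * which blocks on kmod_cmd_queue until thread_wakeup_one() above fires.
 *
 *     kmod_args_t data = 0;
 *     mach_msg_type_number_t dataCount = 0;
 *     kmod_control(host_priv, 0, KMOD_CNTL_GET_CMD, &data, &dataCount);
 *     // the first int of the returned packet is its type, e.g.
 *     // KMOD_LOAD_EXTENSION_PACKET, followed by the payload
 */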
kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    return kmod_queue_cmd((vm_address_t)data, size);
}
kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c = dependencies;
    int i, count = 0;

    if (c) {
        while (*c) {
            count++; c++;
        }
    }

    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i=0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    return kmod_queue_cmd((vm_address_t)data, size);
}
kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;

    // the packet is one int of type followed by the payload
    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    bcopy(generic_data, data->data, size);    // copy the payload into the packet (bcopy is src, dst)

    return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
}
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
        info->address + info->size, VM_PROT_DEFAULT, FALSE);
    if (rc != KERN_SUCCESS) {
        return rc;
    }

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
            info->address + info->size, FALSE);
        assert(rc == KERN_SUCCESS);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);

    return KERN_SUCCESS;
}
kern_return_t
kmod_create(host_priv_t host_priv,
        kmod_info_t *info,
        kmod_t *id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}
kern_return_t
kmod_create_fake(char *name, char *version)
{
    kmod_info_t *info;

    if (!name || ! version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {
        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));  //NIK fixed this part
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = info->size = info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}
kern_return_t
kmod_destroy_internal(kmod_t id)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (k->reference_count != 0) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree((vm_offset_t)t, sizeof(struct kmod_reference));
            }

            printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                k->name, k->id, k->size / PAGE_SIZE, k->address);

            rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                k->address + k->size, FALSE);
            assert(rc == KERN_SUCCESS);

            rc = vm_deallocate(kernel_map, k->address, k->size);
            assert(rc == KERN_SUCCESS);

            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}
kern_return_t
kmod_destroy(host_priv_t host_priv,
        kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_destroy_internal(id);
}
kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)();
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    // call kmod entry point

    if (data && dataCount && *data && *dataCount) {
        vm_map_copyout(kernel_map, (vm_offset_t *)&user_data, (vm_map_copy_t)*data);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}
/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
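/*
 * Note: for retain/release the kmod_t argument is a packed (from, to)
 * id pair, unpacked below via KMOD_UNPACK_FROM_ID()/KMOD_UNPACK_TO_ID().
 * A sketch, assuming the companion packing macro from mach/kmod.h:
 *
 *     kmod_control(host_priv, KMOD_PACK_IDS(from_id, to_id),
 *                  KMOD_CNTL_RETAIN, &data, &dataCount);
 *
 * makes module "from_id" hold a reference on module "to_id".
 */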
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree((vm_offset_t)r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}
kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t *p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {    // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree((vm_offset_t)r, sizeof(struct kmod_reference));
            return KERN_SUCCESS;
        }
        p = r;
        r = r->next;
    }

finish:

    simple_unlock(&kmod_lock);

    return rc;
}
kern_return_t
kmod_control(host_priv_t host_priv,
        kmod_t id,
        kmod_control_flavor_t flavor,
        kmod_args_t *data,
        mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD: {

          cmd_queue_entry_t *e;

          /*
           * Throw away any data the user may have sent in error.
           * We must do this because we are likely to return data
           * for this command (which would otherwise leak whatever
           * data the user sent us in error).
           */
          if (*data && *dataCount) {
              vm_map_copy_discard(*data);
              *data = 0;
              *dataCount = 0;
          }

          simple_lock(&kmod_queue_lock);

          if (queue_empty(&kmod_cmd_queue)) {
              assert_wait((event_t)&kmod_cmd_queue, THREAD_ABORTSAFE);
              simple_unlock(&kmod_queue_lock);
              thread_block((void(*)(void))0);
              simple_lock(&kmod_queue_lock);
              if (queue_empty(&kmod_cmd_queue)) {
                  // we must have been interrupted!
                  simple_unlock(&kmod_queue_lock);
                  return KERN_ABORTED;
              }
          }
          e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

          simple_unlock(&kmod_queue_lock);

          rc = vm_map_copyin(kernel_map, e->data, e->size, TRUE, (vm_map_copy_t *)data);
          if (rc) {
              // put the command back so it isn't lost
              simple_lock(&kmod_queue_lock);
              enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
              simple_unlock(&kmod_queue_lock);
              *data = 0;
              *dataCount = 0;
              return rc;
          }

          *dataCount = e->size;

          kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));

          break;
      }

      default:
          rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}
kern_return_t
kmod_get_info(host_t host,
        kmod_info_array_t *kmods,
        mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy the kmod list into data, retrying if its size has changed (grown);
    // the copied-out data is tweaked so user level can figure out what's what:
    // the copied-out k->next pointers are made self-describing markers,
    // each k->reference_list is changed into a count, and the references are
    // tacked onto the end of the data packet in the order they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note: the last 'k' in the chain has its next == 0;
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
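/*
 * A sketch of how user level might walk the flattened copy produced
 * above (hypothetical code, for illustration only): a record's next
 * field is non-zero when another kmod_info_t follows, and its
 * reference_list field has been rewritten to the count of
 * kmod_reference_t entries appended after the info array.
 *
 *     kmod_info_t *ki = (kmod_info_t *)kmod_data;
 *     while (1) {
 *         int refs = (int)ki->reference_list;   // a count, not a pointer
 *         // ... use ki->name, ki->version, refs ...
 *         if (!ki->next) break;
 *         ki++;
 *     }
 */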
#include <mach-o/loader.h>

extern void *getsectdatafromheader(struct mach_header *mhp,
        const char *segname,
        const char *sectname,
        int *size);
kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine *routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *)getsectdatafromheader(header, SEG_TEXT, sectName, &size);
    if (!routines) return KERN_SUCCESS;

    // the section holds an array of function pointers; call each in turn
    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}
kern_return_t
kmod_default_start(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}
#define IS_IN_BACKTRACE 0xdeadbeef
#define IS_A_DEPENDENCY 0xbeefdead
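/*
 * kmod_dump() temporarily overloads each kmod's info_version field with
 * these sentinels to mark modules seen in a panic backtrace and their
 * dependencies; the cleanup pass at the end restores KMOD_INFO_VERSION
 * so the fields are sane if we panic again.
 */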
void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_info_t *k;
    kmod_reference_t *r;
    int i, found_one = 0;

    // find backtrace addresses that are inside a kmod
    for (i=0; i < cnt; i++, addr++) {
        k = kmod;
        while (k) {
            // XXX - validate page(s) that k points to
            if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
                printf(" kmod scan stopped due to missing page: %08X\n", k);
                break;
            }
            if ((*addr >= k->address) && (*addr < (k->address + k->size))) {
                // got one; blast info_version, we don't need it at this point
                k->info_version = IS_IN_BACKTRACE;
                found_one++;
                break;
            }
            k = k->next;
        }
    }
    if (!found_one) return;

    printf(" Kernel loadable modules in backtrace:\n");
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf(" kmod scan stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_IN_BACKTRACE) {
            printf(" %s(%s)@0x%x\n", k->name, k->version, k->address);
        }
        k = k->next;
    }

    // look for dependencies
    k = kmod; found_one = 0;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf(" kmod dependency scan stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_IN_BACKTRACE) {
            r = k->reference_list;
            while (r) {
                // XXX - validate page(s) that r and r->info point to
                if (pmap_extract(kernel_pmap, (vm_offset_t)r) == 0) {    /* Exit loop if page not mapped */
                    printf(" kmod validation scan stopped due to missing page: %08X\n", r);
                    break;
                }
                if (r->info->info_version != IS_IN_BACKTRACE) {
                    r->info->info_version = IS_A_DEPENDENCY;
                    found_one++;
                }
                r = r->next;
            }
        }
        k = k->next;
    }
    if (!found_one) goto cleanup;

    printf(" Kernel loadable module dependencies:\n");
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf(" kmod dependency print stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_A_DEPENDENCY) {
            printf(" %s(%s)@0x%x\n", k->name, k->version, k->address);
        }
        k = k->next;
    }

cleanup:
    // restore info_version in case we double panic
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf(" kmod dump cleanup stopped due to missing page: %08X\n", k);
            break;
        }
        k->info_version = KMOD_INFO_VERSION;
        k = k->next;
    }
}