/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */
#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>

#include <mach-o/mach_header.h>

#include <mach_host.h>
/*
 * XXX headers for which prototypes should be in a common include file;
 * XXX see libsa/kext.cpp for why.
 */
kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
kern_return_t kmod_destroy_internal(kmod_t id);
kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
                                 mach_msg_type_number_t *dataCount);
kern_return_t kmod_retain(kmod_t id);
kern_return_t kmod_release(kmod_t id);
kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
                            mach_msg_type_number_t *kmodCount);
extern void kdb_printf(const char *fmt, ...);
#define WRITE_PROTECT_MODULE_TEXT   (0)

kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)
// command packet queued for the user-level loader to drain via
// kmod_control(KMOD_CNTL_GET_CMD)
typedef struct cmd_queue_entry {
    queue_chain_t    links;
    vm_address_t     data;
    vm_size_t        size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;
void
kmod_init(void)
{
    simple_lock_init(&kmod_lock, 0);
    simple_lock_init(&kmod_queue_lock, 0);
    queue_init(&kmod_cmd_queue);
}
kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    for (k = kmod; k; k = k->next) {
        if (k->id == id) break;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = 0;

    for (k = kmod; k; k = k->next) {
        if (!strcmp(k->name, name)) break;
    }

    return k;
}
kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}
kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}
// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree(e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}
kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    return kmod_queue_cmd((vm_address_t)data, size);
}
kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c = dependencies;
    int i, count = 0;

    // count the dependencies so the command packet can be sized
    if (c) {
        while (*c) {
            count++; c++;
        }
    }

    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    return kmod_queue_cmd((vm_address_t)data, size);
}
kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    // copy the caller's payload into the command packet
    bcopy(generic_data, data->data, size);

    return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
}
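
/*
 * Example (illustrative sketch): the kmod_load_extension*() and
 * kmod_send_generic() helpers above do not load anything themselves; they
 * only build a command packet and park it on kmod_cmd_queue, where the
 * user-level kext loader drains it via kmod_control(..., KMOD_CNTL_GET_CMD,
 * ...).  A kernel-side caller that wants a driver loaded would do no more
 * than:
 *
 *     kern_return_t kr = kmod_load_extension("com.example.driver");
 *     if (kr == KERN_RESOURCE_SHORTAGE) {
 *         // no memory for the command packet; nothing was queued
 *     }
 *
 * "com.example.driver" is a made-up bundle identifier used only for
 * illustration; the name passed is the bundle identifier of the kext to
 * load.
 */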
extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader
 * if WRITE_PROTECT_MODULE_TEXT is defined.
 */
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
    if (!isPrelink) {
        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
                         info->address + info->size, VM_PROT_DEFAULT, FALSE);
        if (rc != KERN_SUCCESS) {
            return rc;
        }
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header *) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr), trunc_page(sect->addr + sect->size),
                                  VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        if (!isPrelink) {
            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                               info->address + info->size, FALSE);
            assert(rc == KERN_SUCCESS);
        }
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
           info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);

    return KERN_SUCCESS;
}
kern_return_t
kmod_create(host_priv_t host_priv,
            vm_address_t addr,
            kmod_t *id)
{
    kmod_info_t *info = (kmod_info_t *)addr;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}
kern_return_t
kmod_create_fake_with_address(const char *name, const char *version,
                              vm_address_t address, vm_size_t size,
                              kmod_t *return_id)
{
    kmod_info_t *info;

    if (!name || ! version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {

        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));  //NIK fixed this part
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = address;
    info->size = size;
    info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    if (return_id)
        *return_id = info->id;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}
kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    return kmod_create_fake_with_address(name, version, 0, 0, NULL);
}
static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (!fake && (k->reference_count != 0)) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {   // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree(t, sizeof(struct kmod_reference));
            }

            if (!fake) {
                printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                       k->name, k->id, k->size / PAGE_SIZE, k->address);

                if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK))) {
                    vm_offset_t virt = ml_static_ptovirt(k->address);
                    if (virt) {
                        ml_static_mfree(virt, k->size);
                    }
                } else {
                    rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                                       k->address + k->size, FALSE);
                    assert(rc == KERN_SUCCESS);

                    rc = vm_deallocate(kernel_map, k->address, k->size);
                    assert(rc == KERN_SUCCESS);
                }
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}
kern_return_t
kmod_destroy_internal(kmod_t id)
{
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
             kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy_fake(kmod_t id)
{
    return _kmod_destroy_internal(id, TRUE);
}
kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)(kmod_info_t *, void *);
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_offset_t map_addr;
        vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
        user_data = CAST_DOWN(void *, map_addr);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}
/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
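
/*
 * Illustrative sketch: the single kmod_t argument to kmod_retain() and
 * kmod_release() encodes two module ids, the module holding the reference
 * ("from") and the module being referenced ("to"); they are unpacked below
 * with KMOD_UNPACK_FROM_ID() and KMOD_UNPACK_TO_ID().  Assuming the matching
 * KMOD_PACK_IDS() macro from <mach/kmod.h> (and its from/to argument order,
 * which is an assumption here), a caller that wants module from_id to hold
 * a reference on module to_id would issue:
 *
 *     kern_return_t kr = kmod_retain(KMOD_PACK_IDS(from_id, to_id));
 *     ...
 *     kr = kmod_release(KMOD_PACK_IDS(from_id, to_id));
 */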
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree(r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}
kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t * p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {   // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree(r, sizeof(struct kmod_reference));
            return KERN_SUCCESS;
        }
        p = r;
        r = r->next;
    }

finish:

    simple_unlock(&kmod_lock);

    return rc;
}
kern_return_t
kmod_control(host_priv_t host_priv,
             kmod_t id,
             kmod_control_flavor_t flavor,
             kmod_args_t *data,
             mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                                    data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD:
        {
            cmd_queue_entry_t *e;
            wait_result_t res;

            /*
             * Throw away any data the user may have sent in error.
             * We must do this because we are about to return data of our
             * own for this command; whatever the user sent would otherwise
             * be leaked.
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = 0;
                *dataCount = 0;
            }

            simple_lock(&kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
                                               simple_lock_addr(kmod_queue_lock),
                                               THREAD_ABORTSAFE);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    simple_unlock(&kmod_queue_lock);
                    assert(res == THREAD_INTERRUPTED);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            simple_unlock(&kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
                               (vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                // copyin failed; put the command back so it is not lost
                simple_lock(&kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                simple_unlock(&kmod_queue_lock);
                *data = 0;
                *dataCount = 0;
                return rc;
            }
            *dataCount = e->size;

            kfree(e, sizeof(struct cmd_queue_entry));

            break;
        }

      default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}
kern_return_t
kmod_get_info(__unused host_t host,
              kmod_info_array_t *kmods,
              mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data = 0;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

retry:
    size = 0;

    simple_lock(&kmod_lock);
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data; retry if kmod's size has changed (grown)
    // the copied out data is tweaked so user level can figure out what's what:
    // the copied out k->next pointers are changed to point to themselves,
    // k->reference_list is changed into a count, and the references are tacked
    // onto the end of the data packet in the order they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
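
/*
 * Illustrative sketch of the consumer side: user-level code receiving the
 * buffer from kmod_get_info() usually treats it as a contiguous array of
 * kmod_info_t records, where a non-zero next field means "another record
 * follows" and reference_list has been overwritten with the number of
 * kmod_reference_t records appended after the array.  Roughly:
 *
 *     kmod_info_t *ki = (kmod_info_t *)kmod_data;  // buffer returned in *kmods
 *     while (ki) {
 *         unsigned refs = (unsigned)(uintptr_t)ki->reference_list;
 *         printf("%s (%s): %u references\n", ki->name, ki->version, refs);
 *         ki = ki->next ? ki + 1 : 0;              // records are contiguous
 *     }
 *
 * kmod_data is a hypothetical name for the mapped copy of the *kmods
 * out-parameter.
 */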
/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader.
 */
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine *routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, /*(char *)*/ sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}
/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader.
 */
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

/*
 * Operates only on 32 bit Mach headers on behalf of the kernel module loader.
 */
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}
kern_return_t
kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}
void
kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
             void (*printf_func)(const char *fmt, ...))
{
    vm_offset_t * kscan_addr = 0;
    kmod_info_t * k;
    kmod_reference_t * r;
    unsigned int i;
    int found_kmod = 0;
    kmod_info_t * stop_kmod = 0;

    for (k = kmod; k; k = k->next) {
        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
            (*printf_func)("      kmod scan stopped due to missing "
                "kmod page: %08x\n", stop_kmod);
            break;
        }
        if (!k->address) {
            continue; // skip fake entries for built-in kernel components
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    (*printf_func)("      Kernel loadable modules in backtrace "
                        "(with dependencies):\n");
                    found_kmod = 1;
                }
                (*printf_func)("         %s(%s)@0x%x\n",
                    k->name, k->version, k->address);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
                        (*printf_func)("            kmod dependency scan stopped "
                            "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
                        (*printf_func)("            kmod dependency scan stopped "
                            "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    if (!rinfo->address) {
                        continue; // skip fake entries for built-ins
                    }

                    (*printf_func)("            dependency: %s(%s)@0x%x\n",
                        rinfo->name, rinfo->version, rinfo->address);
                }

                break;  // only report this kmod for one backtrace address
            }
        }
    }

    return;
}
void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &kdb_printf);
}

void
kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &printf);
}