/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * 1999 Mar 29 rsulack created.
 */

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>

#include <mach-o/mach_header.h>

#include <mach_host.h>

/*
 * XXX headers for which prototypes should be in a common include file;
 * XXX see libsa/kext.cpp for why.
 */
kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
kern_return_t kmod_destroy_internal(kmod_t id);
kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
    mach_msg_type_number_t *dataCount);
kern_return_t kmod_retain(kmod_t id);
kern_return_t kmod_release(kmod_t id);
kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
    mach_msg_type_number_t *kmodCount);
extern void kdb_printf(const char *fmt, ...);

#define WRITE_PROTECT_MODULE_TEXT (0)

kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)

typedef struct cmd_queue_entry {
    queue_chain_t    links;
    vm_address_t     data;
    vm_size_t        size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

void
kmod_init(void)
{
    simple_lock_init(&kmod_lock, 0);
    simple_lock_init(&kmod_queue_lock, 0);
    queue_init(&kmod_cmd_queue);
}

kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = kmod;

    while (k) {
        if (k->id == id) break;
        k = k->next;
    }
    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = kmod;

    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }
    return k;
}

// Returns a heap copy of the kmod_info_t so the caller is safe against
// the module being unloaded after the lock is dropped.
kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (!k) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (!k) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}

// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree(e, sizeof(struct cmd_queue_entry));
        return rc;
    }

    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}

kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    return kmod_queue_cmd((vm_address_t)data, size);
}

kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c = dependencies;
    int i, count = 0;

    if (c) {
        while (*c) {
            count++; c++;
        }
    }

    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    return kmod_queue_cmd((vm_address_t)data, size);
}

kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;

    // add sizeof(int) for the type field
    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    // bcopy takes (src, dst, len): fill the packet from the caller's data
    bcopy(generic_data, data->data, size);

    return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
}

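/*
 * Usage sketch (hypothetical, for illustration only): any kernel
 * subsystem can push an arbitrary typed payload to the user-level
 * loader, e.g.
 *
 *     struct my_note { int value; } note = { 42 };
 *     kmod_send_generic(MY_PACKET_TYPE, &note, sizeof(note));
 *
 * MY_PACKET_TYPE and my_note are invented names, not part of this API.
 */
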
extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;

/*
 * Operates only on 32 bit mach headers on behalf of the kernel module
 * loader if WRITE_PROTECT_MODULE_TEXT is defined.
 */
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t     isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
    if (!isPrelink) {
        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
                info->address + info->size, VM_PROT_DEFAULT, FALSE);
        if (rc != KERN_SUCCESS) {
            return rc;
        }
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header *) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr), trunc_page(sect->addr + sect->size),
                VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        if (!isPrelink) {
            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                    info->address + info->size, FALSE);
            assert(rc == KERN_SUCCESS);
        }
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);

    return KERN_SUCCESS;
}

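/*
 * A worked example of the page-alignment check above (illustrative
 * values, assuming PAGE_SIZE == 0x1000): an info block at address
 * 0x00200000 with hdr_size 0x1000 passes, because
 * (0x00200000 | 0x1000) & 0xFFF == 0. An hdr_size of 0x1080 would
 * fail: OR-ing the two values preserves any low-order bit set in
 * either one, so a single test catches misalignment in both fields.
 */
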
kern_return_t
kmod_create(host_priv_t host_priv,
        vm_address_t addr,
        kmod_t *id)
{
    kmod_info_t *info = (kmod_info_t *)addr;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}

kern_return_t
kmod_create_fake_with_address(const char *name, const char *version,
        vm_address_t address, vm_size_t size,
        int * return_id)
{
    kmod_info_t *info;

    if (!name || ! version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {
        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));    //NIK fixed this part
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = address;
    info->size = size;
    info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    if (return_id)
        *return_id = info->id;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}

kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    return kmod_create_fake_with_address(name, version, 0, 0, NULL);
}

static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (!fake && (k->reference_count != 0)) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree(t, sizeof(struct kmod_reference));
            }

            if (!fake) {
                printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                    k->name, k->id, k->size / PAGE_SIZE, k->address);

                if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK))) {
                    vm_offset_t virt = ml_static_ptovirt(k->address);
                    if (virt) {
                        ml_static_mfree(virt, k->size);
                    }
                } else {
                    rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                            k->address + k->size, FALSE);
                    assert(rc == KERN_SUCCESS);

                    rc = vm_deallocate(kernel_map, k->address, k->size);
                    assert(rc == KERN_SUCCESS);
                }
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}

kern_return_t
kmod_destroy_internal(kmod_t id)
{
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
        kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy_fake(kmod_t id)
{
    return _kmod_destroy_internal(id, TRUE);
}

kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)(kmod_info_t *, void *);
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_offset_t map_addr;
        vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
        user_data = CAST_DOWN(void *, map_addr);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}

/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree(r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}

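/*
 * Note on the packed id (a sketch based on the KMOD_UNPACK_* macros
 * used above, which mach/kmod.h defines alongside a KMOD_PACK_IDS
 * macro): a single kmod_t carries both ends of the dependency edge,
 * the "from" module in one half and the "to" module in the other, so
 * one retain/release call can name the whole edge, e.g.
 *
 *     kmod_retain(KMOD_PACK_IDS(driver_id, library_id));
 *
 * driver_id and library_id are illustrative variable names.
 */
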
kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t * p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {    // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree(r, sizeof(struct kmod_reference));
            return KERN_SUCCESS;
        }
        p = r;
        r = r->next;
    }

finish:
    simple_unlock(&kmod_lock);

    return rc;
}

kern_return_t
kmod_control(host_priv_t host_priv,
        kmod_t id,
        kmod_control_flavor_t flavor,
        kmod_args_t *data,
        mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD:
        {
            cmd_queue_entry_t *e;

            /*
             * Throw away any data the user may have sent in error.
             * We must do this because we are likely to return data
             * for these commands (thus causing a leak of whatever
             * data the user sent us in error).
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = 0;
                *dataCount = 0;
            }

            simple_lock(&kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                wait_result_t res;

                res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
                        &kmod_queue_lock, THREAD_ABORTSAFE);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    simple_unlock(&kmod_queue_lock);
                    assert(res == THREAD_INTERRUPTED);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            simple_unlock(&kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
                (vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                simple_lock(&kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                simple_unlock(&kmod_queue_lock);
                *data = 0;
                *dataCount = 0;
                break;
            }

            *dataCount = e->size;

            kfree(e, sizeof(struct cmd_queue_entry));

            break;
        }

      default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}

kern_return_t
kmod_get_info(__unused host_t host,
        kmod_info_array_t *kmods,
        mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data, retry if kmod's size has changed (grown)
    // the copied out data is tweaked to figure what's what at user level
    // change the copied out k->next pointers to point to themselves
    // change the k->reference into a count, tack the references on
    // the end of the data packet in the order they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}

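/*
 * Sketch of how a user-level caller is expected to walk the flattened
 * array (illustrative, based on the tweaks described above): each
 * copied kmod_info_t keeps a nonzero next field except the last one,
 * and its reference_list field holds the count of kmod_reference_t
 * records appended after the array, e.g.
 *
 *     kmod_info_t *ki = (kmod_info_t *)kmods;
 *     while (1) {
 *         int refs = (int)(uintptr_t)ki->reference_list;
 *         // ... consume ki and its refs records ...
 *         if (!ki->next) break;
 *         ki++;
 *     }
 */
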
/*
 * Operates only on 32 bit mach headers on behalf of the kernel module loader.
 */
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine *routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, /*(char *)*/ sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

/*
 * Operates only on 32 bit mach headers on behalf of the kernel module loader.
 */
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

/*
 * Operates only on 32 bit mach headers on behalf of the kernel module loader.
 */
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

kern_return_t
kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

static void
kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
        void (*printf_func)(const char *fmt, ...))
{
    vm_offset_t      * kscan_addr = 0;
    kmod_info_t      * k;
    kmod_reference_t * r;
    unsigned int       i;
    int                found_kmod = 0;
    kmod_info_t      * stop_kmod = 0;

    for (k = kmod; k; k = k->next) {
        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
            (*printf_func)("  kmod scan stopped due to missing "
                "kmod page: %08x\n", stop_kmod);
            break;
        }
        if (!k->address) {
            continue; // skip fake entries for built-in kernel components
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    (*printf_func)("  Kernel loadable modules in backtrace "
                        "(with dependencies):\n");
                }
                found_kmod = 1;
                (*printf_func)("    %s(%s)@0x%x\n",
                    k->name, k->version, k->address);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
                        (*printf_func)("      kmod dependency scan stopped "
                            "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
                        (*printf_func)("      kmod dependency scan stopped "
                            "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    if (!rinfo->address) {
                        continue; // skip fake entries for built-ins
                    }

                    (*printf_func)("      dependency: %s(%s)@0x%x\n",
                        rinfo->name, rinfo->version, rinfo->address);
                }

                break; // only report this kmod for one backtrace address
            }
        }
    }
}

void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &kdb_printf);
}

void
kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &printf);
}