/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */
#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>

#include <mach-o/mach_header.h>

#include <mach_host.h>
/*
 * XXX headers for which prototypes should be in a common include file;
 * XXX see libsa/kext.cpp for why.
 */
kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
kern_return_t kmod_destroy_internal(kmod_t id);
kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
    mach_msg_type_number_t *dataCount);
kern_return_t kmod_retain(kmod_t id);
kern_return_t kmod_release(kmod_t id);
kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
    mach_msg_type_number_t *kmodCount);
extern void kdb_printf(const char *fmt, ...);
#define WRITE_PROTECT_MODULE_TEXT   (0)

kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)
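/*
 * Locking: kmod_lock guards the global kmod list and kmod_index;
 * kmod_queue_lock guards kmod_cmd_queue and the snapshots taken by the
 * *_locked lookup routines below.
 */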
typedef struct cmd_queue_entry {
    queue_chain_t   links;
    vm_address_t    data;
    vm_size_t       size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;
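/*
 * Commands queued here are drained by a user-level consumer through
 * kmod_control(..., KMOD_CNTL_GET_CMD, ...); kmod_queue_cmd() is the
 * producer side and wakes exactly one waiter per enqueue.
 */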
void
kmod_init(void)
{
    simple_lock_init(&kmod_lock, 0);
    simple_lock_init(&kmod_queue_lock, 0);
    queue_init(&kmod_cmd_queue);
}
kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = kmod;

    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}
kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = kmod;

    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}
kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}
kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}
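/*
 * The *_locked lookups return a kalloc'ed snapshot of the matching
 * kmod_info_t (or NULL); the caller owns the copy and must kfree() it
 * with sizeof(kmod_info_t).
 */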
// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree(e, sizeof(struct cmd_queue_entry));
        return rc;
    }

    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}
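/*
 * kmod_queue_cmd() copies the caller's buffer into a fresh kmem_alloc'ed
 * block, so the packet builders below remain responsible for the
 * kalloc'ed packets they pass in (hence the nocopy XXX above).
 */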
kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    return kmod_queue_cmd((vm_address_t)data, size);
}
kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    return kmod_queue_cmd((vm_address_t)data, size);
}
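/*
 * Packet layout: an int type, the module name, then count dependency
 * names of KMOD_MAX_NAME bytes each, terminated by an empty string
 * (data->dependencies[count][0] = 0), which is what the size formula
 * above accounts for.
 */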
kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    // bcopy(src, dst, len): copy the caller's payload into the packet
    bcopy(generic_data, data->data, size);

    return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
}
extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;

/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
 * if WRITE_PROTECT_MODULE_TEXT is defined.
 */
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));

    rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
            info->address + info->size, VM_PROT_DEFAULT, FALSE);
    if (rc != KERN_SUCCESS) {
        return rc;
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header *) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr), trunc_page(sect->addr + sect->size),
                VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);

        rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                info->address + info->size, FALSE);
        assert(rc == KERN_SUCCESS);

        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);

    return KERN_SUCCESS;
}
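/*
 * On success the module's pages (excluding its mach-o header, which
 * starts at info->address and runs for hdr_size bytes) are wired, the
 * module gets the next kmod_index id, and it is linked at the head of
 * the global kmod list.
 */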
kern_return_t
kmod_create(host_priv_t host_priv,
            vm_address_t addr,
            kmod_t *id)
{
    kmod_info_t *info = (kmod_info_t *)addr;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}
kern_return_t
kmod_create_fake_with_address(const char *name, const char *version,
                              vm_address_t address, vm_size_t size,
                              int * return_id)
{
    kmod_info_t *info;

    if (!name || !version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {
        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));  //NIK fixed this part
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = address;
    info->size = size;
    info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    if (return_id)
        *return_id = info->id;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}
kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    return kmod_create_fake_with_address(name, version, 0, 0, NULL);
}
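/*
 * Typical use is registering a built-in component so it shows up in the
 * kmod list without backing memory, e.g. (hypothetical bundle id):
 *
 *     kmod_create_fake("com.example.builtin", "1.0");
 *
 * The entry is created with reference_count = 1 above, so it can never
 * be started, stopped, or unloaded through the normal paths.
 */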
static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (!fake && (k->reference_count != 0)) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {   // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree(t, sizeof(struct kmod_reference));
            }

            if (!fake) {
                printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                    k->name, k->id, k->size / PAGE_SIZE, k->address);

                if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK))) {
                    vm_offset_t virt = ml_static_ptovirt(k->address);
                    if (virt) {
                        ml_static_mfree(virt, k->size);
                    }
                } else {
                    rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                            k->address + k->size, FALSE);
                    assert(rc == KERN_SUCCESS);

                    rc = vm_deallocate(kernel_map, k->address, k->size);
                    assert(rc == KERN_SUCCESS);
                }
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}
kern_return_t
kmod_destroy_internal(kmod_t id)
{
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
             kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy_fake(kmod_t id)
{
    return _kmod_destroy_internal(id, TRUE);
}
kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)(kmod_info_t *, void *);
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_offset_t map_addr;
        vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
        user_data = CAST_DOWN(void *, map_addr);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}
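/*
 * Any MIG out-of-line data handed to the entry point is copied out into
 * kernel_map, passed as user_data, and deallocated again after the call;
 * *data and *dataCount are always cleared before returning.
 */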
/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree(r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}
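/*
 * The kmod_t taken by retain/release is not a plain module id: it packs
 * the "from" and "to" module ids into one value, unpacked above with
 * KMOD_UNPACK_FROM_ID() and KMOD_UNPACK_TO_ID() (the matching
 * KMOD_PACK_IDS() macro lives in mach/kmod.h).
 */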
kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t * p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {   // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree(r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            goto finish;
        }
        p = r;
        r = r->next;
    }

    simple_unlock(&kmod_lock);

finish:

    return rc;
}
kern_return_t
kmod_control(host_priv_t host_priv,
             kmod_t id,
             kmod_control_flavor_t flavor,
             kmod_args_t *data,
             mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD:
        {
            cmd_queue_entry_t *e;

            /*
             * Throw away any data the user may have sent in error.
             * We must do this because we are about to return data for
             * this command, which would otherwise leak whatever data
             * the user sent us in error.
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = 0;
                *dataCount = 0;
            }

            simple_lock(&kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                wait_result_t res;

                res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
                        simple_lock_addr(kmod_queue_lock),
                        THREAD_ABORTSAFE);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    simple_unlock(&kmod_queue_lock);
                    assert(res == THREAD_INTERRUPTED);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            simple_unlock(&kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
                (vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                simple_lock(&kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                simple_unlock(&kmod_queue_lock);

                *data = 0;
                *dataCount = 0;

                return rc;
            }

            *dataCount = e->size;

            kfree(e, sizeof(struct cmd_queue_entry));

            break;
        }

      default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}
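/*
 * KMOD_CNTL_GET_CMD blocks (ABORTSAFE) until kmod_queue_cmd() posts a
 * packet; on a failed copyin the entry is pushed back onto the head of
 * the queue so the command is not lost.
 */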
kern_return_t
kmod_get_info(__unused host_t host,
              kmod_info_array_t *kmods,
              mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data, retry if kmod's size has changed (grown);
    // the copied out data is tweaked so user level can figure what's what:
    // change the copied out k->next pointers to point to themselves,
    // change the k->reference into a count, and tack the references on
    // the end of the data packet in the order they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0;
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
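/*
 * Note that *kmodCount is the size of the copied-out buffer in bytes,
 * not the number of entries; user code walks the sizeof(kmod_info_t)
 * records and then the reference records appended after them.
 */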
/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader.
 */
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine *   routines;
    int     size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, /*(char *)*/ sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}
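/*
 * The C++ runtime hooks below rely on this: the "__constructor" and
 * "__destructor" sections in SEG_TEXT are arrays of void (*)(void)
 * pointers, invoked in order at module init and teardown.
 */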
/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader.
 */
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader.
 */
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}
kern_return_t
kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}
static void
kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
             void (*printf_func)(const char *fmt, ...))
{
    vm_offset_t * kscan_addr = 0;
    kmod_info_t * k;
    kmod_reference_t * r;
    unsigned int i;
    int found_kmod = 0;
    kmod_info_t * stop_kmod = 0;

    for (k = kmod; k; k = k->next) {
        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
            (*printf_func)("         kmod scan stopped due to missing "
                "kmod page: %08x\n", stop_kmod);
            break;
        }
        if (!k->address) {
            continue; // skip fake entries for built-in kernel components
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    (*printf_func)("      Kernel loadable modules in backtrace "
                        "(with dependencies):\n");
                    found_kmod = 1;
                }
                (*printf_func)("         %s(%s)@0x%x\n",
                    k->name, k->version, k->address);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
                        (*printf_func)("            kmod dependency scan stopped "
                            "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
                        (*printf_func)("            kmod dependency scan stopped "
                            "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    if (!rinfo->address) {
                        continue; // skip fake entries for built-ins
                    }

                    (*printf_func)("            dependency: %s(%s)@0x%x\n",
                        rinfo->name, rinfo->version, rinfo->address);
                }

                break;  // only report this kmod for one backtrace address
            }
        }
    }

    return;
}
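/*
 * kmod_dump() routes through kdb_printf for the debugger/panic path;
 * kmod_dump_log() routes through printf for the system log. Both probe
 * each kmod page with pmap_find_phys() first so a damaged list cannot
 * fault during a panic dump.
 */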
void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &kdb_printf);
}

void
kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &printf);
}