/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */
#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <kern/thread.h>
#include <mach-o/mach_header.h>

#include <mach_host.h>
#define WRITE_PROTECT_MODULE_TEXT   (0)
kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)
typedef struct cmd_queue_entry {
    queue_chain_t    links;    // queue linkage; kept first so an entry can be cast to queue_entry_t
    vm_address_t     data;     // kernel buffer holding the command packet
    vm_size_t        size;     // size of that buffer in bytes
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;
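/*
 * One-time initialization of the kmod subsystem's locks and command
 * queue. (The function name below is reconstructed; the extraction
 * dropped the signature, but the body matches the usual kmod_init().)
 */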
void
kmod_init()
{
    simple_lock_init(&kmod_lock, ETAP_MISC_Q);
    simple_lock_init(&kmod_queue_lock, ETAP_MISC_Q);
    queue_init(&kmod_cmd_queue);
}
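/*
 * Unlocked lookups: both walkers scan the global kmod list without
 * taking kmod_lock themselves, so callers are expected to serialize
 * access (or to use the *_locked snapshot variants further below).
 */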
kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}
kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}
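/*
 * The *_locked variants return a kalloc()ed snapshot of the matching
 * kmod_info_t, copied while holding kmod_queue_lock; the caller owns
 * (and must kfree) the copy. They return 0 if the id/name is unknown.
 */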
kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree((vm_offset_t)kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}
kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = 0;
    kmod_info_t *kc = 0;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    simple_lock(&kmod_queue_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }
    simple_unlock(&kmod_queue_lock);

    if (k == 0) {
        kfree((vm_offset_t)kc, sizeof(kmod_info_t));
        kc = 0;
    }
    return kc;
}
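/*
 * kmod_queue_cmd copies a command packet into freshly kmem_alloc()ed
 * kernel memory, queues it on kmod_cmd_queue, and wakes one thread
 * blocked in KMOD_CNTL_GET_CMD (historically, the user-space kext
 * loading daemon).
 */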
// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}
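/*
 * The two loaders below don't load anything in the kernel themselves;
 * they build a request packet naming the extension (and, optionally,
 * its dependencies) and queue it for user space to act on.
 */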
kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    return kmod_queue_cmd((vm_address_t)data, size);
}
kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    return kmod_queue_cmd((vm_address_t)data, size);
}
kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    // bcopy is (src, dst, len); the extracted source had these two
    // arguments reversed, which would copy uninitialized packet memory
    // over the caller's data instead of filling the packet
    bcopy(generic_data, data->data, size);

    return kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
}
extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;
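/*
 * kmod_create_internal registers a loaded module: it wires the module's
 * non-header pages (modules inside the prelink region bounded by
 * sectPRELINKB/sectSizePRELINK are assumed to be part of the wired
 * kernel image and are skipped), rejects duplicate names, assigns the
 * next kmod id, and links the info record onto the global list.
 */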
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
    if (!isPrelink) {
        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
                info->address + info->size, VM_PROT_DEFAULT, FALSE);
        if (rc != KERN_SUCCESS) {
            return rc;
        }
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header *) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr), trunc_page(sect->addr + sect->size),
                VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        if (!isPrelink) {
            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                    info->address + info->size, FALSE);
            assert(rc == KERN_SUCCESS);
        }
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    // link into the global kmod list and hand the new id back
    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

#if DEBUG
    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
#endif /* DEBUG */

    return KERN_SUCCESS;
}
kern_return_t
kmod_create(host_priv_t host_priv,
            kmod_info_t *info,
            kmod_t *id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}
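/*
 * kmod_create_fake registers a placeholder entry (no code, no address)
 * so built-in kernel components show up in the kmod list; the initial
 * reference_count of 1 keeps it from ever being unloaded, started, or
 * stopped.
 */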
kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    kmod_info_t *info;

    if (!name || !version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {
        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));  //NIK fixed this part
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = info->size = info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}
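/*
 * kmod_destroy_internal unlinks a module with no outstanding references,
 * drops every reference it holds on other modules, and returns its
 * memory: prelinked modules are handed back via ml_static_mfree(),
 * ordinary ones are unwired and vm_deallocate()d.
 */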
kern_return_t
kmod_destroy_internal(kmod_t id)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (k->reference_count != 0) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree((vm_offset_t)t, sizeof(struct kmod_reference));
            }

#if DEBUG
            printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

            if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK))) {
                vm_offset_t virt = ml_static_ptovirt(k->address);
                if (virt) {
                    ml_static_mfree(virt, k->size);
                }
            } else {
                rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                        k->address + k->size, FALSE);
                assert(rc == KERN_SUCCESS);

                rc = vm_deallocate(kernel_map, k->address, k->size);
                assert(rc == KERN_SUCCESS);
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}
kern_return_t
kmod_destroy(host_priv_t host_priv,
             kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_destroy_internal(id);
}
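/*
 * kmod_start_or_stop runs a module's start or stop entry point. Any
 * user-supplied argument data arrives as a vm_map_copy_t and is mapped
 * into the kernel map for the duration of the call, then deallocated.
 */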
kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)();
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_copyout(kernel_map, (vm_offset_t *)&user_data, (vm_map_copy_t)*data);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}
/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
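/*
 * For retain/release, the single kmod_t argument packs two module ids:
 * KMOD_UNPACK_FROM_ID() names the module taking (or dropping) the
 * reference, and KMOD_UNPACK_TO_ID() names the module being referenced.
 */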
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree((vm_offset_t)r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}
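/*
 * kmod_release undoes one kmod_retain: it finds the reference record on
 * the "from" module's list that points at the "to" module, unlinks it,
 * drops the target's reference count, and frees the record.
 */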
kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t * p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {    // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree((vm_offset_t)r, sizeof(struct kmod_reference));
            return KERN_SUCCESS;
        }
        p = r;
        r = r->next;
    }

finish:
    simple_unlock(&kmod_lock);

    return rc;
}
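/*
 * kmod_control is the host_priv MIG entry point that multiplexes the
 * kmod operations: START/STOP call into the module, RETAIN/RELEASE
 * manage cross-module references, and GET_CMD blocks until a queued
 * command packet can be copied out to the caller.
 */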
kern_return_t
kmod_control(host_priv_t host_priv,
             kmod_t id,
             kmod_control_flavor_t flavor,
             kmod_args_t *data,
             mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD:
        {
            cmd_queue_entry_t *e;

            /*
             * Throw away any data the user may have sent in error.
             * We must do this, because we are likely to return
             * some data for these commands (thus causing a leak of
             * whatever data the user sent us in error).
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = 0;
                *dataCount = 0;
            }

            simple_lock(&kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                wait_result_t res;

                res = thread_sleep_simple_lock((event_t)&kmod_cmd_queue,
                        simple_lock_addr(kmod_queue_lock),
                        THREAD_ABORTSAFE);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    simple_unlock(&kmod_queue_lock);
                    assert(res == THREAD_INTERRUPTED);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            simple_unlock(&kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, e->data, e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                simple_lock(&kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                simple_unlock(&kmod_queue_lock);
                *data = 0;
                *dataCount = 0;
                return rc;
            }

            *dataCount = e->size;

            kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));

            break;
        }

      default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}
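/*
 * kmod_get_info flattens the kmod list into one contiguous buffer for
 * user space: sizing is done under the lock, then re-checked while
 * copying (retrying if the list grew). In the copied-out records each
 * non-terminal next pointer is made to point at its own kernel record
 * as an end-of-list marker, and reference_list is overwritten with a
 * reference count; the reference records themselves are appended after
 * the info records.
 */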
kern_return_t
kmod_get_info(host_t host,
              kmod_info_array_t *kmods,
              mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data, retry if kmod's size has changed (grown)
    // the copied out data is tweaked to figure what's what at user level
    // change the copied out k->next pointers to point to themselves
    // change the k->reference into a count, tack the references on
    // the end of the data packet in the order they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }

    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
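/*
 * kmod_call_funcs_in_section walks a named __TEXT section of a module's
 * Mach-O header and invokes each function pointer found there; this is
 * how static C++ constructors and destructors are run (see the two
 * wrappers below).
 */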
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine *  routines;
    int        size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, (char *) sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}
kern_return_t
kmod_default_start(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}
kern_return_t
kmod_default_stop(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}
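/*
 * kmod_dump is a debugger/panic-time helper: for each backtrace address
 * it prints any loadable module containing that address, plus that
 * module's dependencies. Every kmod_info_t and reference record is
 * probed with pmap_find_phys() first, since touching an unmapped page
 * during a panic would fault.
 */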
void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    vm_offset_t * kscan_addr = 0;
    vm_offset_t * rscan_addr = 0;
    kmod_info_t * k;
    kmod_reference_t * r;
    int i;
    int found_kmod = 0;
    int kmod_scan_stopped = 0;
    kmod_info_t * stop_kmod = 0;
    int ref_scan_stopped = 0;
    kmod_reference_t * stop_ref = 0;

    for (k = kmod; k; k = k->next) {
        if (!k->address) {
            continue; // skip fake entries for built-in kernel components
        }
        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
            kdb_printf("         kmod scan stopped due to missing "
                "kmod page: %08x\n", stop_kmod);
            break;
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    kdb_printf("      Kernel loadable modules in backtrace "
                        "(with dependencies):\n");
                }
                found_kmod = 1;
                kdb_printf("         %s(%s)@0x%x\n",
                    k->name, k->version, k->address);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
                        kdb_printf("            kmod dependency scan stopped "
                            "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (!rinfo->address) {
                        continue; // skip fake entries for built-ins
                    }

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
                        kdb_printf("            kmod dependency scan stopped "
                            "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    kdb_printf("            dependency: %s(%s)@0x%x\n",
                        rinfo->name, rinfo->version, rinfo->address);
                }

                break; // only report this kmod for one backtrace address
            }
        }
    }

    return;
}