/*
 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>

#include <mach-o/mach_header.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>

/*
 * XXX headers for which prototypes should be in a common include file;
 * XXX see libsa/kext.cpp for why.
 */
kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
kern_return_t kmod_destroy_internal(kmod_t id);
kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
    mach_msg_type_number_t *dataCount);
kern_return_t kmod_retain(kmod_t id);
kern_return_t kmod_release(kmod_t id);
kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
    mach_msg_type_number_t *kmodCount);

static kern_return_t kmod_get_symbol_data(kmod_args_t * data,
    mach_msg_type_number_t * dataCount);
static kern_return_t kmod_free_linkedit_data(void);
static kern_return_t kmod_get_kext_uuid(
    const char * kext_id,
    kmod_args_t * data,
    mach_msg_type_number_t * dataCount);

extern int IODTGetLoaderInfo(const char * key, void ** infoAddr, vm_size_t * infoSize);
extern void IODTFreeLoaderInfo(const char * key, void * infoAddr, vm_size_t infoSize);
/* operates on 32 bit segments */
extern void OSRuntimeUnloadCPPForSegment(struct segment_command * segment);

#define WRITE_PROTECT_MODULE_TEXT (0)

kmod_info_t *kmod = 0;
static int kmod_index = 1;
static int kmod_load_disabled = 0;

mutex_t * kmod_lock = 0;
static mutex_t * kmod_queue_lock = 0;

typedef struct cmd_queue_entry {
    queue_chain_t    links;
    vm_size_t        size;
    vm_offset_t      data;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

void
kmod_init(void)
{
    kmod_lock = mutex_alloc(0);
    kmod_queue_lock = mutex_alloc(0);
    queue_init(&kmod_cmd_queue);
}

kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = NULL;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(const char * name)
{
    kmod_info_t *k = NULL;

    k = kmod;
    while (k) {
        if (!strncmp(k->name, name, sizeof(k->name)))
            break;
        k = k->next;
    }

    return k;
}

// get the id of a kext in a given range, if the address is not in a kext
// -1 is returned
int kmod_lookupidbyaddress_locked(vm_address_t addr)
{
    kmod_info_t *k = NULL;

    mutex_lock(kmod_queue_lock);
    k = kmod;
    while (k) {
        if ((k->address <= addr) && ((k->address + k->size) > addr)) {
            break;
        }
        k = k->next;
    }
    if (!k) {
        mutex_unlock(kmod_queue_lock);
        return -1;
    }
    mutex_unlock(kmod_queue_lock);
    return k->id;
}

kmod_info_t *
kmod_lookupbyaddress(vm_address_t addr)
{
    kmod_info_t *k = NULL;

    k = kmod;
    while (k) {
        if ((k->address <= addr) && ((k->address + k->size) > addr)) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyid_locked(kmod_t id)
{
    kmod_info_t *k = NULL;
    kmod_info_t *kc = NULL;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    mutex_lock(kmod_lock);
    k = kmod_lookupbyid(id);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }

    mutex_unlock(kmod_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = NULL;
    }
    return kc;
}

kmod_info_t *
kmod_lookupbyname_locked(const char * name)
{
    kmod_info_t *k = NULL;
    kmod_info_t *kc = NULL;

    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!kc) return kc;

    mutex_lock(kmod_lock);
    k = kmod_lookupbyname(name);
    if (k) {
        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
    }

    mutex_unlock(kmod_lock);

    if (k == 0) {
        kfree(kc, sizeof(kmod_info_t));
        kc = NULL;
    }
    return kc;
}

// XXX add a nocopy flag??

kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree(e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    mutex_lock(kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    mutex_unlock(kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}

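/*
 * Illustrative sketch (not part of the original source): commands queued
 * here are drained by a user-space daemon (kextd) through the
 * KMOD_CNTL_GET_CMD flavor of kmod_control() below, roughly:
 *
 *     kmod_args_t data = NULL;
 *     mach_msg_type_number_t count = 0;
 *     while (kmod_control(host_priv, 0, KMOD_CNTL_GET_CMD,
 *                         &data, &count) == KERN_SUCCESS) {
 *         // the first int of the packet is the command type,
 *         // e.g. KMOD_LOAD_EXTENSION_PACKET
 *     }
 *
 * The exact user-side loop is an assumption; only the flavor name and the
 * packet layout (type field first) are taken from this file.
 */
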
kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t data;

    if (kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    data.type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data.name, name, sizeof(data.name));

    return kmod_queue_cmd((vm_address_t)&data, sizeof(data));
}

kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kern_return_t result;
    kmod_load_with_dependencies_cmd_t * data;
    vm_size_t size;
    char **c;
    int i, count = 0;

    if (kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;

    result = kmod_queue_cmd((vm_address_t)data, size);
    kfree(data, size);
    return result;
}

kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kern_return_t result;
    kmod_generic_cmd_t * data;
    vm_size_t cmd_size;

    // add sizeof(int) for the type field
    cmd_size = size + sizeof(int);
    data = (kmod_generic_cmd_t *)kalloc(cmd_size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    bcopy(generic_data, data->data, size);  // copy payload into the command

    result = kmod_queue_cmd((vm_address_t)data, cmd_size);
    kfree(data, cmd_size);
    return result;
}

extern vm_offset_t sectPRELINKB;
extern int sectSizePRELINK;
extern int kth_started;

/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
 * if WRITE_PROTECT_MODULE_TEXT is defined.
 */
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;
    boolean_t isPrelink;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
    if (!isPrelink && kth_started) {
        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
            info->address + info->size, VM_PROT_DEFAULT, FALSE);
        if (rc != KERN_SUCCESS) {
            return rc;
        }
    }
#if WRITE_PROTECT_MODULE_TEXT
    {
        struct section * sect = getsectbynamefromheader(
            (struct mach_header *) info->address, "__TEXT", "__text");

        if (sect) {
            (void) vm_map_protect(kernel_map, round_page(sect->addr),
                trunc_page(sect->addr + sect->size),
                VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
        }
    }
#endif /* WRITE_PROTECT_MODULE_TEXT */

    mutex_lock(kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        mutex_unlock(kmod_lock);
        if (!isPrelink && kth_started) {
            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                info->address + info->size, FALSE);
            assert(rc == KERN_SUCCESS);
        }
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    mutex_unlock(kmod_lock);

    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);

    return KERN_SUCCESS;
}

kern_return_t
kmod_create(host_priv_t host_priv,
            vm_address_t addr,
            kmod_t *id)
{
#ifdef SECURE_KERNEL
    return KERN_NOT_SUPPORTED;
#else
    kmod_info_t *info;

    if (kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    info = (kmod_info_t *)addr;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
#endif
}

kern_return_t
kmod_create_fake_with_address(const char *name, const char *version,
                              vm_address_t address, vm_size_t size,
                              int * return_id)
{
    kmod_info_t *info;

    if (!name || ! version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {

        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));  //NIK fixed this part
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = NULL;
    info->address = address;
    info->size = size;
    info->hdr_size = 0;
    info->start = info->stop = NULL;

    mutex_lock(kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        mutex_unlock(kmod_lock);
        kfree(info, sizeof(kmod_info_t));
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    if (return_id)
        *return_id = info->id;

    info->next = kmod;
    kmod = info;

    mutex_unlock(kmod_lock);

    return KERN_SUCCESS;
}

kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    return kmod_create_fake_with_address(name, version, 0, 0, NULL);
}

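/*
 * Illustrative sketch (not part of the original source): built-in kernel
 * components register themselves with a fake entry so that tools listing
 * kmods can see them. A hypothetical caller might do:
 *
 *     kmod_create_fake("com.apple.example.builtin", "1.0.0");
 *
 * A fake entry has a reference_count of 1 and a zero address, so it can
 * never be started, stopped, or unloaded through kmod_start_or_stop() or
 * _kmod_destroy_internal() (except when called with fake == TRUE).
 */
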
static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    mutex_lock(kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (!fake && (k->reference_count != 0)) {
                mutex_unlock(kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            mutex_unlock(kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree(t, sizeof(struct kmod_reference));
            }

            if (!fake) {
                printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                    k->name, k->id, k->size / PAGE_SIZE, k->address);

                if ((k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK))) {
                    vm_offset_t virt = ml_static_ptovirt(k->address);
                    if (virt) {
                        ml_static_mfree(virt, k->size);
                    }
                } else {
                    rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                        k->address + k->size, FALSE);
                    assert(rc == KERN_SUCCESS);

                    rc = vm_deallocate(kernel_map, k->address, k->size);
                    assert(rc == KERN_SUCCESS);
                }
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    mutex_unlock(kmod_lock);

    return KERN_INVALID_ARGUMENT;
}

kern_return_t
kmod_destroy_internal(kmod_t id)
{
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy(host_priv_t host_priv,
             kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return _kmod_destroy_internal(id, FALSE);
}

kern_return_t
kmod_destroy_fake(kmod_t id)
{
    return _kmod_destroy_internal(id, TRUE);
}

kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = NULL;
    kern_return_t (*func)(kmod_info_t *, void *);
    kmod_info_t *k;

    if (start && kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    mutex_lock(kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        mutex_unlock(kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    mutex_unlock(kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_offset_t map_addr;
        vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
        user_data = CAST_DOWN(void *, map_addr);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = NULL;
    if (dataCount) *dataCount = 0;

    return rc;
}

/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = NULL;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    mutex_lock(kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        mutex_unlock(kmod_lock);
        if (r) kfree(r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    mutex_unlock(kmod_lock);

finish:

    return rc;
}

kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = NULL;
    kmod_reference_t * p;

    mutex_lock(kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {    // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            mutex_unlock(kmod_lock);
            kfree(r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            return rc;
        }
        p = r;
        r = r->next;
    }

finish:
    mutex_unlock(kmod_lock);

    return rc;
}

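/*
 * Illustrative note (assumption, not in the original source): the kmod_t
 * passed to kmod_retain()/kmod_release() is a pair of ids packed into one
 * word. Assuming the KMOD_PACK_IDS()/KMOD_UNPACK_*_ID() macros from
 * <mach/kmod.h>, a dependency of kmod 5 on kmod 3 would be recorded as:
 *
 *     kmod_retain(KMOD_PACK_IDS(5, 3));   // "from" 5, "to" 3
 *
 * which bumps the reference_count of kmod 3 and links a kmod_reference_t
 * onto kmod 5's reference_list.
 */
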
kern_return_t
kmod_control(host_priv_t host_priv,
             kmod_t id,
             kmod_control_flavor_t flavor,
             kmod_args_t *data,
             mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    /* Only allow non-root access to retrieve kernel symbols or UUID.
     */
    if (flavor != KMOD_CNTL_GET_KERNEL_SYMBOLS &&
        flavor != KMOD_CNTL_GET_UUID) {

        if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    }

    switch (flavor) {

      case KMOD_CNTL_START:
      case KMOD_CNTL_STOP:
        {
            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                data, dataCount);
            break;
        }

      case KMOD_CNTL_RETAIN:
        {
            rc = kmod_retain(id);
            break;
        }

      case KMOD_CNTL_RELEASE:
        {
            rc = kmod_release(id);
            break;
        }

      case KMOD_CNTL_GET_CMD:
        {
            cmd_queue_entry_t *e;

            /* Throw away any data the user may have sent in error.
             * We must do this, because we are likely to return
             * some data for these commands (thus causing a leak of
             * whatever data the user sent us in error).
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = NULL;
                *dataCount = 0;
            }

            mutex_lock(kmod_queue_lock);

            if (queue_empty(&kmod_cmd_queue)) {
                wait_result_t res;

                res = thread_sleep_mutex((event_t)&kmod_cmd_queue,
                    kmod_queue_lock, THREAD_ABORTSAFE);
                if (queue_empty(&kmod_cmd_queue)) {
                    // we must have been interrupted!
                    mutex_unlock(kmod_queue_lock);
                    assert(res == THREAD_INTERRUPTED);
                    return KERN_ABORTED;
                }
            }
            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

            mutex_unlock(kmod_queue_lock);

            rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
                (vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
            if (rc) {
                mutex_lock(kmod_queue_lock);
                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
                mutex_unlock(kmod_queue_lock);
                *data = NULL;
                *dataCount = 0;
                return rc;
            }
            *dataCount = e->size;

            kfree(e, sizeof(struct cmd_queue_entry));

            break;
        }

      case KMOD_CNTL_GET_KERNEL_SYMBOLS:
        {
            /* Throw away any data the user may have sent in error.
             * We must do this, because we are likely to return
             * some data for these commands (thus causing a leak of
             * whatever data the user sent us in error).
             */
            if (*data && *dataCount) {
                vm_map_copy_discard(*data);
                *data = NULL;
                *dataCount = 0;
            }

            return kmod_get_symbol_data(data, dataCount);
        }

      case KMOD_CNTL_FREE_LINKEDIT_DATA:
        {
            return kmod_free_linkedit_data();
        }

      case KMOD_CNTL_GET_UUID:
        {
            uint32_t id_length = *dataCount;
            char * kext_id = NULL;
            vm_map_offset_t map_addr;
            void * user_data;
            kern_return_t result;

            /* Get the bundle id, if provided, and discard the buffer sent down.
             */
            if (*data && *dataCount) {
                kmem_alloc(kernel_map, (vm_offset_t *)&kext_id, id_length);
                if (!kext_id) {
                    return KERN_FAILURE;
                }

                vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
                user_data = CAST_DOWN(void *, map_addr);

                memcpy(kext_id, user_data, id_length);
                kext_id[id_length - 1] = '\0';

                (void)vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);

                *data = NULL;
                *dataCount = 0;
            }

            result = kmod_get_kext_uuid(kext_id, data, dataCount);
            if (kext_id) {
                kmem_free(kernel_map, (vm_offset_t)kext_id, id_length);
            }
            return result;
        }

      case KMOD_CNTL_DISABLE_LOAD:
        {
            kmod_load_disabled = 1;
            rc = KERN_SUCCESS;
            break;
        }

      default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}

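/*
 * Illustrative sketch (assumption, not in the original source): a
 * user-space client can fetch the running kernel's UUID through the
 * KMOD_CNTL_GET_UUID flavor by passing no bundle id, roughly:
 *
 *     kmod_args_t data = NULL;
 *     mach_msg_type_number_t count = 0;
 *     if (kmod_control(host_priv, 0, KMOD_CNTL_GET_UUID,
 *                      &data, &count) == KERN_SUCCESS) {
 *         // 'data' now holds the 16-byte uuid_command payload
 *     }
 *
 * Only the flavor names and the in/out parameter handling are taken from
 * this file; the calling convention shown is the MIG-generated user stub.
 */
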
/*******************************************************************************
 * This function creates a dummy symbol file for the running kernel based on data
 * in the run-time image. This allows us to correctly link other executables
 * (drivers, etc) against the kernel when the kernel image on the root filesystem
 * does not match the live kernel, as can occur during net-booting where the
 * actual kernel image is obtained from the network via tftp rather than the root
 * disk.
 *
 * If a symbol table is available, then a link-suitable Mach-O file image is
 * created containing a Mach Header and an LC_SYMTAB load command followed by
 * the symbol table data for mach_kernel. A UUID load command is also present for
 * identification, so we don't link against the wrong kernel.
 *
 * NOTE: This file supports only 32 bit kernels; adding support for 64 bit
 * kernels is possible, but is not necessary yet.
 *******************************************************************************/
extern struct mach_header _mh_execute_header;
static int _linkedit_segment_freed = 0;

static kern_return_t
kmod_get_symbol_data(
    kmod_args_t * symbol_data,
    mach_msg_type_number_t * data_size)
{
    kern_return_t result = KERN_FAILURE;

    struct load_command * load_cmd;
    struct mach_header * orig_header = &_mh_execute_header;
    struct segment_command * orig_text = NULL;
    struct segment_command * orig_data = NULL;
    struct segment_command * orig_linkedit = NULL;
    struct uuid_command * orig_uuid = NULL;
    struct symtab_command * orig_symtab = NULL;
    struct section * sect;
    struct section * const_text = NULL;

    vm_size_t header_size = 0;
    vm_offset_t symtab_size;
    vm_offset_t total_size;  // copied out to 'data_size'
    char * buffer = 0;  // copied out to 'symbol_data'

    struct mach_header * header;
    struct segment_command * seg_cmd = NULL;
    struct symtab_command * symtab;

    unsigned int i;
    caddr_t addr;
    vm_offset_t offset;

    // only want to do these 1st call
    static int syms_marked = 0;

    mutex_lock(kmod_lock);

    /* Check for empty out parameter pointers, and zero them if ok.
     */
    if (!symbol_data || !data_size) {
        result = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    *symbol_data = NULL;
    *data_size = 0;

    if (_linkedit_segment_freed) {
        result = KERN_MEMORY_FAILURE;
        goto finish;
    }

    /* Scan the in-memory kernel's mach header for the parts we need to copy:
     * TEXT (for basic file info + const section), DATA (for basic file info),
     * LINKEDIT (for the symbol table entries), SYMTAB (for the symbol table
     * overall).
     */
    load_cmd = (struct load_command *)&orig_header[1];
    for (i = 0; i < orig_header->ncmds; i++) {
        if (load_cmd->cmd == LC_SEGMENT) {
            struct segment_command * orig_seg_cmd =
                (struct segment_command *)load_cmd;

            if (!strncmp(SEG_TEXT, orig_seg_cmd->segname, strlen(SEG_TEXT))) {
                orig_text = orig_seg_cmd;
            } else if (!strncmp(SEG_DATA, orig_seg_cmd->segname,
                    strlen(SEG_DATA))) {

                orig_data = orig_seg_cmd;
            } else if (!strncmp(SEG_LINKEDIT, orig_seg_cmd->segname,
                    strlen(SEG_LINKEDIT))) {

                orig_linkedit = orig_seg_cmd;
            }
        } else if (load_cmd->cmd == LC_UUID) {
            orig_uuid = (struct uuid_command *)load_cmd;
        } else if (load_cmd->cmd == LC_SYMTAB) {
            orig_symtab = (struct symtab_command *)load_cmd;
        }

        load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize);
    }

    /* Bail if any wasn't found.
     */
    if (!orig_text || !orig_data || !orig_linkedit || !orig_uuid || !orig_symtab) {
        goto finish;
    }

    /* Now seek out the const section of the TEXT segment, bailing if not found.
     */
    sect = (struct section *)&orig_text[1];
    for (i = 0; i < orig_text->nsects; i++, sect++) {
        if (!strncmp("__const", sect->sectname, sizeof("__const"))) {
            const_text = sect;
            break;
        }
    }
    if (!const_text) {
        goto finish;
    }

    /* Calculate the total size needed and allocate the buffer. In summing the
     * total size, every size before the last must be rounded to a
     * page-size increment.
     */
    header_size = sizeof(struct mach_header) +
        orig_text->cmdsize + orig_data->cmdsize +
        orig_uuid->cmdsize + orig_symtab->cmdsize;
    symtab_size = (orig_symtab->nsyms * sizeof(struct nlist)) +
        orig_symtab->strsize;
    total_size = round_page(header_size) + round_page(const_text->size) +
        symtab_size;

    (void)kmem_alloc(kernel_map, (vm_offset_t *)&buffer, total_size);
    if (!buffer) {
        goto finish;
    }
    bzero((void *)buffer, total_size);

    /* Set up the Mach-O header in the buffer.
     */
    header = (struct mach_header *)buffer;
    header->magic      = orig_header->magic;
    header->cputype    = orig_header->cputype;
    header->cpusubtype = orig_header->cpusubtype;
    header->filetype   = orig_header->filetype;
    header->ncmds      = 4;  // TEXT, DATA, UUID, SYMTAB
    header->sizeofcmds = header_size - sizeof(struct mach_header);
    header->flags      = orig_header->flags;

    /* Initialize the current file offset and addr; updated as we go through,
     * but only for fields that need proper info.
     */
    offset = round_page(header_size);
    addr = (caddr_t)const_text->addr;

    /* Construct a TEXT segment load command. The only content of the TEXT
     * segment that we actually copy is the __TEXT,__const, which contains the
     * kernel vtables. The other sections are just filled with unincremented
     * addr/offset and zero size and number fields.
     */
    seg_cmd = (struct segment_command *)&header[1]; // just past mach header
    memcpy(seg_cmd, orig_text, orig_text->cmdsize);
    seg_cmd->vmaddr = (unsigned long)addr;
    seg_cmd->vmsize = const_text->size;
    seg_cmd->fileoff = 0;
    seg_cmd->filesize = const_text->size + round_page(header_size);
    seg_cmd->maxprot = 0;
    seg_cmd->initprot = 0;
    sect = (struct section *)(seg_cmd + 1);
    for (i = 0; i < seg_cmd->nsects; i++, sect++) {
        sect->addr = (unsigned long)addr;  // only valid for __TEXT,__const
        sect->size = 0;
        sect->offset = offset;
        sect->nreloc = 0;
        if (0 == strncmp("__const", sect->sectname, sizeof("__const"))) {
            sect->size = const_text->size;
            addr += const_text->size;
            offset += const_text->size;
            const_text = sect;  // retarget to constructed section
        }
    }
    offset = round_page(offset);

    /* Now copy the __DATA segment load command, but none of its content.
     */
    seg_cmd = (struct segment_command *)((int)seg_cmd + seg_cmd->cmdsize);
    memcpy(seg_cmd, orig_data, orig_data->cmdsize);

    seg_cmd->vmaddr = (unsigned long)addr;
    seg_cmd->vmsize = 0x1000;    // Why not just zero? DATA seg is empty.
    seg_cmd->fileoff = offset;
    seg_cmd->filesize = 0;
    seg_cmd->maxprot = 0;
    seg_cmd->initprot = 0;
    sect = (struct section *)(seg_cmd + 1);
    for (i = 0; i < seg_cmd->nsects; i++, sect++) {
        sect->addr = (unsigned long)addr;
        sect->size = 0;
        sect->offset = offset;
        sect->nreloc = 0;
    }
    offset = round_page(offset);

    /* Set up LC_UUID command
     */
    seg_cmd = (struct segment_command *)((int)seg_cmd + seg_cmd->cmdsize);
    memcpy(seg_cmd, orig_uuid, orig_uuid->cmdsize);

    /* Set up LC_SYMTAB command
     */
    symtab = (struct symtab_command *)((int)seg_cmd + seg_cmd->cmdsize);
    symtab->cmd     = LC_SYMTAB;
    symtab->cmdsize = sizeof(struct symtab_command);
    symtab->symoff  = offset;
    symtab->nsyms   = orig_symtab->nsyms;
    symtab->strsize = orig_symtab->strsize;
    symtab->stroff  = offset + symtab->nsyms * sizeof(struct nlist);

    /* Convert the symbol table in place (yes, in the running kernel)
     * from section references to absolute references.
     */
    if (!syms_marked) {
        struct nlist * sym = (struct nlist *) orig_linkedit->vmaddr;
        for (i = 0; i < orig_symtab->nsyms; i++, sym++) {
            if ((sym->n_type & N_TYPE) == N_SECT) {
                sym->n_sect = NO_SECT;
                sym->n_type = (sym->n_type & ~N_TYPE) | N_ABS;
            }
        }
        syms_marked = 1;
    }

    /* Copy the contents of the __TEXT,__const section and the linkedit symbol
     * data into the constructed object file buffer. The header has already been
     * filled in.
     */
    memcpy(buffer + const_text->offset, (void *)const_text->addr, const_text->size);
    memcpy(buffer + symtab->symoff, (void *)orig_linkedit->vmaddr, symtab_size);

    result = vm_map_copyin(kernel_map,
        (vm_offset_t)buffer,
        (vm_map_size_t)total_size,
        /* src_destroy */ TRUE,
        (vm_map_copy_t *)symbol_data);
    if (result != KERN_SUCCESS) {
        kmem_free(kernel_map, (vm_offset_t)buffer, total_size);
        *symbol_data = NULL;
        *data_size = 0;
        goto finish;
    }

    *data_size = total_size;

finish:
    mutex_unlock(kmod_lock);
    return result;
}

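/*
 * Layout of the constructed symbol file, as built above (summary added for
 * clarity; the offsets follow directly from the code):
 *
 *     0                          mach_header plus the four load commands
 *                                (TEXT, DATA, UUID, SYMTAB) == header_size
 *     round_page(header_size)    copy of __TEXT,__const (kernel vtables)
 *     symoff (page rounded)      nsyms * sizeof(struct nlist) symbol entries
 *     stroff                     string table, strsize bytes
 */
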
/*******************************************************************************
 * Drop the LINKEDIT segment from the running kernel to recover wired memory.
 * This is invoked by kextd after it has successfully determined a file is
 * available in the root filesystem to link against (either a symbol file it
 * wrote, or /mach_kernel).
 *******************************************************************************/
// in IOCatalogue.cpp
extern int kernelLinkerPresent;

static kern_return_t
kmod_free_linkedit_data(void)
{
    kern_return_t result = KERN_FAILURE;

    const char * dt_kernel_header_name = "Kernel-__HEADER";
    const char * dt_kernel_symtab_name = "Kernel-__SYMTAB";
    struct mach_header_t * dt_mach_header = NULL;
    vm_size_t dt_mach_header_size = 0;
    struct symtab_command *dt_symtab = NULL;
    vm_size_t dt_symtab_size = 0;
    int dt_result;

    struct segment_command * segmentLE;
    boolean_t keepsyms = FALSE;
    const char * segment_name = "__LINKEDIT";
#if __ppc__ || __arm__
    const char * devtree_segment_name = "Kernel-__LINKEDIT";
    void * segment_paddress;
    vm_size_t segment_size;
#endif

    mutex_lock(kmod_lock);

    /* The semantic is "make sure the linkedit segment is freed", so if we
     * previously did it, it's a success.
     */
    if (_linkedit_segment_freed) {
        result = KERN_SUCCESS;
        goto finish;
    } else if (kernelLinkerPresent) {
        // The in-kernel linker requires the linkedit segment to function.
        // Refuse to dump if it's still around.
        // XXX: We need a dedicated error return code for this.
        printf("can't remove kernel __LINKEDIT segment - in-kernel linker needs it\n");
        result = KERN_MEMORY_FAILURE;
        goto finish;
    }

    /* Dispose of unnecessary stuff that the booter didn't need to load.
     */
    dt_result = IODTGetLoaderInfo(dt_kernel_header_name,
        (void **)&dt_mach_header, &dt_mach_header_size);
    if (dt_result == 0 && dt_mach_header) {
        IODTFreeLoaderInfo(dt_kernel_header_name, (void *)dt_mach_header,
            round_page_32(dt_mach_header_size));
    }
    dt_result = IODTGetLoaderInfo(dt_kernel_symtab_name,
        (void **)&dt_symtab, &dt_symtab_size);
    if (dt_result == 0 && dt_symtab) {
        IODTFreeLoaderInfo(dt_kernel_symtab_name, (void *)dt_symtab,
            round_page_32(dt_symtab_size));
    }

    PE_parse_boot_arg("keepsyms", &keepsyms);

    segmentLE = getsegbyname(segment_name);
    if (!segmentLE) {
        printf("error removing kernel __LINKEDIT segment\n");
        goto finish;
    }
    OSRuntimeUnloadCPPForSegment(segmentLE);
#if __ppc__ || __arm__
    if (!keepsyms && 0 == IODTGetLoaderInfo(devtree_segment_name,
        &segment_paddress, &segment_size)) {

        IODTFreeLoaderInfo(devtree_segment_name, (void *)segment_paddress,
            (int)segment_size);
    }
#else
    if (!keepsyms && segmentLE->vmaddr && segmentLE->vmsize) {
        ml_static_mfree(segmentLE->vmaddr, segmentLE->vmsize);
    }
#endif
    result = KERN_SUCCESS;

finish:
    if (!keepsyms && result == KERN_SUCCESS) {
        _linkedit_segment_freed = 1;
    }
    mutex_unlock(kmod_lock);
    return result;
}

/*******************************************************************************
 * Retrieve the UUID load command payload from the running kernel.
 *******************************************************************************/
static kern_return_t
kmod_get_kext_uuid(
    const char * kext_id,
    kmod_args_t * data,
    mach_msg_type_number_t * dataCount)
{
    kern_return_t result = KERN_FAILURE;
    kmod_info_t * kmod_info = NULL;
    unsigned int i;
    char * uuid_data = 0;
    struct mach_header * header = &_mh_execute_header;
    struct load_command * load_cmd = (struct load_command *)&header[1];
    struct uuid_command * uuid_cmd;

    /* If given no kext ID, retrieve the kernel UUID.
     */
    if (!kext_id) {
        header = &_mh_execute_header;
    } else {
        kmod_info = kmod_lookupbyname_locked(kext_id);
        if (!kmod_info) {
            result = KERN_INVALID_ARGUMENT;
            goto finish;
        }

        /* If the kmod is built-in, it's part of the kernel, so retrieve the
         * kernel UUID.
         */
        if (!kmod_info->address) {
            header = &_mh_execute_header;
        } else {
            header = (struct mach_header *)kmod_info->address;
        }
    }

    load_cmd = (struct load_command *)&header[1];

    for (i = 0; i < header->ncmds; i++) {
        if (load_cmd->cmd == LC_UUID) {
            uuid_cmd = (struct uuid_command *)load_cmd;

            /* kmem_alloc() a local buffer that's on a boundary known to work
             * with vm_map_copyin().
             */
            result = kmem_alloc(kernel_map, (vm_offset_t *)&uuid_data,
                sizeof(uuid_cmd->uuid));
            if (result != KERN_SUCCESS) {
                result = KERN_RESOURCE_SHORTAGE;
                goto finish;
            }

            memcpy(uuid_data, uuid_cmd->uuid, sizeof(uuid_cmd->uuid));

            result = vm_map_copyin(kernel_map, (vm_offset_t)uuid_data,
                sizeof(uuid_cmd->uuid), /* src_destroy */ TRUE,
                (vm_map_copy_t *)data);
            if (result == KERN_SUCCESS) {
                *dataCount = sizeof(uuid_cmd->uuid);
            } else {
                result = KERN_RESOURCE_SHORTAGE;
                kmem_free(kernel_map, (vm_offset_t)uuid_data,
                    sizeof(uuid_cmd->uuid));
            }
            goto finish;
        }

        load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize);
    }

finish:
    if (kmod_info) {
        kfree(kmod_info, sizeof(kmod_info_t));
    }
    return result;
}

kern_return_t
kmod_get_info(__unused host_t host,
              kmod_info_array_t *kmods,
              mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    mutex_lock(kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    mutex_unlock(kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data, retry if kmod's size has changed (grown)
    // the copied out data is tweaked to figure what's what at user level
    // change the copied out k->next pointers to point to themselves
    // change the k->reference into a count, tack the references on
    // the end of the data packet in the order they are found

    mutex_lock(kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            mutex_unlock(kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                mutex_unlock(kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    mutex_unlock(kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = NULL;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}

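/*
 * Illustrative sketch (assumption, not in the original source): given the
 * tweaks above, a user-level consumer walks the returned buffer like this:
 *
 *     kmod_info_t *ki = (kmod_info_t *)kmods;
 *     while (ki) {
 *         int refs = (int)ki->reference_list;   // a count, not a pointer
 *         // ... use ki->name, ki->version ...
 *         if (!ki->next) break;                 // last entry has next == 0
 *         ki++;                                 // entries are contiguous
 *     }
 *
 * The packed kmod_reference_t records follow the kmod_info_t array in the
 * order the kmods were visited.
 */
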
/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
 */
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine * routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, /*(char *)*/ sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

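/*
 * Background note (not in the original source): the toolchain collects a
 * kext's C++ static constructor and destructor function pointers into the
 * __TEXT,__constructor and __TEXT,__destructor sections of its Mach-O
 * image; kmod_initialize_cpp()/kmod_finalize_cpp() below simply invoke
 * every function pointer found in the named section.
 */
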
/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
 */
kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
 */
kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

kern_return_t
kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}

static void
kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
             int (*printf_func)(const char *fmt, ...))
{
    vm_offset_t * kscan_addr = NULL;
    kmod_info_t * k;
    kmod_reference_t * r;
    unsigned int i;
    int found_kmod = 0;
    kmod_info_t * stop_kmod = NULL;

    for (k = kmod; k; k = k->next) {
        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
            (*printf_func)("         kmod scan stopped due to missing "
                "kmod page: %08x\n", stop_kmod);
            break;
        }
        if (!k->address) {
            continue; // skip fake entries for built-in kernel components
        }
        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
            if ((*kscan_addr >= k->address) &&
                (*kscan_addr < (k->address + k->size))) {

                if (!found_kmod) {
                    (*printf_func)("      Kernel loadable modules in backtrace "
                        "(with dependencies):\n");
                }
                found_kmod = 1;
                (*printf_func)("         %s(%s)@0x%x->0x%x\n",
                    k->name, k->version, k->address, k->address + k->size - 1);

                for (r = k->reference_list; r; r = r->next) {
                    kmod_info_t * rinfo;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
                        (*printf_func)("            kmod dependency scan stopped "
                            "due to missing dependency page: %08x\n", r);
                        break;
                    }

                    rinfo = r->info;

                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
                        (*printf_func)("            kmod dependency scan stopped "
                            "due to missing kmod page: %08x\n", rinfo);
                        break;
                    }

                    if (!rinfo->address) {
                        continue; // skip fake entries for built-ins
                    }

                    (*printf_func)("            dependency: %s(%s)@0x%x\n",
                        rinfo->name, rinfo->version, rinfo->address);
                }

                break;  // only report this kmod for one backtrace address
            }
        }
    }

    return;
}

void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &kdb_printf);
}

void kmod_dump_log(vm_offset_t *, unsigned); /* gcc 4 warn fix */

void
kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &printf);
}