* Copyright (c) 2007 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_LICENSE_HEADER_END@
*/
/*
*
* File: vm/vm_shared_region.h
*
- * protos and struct definitions for shared region
+ * protos and struct definitions for shared region
*/
#ifndef _VM_SHARED_REGION_H_
#define _VM_SHARED_REGION_H_
-#ifdef KERNEL_PRIVATE
+#ifdef KERNEL_PRIVATE
#include <mach/vm_prot.h>
#include <mach/mach_types.h>
#if DEBUG
extern int shared_region_debug;
-#define SHARED_REGION_DEBUG(args) \
- MACRO_BEGIN \
- if (shared_region_debug) { \
- kprintf args; \
- } \
+#define SHARED_REGION_DEBUG(args) \
+ MACRO_BEGIN \
+ if (shared_region_debug) { \
+ kprintf args; \
+ } \
MACRO_END
#else /* DEBUG */
#define SHARED_REGION_DEBUG(args)
#endif /* DEBUG */
extern int shared_region_trace_level;
-extern struct vm_shared_region *init_task_shared_region;
-
-#define SHARED_REGION_TRACE_NONE_LVL 0 /* no trace */
-#define SHARED_REGION_TRACE_ERROR_LVL 1 /* trace abnormal events */
-#define SHARED_REGION_TRACE_INFO_LVL 2 /* trace all events */
-#define SHARED_REGION_TRACE_DEBUG_LVL 3 /* extra traces for debug */
-#define SHARED_REGION_TRACE(level, args) \
- MACRO_BEGIN \
- if (shared_region_trace_level >= level) { \
- printf args; \
- } \
+extern struct vm_shared_region *primary_system_shared_region;
+
+#define SHARED_REGION_TRACE_NONE_LVL 0 /* no trace */
+#define SHARED_REGION_TRACE_ERROR_LVL 1 /* trace abnormal events */
+#define SHARED_REGION_TRACE_INFO_LVL 2 /* trace all events */
+#define SHARED_REGION_TRACE_DEBUG_LVL 3 /* extra traces for debug */
+#define SHARED_REGION_TRACE(level, args) \
+ MACRO_BEGIN \
+ if (shared_region_trace_level >= level) { \
+ printf args; \
+ } \
MACRO_END
#define SHARED_REGION_TRACE_NONE(args)
-#define SHARED_REGION_TRACE_ERROR(args) \
- MACRO_BEGIN \
- SHARED_REGION_TRACE(SHARED_REGION_TRACE_ERROR_LVL, \
- args); \
+#define SHARED_REGION_TRACE_ERROR(args) \
+ MACRO_BEGIN \
+ SHARED_REGION_TRACE(SHARED_REGION_TRACE_ERROR_LVL, \
+ args); \
MACRO_END
-#define SHARED_REGION_TRACE_INFO(args) \
- MACRO_BEGIN \
- SHARED_REGION_TRACE(SHARED_REGION_TRACE_INFO_LVL, \
- args); \
+#define SHARED_REGION_TRACE_INFO(args) \
+ MACRO_BEGIN \
+ SHARED_REGION_TRACE(SHARED_REGION_TRACE_INFO_LVL, \
+ args); \
MACRO_END
-#define SHARED_REGION_TRACE_DEBUG(args) \
- MACRO_BEGIN \
- SHARED_REGION_TRACE(SHARED_REGION_TRACE_DEBUG_LVL, \
- args); \
+#define SHARED_REGION_TRACE_DEBUG(args) \
+ MACRO_BEGIN \
+ SHARED_REGION_TRACE(SHARED_REGION_TRACE_DEBUG_LVL, \
+ args); \
MACRO_END
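/*
 * Usage note: because `args` expands directly into the printf()/kprintf()
 * argument list, call sites pass the format string and its arguments as a
 * single parenthesized group. A hypothetical example (the variables shown
 * are illustrative only; real call sites are in vm_shared_region.c):
 *
 *	SHARED_REGION_TRACE_DEBUG(
 *		("shared_region: lookup(0x%x, 0x%x) -> %p\n",
 *		cputype, cpu_subtype, (void *)shared_region));
 */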
typedef struct vm_shared_region *vm_shared_region_t;
#ifdef MACH_KERNEL_PRIVATE
#include <vm/vm_object.h>
#include <vm/memory_object.h>
-#define PAGE_SIZE_FOR_SR_SLIDE 4096
+#define PAGE_SIZE_FOR_SR_SLIDE 4096
/* Documentation for the slide info format can be found in the dyld project in
* the file 'launch-cache/dyld_cache_format.h'. */
-typedef struct vm_shared_region_slide_info_entry_v1 *vm_shared_region_slide_info_entry_v1_t;
-struct vm_shared_region_slide_info_entry_v1 {
- uint32_t version;
- uint32_t toc_offset; // offset from start of header to table-of-contents
- uint32_t toc_count; // number of entries in toc (same as number of pages in r/w mapping)
- uint32_t entry_offset;
- uint32_t entry_count;
- // uint16_t toc[toc_count];
- // entrybitmap entries[entries_count];
-};
-
-#define NBBY 8
-#define NUM_SLIDING_BITMAPS_PER_PAGE (0x1000/sizeof(int)/NBBY) /*128*/
-typedef struct slide_info_entry_toc *slide_info_entry_toc_t;
-struct slide_info_entry_toc {
- uint8_t entry[NUM_SLIDING_BITMAPS_PER_PAGE];
-};
typedef struct vm_shared_region_slide_info_entry_v2 *vm_shared_region_slide_info_entry_v2_t;
struct vm_shared_region_slide_info_entry_v2 {
- uint32_t version;
- uint32_t page_size;
- uint32_t page_starts_offset;
- uint32_t page_starts_count;
- uint32_t page_extras_offset;
- uint32_t page_extras_count;
- uint64_t delta_mask; // which (contiguous) set of bits contains the delta to the next rebase location
- uint64_t value_add;
+ uint32_t version;
+ uint32_t page_size;
+ uint32_t page_starts_offset;
+ uint32_t page_starts_count;
+ uint32_t page_extras_offset;
+ uint32_t page_extras_count;
+ uint64_t delta_mask; // which (contiguous) set of bits contains the delta to the next rebase location
+ uint64_t value_add;
// uint16_t page_starts[page_starts_count];
// uint16_t page_extras[page_extras_count];
};
-#define DYLD_CACHE_SLIDE_PAGE_ATTRS 0xC000 // high bits of uint16_t are flags
-#define DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA 0x8000 // index is into extras array (not starts array)
-#define DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE 0x4000 // page has no rebasing
-#define DYLD_CACHE_SLIDE_PAGE_ATTR_END 0x8000 // last chain entry for page
-#define DYLD_CACHE_SLIDE_PAGE_VALUE 0x3FFF // bitwise negation of DYLD_CACHE_SLIDE_PAGE_ATTRS
-#define DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT 2
+#define DYLD_CACHE_SLIDE_PAGE_ATTRS 0xC000 // high bits of uint16_t are flags
+#define DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA 0x8000 // index is into extras array (not starts array)
+#define DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE 0x4000 // page has no rebasing
+#define DYLD_CACHE_SLIDE_PAGE_ATTR_END 0x8000 // last chain entry for page
+#define DYLD_CACHE_SLIDE_PAGE_VALUE 0x3FFF // bitwise negation of DYLD_CACHE_SLIDE_PAGE_ATTRS
+#define DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT 2
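/*
 * A minimal sketch of how a v2 page_starts[] entry drives the rebase chain
 * for one page, assuming a 64-bit cache, per the format described in dyld's
 * launch-cache/dyld_cache_format.h. The helper and variable names below are
 * hypothetical, bounds checks are omitted, and the
 * DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA case (low bits index page_extras[], whose
 * entries chain until ..._ATTR_END is set) is skipped; the kernel's real
 * implementation lives in vm_shared_region.c.
 *
 *	static void
 *	rebase_page_v2(vm_shared_region_slide_info_entry_v2_t s_info,
 *	    uint8_t *page, uint32_t page_index, uint32_t slide)
 *	{
 *		const uint16_t *page_starts = (const uint16_t *)
 *		    ((uintptr_t)s_info + s_info->page_starts_offset);
 *		uint16_t start = page_starts[page_index];
 *
 *		if (start == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
 *			return;         // nothing to rebase on this page
 *		}
 *		uint32_t offset = (start & DYLD_CACHE_SLIDE_PAGE_VALUE)
 *		    << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
 *		uint64_t delta_shift = __builtin_ctzll(s_info->delta_mask)
 *		    - DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
 *		for (uint64_t delta = 1; delta != 0; offset += delta) {
 *			uint64_t value;
 *			memcpy(&value, page + offset, sizeof(value));
 *			delta = (value & s_info->delta_mask) >> delta_shift;
 *			value &= ~s_info->delta_mask;
 *			if (value != 0) {
 *				value += s_info->value_add + slide;
 *			}
 *			memcpy(page + offset, &value, sizeof(value));
 *		}
 *	}
 */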
typedef struct vm_shared_region_slide_info_entry_v3 *vm_shared_region_slide_info_entry_v3_t;
-struct vm_shared_region_slide_info_entry_v3
-{
- uint32_t version; // currently 3
- uint32_t page_size; // currently 4096 (may also be 16384)
- uint32_t page_starts_count;
- uint64_t value_add;
- uint16_t page_starts[/* page_starts_count */];
+struct vm_shared_region_slide_info_entry_v3 {
+ uint32_t version; // currently 3
+ uint32_t page_size; // currently 4096 (may also be 16384)
+ uint32_t page_starts_count;
+ uint64_t value_add;
+ uint16_t page_starts[] /* page_starts_count */;
};
-#define DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE 0xFFFF // page has no rebasing
+#define DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE 0xFFFF // page has no rebasing
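/*
 * The v3 format covers arm64e-style chained pointers: page_starts[i] is
 * either DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE or, presumably, the byte
 * offset of the first location to rebase in that page. Each 64-bit location
 * then encodes in its upper bits the distance (in 8-byte units) to the next
 * location in the chain; a distance of zero terminates the chain. See the
 * dyld project's dyld_cache_format.h for the authoritative layout.
 */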
typedef struct vm_shared_region_slide_info_entry_v4 *vm_shared_region_slide_info_entry_v4_t;
struct vm_shared_region_slide_info_entry_v4 {
- uint32_t version; // currently 4
- uint32_t page_size; // currently 4096 (may also be 16384)
- uint32_t page_starts_offset;
- uint32_t page_starts_count;
- uint32_t page_extras_offset;
- uint32_t page_extras_count;
- uint64_t delta_mask; // which (contiguous) set of bits contains the delta to the next rebase location (0xC0000000)
- uint64_t value_add; // base address of cache
- // uint16_t page_starts[page_starts_count];
- // uint16_t page_extras[page_extras_count];
+ uint32_t version; // currently 4
+ uint32_t page_size; // currently 4096 (may also be 16384)
+ uint32_t page_starts_offset;
+ uint32_t page_starts_count;
+ uint32_t page_extras_offset;
+ uint32_t page_extras_count;
+ uint64_t delta_mask; // which (contiguous) set of bits contains the delta to the next rebase location (0xC0000000)
+ uint64_t value_add; // base address of cache
+ // uint16_t page_starts[page_starts_count];
+ // uint16_t page_extras[page_extras_count];
};
#define DYLD_CACHE_SLIDE4_PAGE_NO_REBASE 0xFFFF // page has no rebasing
typedef union vm_shared_region_slide_info_entry *vm_shared_region_slide_info_entry_t;
union vm_shared_region_slide_info_entry {
- uint32_t version;
- struct vm_shared_region_slide_info_entry_v1 v1;
- struct vm_shared_region_slide_info_entry_v2 v2;
- struct vm_shared_region_slide_info_entry_v3 v3;
- struct vm_shared_region_slide_info_entry_v4 v4;
+ uint32_t version;
+ struct vm_shared_region_slide_info_entry_v2 v2;
+ struct vm_shared_region_slide_info_entry_v3 v3;
+ struct vm_shared_region_slide_info_entry_v4 v4;
};
-typedef struct vm_shared_region_slide_info *vm_shared_region_slide_info_t;
-struct vm_shared_region_slide_info {
- mach_vm_address_t slid_address;
- mach_vm_offset_t start;
- mach_vm_offset_t end;
- uint32_t slide;
- vm_object_t slide_object;
- mach_vm_size_t slide_info_size;
- vm_shared_region_slide_info_entry_t slide_info_entry;
-};
+#define MIN_SLIDE_INFO_SIZE \
+ MIN(sizeof(struct vm_shared_region_slide_info_entry_v2), \
+ MIN(sizeof(struct vm_shared_region_slide_info_entry_v3), \
+ sizeof(struct vm_shared_region_slide_info_entry_v4)))
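/*
 * The smallest of the three versioned headers above; presumably used so a
 * caller-supplied slide-info blob that cannot hold even one complete header
 * can be rejected before its version field is used to pick a layout.
 */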
-/* address space shared region descriptor */
+/*
+ * This is the information used by the shared cache pager for sub-sections
+ * which must be modified for relocations and/or pointer authentications
+ * before they can be used. The shared_region_pager gets source pages from
+ * the shared cache file and modifies them -- see shared_region_pager_data_request().
+ *
+ * A single pager may be used from multiple shared regions provided:
+ * - same si_slide_object, si_start, si_end, si_slide, si_ptrauth and si_jop_key
+ * - The size and contents of si_slide_info_entry are the same.
+ */
+typedef struct vm_shared_region_slide_info {
+ uint32_t si_slide; /* the distance that the file data is relocated */
+ bool si_slid;
+#if __has_feature(ptrauth_calls)
+ bool si_ptrauth;
+ uint64_t si_jop_key;
+ struct vm_shared_region *si_shared_region; /* so we can ref/dealloc for authenticated slide info */
+#endif /* __has_feature(ptrauth_calls) */
+ mach_vm_address_t si_slid_address;
+ mach_vm_offset_t si_start; /* start offset in si_slide_object */
+ mach_vm_offset_t si_end;
+ vm_object_t si_slide_object; /* The source object for the pages to be modified */
+ mach_vm_size_t si_slide_info_size; /* size of dyld provided relocation information */
+ vm_shared_region_slide_info_entry_t si_slide_info_entry; /* dyld provided relocation information */
+} *vm_shared_region_slide_info_t;
+
+/*
+ * Data structure that represents a unique shared cache region.
+ */
struct vm_shared_region {
- uint32_t sr_ref_count;
- queue_chain_t sr_q;
- void *sr_root_dir;
- cpu_type_t sr_cpu_type;
- cpu_subtype_t sr_cpu_subtype;
- boolean_t sr_64bit;
- boolean_t sr_mapping_in_progress;
- boolean_t sr_slide_in_progress;
- boolean_t sr_persists;
- boolean_t sr_slid;
- ipc_port_t sr_mem_entry;
- mach_vm_offset_t sr_first_mapping;
- mach_vm_offset_t sr_base_address;
- mach_vm_size_t sr_size;
- mach_vm_offset_t sr_pmap_nesting_start;
- mach_vm_size_t sr_pmap_nesting_size;
- thread_call_t sr_timer_call;
- struct vm_shared_region_slide_info sr_slide_info;
- uuid_t sr_uuid;
- boolean_t sr_uuid_copied;
- uint32_t sr_images_count;
+ uint32_t sr_ref_count;
+ uint32_t sr_slide;
+ queue_chain_t sr_q;
+ void *sr_root_dir;
+ cpu_type_t sr_cpu_type;
+ cpu_subtype_t sr_cpu_subtype;
+ ipc_port_t sr_mem_entry;
+ mach_vm_offset_t sr_first_mapping;
+ mach_vm_offset_t sr_base_address;
+ mach_vm_size_t sr_size;
+ mach_vm_offset_t sr_pmap_nesting_start;
+ mach_vm_size_t sr_pmap_nesting_size;
+ thread_call_t sr_timer_call;
+ uuid_t sr_uuid;
+
+ bool sr_mapping_in_progress;
+ bool sr_slide_in_progress;
+ bool sr_64bit;
+ bool sr_persists;
+ bool sr_uuid_copied;
+ bool sr_stale; /* This region should never be used again. */
+
+#if __has_feature(ptrauth_calls)
+	bool sr_reslide; /* Special shared region for processes suspected of being attacked */
+#define NUM_SR_AUTH_SECTIONS 2
+ vm_shared_region_slide_info_t sr_auth_section[NUM_SR_AUTH_SECTIONS];
+ uint_t sr_num_auth_section;
+#endif /* __has_feature(ptrauth_calls) */
+
+ uint32_t sr_images_count;
struct dyld_uuid_info_64 *sr_images;
};
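/*
 * Presumably: sr_q links the region into the global list of shared regions,
 * sr_ref_count is manipulated via vm_shared_region_reference() and
 * vm_shared_region_deallocate() below, and a region marked sr_stale is kept
 * only for the tasks still running against it and is never handed out again.
 */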
-extern kern_return_t vm_shared_region_slide_page(vm_shared_region_slide_info_t si,
- vm_offset_t vaddr,
- mach_vm_offset_t uservaddr,
- uint32_t pageIndex);
-extern vm_shared_region_slide_info_t vm_shared_region_get_slide_info(vm_shared_region_t sr);
+extern kern_return_t vm_shared_region_slide_page(
+ vm_shared_region_slide_info_t si,
+ vm_offset_t vaddr,
+ mach_vm_offset_t uservaddr,
+ uint32_t pageIndex,
+ uint64_t jop_key);
+extern uint64_t shared_region_find_key(char *shared_region_id);
#else /* !MACH_KERNEL_PRIVATE */
struct vm_shared_region;
#endif /* MACH_KERNEL_PRIVATE */
+struct _sr_file_mappings {
+ int fd;
+ uint32_t mappings_count;
+ struct shared_file_mapping_slide_np *mappings;
+ uint32_t slide;
+ struct fileproc *fp;
+ struct vnode *vp;
+ memory_object_size_t file_size;
+ memory_object_control_t file_control;
+};
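/*
 * One _sr_file_mappings describes a single shared cache file being mapped:
 * presumably the BSD syscall layer fills in the fd/fileproc/vnode plus the
 * pager control and size, and passes an array of these (one element per
 * file) to vm_shared_region_map_file() below.
 */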
+
extern void vm_shared_region_init(void);
extern kern_return_t vm_shared_region_enter(
- struct _vm_map *map,
- struct task *task,
- boolean_t is_64bit,
- void *fsroot,
- cpu_type_t cpu,
- cpu_subtype_t cpu_subtype);
+ struct _vm_map *map,
+ struct task *task,
+ boolean_t is_64bit,
+ void *fsroot,
+ cpu_type_t cpu,
+ cpu_subtype_t cpu_subtype,
+ boolean_t reslide);
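/*
 * vm_shared_region_enter() above is presumably invoked on the exec path to
 * nest the matching shared region into the new task's address map; the added
 * `reslide` argument requests a private, freshly slid region (see sr_reslide),
 * which is only meaningful on pointer-authentication hardware.
 */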
extern kern_return_t vm_shared_region_remove(
- struct _vm_map *map,
- struct task *task);
+ struct _vm_map *map,
+ struct task *task);
extern vm_shared_region_t vm_shared_region_get(
- struct task *task);
+ struct task *task);
extern vm_shared_region_t vm_shared_region_trim_and_get(
- struct task *task);
+ struct task *task);
extern void vm_shared_region_deallocate(
- struct vm_shared_region *shared_region);
-extern mach_vm_offset_t vm_shared_region_base_address(
- struct vm_shared_region *shared_region);
-extern mach_vm_size_t vm_shared_region_size(
- struct vm_shared_region *shared_region);
-extern ipc_port_t vm_shared_region_mem_entry(
- struct vm_shared_region *shared_region);
+ struct vm_shared_region *shared_region);
extern vm_map_t vm_shared_region_vm_map(
- struct vm_shared_region *shared_region);
-extern uint32_t vm_shared_region_get_slide(
- vm_shared_region_t shared_region);
+ struct vm_shared_region *shared_region);
extern void vm_shared_region_set(
- struct task *task,
- struct vm_shared_region *new_shared_region);
+ struct task *task,
+ struct vm_shared_region *new_shared_region);
extern vm_shared_region_t vm_shared_region_lookup(
- void *root_dir,
- cpu_type_t cpu,
- cpu_subtype_t cpu_subtype,
- boolean_t is_64bit);
+ void *root_dir,
+ cpu_type_t cpu,
+ cpu_subtype_t cpu_subtype,
+ boolean_t is_64bit,
+ boolean_t reslide);
extern kern_return_t vm_shared_region_start_address(
- struct vm_shared_region *shared_region,
- mach_vm_offset_t *start_address);
+ struct vm_shared_region *shared_region,
+ mach_vm_offset_t *start_address,
+ task_t task);
extern void vm_shared_region_undo_mappings(
- vm_map_t sr_map,
- mach_vm_offset_t sr_base_address,
- struct shared_file_mapping_np *mappings,
- unsigned int mappings_count);
+ vm_map_t sr_map,
+ mach_vm_offset_t sr_base_address,
+ struct _sr_file_mappings *srf_mappings,
+	struct _sr_file_mappings *srf_mappings_current,
+	unsigned int srf_current_mappings_count);
+__attribute__((noinline))
extern kern_return_t vm_shared_region_map_file(
- struct vm_shared_region *shared_region,
- unsigned int mappings_count,
- struct shared_file_mapping_np *mappings,
- memory_object_control_t file_control,
- memory_object_size_t file_size,
- void *root_dir,
- uint32_t slide,
- user_addr_t slide_start,
- user_addr_t slide_size);
+ struct vm_shared_region *shared_region,
+ int sr_mappings_count,
+ struct _sr_file_mappings *sr_mappings);
+extern void *vm_shared_region_root_dir(
+ struct vm_shared_region *shared_region);
extern kern_return_t vm_shared_region_sliding_valid(uint32_t slide);
-extern kern_return_t vm_shared_region_slide_sanity_check(vm_shared_region_t sr);
-extern void* vm_shared_region_get_slide_info_entry(vm_shared_region_t sr);
extern void vm_commpage_init(void);
extern void vm_commpage_text_init(void);
extern kern_return_t vm_commpage_enter(
- struct _vm_map *map,
- struct task *task,
- boolean_t is64bit);
+ struct _vm_map *map,
+ struct task *task,
+ boolean_t is64bit);
extern kern_return_t vm_commpage_remove(
- struct _vm_map *map,
- struct task *task);
-int vm_shared_region_slide(uint32_t,
- mach_vm_offset_t,
- mach_vm_size_t,
- mach_vm_offset_t,
- mach_vm_size_t,
- mach_vm_offset_t,
- memory_object_control_t);
+ struct _vm_map *map,
+ struct task *task);
+int vm_shared_region_slide(uint32_t,
+ mach_vm_offset_t,
+ mach_vm_size_t,
+ mach_vm_offset_t,
+ mach_vm_size_t,
+ mach_vm_offset_t,
+ memory_object_control_t,
+ vm_prot_t);
+extern void vm_shared_region_pivot(void);
+extern void vm_shared_region_reslide_stale(void);
+#if __has_feature(ptrauth_calls)
+__attribute__((noinline))
+extern kern_return_t vm_shared_region_auth_remap(vm_shared_region_t sr);
+#endif /* __has_feature(ptrauth_calls) */
+extern void vm_shared_region_reference(vm_shared_region_t sr);
#endif /* KERNEL_PRIVATE */
-#endif /* _VM_SHARED_REGION_H_ */
+#endif /* _VM_SHARED_REGION_H_ */