#include <vm/vm_object.h>
#include <vm/memory_object.h>
+typedef struct vm_shared_region_slide_info_entry *vm_shared_region_slide_info_entry_t;
+struct vm_shared_region_slide_info_entry {
+ uint32_t version; // format version of this slide info blob
+ uint32_t toc_offset; // offset from start of header to table-of-contents
+ uint32_t toc_count; // number of entries in toc (same as number of pages in r/w mapping)
+ uint32_t entry_offset; // offset from start of header to the per-page sliding bitmaps
+ uint32_t entry_count; // number of sliding bitmaps
+};
+
+#define NBBY 8 /* number of bits per byte */
+#define NUM_SLIDING_BITMAPS_PER_PAGE (0x1000/sizeof(int)/NBBY) /* 128: one bitmap byte per eight 32-bit words of a 4KB page */
+typedef struct slide_info_entry_toc *slide_info_entry_toc_t;
+struct slide_info_entry_toc {
+ uint8_t entry[NUM_SLIDING_BITMAPS_PER_PAGE]; // one bit per 32-bit word of the page; set => add the slide to that word
+};
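+/*
+ * Expected layout of the slide info blob (a sketch; only the structures above
+ * are defined here, the rest is inferred from the offsets and counts):
+ *
+ *   header   struct vm_shared_region_slide_info_entry, at offset 0
+ *   toc      at toc_offset: toc_count entries, one per 4KB page of the r/w
+ *            mapping, each (presumably a uint16_t) giving the index of that
+ *            page's sliding bitmap
+ *   entries  at entry_offset: entry_count slide_info_entry_toc bitmaps
+ *
+ * Each bitmap byte covers eight 32-bit words, so one 4KB page needs
+ * 0x1000/4/8 = 128 bytes; a set bit means "add the slide to this word".
+ */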
+
+typedef struct vm_shared_region_slide_info *vm_shared_region_slide_info_t;
+struct vm_shared_region_slide_info {
+ mach_vm_offset_t start; // start of the range covered by sliding
+ mach_vm_offset_t end; // end of that range
+ uint32_t slide; // slide amount, in bytes
+ vm_object_t slide_object; // VM object containing the pages to be slid
+ mach_vm_size_t slide_info_size; // size of the slide info blob
+ vm_shared_region_slide_info_entry_t slide_info_entry; // kernel copy of the slide info blob
+};
+
/* address space shared region descriptor */
struct vm_shared_region {
uint32_t sr_ref_count;
cpu_type_t sr_cpu_type;
boolean_t sr_64bit;
boolean_t sr_mapping_in_progress;
+ boolean_t sr_slide_in_progress; // TRUE while slide info is being set up
boolean_t sr_persists;
+ boolean_t sr_slid; // TRUE once this shared region has been slid
ipc_port_t sr_mem_entry;
mach_vm_offset_t sr_first_mapping;
mach_vm_offset_t sr_base_address;
mach_vm_offset_t sr_pmap_nesting_start;
mach_vm_size_t sr_pmap_nesting_size;
thread_call_t sr_timer_call;
+ struct vm_shared_region_slide_info sr_slide_info; // sliding state for this region
};
+extern kern_return_t vm_shared_region_slide_page(vm_shared_region_slide_info_t si,
+ vm_offset_t vaddr,
+ uint32_t pageIndex);
+extern vm_shared_region_slide_info_t vm_shared_region_get_slide_info(vm_shared_region_t sr);
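+/*
+ * Illustrative sketch, not part of this change: roughly how one page's bitmap
+ * could be applied, assuming the blob layout described above (in particular,
+ * that the TOC entries are uint16_t indices into the bitmap array).  The real
+ * per-page sliding is done by vm_shared_region_slide_page() in
+ * vm_shared_region.c; error reporting and the carry handling needed for
+ * 64-bit pointers are omitted here.
+ */
+#if 0 /* example only */
+static void
+example_slide_one_page(
+	vm_shared_region_slide_info_entry_t s_info, /* kernel copy of the slide info */
+	vm_offset_t vaddr,       /* kernel address of the page being slid */
+	uint32_t pageIndex,      /* page number within the slid range */
+	uint32_t slide)
+{
+	uint16_t *toc;
+	slide_info_entry_toc_t entries, bitmap;
+	uint32_t i, j;
+	uint8_t b;
+
+	toc = (uint16_t *)((uintptr_t)s_info + s_info->toc_offset);
+	entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);
+
+	if (pageIndex >= s_info->toc_count || toc[pageIndex] >= s_info->entry_count)
+		return; /* no slide info for this page */
+
+	bitmap = &entries[toc[pageIndex]];
+	for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; i++) {
+		b = bitmap->entry[i];
+		if (b == 0)
+			continue;
+		for (j = 0; j < NBBY; j++) {
+			if (b & (1 << j)) {
+				/* bit (i,j) covers the (i*NBBY + j)-th 32-bit word of the page */
+				uint32_t *p = (uint32_t *)(vaddr + sizeof(uint32_t) * (i * NBBY + j));
+				*p += slide;
+			}
+		}
+	}
+}
+#endif /* example only */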
#else /* !MACH_KERNEL_PRIVATE */
struct vm_shared_region;
+struct vm_shared_region_slide_info;
+struct vm_shared_region_slide_info_entry;
+struct slide_info_entry_toc;
#endif /* MACH_KERNEL_PRIVATE */
struct vm_shared_region *shared_region);
extern ipc_port_t vm_shared_region_mem_entry(
struct vm_shared_region *shared_region);
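+/* current slide (in bytes) applied to this shared region */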
+extern uint32_t vm_shared_region_get_slide(
+ vm_shared_region_t shared_region);
extern void vm_shared_region_set(
struct task *task,
struct vm_shared_region *new_shared_region);
extern kern_return_t vm_shared_region_start_address(
struct vm_shared_region *shared_region,
mach_vm_offset_t *start_address);
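+/* back out mappings already established when vm_shared_region_map_file() fails partway */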
+extern void vm_shared_region_undo_mappings(
+ vm_map_t sr_map,
+ mach_vm_offset_t sr_base_address,
+ struct shared_file_mapping_np *mappings,
+ unsigned int mappings_count);
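+/*
+ * vm_shared_region_map_file() now also takes the slide to apply to the region
+ * and, presumably, the user address/size of the slide info blob
+ * (slide_start/slide_size) supplied by the caller; a slide of 0 means no
+ * sliding.
+ */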
extern kern_return_t vm_shared_region_map_file(
struct vm_shared_region *shared_region,
unsigned int mappings_count,
struct shared_file_mapping_np *mappings,
memory_object_control_t file_control,
memory_object_size_t file_size,
- void *root_dir);
-
+ void *root_dir,
+ uint32_t slide,
+ user_addr_t slide_start,
+ user_addr_t slide_size);
+extern kern_return_t vm_shared_region_sliding_valid(uint32_t slide);
+extern kern_return_t vm_shared_region_slide_sanity_check(vm_shared_region_t sr);
+extern kern_return_t vm_shared_region_slide_init(vm_shared_region_t sr,
+ mach_vm_size_t slide_info_size,
+ mach_vm_offset_t start,
+ mach_vm_size_t size,
+ uint32_t slide,
+ memory_object_control_t);
+extern void* vm_shared_region_get_slide_info_entry(vm_shared_region_t sr);
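+/*
+ * Rough order of operations for sliding, as suggested by the declarations
+ * above (a sketch, not the exact kernel control flow):
+ *
+ *	vm_shared_region_sliding_valid(slide);           // sliding allowed / not already slid?
+ *	vm_shared_region_slide_init(sr, slide_info_size,
+ *	    start, size, slide, file_control);           // set up the per-region slide state
+ *	copyin(slide_start,
+ *	    vm_shared_region_get_slide_info_entry(sr),
+ *	    slide_info_size);                            // pull the blob in from user space
+ *	vm_shared_region_slide_sanity_check(sr);         // reject malformed offsets/counts
+ *	// vm_shared_region_slide_page() is then applied to each affected page
+ *	// as it is brought in.
+ */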
extern void vm_commpage_init(void);
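+/* the commpage text area gets its own setup, separate from the data commpage */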
+extern void vm_commpage_text_init(void);
extern kern_return_t vm_commpage_enter(
struct _vm_map *map,
struct task *task);
extern kern_return_t vm_commpage_remove(
struct _vm_map *map,
struct task *task);
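+/*
+ * Entry point for sliding: takes the slide amount, what is presumably the
+ * offset/size of the range to slide, the user address/size of the slide info
+ * blob, and the file's pager control, and drives the vm_shared_region_slide_*
+ * routines above for the current shared region.
+ */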
+int vm_shared_region_slide(uint32_t,
+ mach_vm_offset_t,
+ mach_vm_size_t,
+ mach_vm_offset_t,
+ mach_vm_size_t,
+ memory_object_control_t);
#endif /* KERNEL_PRIVATE */