/* delay before reclaiming an unused shared region */
int shared_region_destroy_delay = 120; /* in seconds */
+/*
+ * Only one cache gets to slide on Desktop, since we can't
+ * tear down slide info properly today and the desktop actually
+ * produces lots of shared caches.
+ */
+boolean_t shared_region_completed_slide = FALSE;
+
/* this lock protects all the shared region data structures */
lck_grp_t *vm_shared_region_lck_grp;
lck_mtx_t vm_shared_region_lock;
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> get(%p)\n",
- task));
+ (void *)VM_KERNEL_ADDRPERM(task)));
task_lock(task);
vm_shared_region_lock();
SHARED_REGION_TRACE_DEBUG(
("shared_region: get(%p) <- %p\n",
- task, shared_region));
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
return shared_region;
}
{
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> base_address(%p)\n",
- shared_region));
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
assert(shared_region->sr_ref_count > 1);
SHARED_REGION_TRACE_DEBUG(
("shared_region: base_address(%p) <- 0x%llx\n",
- shared_region, (long long)shared_region->sr_base_address));
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ (long long)shared_region->sr_base_address));
return shared_region->sr_base_address;
}
{
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> size(%p)\n",
- shared_region));
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
assert(shared_region->sr_ref_count > 1);
SHARED_REGION_TRACE_DEBUG(
("shared_region: size(%p) <- 0x%llx\n",
- shared_region, (long long)shared_region->sr_size));
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ (long long)shared_region->sr_size));
return shared_region->sr_size;
}
{
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> mem_entry(%p)\n",
- shared_region));
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
assert(shared_region->sr_ref_count > 1);
SHARED_REGION_TRACE_DEBUG(
("shared_region: mem_entry(%p) <- %p\n",
- shared_region, shared_region->sr_mem_entry));
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry)));
return shared_region->sr_mem_entry;
}
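+
+/*
+ * Return the slide that has been applied to this shared region,
+ * or 0 if the region has not been slid.
+ */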
+uint32_t
+vm_shared_region_get_slide(
+ vm_shared_region_t shared_region)
+{
+ SHARED_REGION_TRACE_DEBUG(
+ ("shared_region: -> vm_shared_region_get_slide(%p)\n",
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
+ assert(shared_region->sr_ref_count > 1);
+ SHARED_REGION_TRACE_DEBUG(
+ ("shared_region: vm_shared_region_get_slide(%p) <- %u\n",
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ shared_region->sr_slide_info.slide));
+
+ /* 0 if we haven't slid */
+ assert(shared_region->sr_slide_info.slide_object != NULL ||
+ shared_region->sr_slide_info.slide == 0);
+
+ return shared_region->sr_slide_info.slide;
+}
+
+vm_shared_region_slide_info_t
+vm_shared_region_get_slide_info(
+ vm_shared_region_t shared_region)
+{
+ SHARED_REGION_TRACE_DEBUG(
+ ("shared_region: -> vm_shared_region_get_slide_info(%p)\n",
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
+ assert(shared_region->sr_ref_count > 1);
+ SHARED_REGION_TRACE_DEBUG(
+ ("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n",
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info)));
+ return &shared_region->sr_slide_info;
+}
+
/*
* Set the shared region the process should use.
* A NULL new shared region means that we just want to release the old
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> set(%p, %p)\n",
- task, new_shared_region));
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
task_lock(task);
vm_shared_region_lock();
SHARED_REGION_TRACE_DEBUG(
("shared_region: set(%p) <- old=%p new=%p\n",
- task, old_shared_region, new_shared_region));
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(old_shared_region),
+ (void *)VM_KERNEL_ADDRPERM(new_shared_region)));
}
/*
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> lookup(root=%p,cpu=%d,64bit=%d)\n",
- root_dir, cputype, is_64bit));
+		 (void *)VM_KERNEL_ADDRPERM(root_dir), cputype, is_64bit));
shared_region = NULL;
new_shared_region = NULL;
SHARED_REGION_TRACE_DEBUG(
("shared_region: lookup(root=%p,cpu=%d,64bit=%d) <- %p\n",
- root_dir, cputype, is_64bit, shared_region));
+ (void *)VM_KERNEL_ADDRPERM(root_dir),
+ cputype, is_64bit,
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
assert(shared_region->sr_ref_count > 0);
return shared_region;
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> reference_locked(%p)\n",
- shared_region));
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
assert(shared_region->sr_ref_count > 0);
shared_region->sr_ref_count++;
SHARED_REGION_TRACE_DEBUG(
("shared_region: reference_locked(%p) <- %d\n",
- shared_region, shared_region->sr_ref_count));
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ shared_region->sr_ref_count));
}
/*
{
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> deallocate(%p)\n",
- shared_region));
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
vm_shared_region_lock();
* can persist or not based on the "shared_region_persistence"
* sysctl.
* Make sure that this one complies.
+ *
+ * See comments in vm_shared_region_slide() for notes about
+ * shared regions we have slid (which are not torn down currently).
*/
if (shared_region_persistence &&
!shared_region->sr_persists) {
shared_region->sr_ref_count--;
SHARED_REGION_TRACE_DEBUG(
("shared_region: deallocate(%p): ref now %d\n",
- shared_region, shared_region->sr_ref_count));
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ shared_region->sr_ref_count));
if (shared_region->sr_ref_count == 0) {
uint64_t deadline;
+ assert(!shared_region->sr_slid);
+
if (shared_region->sr_timer_call == NULL) {
/* hold one reference for the timer */
assert(! shared_region->sr_mapping_in_progress);
SHARED_REGION_TRACE_DEBUG(
("shared_region: deallocate(%p): armed timer\n",
- shared_region));
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
vm_shared_region_unlock();
} else {
/* timer expired: let go of this shared region */
+ /*
+ * We can't properly handle teardown of a slid object today.
+ */
+ assert(!shared_region->sr_slid);
+
/*
* Remove it from the queue first, so no one can find
* it...
vm_shared_region_t,
sr_q);
vm_shared_region_unlock();
+
/* ... and destroy it */
vm_shared_region_destroy(shared_region);
shared_region = NULL;
SHARED_REGION_TRACE_DEBUG(
("shared_region: deallocate(%p) <-\n",
- shared_region));
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
}
void
vm_named_entry_t mem_entry;
ipc_port_t mem_entry_port;
vm_shared_region_t shared_region;
+ vm_shared_region_slide_info_t si;
vm_map_t sub_map;
mach_vm_offset_t base_address, pmap_nesting_start;
mach_vm_size_t size, pmap_nesting_size;
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> create(root=%p,cpu=%d,64bit=%d)\n",
- root_dir, cputype, is_64bit));
+ (void *)VM_KERNEL_ADDRPERM(root_dir), cputype, is_64bit));
base_address = 0;
size = 0;
pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
break;
-#ifdef CPU_TYPE_ARM
- case CPU_TYPE_ARM:
- base_address = SHARED_REGION_BASE_ARM;
- size = SHARED_REGION_SIZE_ARM;
- pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
- pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
- break;
-#endif /* CPU_TYPE_ARM */
default:
SHARED_REGION_TRACE_ERROR(
("shared_region: create: unknown cpu type %d\n",
kfree(shared_region, sizeof (*shared_region));
shared_region = NULL;
goto done;
-
}
}
}
/* create a VM sub map and its pmap */
- sub_map = vm_map_create(pmap_create(0, is_64bit),
+ sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit),
0, size,
TRUE);
if (sub_map == VM_MAP_NULL) {
queue_init(&shared_region->sr_q);
shared_region->sr_mapping_in_progress = FALSE;
+ shared_region->sr_slide_in_progress = FALSE;
shared_region->sr_persists = FALSE;
+ shared_region->sr_slid = FALSE;
shared_region->sr_timer_call = NULL;
shared_region->sr_first_mapping = (mach_vm_offset_t) -1;
/* grab a reference for the caller */
shared_region->sr_ref_count = 1;
+ /* And set up slide info */
+ si = &shared_region->sr_slide_info;
+ si->start = 0;
+ si->end = 0;
+ si->slide = 0;
+ si->slide_object = NULL;
+ si->slide_info_size = 0;
+ si->slide_info_entry = NULL;
+
done:
if (shared_region) {
SHARED_REGION_TRACE_INFO(
("shared_region: create(root=%p,cpu=%d,64bit=%d,"
"base=0x%llx,size=0x%llx) <- "
"%p mem=(%p,%p) map=%p pmap=%p\n",
- root_dir, cputype, is_64bit, (long long)base_address,
- (long long)size, shared_region,
- mem_entry_port, mem_entry, sub_map, sub_map->pmap));
+ (void *)VM_KERNEL_ADDRPERM(root_dir),
+ cputype, is_64bit, (long long)base_address,
+ (long long)size,
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ (void *)VM_KERNEL_ADDRPERM(mem_entry_port),
+ (void *)VM_KERNEL_ADDRPERM(mem_entry),
+ (void *)VM_KERNEL_ADDRPERM(sub_map),
+ (void *)VM_KERNEL_ADDRPERM(sub_map->pmap)));
} else {
SHARED_REGION_TRACE_INFO(
("shared_region: create(root=%p,cpu=%d,64bit=%d,"
"base=0x%llx,size=0x%llx) <- NULL",
- root_dir, cputype, is_64bit, (long long)base_address,
+ (void *)VM_KERNEL_ADDRPERM(root_dir),
+ cputype, is_64bit, (long long)base_address,
(long long)size));
}
return shared_region;
SHARED_REGION_TRACE_INFO(
("shared_region: -> destroy(%p) (root=%p,cpu=%d,64bit=%d)\n",
- shared_region,
- shared_region->sr_root_dir,
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir),
shared_region->sr_cpu_type,
shared_region->sr_64bit));
assert(shared_region->sr_ref_count == 0);
assert(!shared_region->sr_persists);
+ assert(!shared_region->sr_slid);
mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
assert(mem_entry->is_sub_map);
assert(!mem_entry->internal);
assert(!mem_entry->is_pager);
+ assert(!mem_entry->is_copy);
map = mem_entry->backing.map;
/*
thread_call_free(shared_region->sr_timer_call);
}
+#if 0
+ /*
+ * If slid, free those resources. We'll want this eventually,
+ * but can't handle it properly today.
+ */
+ si = &shared_region->sr_slide_info;
+ if (si->slide_info_entry) {
+ kmem_free(kernel_map,
+ (vm_offset_t) si->slide_info_entry,
+ (vm_size_t) si->slide_info_size);
+ vm_object_deallocate(si->slide_object);
+ }
+#endif
+
/* release the shared region structure... */
kfree(shared_region, sizeof (*shared_region));
+
SHARED_REGION_TRACE_DEBUG(
("shared_region: destroy(%p) <-\n",
- shared_region));
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
shared_region = NULL;
}
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> start_address(%p)\n",
- shared_region));
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
assert(shared_region->sr_ref_count > 1);
vm_shared_region_lock();
SHARED_REGION_TRACE_DEBUG(
("shared_region: start_address(%p) <- 0x%llx\n",
- shared_region, (long long)shared_region->sr_base_address));
+ (void *)VM_KERNEL_ADDRPERM(shared_region),
+ (long long)shared_region->sr_base_address));
return kr;
}
+
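+/*
+ * Undo the mappings we have established so far in the shared region.
+ * If the caller does not provide "sr_map", look up the current task's
+ * shared region, wait for any mapping already in progress, and use
+ * that region's VM map and base address instead.
+ */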
+void
+vm_shared_region_undo_mappings(
+ vm_map_t sr_map,
+ mach_vm_offset_t sr_base_address,
+ struct shared_file_mapping_np *mappings,
+ unsigned int mappings_count)
+{
+ unsigned int j = 0;
+ vm_shared_region_t shared_region = NULL;
+ boolean_t reset_shared_region_state = FALSE;
+
+ shared_region = vm_shared_region_get(current_task());
+ if (shared_region == NULL) {
+ printf("Failed to undo mappings because of NULL shared region.\n");
+ return;
+ }
+
+ if (sr_map == NULL) {
+ ipc_port_t sr_handle;
+ vm_named_entry_t sr_mem_entry;
+
+ vm_shared_region_lock();
+ assert(shared_region->sr_ref_count > 1);
+
+ while (shared_region->sr_mapping_in_progress) {
+ /* wait for our turn... */
+ vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
+ THREAD_UNINT);
+ }
+ assert(! shared_region->sr_mapping_in_progress);
+ assert(shared_region->sr_ref_count > 1);
+ /* let others know we're working in this shared region */
+ shared_region->sr_mapping_in_progress = TRUE;
+
+ vm_shared_region_unlock();
+
+ reset_shared_region_state = TRUE;
+
+ /* no need to lock because this data is never modified... */
+ sr_handle = shared_region->sr_mem_entry;
+ sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
+ sr_map = sr_mem_entry->backing.map;
+ sr_base_address = shared_region->sr_base_address;
+ }
+ /*
+ * Undo the mappings we've established so far.
+ */
+ for (j = 0; j < mappings_count; j++) {
+ kern_return_t kr2;
+
+ if (mappings[j].sfm_size == 0) {
+ /*
+ * We didn't establish this
+ * mapping, so nothing to undo.
+ */
+ continue;
+ }
+ SHARED_REGION_TRACE_INFO(
+ ("shared_region: mapping[%d]: "
+ "address:0x%016llx "
+ "size:0x%016llx "
+ "offset:0x%016llx "
+ "maxprot:0x%x prot:0x%x: "
+ "undoing...\n",
+ j,
+ (long long)mappings[j].sfm_address,
+ (long long)mappings[j].sfm_size,
+ (long long)mappings[j].sfm_file_offset,
+ mappings[j].sfm_max_prot,
+ mappings[j].sfm_init_prot));
+ kr2 = mach_vm_deallocate(
+ sr_map,
+ (mappings[j].sfm_address -
+ sr_base_address),
+ mappings[j].sfm_size);
+ assert(kr2 == KERN_SUCCESS);
+ }
+
+ if (reset_shared_region_state) {
+ vm_shared_region_lock();
+ assert(shared_region->sr_ref_count > 1);
+ assert(shared_region->sr_mapping_in_progress);
+ /* we're done working on that shared region */
+ shared_region->sr_mapping_in_progress = FALSE;
+ thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
+ vm_shared_region_unlock();
+ reset_shared_region_state = FALSE;
+ }
+
+ vm_shared_region_deallocate(shared_region);
+}
+
/*
* Establish some mappings of a file in the shared region.
* This is used by "dyld" via the shared_region_map_np() system call
struct shared_file_mapping_np *mappings,
memory_object_control_t file_control,
memory_object_size_t file_size,
- void *root_dir)
+ void *root_dir,
+ uint32_t slide,
+ user_addr_t slide_start,
+ user_addr_t slide_size)
{
kern_return_t kr;
vm_object_t file_object;
mach_vm_offset_t sr_base_address;
unsigned int i;
mach_port_t map_port;
- mach_vm_offset_t target_address;
+ vm_map_offset_t target_address;
vm_object_t object;
vm_object_size_t obj_size;
+ struct shared_file_mapping_np *mapping_to_slide = NULL;
+ mach_vm_offset_t first_mapping = (mach_vm_offset_t) -1;
kr = KERN_SUCCESS;
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
- shared_region, mappings_count, mappings,
- file_control, file_size));
+ (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
+ (void *)VM_KERNEL_ADDRPERM(mappings),
+ (void *)VM_KERNEL_ADDRPERM(file_control), file_size));
/* get the VM object associated with the file to be mapped */
file_object = memory_object_control_to_vm_object(file_control);
/* file-backed memory */
map_port = (ipc_port_t) file_object->pager;
}
+
+ if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
+ /*
+ * This is the mapping that needs to be slid.
+ */
+ if (mapping_to_slide != NULL) {
+ SHARED_REGION_TRACE_INFO(
+ ("shared_region: mapping[%d]: "
+ "address:0x%016llx size:0x%016llx "
+ "offset:0x%016llx "
+ "maxprot:0x%x prot:0x%x "
+ "will not be slid as only one such mapping is allowed...\n",
+ i,
+ (long long)mappings[i].sfm_address,
+ (long long)mappings[i].sfm_size,
+ (long long)mappings[i].sfm_file_offset,
+ mappings[i].sfm_max_prot,
+ mappings[i].sfm_init_prot));
+ } else {
+ mapping_to_slide = &mappings[i];
+ }
+ }
/* mapping's address is relative to the shared region base */
target_address =
* We have to create the VM object now, so that it
* can be mapped "copy-on-write".
*/
- obj_size = vm_map_round_page(mappings[i].sfm_size);
+ obj_size = vm_map_round_page(mappings[i].sfm_size,
+ VM_MAP_PAGE_MASK(sr_map));
object = vm_object_allocate(obj_size);
if (object == VM_OBJECT_NULL) {
kr = KERN_RESOURCE_SHORTAGE;
kr = vm_map_enter(
sr_map,
&target_address,
- vm_map_round_page(mappings[i].sfm_size),
+ vm_map_round_page(mappings[i].sfm_size,
+ VM_MAP_PAGE_MASK(sr_map)),
0,
VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
object,
kr = vm_map_enter_mem_object(
sr_map,
&target_address,
- vm_map_round_page(mappings[i].sfm_size),
+ vm_map_round_page(mappings[i].sfm_size,
+ VM_MAP_PAGE_MASK(sr_map)),
0,
VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
map_port,
VM_INHERIT_DEFAULT);
}
- if (kr != KERN_SUCCESS) {
+ if (kr == KERN_SUCCESS) {
+ /*
+ * Record the first (chronologically) successful
+ * mapping in this shared region.
+ * We're protected by "sr_mapping_in_progress" here,
+ * so no need to lock "shared_region".
+ */
+ if (first_mapping == (mach_vm_offset_t) -1) {
+ first_mapping = target_address;
+ }
+ } else {
if (map_port == MACH_PORT_NULL) {
/*
* Get rid of the VM object we just created
mappings[i].sfm_size = 0;
kr = KERN_SUCCESS;
} else {
- unsigned int j;
-
/* this mapping failed ! */
SHARED_REGION_TRACE_ERROR(
("shared_region: mapping[%d]: "
mappings[i].sfm_init_prot,
kr));
- /*
- * Undo the mappings we've established so far.
- */
- for (j = 0; j < i; j++) {
- kern_return_t kr2;
-
- if (mappings[j].sfm_size == 0) {
- /*
- * We didn't establish this
- * mapping, so nothing to undo.
- */
- continue;
- }
- SHARED_REGION_TRACE_INFO(
- ("shared_region: mapping[%d]: "
- "address:0x%016llx "
- "size:0x%016llx "
- "offset:0x%016llx "
- "maxprot:0x%x prot:0x%x: "
- "undoing...\n",
- j,
- (long long)mappings[j].sfm_address,
- (long long)mappings[j].sfm_size,
- (long long)mappings[j].sfm_file_offset,
- mappings[j].sfm_max_prot,
- mappings[j].sfm_init_prot));
- kr2 = mach_vm_deallocate(
- sr_map,
- (mappings[j].sfm_address -
- sr_base_address),
- mappings[j].sfm_size);
- assert(kr2 == KERN_SUCCESS);
- }
-
+ vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
break;
}
}
- /*
- * Record the first (chronologically) mapping in
- * this shared region.
- * We're protected by "sr_mapping_in_progress" here,
- * so no need to lock "shared_region".
- */
- if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
- shared_region->sr_first_mapping = target_address;
+ }
+
+ if (kr == KERN_SUCCESS &&
+ slide &&
+ mapping_to_slide != NULL) {
+ kr = vm_shared_region_slide(slide,
+ mapping_to_slide->sfm_file_offset,
+ mapping_to_slide->sfm_size,
+ slide_start,
+ slide_size,
+ file_control);
+ if (kr != KERN_SUCCESS) {
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: region_slide("
+ "slide:0x%x start:0x%016llx "
+ "size:0x%016llx) failed 0x%x\n",
+ slide,
+ (long long)slide_start,
+ (long long)slide_size,
+ kr));
+ vm_shared_region_undo_mappings(sr_map,
+ sr_base_address,
+ mappings,
+ mappings_count);
}
}
vm_shared_region_lock();
assert(shared_region->sr_ref_count > 1);
assert(shared_region->sr_mapping_in_progress);
+ /* set "sr_first_mapping"; dyld uses it to validate the shared cache */
+ if (kr == KERN_SUCCESS &&
+ shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
+ shared_region->sr_first_mapping = first_mapping;
+ }
/* we're done working on that shared region */
shared_region->sr_mapping_in_progress = FALSE;
thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
done:
SHARED_REGION_TRACE_DEBUG(
("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
- shared_region, mappings_count, mappings,
- file_control, file_size, kr));
+ (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count,
+ (void *)VM_KERNEL_ADDRPERM(mappings),
+ (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr));
return kr;
}
SHARED_REGION_TRACE_DEBUG(
("shared_region: -> "
"enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d)\n",
- map, task, fsroot, cpu, is_64bit));
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit));
/* lookup (create if needed) the shared region for this environment */
shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit);
("shared_region: -> "
"enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d): "
"lookup failed !\n",
- map, task, fsroot, cpu, is_64bit));
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit));
//panic("shared_region_enter: lookup failed\n");
return KERN_FAILURE;
}
SHARED_REGION_TRACE_ERROR(
("shared_region: enter(%p,%p,%p,%d,%d): "
"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
- map, task, fsroot, cpu, is_64bit,
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot),
+ cpu, is_64bit,
(long long)target_address,
- (long long)mapping_size, sr_handle, kr));
+ (long long)mapping_size,
+ (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
goto done;
}
SHARED_REGION_TRACE_DEBUG(
("shared_region: enter(%p,%p,%p,%d,%d): "
"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
- map, task, fsroot, cpu, is_64bit,
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
(long long)target_address, (long long)mapping_size,
- sr_handle, kr));
+ (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
sr_offset += mapping_size;
sr_size -= mapping_size;
}
SHARED_REGION_TRACE_ERROR(
("shared_region: enter(%p,%p,%p,%d,%d): "
"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
- map, task, fsroot, cpu, is_64bit,
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot),
+ cpu, is_64bit,
(long long)target_address,
- (long long)mapping_size, sr_handle, kr));
+ (long long)mapping_size,
+ (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
goto done;
}
SHARED_REGION_TRACE_DEBUG(
("shared_region: enter(%p,%p,%p,%d,%d): "
"nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
- map, task, fsroot, cpu, is_64bit,
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
(long long)target_address, (long long)mapping_size,
- sr_handle, kr));
+ (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
}
if (sr_size > 0) {
/* and there's some left to be mapped without pmap-nesting */
SHARED_REGION_TRACE_ERROR(
("shared_region: enter(%p,%p,%p,%d,%d): "
"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
- map, task, fsroot, cpu, is_64bit,
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot),
+ cpu, is_64bit,
(long long)target_address,
- (long long)mapping_size, sr_handle, kr));
+ (long long)mapping_size,
+ (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
goto done;
}
SHARED_REGION_TRACE_DEBUG(
("shared_region: enter(%p,%p,%p,%d,%d): "
"vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
- map, task, fsroot, cpu, is_64bit,
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit,
(long long)target_address, (long long)mapping_size,
- sr_handle, kr));
+ (void *)VM_KERNEL_ADDRPERM(sr_handle), kr));
sr_offset += mapping_size;
sr_size -= mapping_size;
}
done:
SHARED_REGION_TRACE_DEBUG(
("shared_region: enter(%p,%p,%p,%d,%d) <- 0x%x\n",
- map, task, fsroot, cpu, is_64bit, kr));
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, is_64bit, kr));
+ return kr;
+}
+
+#define SANE_SLIDE_INFO_SIZE (2048*1024) /* can be changed if needed */
+struct vm_shared_region_slide_info slide_info;
+
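+/*
+ * Check whether the requested slide is compatible with the current
+ * task's shared region: fail if the region was already slid with a
+ * different slide, and return KERN_INVALID_ARGUMENT if it was already
+ * slid with this exact slide (nothing left to do).
+ */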
+kern_return_t
+vm_shared_region_sliding_valid(uint32_t slide)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ vm_shared_region_t sr = vm_shared_region_get(current_task());
+
+ /* No region yet? we're fine. */
+ if (sr == NULL) {
+ return kr;
+ }
+
+ if ((sr->sr_slid == TRUE) && slide) {
+ if (slide != vm_shared_region_get_slide_info(sr)->slide) {
+ printf("Only one shared region can be slid\n");
+ kr = KERN_FAILURE;
+ } else {
+			/*
+			 * The caller is requesting the exact same slide we
+			 * have already applied.  Technically this isn't wrong,
+			 * but we don't want to slide again, so report it with
+			 * KERN_INVALID_ARGUMENT.
+			 */
+ kr = KERN_INVALID_ARGUMENT;
+ }
+ }
+ vm_shared_region_deallocate(sr);
return kr;
}
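+
+/*
+ * Record the slide info for a shared region: allocate a kernel buffer
+ * for the slide info entry (bounded by SANE_SLIDE_INFO_SIZE), find the
+ * VM object backing the mapping being slid (from "sr_file_control", or
+ * by walking the current map for the legacy shared_region_slide_np()
+ * path), and attach the slide info to that object.
+ */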
+kern_return_t
+vm_shared_region_slide_init(
+ vm_shared_region_t sr,
+ mach_vm_size_t slide_info_size,
+ mach_vm_offset_t start,
+ mach_vm_size_t size,
+ uint32_t slide,
+ memory_object_control_t sr_file_control)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ vm_object_t object = VM_OBJECT_NULL;
+ vm_object_offset_t offset = 0;
+ vm_shared_region_slide_info_t si = vm_shared_region_get_slide_info(sr);
+ vm_offset_t slide_info_entry;
+
+ vm_map_t map = NULL, cur_map = NULL;
+ boolean_t is_map_locked = FALSE;
+
+ assert(sr->sr_slide_in_progress);
+ assert(!sr->sr_slid);
+ assert(si->slide_object == NULL);
+ assert(si->slide_info_entry == NULL);
+
+ if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
+ printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
+ kr = KERN_FAILURE;
+ return kr;
+ }
+
+ kr = kmem_alloc(kernel_map,
+ (vm_offset_t *) &slide_info_entry,
+ (vm_size_t) slide_info_size);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ if (sr_file_control != MEMORY_OBJECT_CONTROL_NULL) {
+
+ object = memory_object_control_to_vm_object(sr_file_control);
+ vm_object_reference(object);
+ offset = start;
+
+ vm_object_lock(object);
+ } else {
+ /*
+ * Remove this entire "else" block and all "map" references
+ * once we get rid of the shared_region_slide_np()
+ * system call.
+ */
+ vm_map_entry_t entry = VM_MAP_ENTRY_NULL;
+ map = current_map();
+ vm_map_lock_read(map);
+ is_map_locked = TRUE;
+ Retry:
+ cur_map = map;
+		if (!vm_map_lookup_entry(map, start, &entry)) {
+ kr = KERN_INVALID_ARGUMENT;
+ } else {
+ vm_object_t shadow_obj = VM_OBJECT_NULL;
+
+ if (entry->is_sub_map == TRUE) {
+ map = entry->object.sub_map;
+ start -= entry->vme_start;
+ start += entry->offset;
+ vm_map_lock_read(map);
+ vm_map_unlock_read(cur_map);
+ goto Retry;
+ } else {
+ object = entry->object.vm_object;
+ offset = (start - entry->vme_start) + entry->offset;
+ }
+
+ vm_object_lock(object);
+ while (object->shadow != VM_OBJECT_NULL) {
+ shadow_obj = object->shadow;
+ vm_object_lock(shadow_obj);
+ vm_object_unlock(object);
+ object = shadow_obj;
+ }
+ }
+ }
+
+ if (object->internal == TRUE) {
+ kr = KERN_INVALID_ADDRESS;
+ } else if (object->object_slid) {
+ /* Can only be slid once */
+ printf("%s: found vm_object %p already slid?\n", __FUNCTION__, object);
+ kr = KERN_FAILURE;
+ } else {
+
+ si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
+ si->slide_info_size = slide_info_size;
+ si->slide_object = object;
+ si->start = offset;
+ si->end = si->start + size;
+ si->slide = slide;
+
+		/*
+		 * If we ever want this region to be deallocated/freed, we
+		 * will have to msync(..MS_INVALIDATE..) the pages associated
+		 * with this shared region, since they would have been slid
+		 * with an older slide value.
+		 */
+
+ /*
+ * Pointers in object are held without references; they
+ * are disconnected at the time that we destroy the
+ * shared region, and since the shared region holds
+ * a reference on the object, no references in the other
+ * direction are required.
+ */
+ object->object_slid = TRUE;
+ object->vo_slide_info = si;
+ }
+
+ vm_object_unlock(object);
+ if (is_map_locked == TRUE) {
+ vm_map_unlock_read(map);
+ }
+
+ if (kr != KERN_SUCCESS) {
+ kmem_free(kernel_map, slide_info_entry, slide_info_size);
+ }
+ return kr;
+}
+
+void *
+vm_shared_region_get_slide_info_entry(vm_shared_region_t sr)
+{
+	return (void *)sr->sr_slide_info.slide_info_entry;
+}
+
+
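+/*
+ * Sanity-check the slide info copied in from user space: make the
+ * kernel copy read-only and verify that every entry in the TOC refers
+ * to a valid sliding bitmap entry.  On failure, free the slide info
+ * and detach it from the slid object.
+ */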
+kern_return_t
+vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
+{
+	uint32_t pageIndex = 0;
+	uint16_t entryIndex = 0;
+ uint16_t *toc = NULL;
+ vm_shared_region_slide_info_t si;
+ vm_shared_region_slide_info_entry_t s_info;
+ kern_return_t kr;
+
+ si = vm_shared_region_get_slide_info(sr);
+ s_info = si->slide_info_entry;
+ toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
+
+ kr = mach_vm_protect(kernel_map,
+ (mach_vm_offset_t)(vm_offset_t)s_info,
+ (mach_vm_size_t) si->slide_info_size,
+ TRUE, VM_PROT_READ);
+ if (kr != KERN_SUCCESS) {
+ panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
+ }
+
+	for (; pageIndex < s_info->toc_count; pageIndex++) {
+		entryIndex = (uint16_t)(toc[pageIndex]);
+
+		if (entryIndex >= s_info->entry_count) {
+			printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
+			goto fail;
+		}
+	}
+ return KERN_SUCCESS;
+fail:
+ if (si->slide_info_entry != NULL) {
+ kmem_free(kernel_map,
+ (vm_offset_t) si->slide_info_entry,
+ (vm_size_t) si->slide_info_size);
+
+ vm_object_lock(si->slide_object);
+ si->slide_object->object_slid = FALSE;
+ si->slide_object->vo_slide_info = NULL;
+ vm_object_unlock(si->slide_object);
+
+ vm_object_deallocate(si->slide_object);
+ si->slide_object = NULL;
+ si->start = 0;
+ si->end = 0;
+ si->slide = 0;
+ si->slide_info_entry = NULL;
+ si->slide_info_size = 0;
+ }
+ return KERN_FAILURE;
+}
+
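+/*
+ * Slide one page of the shared cache in place.  The TOC maps
+ * "pageIndex" to a sliding bitmap entry; each set bit in that bitmap
+ * marks a 32-bit word within the page mapped at "vaddr" that must be
+ * rebased by adding the slide.  For 64-bit tasks, a carry out of the
+ * low 32 bits indicates a bad relocation and fails the slide.
+ */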
+kern_return_t
+vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
+{
+ uint16_t *toc = NULL;
+ slide_info_entry_toc_t bitmap = NULL;
+	uint32_t i = 0, j = 0;
+ uint8_t b = 0;
+ uint32_t slide = si->slide;
+ int is_64 = task_has_64BitAddr(current_task());
+
+ vm_shared_region_slide_info_entry_t s_info = si->slide_info_entry;
+ toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
+
+ if (pageIndex >= s_info->toc_count) {
+ printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
+ } else {
+ uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
+ slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);
+
+ if (entryIndex >= s_info->entry_count) {
+ printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
+ } else {
+ bitmap = &slide_info_entries[entryIndex];
+
+			for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
+				b = bitmap->entry[i];
+				if (b != 0) {
+					for (j = 0; j < 8; ++j) {
+						if (b & (1 << j)) {
+							uint32_t *ptr_to_slide;
+							uint32_t old_value;
+
+							ptr_to_slide = (uint32_t *)((uintptr_t)(vaddr) + (sizeof(uint32_t) * (i * 8 + j)));
+ old_value = *ptr_to_slide;
+ *ptr_to_slide += slide;
+ if (is_64 && *ptr_to_slide < old_value) {
+ /*
+ * We just slid the low 32 bits of a 64-bit pointer
+ * and it looks like there should have been a carry-over
+ * to the upper 32 bits.
+ * The sliding failed...
+ */
+ printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
+ i, j, b, slide, old_value, *ptr_to_slide);
+ return KERN_FAILURE;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return KERN_SUCCESS;
+}
+
/******************************************************************************/
/* Comm page support */
/******************************************************************************/
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;
+ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
+ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
+vm_named_entry_t commpage_text32_entry = NULL;
+vm_named_entry_t commpage_text64_entry = NULL;
+vm_map_t commpage_text32_map = VM_MAP_NULL;
+vm_map_t commpage_text64_map = VM_MAP_NULL;
+
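+/*
+ * User-space addresses at which the 32-bit and 64-bit commpage text
+ * pages get mapped; randomized at boot by vm_commpage_text_init() and
+ * used by vm_commpage_enter() when setting up a task's address space.
+ */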
+user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
+user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
+
+#if defined(__i386__) || defined(__x86_64__)
/*
* Create a memory entry, VM submap and pmap for one commpage.
*/
if (kr != KERN_SUCCESS) {
panic("_vm_commpage_init: could not allocate mem_entry");
}
- new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
+ new_map = vm_map_create(pmap_create(NULL, 0, FALSE), 0, size, TRUE);
if (new_map == VM_MAP_NULL) {
panic("_vm_commpage_init: could not allocate VM map");
}
SHARED_REGION_TRACE_DEBUG(
("commpage: _init(0x%llx) <- %p\n",
- (long long)size, *handlep));
+ (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep)));
+}
+#endif /* __i386__ || __x86_64__ */
+
+
+/*
+ * Initialize the comm text pages at boot time.
+ */
+extern u_int32_t random(void);
+
+void
+vm_commpage_text_init(void)
+{
+ SHARED_REGION_TRACE_DEBUG(
+ ("commpage text: ->init()\n"));
+#if defined(__i386__) || defined(__x86_64__)
+ /* create the 32 bit comm text page */
+	unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT;	/* restrict to 32-bit max minus 2 pages */
+ _vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
+ commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
+ commpage_text32_map = commpage_text32_entry->backing.map;
+ commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
+ /* XXX if (cpu_is_64bit_capable()) ? */
+ /* create the 64-bit comm page */
+	offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT;	/* restrict sliding to a 2MB range */
+ _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
+ commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
+ commpage_text64_map = commpage_text64_entry->backing.map;
+ commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);
+
+ commpage_text_populate();
+#else
+#error Unknown architecture.
+#endif /* __i386__ || __x86_64__ */
+ /* populate the routines in here */
+ SHARED_REGION_TRACE_DEBUG(
+ ("commpage text: init() <-\n"));
+
}
/*
SHARED_REGION_TRACE_DEBUG(
("commpage: -> init()\n"));
+#if defined(__i386__) || defined(__x86_64__)
/* create the 32-bit comm page */
_vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
commpage64_map = commpage64_entry->backing.map;
+#endif /* __i386__ || __x86_64__ */
+
/* populate them according to this specific platform */
commpage_populate();
__commpage_setup = 1;
vm_map_t map,
task_t task)
{
- ipc_port_t commpage_handle;
- vm_map_offset_t commpage_address, objc_address;
- vm_map_size_t commpage_size, objc_size;
+ ipc_port_t commpage_handle, commpage_text_handle;
+ vm_map_offset_t commpage_address, objc_address, commpage_text_address;
+ vm_map_size_t commpage_size, objc_size, commpage_text_size;
int vm_flags;
kern_return_t kr;
SHARED_REGION_TRACE_DEBUG(
("commpage: -> enter(%p,%p)\n",
- map, task));
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task)));
+ commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
/* the comm page is likely to be beyond the actual end of the VM map */
vm_flags = VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX;
/* select the appropriate comm page for this task */
assert(! (task_has_64BitAddr(task) ^ vm_map_is_64bit(map)));
if (task_has_64BitAddr(task)) {
-#ifdef __ppc__
- /*
- * PPC51: ppc64 is limited to 51-bit addresses.
- * Memory above that limit is handled specially at the
- * pmap level, so do not interfere.
- */
- vm_flags |= VM_FLAGS_NO_PMAP_CHECK;
-#endif /* __ppc__ */
commpage_handle = commpage64_handle;
commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
commpage_size = _COMM_PAGE64_AREA_LENGTH;
objc_size = _COMM_PAGE64_OBJC_SIZE;
objc_address = _COMM_PAGE64_OBJC_BASE;
+ commpage_text_handle = commpage_text64_handle;
+ commpage_text_address = (vm_map_offset_t) commpage_text64_location;
} else {
commpage_handle = commpage32_handle;
commpage_address =
commpage_size = _COMM_PAGE32_AREA_LENGTH;
objc_size = _COMM_PAGE32_OBJC_SIZE;
objc_address = _COMM_PAGE32_OBJC_BASE;
+ commpage_text_handle = commpage_text32_handle;
+ commpage_text_address = (vm_map_offset_t) commpage_text32_location;
}
if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
/* the commpage is properly aligned or sized for pmap-nesting */
vm_flags |= VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP);
}
-
/* map the comm page in the task's address space */
assert(commpage_handle != IPC_PORT_NULL);
kr = vm_map_enter_mem_object(
commpage_handle,
0,
FALSE,
- VM_PROT_READ|VM_PROT_EXECUTE,
- VM_PROT_READ|VM_PROT_EXECUTE,
+ VM_PROT_READ,
+ VM_PROT_READ,
VM_INHERIT_SHARE);
if (kr != KERN_SUCCESS) {
SHARED_REGION_TRACE_ERROR(
("commpage: enter(%p,0x%llx,0x%llx) "
"commpage %p mapping failed 0x%x\n",
- map, (long long)commpage_address,
- (long long)commpage_size, commpage_handle, kr));
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (long long)commpage_address,
+ (long long)commpage_size,
+ (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr));
+ }
+
+ /* map the comm text page in the task's address space */
+ assert(commpage_text_handle != IPC_PORT_NULL);
+ kr = vm_map_enter_mem_object(
+ map,
+ &commpage_text_address,
+ commpage_text_size,
+ 0,
+ vm_flags,
+ commpage_text_handle,
+ 0,
+ FALSE,
+ VM_PROT_READ|VM_PROT_EXECUTE,
+ VM_PROT_READ|VM_PROT_EXECUTE,
+ VM_INHERIT_SHARE);
+ if (kr != KERN_SUCCESS) {
+ SHARED_REGION_TRACE_ERROR(
+ ("commpage text: enter(%p,0x%llx,0x%llx) "
+ "commpage text %p mapping failed 0x%x\n",
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (long long)commpage_text_address,
+ (long long)commpage_text_size,
+ (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr));
}
/*
SHARED_REGION_TRACE_ERROR(
("commpage: enter(%p,0x%llx,0x%llx) "
"objc mapping failed 0x%x\n",
- map, (long long)objc_address,
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (long long)objc_address,
(long long)objc_size, kr));
}
}
SHARED_REGION_TRACE_DEBUG(
("commpage: enter(%p,%p) <- 0x%x\n",
- map, task, kr));
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task), kr));
return kr;
}
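+
+/*
+ * Slide the current task's shared region: serialize with any slide in
+ * progress, set up the slide info via vm_shared_region_slide_init(),
+ * copy the slide info entry in from user space, sanity-check it, and
+ * on success mark the region as slid, taking an extra reference so the
+ * slid region is never torn down.
+ */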
+int
+vm_shared_region_slide(uint32_t slide,
+ mach_vm_offset_t entry_start_address,
+ mach_vm_size_t entry_size,
+ mach_vm_offset_t slide_start,
+ mach_vm_size_t slide_size,
+ memory_object_control_t sr_file_control)
+{
+ void *slide_info_entry = NULL;
+ int error;
+ vm_shared_region_t sr;
+
+ SHARED_REGION_TRACE_DEBUG(
+ ("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
+ slide, entry_start_address, entry_size, slide_start, slide_size));
+
+ sr = vm_shared_region_get(current_task());
+ if (sr == NULL) {
+ printf("%s: no shared region?\n", __FUNCTION__);
+ SHARED_REGION_TRACE_DEBUG(
+ ("vm_shared_region_slide: <- %d (no shared region)\n",
+ KERN_FAILURE));
+ return KERN_FAILURE;
+ }
+
+ /*
+ * Protect from concurrent access.
+ */
+ vm_shared_region_lock();
+	while (sr->sr_slide_in_progress) {
+ vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
+ }
+ if (sr->sr_slid
+ || shared_region_completed_slide
+ ) {
+ vm_shared_region_unlock();
+
+ vm_shared_region_deallocate(sr);
+ printf("%s: shared region already slid?\n", __FUNCTION__);
+ SHARED_REGION_TRACE_DEBUG(
+ ("vm_shared_region_slide: <- %d (already slid)\n",
+ KERN_FAILURE));
+ return KERN_FAILURE;
+ }
+
+ sr->sr_slide_in_progress = TRUE;
+ vm_shared_region_unlock();
+
+	if ((error = vm_shared_region_slide_init(sr, slide_size, entry_start_address,
+						 entry_size, slide, sr_file_control))) {
+ printf("slide_info initialization failed with kr=%d\n", error);
+ goto done;
+ }
+
+ slide_info_entry = vm_shared_region_get_slide_info_entry(sr);
+	if (slide_info_entry == NULL) {
+ error = KERN_FAILURE;
+ } else {
+ error = copyin((user_addr_t)slide_start,
+ slide_info_entry,
+ (vm_size_t)slide_size);
+ if (error) {
+ error = KERN_INVALID_ADDRESS;
+ }
+ }
+ if (error) {
+ goto done;
+ }
+
+ if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) {
+ error = KERN_INVALID_ARGUMENT;
+ printf("Sanity Check failed for slide_info\n");
+ } else {
+#if DEBUG
+		printf("Successfully initialized slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
+ (void*)(uintptr_t)entry_start_address,
+ (unsigned long)entry_size,
+ (unsigned long)slide_size);
+#endif
+ }
+done:
+ vm_shared_region_lock();
+
+ assert(sr->sr_slide_in_progress);
+ assert(sr->sr_slid == FALSE);
+ sr->sr_slide_in_progress = FALSE;
+ thread_wakeup(&sr->sr_slide_in_progress);
+
+ if (error == KERN_SUCCESS) {
+ sr->sr_slid = TRUE;
+
+ /*
+ * We don't know how to tear down a slid shared region today, because
+ * we would have to invalidate all the pages that have been slid
+ * atomically with respect to anyone mapping the shared region afresh.
+ * Therefore, take a dangling reference to prevent teardown.
+ */
+ sr->sr_ref_count++;
+ shared_region_completed_slide = TRUE;
+ }
+ vm_shared_region_unlock();
+
+ vm_shared_region_deallocate(sr);
+
+ SHARED_REGION_TRACE_DEBUG(
+ ("vm_shared_region_slide: <- %d\n",
+ error));
+
+ return error;
+}
/*
* This is called from powermanagement code to let kernel know the current source of power.
}
}
#endif /* __i386__ || __x86_64__ */
+