+
+ *tptr = file_mapping;
+ tptr = &(file_mapping->next);
+ }
+ shared_region_mapping_set_alt_next(
+ (shared_region_mapping_t) sm_info->self,
+ alternate_load_next);
+ LSF_DEBUG(("lsf_load: done\n"));
+ return KERN_SUCCESS;
+}
+
+
+/*
+ * lsf_slide:
+ *
+ * Look in the shared region, starting from the end, for a place to fit all the
+ * mappings while respecting their relative offsets.
+ */
+static kern_return_t
+lsf_slide(
+ unsigned int map_cnt,
+ struct shared_file_mapping_np *mappings_in,
+ shared_region_task_mappings_t sm_info,
+ mach_vm_offset_t *base_offset_p)
+{
+ mach_vm_offset_t max_mapping_offset;
+ int i;
+ vm_map_entry_t map_entry, prev_entry, next_entry;
+ mach_vm_offset_t prev_hole_start, prev_hole_end;
+ mach_vm_offset_t mapping_offset, mapping_end_offset;
+ mach_vm_offset_t base_offset;
+ mach_vm_size_t mapping_size;
+ mach_vm_offset_t wiggle_room, wiggle;
+ vm_map_t text_map, data_map, map;
+ vm_named_entry_t region_entry;
+ ipc_port_t region_handle;
+ kern_return_t kr;
+
+ struct shared_file_mapping_np *mappings, tmp_mapping;
+ unsigned int sort_index, sorted_index;
+ vm_map_offset_t sort_min_address;
+ unsigned int sort_min_index;
+
+ /*
+	 * Sort the mappings array, so that we can try to fit them in
+	 * the right order as we progress along the VM maps.
+ *
+ * We can't modify the original array (the original order is
+ * important when doing lookups of the mappings), so copy it first.
+ */
+
+ kr = kmem_alloc(kernel_map,
+ (vm_offset_t *) &mappings,
+ (vm_size_t) (map_cnt * sizeof (mappings[0])));
+ if (kr != KERN_SUCCESS) {
+ return KERN_NO_SPACE;
+ }
+
+ bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));
+
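+	/*
+	 * Selection sort: order the mappings by ascending end offset within
+	 * the region, and remember the highest end offset so that we can
+	 * check whether all the mappings can fit at all.
+	 */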
+ max_mapping_offset = 0;
+ for (sorted_index = 0;
+ sorted_index < map_cnt;
+ sorted_index++) {
+
+ /* first remaining entry is our new starting point */
+ sort_min_index = sorted_index;
+ mapping_end_offset = ((mappings[sort_min_index].sfm_address &
+ SHARED_TEXT_REGION_MASK) +
+ mappings[sort_min_index].sfm_size);
+ sort_min_address = mapping_end_offset;
+ /* compute the highest mapping_offset as well... */
+ if (mapping_end_offset > max_mapping_offset) {
+ max_mapping_offset = mapping_end_offset;
+ }
+ /* find the lowest mapping_offset in the remaining entries */
+ for (sort_index = sorted_index + 1;
+ sort_index < map_cnt;
+ sort_index++) {
+
+ mapping_end_offset =
+ ((mappings[sort_index].sfm_address &
+ SHARED_TEXT_REGION_MASK) +
+ mappings[sort_index].sfm_size);
+
+ if (mapping_end_offset < sort_min_address) {
+ /* lowest mapping_offset so far... */
+ sort_min_index = sort_index;
+ sort_min_address = mapping_end_offset;
+ }
+ }
+ if (sort_min_index != sorted_index) {
+ /* swap entries */
+ tmp_mapping = mappings[sort_min_index];
+ mappings[sort_min_index] = mappings[sorted_index];
+ mappings[sorted_index] = tmp_mapping;
+ }
+
+ }
+
+ max_mapping_offset = vm_map_round_page(max_mapping_offset);
+
+ /* start from the end of the shared area */
+ base_offset = sm_info->text_size;
+
+	/* can all the mappings fit? */
+ if (max_mapping_offset > base_offset) {
+ kmem_free(kernel_map,
+ (vm_offset_t) mappings,
+ map_cnt * sizeof (mappings[0]));
+ return KERN_FAILURE;
+ }
+
+ /*
+ * Align the last mapping to the end of the submaps
+ * and start from there.
+ */
+ base_offset -= max_mapping_offset;
+
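+	/* get the VM submaps backing the text and data shared regions */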
+ region_handle = (ipc_port_t) sm_info->text_region;
+ region_entry = (vm_named_entry_t) region_handle->ip_kobject;
+ text_map = region_entry->backing.map;
+
+ region_handle = (ipc_port_t) sm_info->data_region;
+ region_entry = (vm_named_entry_t) region_handle->ip_kobject;
+ data_map = region_entry->backing.map;
+
+ vm_map_lock_read(text_map);
+ vm_map_lock_read(data_map);
+
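+	/*
+	 * Whenever a mapping needs to slide further than the wiggle room
+	 * left by the mappings already placed, we lower "base_offset" and
+	 * restart the whole placement from here.
+	 */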
+start_over:
+ /*
+ * At first, we can wiggle all the way from our starting point
+ * (base_offset) towards the start of the map (0), if needed.
+ */
+ wiggle_room = base_offset;
+
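+	/*
+	 * Walk the mappings from the highest end offset down to the lowest
+	 * (they were sorted above) and look for a hole for each of them.
+	 */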
+ for (i = (signed) map_cnt - 1; i >= 0; i--) {
+ if (mappings[i].sfm_init_prot & VM_PROT_COW) {
+ /* copy-on-write mappings are in the data submap */
+ map = data_map;
+ } else {
+ /* other mappings are in the text submap */
+ map = text_map;
+ }
+ /* get the offset within the appropriate submap */
+ mapping_offset = (mappings[i].sfm_address &
+ SHARED_TEXT_REGION_MASK);
+ mapping_size = mappings[i].sfm_size;
+ mapping_end_offset = mapping_offset + mapping_size;
+ mapping_offset = vm_map_trunc_page(mapping_offset);
+ mapping_end_offset = vm_map_round_page(mapping_end_offset);
+ mapping_size = mapping_end_offset - mapping_offset;
+
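+		/*
+		 * Look for a hole in this submap that can hold the
+		 * page-aligned mapping at "base_offset + mapping_offset",
+		 * sliding "base_offset" towards 0 if necessary.
+		 */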
+ for (;;) {
+ if (vm_map_lookup_entry(map,
+ base_offset + mapping_offset,
+ &map_entry)) {
+ /*
+ * The start address for that mapping
+ * is already mapped: no fit.
+ * Locate the hole immediately before this map
+ * entry.
+ */
+ prev_hole_end = map_entry->vme_start;
+ prev_entry = map_entry->vme_prev;
+ if (prev_entry == vm_map_to_entry(map)) {
+ /* no previous entry */
+ prev_hole_start = map->min_offset;
+ } else {
+ /* previous entry ends here */
+ prev_hole_start = prev_entry->vme_end;
+ }
+ } else {
+ /*
+ * The start address for that mapping is not
+ * mapped.
+ * Locate the start and end of the hole
+ * at that location.
+ */
+ /* map_entry is the previous entry */
+ if (map_entry == vm_map_to_entry(map)) {
+ /* no previous entry */
+ prev_hole_start = map->min_offset;
+ } else {
+ /* previous entry ends there */
+ prev_hole_start = map_entry->vme_end;
+ }
+ next_entry = map_entry->vme_next;
+ if (next_entry == vm_map_to_entry(map)) {
+ /* no next entry */
+ prev_hole_end = map->max_offset;
+ } else {
+ prev_hole_end = next_entry->vme_start;
+ }
+ }
+
+ if (prev_hole_end <= base_offset + mapping_offset) {
+ /* hole is to our left: try and wiggle to fit */
+ wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
+ if (wiggle > base_offset) {
+ /* we're getting out of the map */
+ kr = KERN_FAILURE;
+ goto done;
+ }
+ base_offset -= wiggle;
+ if (wiggle > wiggle_room) {
+ /* can't wiggle that much: start over */
+ goto start_over;
+ }
+ /* account for the wiggling done */
+ wiggle_room -= wiggle;
+ }
+
+ if (prev_hole_end >
+ base_offset + mapping_offset + mapping_size) {
+ /*
+ * The hole extends further to the right
+ * than what we need. Ignore the extra space.
+ */
+ prev_hole_end = (base_offset + mapping_offset +
+ mapping_size);
+ }
+
+ if (prev_hole_end <
+ base_offset + mapping_offset + mapping_size) {
+ /*
+ * The hole is not big enough to establish
+ * the mapping right there: wiggle towards
+ * the beginning of the hole so that the end
+ * of our mapping fits in the hole...
+ */
+ wiggle = base_offset + mapping_offset
+ + mapping_size - prev_hole_end;
+ if (wiggle > base_offset) {
+ /* we're getting out of the map */
+ kr = KERN_FAILURE;
+ goto done;
+ }
+ base_offset -= wiggle;
+ if (wiggle > wiggle_room) {
+ /* can't wiggle that much: start over */
+ goto start_over;
+ }
+ /* account for the wiggling done */
+ wiggle_room -= wiggle;
+
+ /* keep searching from this new base */
+ continue;
+ }
+
+ if (prev_hole_start > base_offset + mapping_offset) {
+ /* no hole found: keep looking */
+ continue;
+ }
+
+ /* compute wiggling room at this hole */
+ wiggle = base_offset + mapping_offset - prev_hole_start;
+ if (wiggle < wiggle_room) {
+ /* less wiggle room than before... */
+ wiggle_room = wiggle;
+ }
+
+ /* found a hole that fits: skip to next mapping */
+ break;
+ } /* while we look for a hole */
+ } /* for each mapping */
+
+ *base_offset_p = base_offset;
+ kr = KERN_SUCCESS;
+
+done:
+ vm_map_unlock_read(text_map);
+ vm_map_unlock_read(data_map);
+
+ kmem_free(kernel_map,
+ (vm_offset_t) mappings,
+ map_cnt * sizeof (mappings[0]));
+
+ return kr;
+}
+
+/*
+ * lsf_map:
+ *
+ * Attempt to establish the mappings for a split library in the shared region.
+ */
+static kern_return_t
+lsf_map(
+ struct shared_file_mapping_np *mappings,
+ int map_cnt,
+ void *file_control,
+ memory_object_offset_t file_size,
+ shared_region_task_mappings_t sm_info,
+ mach_vm_offset_t base_offset,
+ mach_vm_offset_t *slide_p)
+{
+ load_struct_t *entry;
+ loaded_mapping_t *file_mapping;
+ loaded_mapping_t **tptr;
+ ipc_port_t region_handle;
+ vm_named_entry_t region_entry;
+ mach_port_t map_port;
+ vm_object_t file_object;
+ kern_return_t kr;
+ int i;
+ mach_vm_offset_t original_base_offset;
+
+ /* get the VM object from the file's memory object handle */
+ file_object = memory_object_control_to_vm_object(file_control);
+
+ original_base_offset = base_offset;
+
+ LSF_DEBUG(("lsf_map"
+ "(cnt=%d,file=%p,sm_info=%p)"
+ "\n",
+ map_cnt, file_object,
+ sm_info));
+
+restart_after_slide:
+	/* get a new "load_struct_t" to describe the mappings for that file */
+ entry = (load_struct_t *)zalloc(lsf_zone);
+ LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
+ LSF_DEBUG(("lsf_map"
+ "(cnt=%d,file=%p,sm_info=%p) "
+ "entry=%p\n",
+ map_cnt, file_object,
+ sm_info, entry));
+ if (entry == NULL) {
+ printf("lsf_map: unable to allocate memory\n");
+ return KERN_NO_SPACE;
+ }
+ shared_file_available_hash_ele--;
+ entry->file_object = (int)file_object;
+ entry->mapping_cnt = map_cnt;
+ entry->mappings = NULL;
+ entry->links.prev = (queue_entry_t) 0;
+ entry->links.next = (queue_entry_t) 0;
+ entry->regions_instance = (shared_region_mapping_t)sm_info->self;
+	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
+ entry->file_offset = mappings[0].sfm_file_offset;
+
+ /* insert the new file entry in the hash table, for later lookups */
+ lsf_hash_insert(entry, sm_info);
+
+ /* where we should add the next mapping description for that file */
+ tptr = &(entry->mappings);
+
+ entry->base_address = base_offset;
+
+
+ /* establish each requested mapping */
+ for (i = 0; i < map_cnt; i++) {
+ mach_vm_offset_t target_address;
+ mach_vm_offset_t region_mask;
+
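+		/*
+		 * Copy-on-write mappings go in the data submap, all other
+		 * mappings go in the text submap.  Reject any mapping that
+		 * would fall outside its segment.
+		 */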
+ if (mappings[i].sfm_init_prot & VM_PROT_COW) {
+ region_handle = (ipc_port_t)sm_info->data_region;
+ region_mask = SHARED_DATA_REGION_MASK;
+ if ((((mappings[i].sfm_address + base_offset)
+ & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
+ (((mappings[i].sfm_address + base_offset +
+ mappings[i].sfm_size - 1)
+ & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
+ lsf_unload(file_object,
+ entry->base_address, sm_info);
+ return KERN_INVALID_ARGUMENT;
+ }
+ } else {
+ region_mask = SHARED_TEXT_REGION_MASK;
+ region_handle = (ipc_port_t)sm_info->text_region;
+ if (((mappings[i].sfm_address + base_offset)
+ & GLOBAL_SHARED_SEGMENT_MASK) ||
+ ((mappings[i].sfm_address + base_offset +
+ mappings[i].sfm_size - 1)
+ & GLOBAL_SHARED_SEGMENT_MASK)) {
+ lsf_unload(file_object,
+ entry->base_address, sm_info);
+ return KERN_INVALID_ARGUMENT;
+ }
+ }
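+		/*
+		 * A mapping that is not zero-fill must not extend past
+		 * the end of the file.
+		 */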
+ if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
+ ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
+ (file_size))) {
+ lsf_unload(file_object, entry->base_address, sm_info);
+ return KERN_INVALID_ARGUMENT;
+ }
+ target_address = entry->base_address +
+ ((mappings[i].sfm_address) & region_mask);
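+		/*
+		 * Zero-fill mappings are backed by the null port,
+		 * other mappings are backed by the file's pager.
+		 */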
+ if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
+ map_port = MACH_PORT_NULL;
+ } else {
+ map_port = (ipc_port_t) file_object->pager;
+ }
+ region_entry = (vm_named_entry_t) region_handle->ip_kobject;
+
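+		/*
+		 * Establish the mapping in the selected submap; only
+		 * read and execute permissions are allowed in the
+		 * shared region.
+		 */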
+ if (mach_vm_map(region_entry->backing.map,
+ &target_address,
+ vm_map_round_page(mappings[i].sfm_size),
+ 0,
+ VM_FLAGS_FIXED,
+ map_port,
+ mappings[i].sfm_file_offset,
+ TRUE,
+ (mappings[i].sfm_init_prot &
+ (VM_PROT_READ|VM_PROT_EXECUTE)),
+ (mappings[i].sfm_max_prot &
+ (VM_PROT_READ|VM_PROT_EXECUTE)),
+ VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
+ lsf_unload(file_object, entry->base_address, sm_info);
+
+ if (slide_p != NULL) {
+ /*
+ * Requested mapping failed but the caller
+ * is OK with sliding the library in the
+ * shared region, so let's try and slide it...
+ */
+
+ /* lookup an appropriate spot */
+ kr = lsf_slide(map_cnt, mappings,
+ sm_info, &base_offset);
+ if (kr == KERN_SUCCESS) {
+ /* try and map it there ... */
+ entry->base_address = base_offset;
+ goto restart_after_slide;
+ }
+ /* couldn't slide ... */
+ }
+
+ return KERN_FAILURE;
+ }
+
+ /* record this mapping */