/* Looks up the requested file. If the file is already loaded and its */
/* extents are an exact match, returns KERN_SUCCESS. If it is not     */
/* loaded, attempts to load the file extents at the given offsets. If */
/* any extent fails to load, or if the file was already loaded in a   */
/* different configuration, lsf_load fails.                           */

static kern_return_t
lsf_load(
    vm_offset_t mapped_file,
    vm_size_t mapped_file_size,
    vm_offset_t *base_address,
    sf_mapping_t *mappings,
    int map_cnt,
    void *file_object,
    int flags,
    shared_region_task_mappings_t sm_info)
{
    load_struct_t *entry;
    vm_map_copy_t copy_object;
    loaded_mapping_t *file_mapping;
    loaded_mapping_t **tptr;
    int i;
    ipc_port_t local_map;
    vm_offset_t original_alt_load_next;
    vm_offset_t alternate_load_next;

    LSF_DEBUG(("lsf_load"
        "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p)"
        "\n",
        mapped_file_size, *base_address, map_cnt, file_object,
        flags, sm_info));
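
    /* Allocate a tracking entry for this load from the shared lsf_zone. */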
    entry = (load_struct_t *)zalloc(lsf_zone);
    LSF_ALLOC_DEBUG(("lsf_load: entry=%p map_cnt=%d\n", entry, map_cnt));
    LSF_DEBUG(("lsf_load"
        "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p) "
        "entry=%p\n",
        mapped_file_size, *base_address, map_cnt, file_object,
        flags, sm_info, entry));
    if (entry == NULL) {
        printf("lsf_load: unable to allocate memory\n");
        return KERN_NO_SPACE;
    }
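
    /* One preallocated hash element is now in use; initialize the */
    /* tracking entry and link it into the hash table. */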
    shared_file_available_hash_ele--;
    entry->file_object = (int)file_object;
    entry->mapping_cnt = map_cnt;
    entry->mappings = NULL;
    entry->links.prev = (queue_entry_t) 0;
    entry->links.next = (queue_entry_t) 0;
    entry->regions_instance = (shared_region_mapping_t)sm_info->self;
    entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
    entry->file_offset = mappings[0].file_offset;

    lsf_hash_insert(entry, sm_info);
    tptr = &(entry->mappings);
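
    /* Track how far the alternate (sliding) load area will advance */
    /* if this load succeeds. */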
    alternate_load_next = sm_info->alternate_next;
    original_alt_load_next = alternate_load_next;
    if (flags & ALTERNATE_LOAD_SITE) {
        vm_offset_t max_loadfile_offset;

        *base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
            sm_info->alternate_next;
        max_loadfile_offset = 0;
        for (i = 0; i < map_cnt; i++) {
            if (((mappings[i].mapping_offset
                & SHARED_TEXT_REGION_MASK) + mappings[i].size) >
                max_loadfile_offset) {
                max_loadfile_offset =
                    (mappings[i].mapping_offset
                    & SHARED_TEXT_REGION_MASK)
                    + mappings[i].size;
            }
        }
        if ((alternate_load_next + round_page(max_loadfile_offset)) >=
            (sm_info->data_size - (sm_info->data_size >> 9))) {
            entry->base_address =
                (*base_address) & SHARED_TEXT_REGION_MASK;
            lsf_unload(file_object, entry->base_address, sm_info);
            return KERN_NO_SPACE;
        }
        alternate_load_next += round_page(max_loadfile_offset);
    } else {
        if (((*base_address) & SHARED_TEXT_REGION_MASK) >
            sm_info->alternate_base) {
            entry->base_address =
                (*base_address) & SHARED_TEXT_REGION_MASK;
            lsf_unload(file_object, entry->base_address, sm_info);
            return KERN_INVALID_ARGUMENT;
        }
    }
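
    /* Record the load address as an offset within the shared text region. */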
    entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

    // Sanity check the mappings -- make sure we don't stray across the
    // alternate boundary. If any part of a library that we're not trying
    // to load in the alternate load space strays across that boundary,
    // return KERN_INVALID_ARGUMENT immediately so that the caller can
    // try to load it in the alternate shared area. We do this to avoid
    // a nasty case: if a library loads so that it crosses the boundary,
    // it occupies part of the alternate load area without the kernel
    // being aware. Subsequent loads into the alternate load area at the
    // first free address will then fail. Thus, a single library
    // straddling the boundary causes all sliding libraries to fail to
    // load. This check avoids that case.

    if (!(flags & ALTERNATE_LOAD_SITE)) {
        for (i = 0; i < map_cnt; i++) {
            vm_offset_t region_mask;
            vm_address_t region_start;
            vm_address_t region_end;

            if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
                // Mapping offsets are relative to the start of the shared segments.
                region_mask = SHARED_TEXT_REGION_MASK;
                region_start = (mappings[i].mapping_offset & region_mask) + entry->base_address;
                region_end = (mappings[i].size + region_start);
                if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
                    // No library is permitted to load so that any part of it
                    // falls in the alternate shared space. If it needs to be
                    // there, it must be placed in the alternate space explicitly.
                    printf("Library trying to load across alternate shared region boundary -- denied!\n");
                    lsf_unload(file_object, entry->base_address, sm_info);
                    return KERN_INVALID_ARGUMENT;
                }
            } else {
                // Read/write section: check against the data region instead.
                region_mask = SHARED_DATA_REGION_MASK;
                region_start = (mappings[i].mapping_offset & region_mask) + entry->base_address;
                region_end = (mappings[i].size + region_start);
                if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
                    printf("Library trying to load across alternate shared region boundary -- denied!\n");
                    lsf_unload(file_object, entry->base_address, sm_info);
                    return KERN_INVALID_ARGUMENT;
                }
            } // write?
        } // for
    } // if not alternate load site.

    /* Copy in each mapping's file data and install it in the shared region. */
    for (i = 0; i < map_cnt; i++) {
        vm_offset_t target_address;
        vm_offset_t region_mask;

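        /* Copy-on-write mappings are placed in the shared data region; */
        /* everything else goes in the shared text region. The segment  */
        /* bits of the mapping offset must match the chosen region.     */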
        if (mappings[i].protection & VM_PROT_COW) {
            local_map = (ipc_port_t)sm_info->data_region;
            region_mask = SHARED_DATA_REGION_MASK;
            if ((mappings[i].mapping_offset
                & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
                lsf_unload(file_object,
                    entry->base_address, sm_info);
                return KERN_INVALID_ARGUMENT;
            }
        } else {
            region_mask = SHARED_TEXT_REGION_MASK;
            local_map = (ipc_port_t)sm_info->text_region;
            if (mappings[i].mapping_offset
                & GLOBAL_SHARED_SEGMENT_MASK) {
                lsf_unload(file_object,
                    entry->base_address, sm_info);
                return KERN_INVALID_ARGUMENT;
            }
        }
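        /* A non-zero-fill mapping must lie entirely within the mapped file. */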
        if (!(mappings[i].protection & VM_PROT_ZF)
            && ((mapped_file + mappings[i].file_offset +
                mappings[i].size) >
                (mapped_file + mapped_file_size))) {
            lsf_unload(file_object, entry->base_address, sm_info);
            return KERN_INVALID_ARGUMENT;
        }
        target_address = ((mappings[i].mapping_offset) & region_mask)
            + entry->base_address;
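        /* Reserve the target range in the region's backing map. */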
        if (vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
                ->backing.map, &target_address,
                mappings[i].size, VM_FLAGS_FIXED)) {
            lsf_unload(file_object, entry->base_address, sm_info);
            return KERN_FAILURE;
        }
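        /* Recompute the target address (defensive: vm_allocate() can hand */
        /* back a page-rounded address, and the exact offset is wanted).   */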
        target_address = ((mappings[i].mapping_offset) & region_mask)
            + entry->base_address;
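        /* For file-backed mappings, copy the data out of the caller's */
        /* address space and overwrite the range reserved above.       */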
        if (!(mappings[i].protection & VM_PROT_ZF)) {
            if (vm_map_copyin(current_map(),
                (vm_map_address_t)(mapped_file + mappings[i].file_offset),
                vm_map_round_page(mappings[i].size), FALSE, &copy_object)) {
                vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
                    ->backing.map, target_address, mappings[i].size);
                lsf_unload(file_object, entry->base_address, sm_info);
                return KERN_FAILURE;
            }
            if (vm_map_copy_overwrite(((vm_named_entry_t)
                local_map->ip_kobject)->backing.map,
                (vm_map_address_t)target_address,
                copy_object, FALSE)) {
                vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
                    ->backing.map, target_address, mappings[i].size);
                lsf_unload(file_object, entry->base_address, sm_info);
                return KERN_FAILURE;
            }
        }

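        /* Record the mapping on the entry's singly linked mapping list. */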
        file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
        if (file_mapping == NULL) {
            lsf_unload(file_object, entry->base_address, sm_info);
            printf("lsf_load: unable to allocate memory\n");
            return KERN_NO_SPACE;
        }
        shared_file_available_hash_ele--;
        file_mapping->mapping_offset = (mappings[i].mapping_offset)
            & region_mask;
        file_mapping->size = mappings[i].size;
        file_mapping->file_offset = mappings[i].file_offset;
        file_mapping->protection = mappings[i].protection;
        file_mapping->next = NULL;
        LSF_DEBUG(("lsf_load: file_mapping %p "
            "for offset=0x%x size=0x%x\n",
            file_mapping, file_mapping->mapping_offset,
            file_mapping->size));

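        /* Set the maximum protection (set_max == TRUE), then the current  */
        /* protection, clamping both to read/execute so the shared mapping */
        /* can never acquire write access.                                 */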
        vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
            ->backing.map, target_address,
            round_page(target_address + mappings[i].size),
            (mappings[i].protection &
                (VM_PROT_READ | VM_PROT_EXECUTE)),
            TRUE);
        vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
            ->backing.map, target_address,
            round_page(target_address + mappings[i].size),
            (mappings[i].protection &
                (VM_PROT_READ | VM_PROT_EXECUTE)),
            FALSE);

        *tptr = file_mapping;
        tptr = &(file_mapping->next);
    }
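
    /* All mappings succeeded; publish the new alternate-area high-water mark. */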
    shared_region_mapping_set_alt_next(
        (shared_region_mapping_t) sm_info->self,
        alternate_load_next);
    LSF_DEBUG(("lsf_load: done\n"));
    return KERN_SUCCESS;
}