/*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the
+ * License may not be used to create, or enable the creation or
+ * redistribution of, unlawful or unlicensed copies of an Apple operating
+ * system, or to circumvent, violate, or enable the circumvention or
+ * violation of, any terms of an Apple operating system software license
+ * agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
*/
/*
*
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
-#include <vm/vm_protos.h>
#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
-int shared_region_trace_level = SHARED_REGION_TRACE_ERROR;
-
#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
static load_struct_t *
lsf_hash_delete(
- load_struct_t *target_entry, /* optional */
void *file_object,
vm_offset_t base_offset,
shared_region_task_mappings_t sm_info);
load_struct_t *entry,
shared_region_task_mappings_t sm_info);
-static kern_return_t
-lsf_load(
- vm_offset_t mapped_file,
- vm_size_t mapped_file_size,
- vm_offset_t *base_address,
- sf_mapping_t *mappings,
- int map_cnt,
- void *file_object,
- int flags,
- shared_region_task_mappings_t sm_info);
-
static kern_return_t
lsf_slide(
unsigned int map_cnt,
static void
lsf_deallocate(
- load_struct_t *target_entry, /* optional */
void *file_object,
vm_offset_t base_offset,
shared_region_task_mappings_t sm_info,
((((natural_t)file_object) & 0xffffff) % size)
/* Implementation */
+vm_offset_t shared_file_text_region;
+vm_offset_t shared_file_data_region;
+
+ipc_port_t shared_text_region_handle;
+ipc_port_t shared_data_region_handle;
vm_offset_t shared_file_mapping_array = 0;
shared_region_mapping_t default_environment_shared_regions = NULL;
ipc_port_t com_region_handle64 = NULL;
vm_map_t com_region_map32 = NULL;
vm_map_t com_region_map64 = NULL;
-vm_size_t com_region_size32 = _COMM_PAGE32_AREA_LENGTH;
-vm_size_t com_region_size64 = _COMM_PAGE64_AREA_LENGTH;
+vm_size_t com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t com_mapping_resource = NULL;
task_t task,
shared_region_mapping_t shared_region)
{
- shared_region_mapping_t old_region;
-
SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
- "shared_region=%p[%x,%x,%x])\n",
- task, shared_region,
- shared_region ? shared_region->fs_base : 0,
- shared_region ? shared_region->system : 0,
- shared_region ? shared_region->flags : 0));
+ "shared_region=%p)\n",
+ task, shared_region));
if (shared_region) {
assert(shared_region->ref_count > 0);
}
-
- old_region = task->system_shared_region;
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_INFO,
- ("shared_region: %p set_region(task=%p)"
- "old=%p[%x,%x,%x], new=%p[%x,%x,%x]\n",
- current_thread(), task,
- old_region,
- old_region ? old_region->fs_base : 0,
- old_region ? old_region->system : 0,
- old_region ? old_region->flags : 0,
- shared_region,
- shared_region ? shared_region->fs_base : 0,
- shared_region ? shared_region->system : 0,
- shared_region ? shared_region->flags : 0));
-
task->system_shared_region = shared_region;
return KERN_SUCCESS;
}
vm_offset_t client_base,
shared_region_mapping_t *shared_region,
vm_offset_t alt_base,
- vm_offset_t alt_next,
- int fs_base,
- int system)
+ vm_offset_t alt_next)
{
SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
*shared_region = (shared_region_mapping_t)
shared_region_mapping_lock_init((*shared_region));
(*shared_region)->text_region = text_region;
(*shared_region)->text_size = text_size;
- (*shared_region)->fs_base = fs_base;
- (*shared_region)->system = system;
+ (*shared_region)->fs_base = ENV_DEFAULT_ROOT;
+ (*shared_region)->system = cpu_type();
(*shared_region)->data_region = data_region;
(*shared_region)->data_size = data_size;
(*shared_region)->region_mappings = region_mappings;
*next = shared_region->next;
shared_region_mapping_unlock(shared_region);
-
- return KERN_SUCCESS;
-}
-
-/* LP64todo - need 64-bit safe version */
-kern_return_t
-shared_region_mapping_set_alt_next(
- shared_region_mapping_t shared_region,
- vm_offset_t alt_next)
-{
- SHARED_REGION_DEBUG(("shared_region_mapping_set_alt_next"
- "(shared_region=%p, alt_next=0%x)\n",
- shared_region, alt_next));
- assert(shared_region->ref_count > 0);
- shared_region->alternate_next = alt_next;
- return KERN_SUCCESS;
}
kern_return_t
{
struct shared_region_task_mappings sm_info;
shared_region_mapping_t next = NULL;
- unsigned int ref_count;
+ int ref_count;
SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
"(shared_region=%p,%d,%d) ref_count=%d\n",
/* Create a named object based on a submap of specified size */
- new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
+ new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
user_entry->backing.map = new_map;
user_entry->internal = TRUE;
user_entry->is_sub_map = TRUE;
/* relevant as the system default flag is not set */
kern_return_t
shared_file_create_system_region(
- shared_region_mapping_t *shared_region,
- int fs_base,
- int system)
+ shared_region_mapping_t *shared_region)
{
ipc_port_t text_handle;
ipc_port_t data_handle;
kret));
return kret;
}
- kret = shared_region_mapping_create(text_handle, text_size,
- data_handle, data_size,
- mapping_array,
- GLOBAL_SHARED_TEXT_SEGMENT,
- shared_region,
- SHARED_ALTERNATE_LOAD_BASE,
- SHARED_ALTERNATE_LOAD_BASE,
- fs_base,
- system);
+ kret = shared_region_mapping_create(text_handle,
+ text_size, data_handle, data_size, mapping_array,
+ GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
+ SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
if(kret) {
SHARED_REGION_DEBUG(("shared_file_create_system_region: "
"shared_region_mapping_create failed "
/* create com page regions, 1 each for 32 and 64-bit code */
if((kret = shared_region_object_create(
- com_region_size32,
+ com_region_size,
&com_region_handle32))) {
panic("shared_com_boot_time_init: "
"unable to create 32-bit comm page\n");
return;
}
if((kret = shared_region_object_create(
- com_region_size64,
+ com_region_size,
&com_region_handle64))) {
panic("shared_com_boot_time_init: "
"unable to create 64-bit comm page\n");
/* wrap the com region in its own shared file mapping structure */
/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
kret = shared_region_mapping_create(com_region_handle32,
- com_region_size32,
- NULL, 0, 0,
- _COMM_PAGE_BASE_ADDRESS,
- &com_mapping_resource,
- 0, 0,
- ENV_DEFAULT_ROOT, cpu_type());
+ com_region_size, NULL, 0, 0,
+ _COMM_PAGE_BASE_ADDRESS, &com_mapping_resource,
+ 0, 0);
if (kret) {
panic("shared_region_mapping_create failed for commpage");
}
unsigned int fs_base,
unsigned int system)
{
- mach_port_t text_region_handle;
- mach_port_t data_region_handle;
long text_region_size;
long data_region_size;
shared_region_mapping_t new_system_region;
fs_base, system));
text_region_size = 0x10000000;
data_region_size = 0x10000000;
- shared_file_init(&text_region_handle,
+ shared_file_init(&shared_text_region_handle,
text_region_size,
- &data_region_handle,
+ &shared_data_region_handle,
data_region_size,
&shared_file_mapping_array);
- shared_region_mapping_create(text_region_handle,
+ shared_region_mapping_create(shared_text_region_handle,
text_region_size,
- data_region_handle,
+ shared_data_region_handle,
data_region_size,
shared_file_mapping_array,
GLOBAL_SHARED_TEXT_SEGMENT,
&new_system_region,
SHARED_ALTERNATE_LOAD_BASE,
- SHARED_ALTERNATE_LOAD_BASE,
- fs_base, system);
+ SHARED_ALTERNATE_LOAD_BASE);
+ new_system_region->fs_base = fs_base;
+ new_system_region->system = system;
new_system_region->flags = SHARED_REGION_SYSTEM;
/* grab an extra reference for the caller */
vm_offset_t *file_mapping_array)
{
shared_file_info_t *sf_head;
- vm_size_t data_table_size;
+ vm_offset_t table_mapping_address;
+ int data_table_size;
int hash_size;
kern_return_t kret;
data_table_size = data_region_size >> 9;
hash_size = data_region_size >> 14;
+ table_mapping_address = data_region_size - data_table_size;
if(shared_file_mapping_array == 0) {
vm_map_address_t map_addr;
buf_object = vm_object_allocate(data_table_size);
if(vm_map_find_space(kernel_map, &map_addr,
- data_table_size, 0, 0, &entry)
+ data_table_size, 0, &entry)
!= KERN_SUCCESS) {
panic("shared_file_init: no space");
}
*file_mapping_array = shared_file_mapping_array;
}
+ kret = vm_map(((vm_named_entry_t)
+ (*data_region_handle)->ip_kobject)->backing.map,
+ &table_mapping_address,
+ data_table_size, 0,
+ SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
+ sfma_handle, 0, FALSE,
+ VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
+
SHARED_REGION_DEBUG(("shared_file_init() done\n"));
- return KERN_SUCCESS;
+ return kret;
}
static kern_return_t
if (vm_map_wire(kernel_map, hash_cram_address,
hash_cram_address + cram_size,
VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_ERROR,
- ("shared_region: shared_file_header_init: "
- "No memory for data table\n"));
+ printf("shared_file_header_init: "
+ "No memory for data table\n");
return KERN_NO_SPACE;
}
allocable_hash_pages -= cram_pages;
return KERN_SUCCESS;
}
-
-/* A call made from user space, copyin_shared_file requires the user to */
-/* provide the address and size of a mapped file, the full path name of */
-/* that file and a list of offsets to be mapped into shared memory. */
-/* By requiring that the file be pre-mapped, copyin_shared_file can */
-/* guarantee that the file is neither deleted nor changed after the user */
-/* begins the call. */
-
-kern_return_t
-copyin_shared_file(
- vm_offset_t mapped_file,
- vm_size_t mapped_file_size,
- vm_offset_t *base_address,
- int map_cnt,
- sf_mapping_t *mappings,
- memory_object_control_t file_control,
- shared_region_task_mappings_t sm_info,
- int *flags)
-{
- vm_object_t file_object;
- vm_map_entry_t entry;
- shared_file_info_t *shared_file_header;
- load_struct_t *file_entry;
- loaded_mapping_t *file_mapping;
- boolean_t alternate;
- int i;
- kern_return_t ret;
-
- SHARED_REGION_DEBUG(("copyin_shared_file()\n"));
-
- shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
-
- mutex_lock(&shared_file_header->lock);
-
- /* If this is the first call to this routine, take the opportunity */
- /* to initialize the hash table which will be used to look-up */
- /* mappings based on the file object */
-
- if(shared_file_header->hash_init == FALSE) {
- ret = shared_file_header_init(shared_file_header);
- if (ret != KERN_SUCCESS) {
- mutex_unlock(&shared_file_header->lock);
- return ret;
- }
- }
-
- /* Find the entry in the map associated with the current mapping */
- /* of the file object */
- file_object = memory_object_control_to_vm_object(file_control);
- if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
- vm_object_t mapped_object;
- if(entry->is_sub_map ||
- entry->object.vm_object == VM_OBJECT_NULL) {
- mutex_unlock(&shared_file_header->lock);
- return KERN_INVALID_ADDRESS;
- }
- mapped_object = entry->object.vm_object;
- while(mapped_object->shadow != NULL) {
- mapped_object = mapped_object->shadow;
- }
- /* check to see that the file object passed is indeed the */
- /* same as the mapped object passed */
- if(file_object != mapped_object) {
- if(sm_info->flags & SHARED_REGION_SYSTEM) {
- mutex_unlock(&shared_file_header->lock);
- return KERN_PROTECTION_FAILURE;
- } else {
- file_object = mapped_object;
- }
- }
- } else {
- mutex_unlock(&shared_file_header->lock);
- return KERN_INVALID_ADDRESS;
- }
-
- alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;
-
- file_entry = lsf_hash_lookup(shared_file_header->hash,
- (void *) file_object,
- mappings[0].file_offset,
- shared_file_header->hash_size,
- !alternate, alternate, sm_info);
- if (file_entry) {
- /* File is loaded, check the load manifest for exact match */
- /* we simplify by requiring that the elements be the same */
- /* size and in the same order rather than checking for */
- /* semantic equivalence. */
-
- /* If the file is being loaded in the alternate */
- /* area, one load to alternate is allowed per mapped */
- /* object the base address is passed back to the */
- /* caller and the mappings field is filled in. If the */
- /* caller does not pass the precise mappings_cnt */
- /* and the Alternate is already loaded, an error */
- /* is returned. */
- i = 0;
- file_mapping = file_entry->mappings;
- while(file_mapping != NULL) {
- if(i>=map_cnt) {
- mutex_unlock(&shared_file_header->lock);
- return KERN_INVALID_ARGUMENT;
- }
- if(((mappings[i].mapping_offset)
- & SHARED_DATA_REGION_MASK) !=
- file_mapping->mapping_offset ||
- mappings[i].size !=
- file_mapping->size ||
- mappings[i].file_offset !=
- file_mapping->file_offset ||
- mappings[i].protection !=
- file_mapping->protection) {
- break;
- }
- file_mapping = file_mapping->next;
- i++;
- }
- if(i!=map_cnt) {
- mutex_unlock(&shared_file_header->lock);
- return KERN_INVALID_ARGUMENT;
- }
- *base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
- + file_entry->base_address;
- *flags = SF_PREV_LOADED;
- mutex_unlock(&shared_file_header->lock);
- return KERN_SUCCESS;
- } else {
- /* File is not loaded, lets attempt to load it */
- ret = lsf_load(mapped_file, mapped_file_size, base_address,
- mappings, map_cnt,
- (void *)file_object,
- *flags, sm_info);
- *flags = 0;
- if(ret == KERN_NO_SPACE) {
- shared_region_mapping_t regions;
- shared_region_mapping_t system_region;
- regions = (shared_region_mapping_t)sm_info->self;
- regions->flags |= SHARED_REGION_FULL;
- system_region = lookup_default_shared_region(
- regions->fs_base, regions->system);
- if(system_region == regions) {
- shared_region_mapping_t new_system_shared_region;
- shared_file_boot_time_init(
- regions->fs_base, regions->system);
- /* current task must stay with its current */
- /* regions, drop count on system_shared_region */
- /* and put back our original set */
- vm_get_shared_region(current_task(),
- &new_system_shared_region);
- shared_region_mapping_dealloc_lock(
- new_system_shared_region, 0, 1);
- vm_set_shared_region(current_task(), regions);
- } else if(system_region != NULL) {
- shared_region_mapping_dealloc_lock(
- system_region, 0, 1);
- }
- }
- mutex_unlock(&shared_file_header->lock);
- return ret;
- }
-}
-
-extern void shared_region_dump_file_entry(
- int trace_level,
- load_struct_t *entry); /* forward */
-
-void shared_region_dump_file_entry(
- int trace_level,
- load_struct_t *entry)
-{
- int i;
- loaded_mapping_t *mapping;
-
- if (trace_level > shared_region_trace_level) {
- return;
- }
- printf("shared region: %p: "
- "file_entry %p base_address=0x%x file_offset=0x%x "
- "%d mappings\n",
- current_thread(), entry,
- entry->base_address, entry->file_offset, entry->mapping_cnt);
- mapping = entry->mappings;
- for (i = 0; i < entry->mapping_cnt; i++) {
- printf("shared region: %p:\t#%d: "
- "offset=0x%x size=0x%x file_offset=0x%x prot=%d\n",
- current_thread(),
- i,
- mapping->mapping_offset,
- mapping->size,
- mapping->file_offset,
- mapping->protection);
- mapping = mapping->next;
- }
-}
-
-extern void shared_region_dump_mappings(
- int trace_level,
- struct shared_file_mapping_np *mappings,
- int map_cnt,
- mach_vm_offset_t base_offset); /* forward */
-
-void shared_region_dump_mappings(
- int trace_level,
- struct shared_file_mapping_np *mappings,
- int map_cnt,
- mach_vm_offset_t base_offset)
-{
- int i;
-
- if (trace_level > shared_region_trace_level) {
- return;
- }
-
- printf("shared region: %p: %d mappings base_offset=0x%llx\n",
- current_thread(), map_cnt, (uint64_t) base_offset);
- for (i = 0; i < map_cnt; i++) {
- printf("shared region: %p:\t#%d: "
- "addr=0x%llx, size=0x%llx, file_offset=0x%llx, "
- "prot=(%d,%d)\n",
- current_thread(),
- i,
- (uint64_t) mappings[i].sfm_address,
- (uint64_t) mappings[i].sfm_size,
- (uint64_t) mappings[i].sfm_file_offset,
- mappings[i].sfm_max_prot,
- mappings[i].sfm_init_prot);
- }
-}
-
-extern void shared_region_dump_conflict_info(
- int trace_level,
- vm_map_t map,
- vm_map_offset_t offset,
- vm_map_size_t size); /* forward */
-
-void
-shared_region_dump_conflict_info(
- int trace_level,
- vm_map_t map,
- vm_map_offset_t offset,
- vm_map_size_t size)
-{
- vm_map_entry_t entry;
- vm_object_t object;
- memory_object_t mem_object;
- kern_return_t kr;
- char *filename;
-
- if (trace_level > shared_region_trace_level) {
- return;
- }
-
- object = VM_OBJECT_NULL;
-
- vm_map_lock_read(map);
- if (!vm_map_lookup_entry(map, offset, &entry)) {
- entry = entry->vme_next;
- }
-
- if (entry != vm_map_to_entry(map)) {
- if (entry->is_sub_map) {
- printf("shared region: %p: conflict with submap "
- "at 0x%llx size 0x%llx !?\n",
- current_thread(),
- (uint64_t) offset,
- (uint64_t) size);
- goto done;
- }
-
- object = entry->object.vm_object;
- if (object == VM_OBJECT_NULL) {
- printf("shared region: %p: conflict with NULL object "
- "at 0x%llx size 0x%llx !?\n",
- current_thread(),
- (uint64_t) offset,
- (uint64_t) size);
- object = VM_OBJECT_NULL;
- goto done;
- }
-
- vm_object_lock(object);
- while (object->shadow != VM_OBJECT_NULL) {
- vm_object_t shadow;
-
- shadow = object->shadow;
- vm_object_lock(shadow);
- vm_object_unlock(object);
- object = shadow;
- }
-
- if (object->internal) {
- printf("shared region: %p: conflict with anonymous "
- "at 0x%llx size 0x%llx\n",
- current_thread(),
- (uint64_t) offset,
- (uint64_t) size);
- goto done;
- }
- if (! object->pager_ready) {
- printf("shared region: %p: conflict with uninitialized "
- "at 0x%llx size 0x%llx\n",
- current_thread(),
- (uint64_t) offset,
- (uint64_t) size);
- goto done;
- }
-
- mem_object = object->pager;
-
- /*
- * XXX FBDP: "!internal" doesn't mean it's a vnode pager...
- */
- kr = vnode_pager_get_object_filename(mem_object,
- &filename);
- if (kr != KERN_SUCCESS) {
- filename = NULL;
- }
- printf("shared region: %p: conflict with '%s' "
- "at 0x%llx size 0x%llx\n",
- current_thread(),
- filename ? filename : "<unknown>",
- (uint64_t) offset,
- (uint64_t) size);
- }
-done:
- if (object != VM_OBJECT_NULL) {
- vm_object_unlock(object);
- }
- vm_map_unlock_read(map);
-}
/*
* map_shared_file:
if(shared_file_header->hash_init == FALSE) {
ret = shared_file_header_init(shared_file_header);
if (ret != KERN_SUCCESS) {
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_ERROR,
- ("shared_region: %p: map_shared_file: "
- "shared_file_header_init() failed kr=0x%x\n",
- current_thread(), ret));
mutex_unlock(&shared_file_header->lock);
return KERN_NO_SPACE;
}
file_mapping = file_entry->mappings;
while(file_mapping != NULL) {
if(i>=map_cnt) {
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_CONFLICT,
- ("shared_region: %p: map_shared_file: "
- "already mapped with "
- "more than %d mappings\n",
- current_thread(), map_cnt));
- shared_region_dump_file_entry(
- SHARED_REGION_TRACE_INFO,
- file_entry);
- shared_region_dump_mappings(
- SHARED_REGION_TRACE_INFO,
- mappings, map_cnt, base_offset);
-
mutex_unlock(&shared_file_header->lock);
return KERN_INVALID_ARGUMENT;
}
mappings[i].sfm_size != file_mapping->size ||
mappings[i].sfm_file_offset != file_mapping->file_offset ||
mappings[i].sfm_init_prot != file_mapping->protection) {
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_CONFLICT,
- ("shared_region: %p: "
- "mapping #%d differs\n",
- current_thread(), i));
- shared_region_dump_file_entry(
- SHARED_REGION_TRACE_INFO,
- file_entry);
- shared_region_dump_mappings(
- SHARED_REGION_TRACE_INFO,
- mappings, map_cnt, base_offset);
-
break;
}
file_mapping = file_mapping->next;
i++;
}
if(i!=map_cnt) {
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_CONFLICT,
- ("shared_region: %p: map_shared_file: "
- "already mapped with "
- "%d mappings instead of %d\n",
- current_thread(), i, map_cnt));
- shared_region_dump_file_entry(
- SHARED_REGION_TRACE_INFO,
- file_entry);
- shared_region_dump_mappings(
- SHARED_REGION_TRACE_INFO,
- mappings, map_cnt, base_offset);
-
mutex_unlock(&shared_file_header->lock);
return KERN_INVALID_ARGUMENT;
}
* requested address too ?
*/
ret = KERN_FAILURE;
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_CONFLICT,
- ("shared_region: %p: "
- "map_shared_file: already mapped, "
- "would need to slide 0x%llx\n",
- current_thread(),
- slide));
} else {
/*
* The file is already mapped at the correct
lsf_hash_lookup(
queue_head_t *hash_table,
void *file_object,
- vm_offset_t recognizableOffset,
+ vm_offset_t recognizableOffset,
int size,
boolean_t regular,
boolean_t alternate,
static load_struct_t *
lsf_hash_delete(
- load_struct_t *target_entry, /* optional: NULL if not relevant */
void *file_object,
vm_offset_t base_offset,
shared_region_task_mappings_t sm_info)
shared_file_info_t *shared_file_header;
load_struct_t *entry;
- LSF_DEBUG(("lsf_hash_delete(target=%p,file=%p,base=0x%x,sm_info=%p)\n",
- target_entry, file_object, base_offset, sm_info));
+ LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
+ file_object, base_offset, sm_info));
shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
entry = (load_struct_t *)queue_next(&entry->links)) {
if((!(sm_info->self)) || ((shared_region_mapping_t)
sm_info->self == entry->regions_instance)) {
- if ((target_entry == NULL ||
- entry == target_entry) &&
- (entry->file_object == (int) file_object) &&
- (entry->base_address == base_offset)) {
+ if ((entry->file_object == (int) file_object) &&
+ (entry->base_address == base_offset)) {
queue_remove(bucket, entry,
load_struct_ptr_t, links);
LSF_DEBUG(("lsf_hash_delete: found it\n"));
entry, load_struct_ptr_t, links);
}
-/* Looks up the file type requested. If already loaded and the */
-/* file extents are an exact match, returns Success. If not */
-/* loaded attempts to load the file extents at the given offsets */
-/* if any extent fails to load or if the file was already loaded */
-/* in a different configuration, lsf_load fails. */
-
-static kern_return_t
-lsf_load(
- vm_offset_t mapped_file,
- vm_size_t mapped_file_size,
- vm_offset_t *base_address,
- sf_mapping_t *mappings,
- int map_cnt,
- void *file_object,
- int flags,
- shared_region_task_mappings_t sm_info)
-{
-
- load_struct_t *entry;
- vm_map_copy_t copy_object;
- loaded_mapping_t *file_mapping;
- loaded_mapping_t **tptr;
- int i;
- ipc_port_t local_map;
- vm_offset_t original_alt_load_next;
- vm_offset_t alternate_load_next;
-
- LSF_DEBUG(("lsf_load"
- "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p)"
- "\n",
- mapped_file_size, *base_address, map_cnt, file_object,
- flags, sm_info));
- entry = (load_struct_t *)zalloc(lsf_zone);
- LSF_ALLOC_DEBUG(("lsf_load: entry=%p map_cnt=%d\n", entry, map_cnt));
- LSF_DEBUG(("lsf_load"
- "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p) "
- "entry=%p\n",
- mapped_file_size, *base_address, map_cnt, file_object,
- flags, sm_info, entry));
- if (entry == NULL) {
- printf("lsf_load: unable to allocate memory\n");
- return KERN_NO_SPACE;
- }
-
- shared_file_available_hash_ele--;
- entry->file_object = (int)file_object;
- entry->mapping_cnt = map_cnt;
- entry->mappings = NULL;
- entry->links.prev = (queue_entry_t) 0;
- entry->links.next = (queue_entry_t) 0;
- entry->regions_instance = (shared_region_mapping_t)sm_info->self;
- entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
- entry->file_offset = mappings[0].file_offset;
-
- lsf_hash_insert(entry, sm_info);
- tptr = &(entry->mappings);
-
-
- alternate_load_next = sm_info->alternate_next;
- original_alt_load_next = alternate_load_next;
- if (flags & ALTERNATE_LOAD_SITE) {
- vm_offset_t max_loadfile_offset;
-
- *base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
- sm_info->alternate_next;
- max_loadfile_offset = 0;
- for(i = 0; i<map_cnt; i++) {
- if(((mappings[i].mapping_offset
- & SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
- max_loadfile_offset) {
- max_loadfile_offset =
- (mappings[i].mapping_offset
- & SHARED_TEXT_REGION_MASK)
- + mappings[i].size;
- }
- }
- if((alternate_load_next + round_page(max_loadfile_offset)) >=
- (sm_info->data_size - (sm_info->data_size>>9))) {
- entry->base_address =
- (*base_address) & SHARED_TEXT_REGION_MASK;
- lsf_unload(file_object, entry->base_address, sm_info);
-
- return KERN_NO_SPACE;
- }
- alternate_load_next += round_page(max_loadfile_offset);
-
- } else {
- if (((*base_address) & SHARED_TEXT_REGION_MASK) >
- sm_info->alternate_base) {
- entry->base_address =
- (*base_address) & SHARED_TEXT_REGION_MASK;
- lsf_unload(file_object, entry->base_address, sm_info);
- return KERN_INVALID_ARGUMENT;
- }
- }
-
- entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;
-
- // Sanity check the mappings -- make sure we don't stray across the
- // alternate boundary. If any bit of a library that we're not trying
- // to load in the alternate load space strays across that boundary,
- // return KERN_INVALID_ARGUMENT immediately so that the caller can
- // try to load it in the alternate shared area. We do this to avoid
- // a nasty case: if a library tries to load so that it crosses the
- // boundary, it'll occupy a bit of the alternate load area without
- // the kernel being aware. When loads into the alternate load area
- // at the first free address are tried, the load will fail.
- // Thus, a single library straddling the boundary causes all sliding
- // libraries to fail to load. This check will avoid such a case.
-
- if (!(flags & ALTERNATE_LOAD_SITE)) {
- for (i = 0; i<map_cnt;i++) {
- vm_offset_t region_mask;
- vm_address_t region_start;
- vm_address_t region_end;
-
- if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
- // mapping offsets are relative to start of shared segments.
- region_mask = SHARED_TEXT_REGION_MASK;
- region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
- region_end = (mappings[i].size + region_start);
- if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
- // No library is permitted to load so any bit of it is in the
- // shared alternate space. If they want it loaded, they can put
- // it in the alternate space explicitly.
- printf("Library trying to load across alternate shared region boundary -- denied!\n");
- lsf_unload(file_object, entry->base_address, sm_info);
- return KERN_INVALID_ARGUMENT;
- }
- } else {
- // rw section?
- region_mask = SHARED_DATA_REGION_MASK;
- region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
- region_end = (mappings[i].size + region_start);
- if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
- printf("Library trying to load across alternate shared region boundary-- denied!\n");
- lsf_unload(file_object, entry->base_address, sm_info);
- return KERN_INVALID_ARGUMENT;
- }
- } // write?
- } // for
- } // if not alternate load site.
-
- /* copyin mapped file data */
- for(i = 0; i<map_cnt; i++) {
- vm_offset_t target_address;
- vm_offset_t region_mask;
-
- if(mappings[i].protection & VM_PROT_COW) {
- local_map = (ipc_port_t)sm_info->data_region;
- region_mask = SHARED_DATA_REGION_MASK;
- if((mappings[i].mapping_offset
- & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
- lsf_unload(file_object,
- entry->base_address, sm_info);
- return KERN_INVALID_ARGUMENT;
- }
- } else {
- region_mask = SHARED_TEXT_REGION_MASK;
- local_map = (ipc_port_t)sm_info->text_region;
- if(mappings[i].mapping_offset
- & GLOBAL_SHARED_SEGMENT_MASK) {
- lsf_unload(file_object,
- entry->base_address, sm_info);
- return KERN_INVALID_ARGUMENT;
- }
- }
- if(!(mappings[i].protection & VM_PROT_ZF)
- && ((mapped_file + mappings[i].file_offset +
- mappings[i].size) >
- (mapped_file + mapped_file_size))) {
- lsf_unload(file_object, entry->base_address, sm_info);
- return KERN_INVALID_ARGUMENT;
- }
- target_address = ((mappings[i].mapping_offset) & region_mask)
- + entry->base_address;
- if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
- ->backing.map, &target_address,
- mappings[i].size, VM_FLAGS_FIXED)) {
- lsf_unload(file_object, entry->base_address, sm_info);
- return KERN_FAILURE;
- }
- target_address = ((mappings[i].mapping_offset) & region_mask)
- + entry->base_address;
- if(!(mappings[i].protection & VM_PROT_ZF)) {
- if(vm_map_copyin(current_map(),
- (vm_map_address_t)(mapped_file + mappings[i].file_offset),
-		   vm_map_round_page(mappings[i].size), FALSE, &copy_object)) {
- vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
- ->backing.map, target_address, mappings[i].size);
- lsf_unload(file_object, entry->base_address, sm_info);
- return KERN_FAILURE;
- }
- if(vm_map_copy_overwrite(((vm_named_entry_t)
- local_map->ip_kobject)->backing.map,
- (vm_map_address_t)target_address,
- copy_object, FALSE)) {
- vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
- ->backing.map, target_address, mappings[i].size);
- lsf_unload(file_object, entry->base_address, sm_info);
- return KERN_FAILURE;
- }
- }
-
- file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
- if (file_mapping == NULL) {
- lsf_unload(file_object, entry->base_address, sm_info);
- printf("lsf_load: unable to allocate memory\n");
- return KERN_NO_SPACE;
- }
- shared_file_available_hash_ele--;
- file_mapping->mapping_offset = (mappings[i].mapping_offset)
- & region_mask;
- file_mapping->size = mappings[i].size;
- file_mapping->file_offset = mappings[i].file_offset;
- file_mapping->protection = mappings[i].protection;
- file_mapping->next = NULL;
- LSF_DEBUG(("lsf_load: file_mapping %p "
- "for offset=0x%x size=0x%x\n",
- file_mapping, file_mapping->mapping_offset,
- file_mapping->size));
-
- vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
- ->backing.map, target_address,
- round_page(target_address + mappings[i].size),
- (mappings[i].protection &
- (VM_PROT_READ | VM_PROT_EXECUTE)),
- TRUE);
- vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
- ->backing.map, target_address,
- round_page(target_address + mappings[i].size),
- (mappings[i].protection &
- (VM_PROT_READ | VM_PROT_EXECUTE)),
- FALSE);
-
- *tptr = file_mapping;
- tptr = &(file_mapping->next);
- }
- shared_region_mapping_set_alt_next(
- (shared_region_mapping_t) sm_info->self,
- alternate_load_next);
- LSF_DEBUG(("lsf_load: done\n"));
- return KERN_SUCCESS;
-}
/*
wiggle_room = base_offset;
for (i = (signed) map_cnt - 1; i >= 0; i--) {
- if (mappings[i].sfm_size == 0) {
- /* nothing to map here... */
- continue;
- }
if (mappings[i].sfm_init_prot & VM_PROT_COW) {
/* copy-on-write mappings are in the data submap */
map = data_map;
kern_return_t kr;
int i;
mach_vm_offset_t original_base_offset;
- mach_vm_size_t total_size;
/* get the VM object from the file's memory object handle */
file_object = memory_object_control_to_vm_object(file_control);
map_cnt, file_object,
sm_info, entry));
if (entry == NULL) {
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_ERROR,
- ("shared_region: %p: "
- "lsf_map: unable to allocate entry\n",
- current_thread()));
+ printf("lsf_map: unable to allocate memory\n");
return KERN_NO_SPACE;
}
shared_file_available_hash_ele--;
tptr = &(entry->mappings);
entry->base_address = base_offset;
- total_size = 0;
+
/* establish each requested mapping */
for (i = 0; i < map_cnt; i++) {
(((mappings[i].sfm_address + base_offset +
mappings[i].sfm_size - 1)
& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_ERROR,
- ("shared_region: %p: lsf_map: "
- "RW mapping #%d not in segment",
- current_thread(), i));
- shared_region_dump_mappings(
- SHARED_REGION_TRACE_ERROR,
- mappings, map_cnt, base_offset);
-
- lsf_deallocate(entry,
- file_object,
- entry->base_address,
- sm_info,
- TRUE);
+ lsf_unload(file_object,
+ entry->base_address, sm_info);
return KERN_INVALID_ARGUMENT;
}
} else {
((mappings[i].sfm_address + base_offset +
mappings[i].sfm_size - 1)
& GLOBAL_SHARED_SEGMENT_MASK)) {
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_ERROR,
- ("shared_region: %p: lsf_map: "
- "RO mapping #%d not in segment",
- current_thread(), i));
- shared_region_dump_mappings(
- SHARED_REGION_TRACE_ERROR,
- mappings, map_cnt, base_offset);
-
- lsf_deallocate(entry,
- file_object,
- entry->base_address,
- sm_info,
- TRUE);
+ lsf_unload(file_object,
+ entry->base_address, sm_info);
return KERN_INVALID_ARGUMENT;
}
}
if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
(file_size))) {
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_ERROR,
- ("shared_region: %p: lsf_map: "
- "ZF mapping #%d beyond EOF",
- current_thread(), i));
- shared_region_dump_mappings(SHARED_REGION_TRACE_ERROR,
- mappings, map_cnt,
- base_offset);
-
-
- lsf_deallocate(entry,
- file_object,
- entry->base_address,
- sm_info,
- TRUE);
+ lsf_unload(file_object, entry->base_address, sm_info);
return KERN_INVALID_ARGUMENT;
}
target_address = entry->base_address +
}
region_entry = (vm_named_entry_t) region_handle->ip_kobject;
- total_size += mappings[i].sfm_size;
- if (mappings[i].sfm_size == 0) {
- /* nothing to map... */
- kr = KERN_SUCCESS;
- } else {
- kr = mach_vm_map(
- region_entry->backing.map,
+ if (mach_vm_map(region_entry->backing.map,
&target_address,
vm_map_round_page(mappings[i].sfm_size),
0,
(VM_PROT_READ|VM_PROT_EXECUTE)),
(mappings[i].sfm_max_prot &
(VM_PROT_READ|VM_PROT_EXECUTE)),
- VM_INHERIT_DEFAULT);
- }
- if (kr != KERN_SUCCESS) {
- vm_offset_t old_base_address;
-
- old_base_address = entry->base_address;
- lsf_deallocate(entry,
- file_object,
- entry->base_address,
- sm_info,
- TRUE);
- entry = NULL;
+ VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
+ lsf_unload(file_object, entry->base_address, sm_info);
if (slide_p != NULL) {
/*
* shared region, so let's try and slide it...
*/
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_CONFLICT,
- ("shared_region: %p: lsf_map: "
- "mapping #%d failed to map, "
- "kr=0x%x, sliding...\n",
- current_thread(), i, kr));
- shared_region_dump_mappings(
- SHARED_REGION_TRACE_INFO,
- mappings, map_cnt, base_offset);
- shared_region_dump_conflict_info(
- SHARED_REGION_TRACE_CONFLICT,
- region_entry->backing.map,
- (old_base_address +
- ((mappings[i].sfm_address)
- & region_mask)),
- vm_map_round_page(mappings[i].sfm_size));
-
/* lookup an appropriate spot */
kr = lsf_slide(map_cnt, mappings,
sm_info, &base_offset);
if (kr == KERN_SUCCESS) {
/* try and map it there ... */
+ entry->base_address = base_offset;
goto restart_after_slide;
}
/* couldn't slide ... */
}
-
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_CONFLICT,
- ("shared_region: %p: lsf_map: "
- "mapping #%d failed to map, "
- "kr=0x%x, no sliding\n",
- current_thread(), i, kr));
- shared_region_dump_mappings(
- SHARED_REGION_TRACE_INFO,
- mappings, map_cnt, base_offset);
- shared_region_dump_conflict_info(
- SHARED_REGION_TRACE_CONFLICT,
- region_entry->backing.map,
- (old_base_address +
- ((mappings[i].sfm_address)
- & region_mask)),
- vm_map_round_page(mappings[i].sfm_size));
+
return KERN_FAILURE;
}
/* record this mapping */
file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
if (file_mapping == NULL) {
- lsf_deallocate(entry,
- file_object,
- entry->base_address,
- sm_info,
- TRUE);
- SHARED_REGION_TRACE(
- SHARED_REGION_TRACE_ERROR,
- ("shared_region: %p: "
- "lsf_map: unable to allocate mapping\n",
- current_thread()));
+ lsf_unload(file_object, entry->base_address, sm_info);
+ printf("lsf_map: unable to allocate memory\n");
return KERN_NO_SPACE;
}
shared_file_available_hash_ele--;
*slide_p = base_offset - original_base_offset;
}
- if ((sm_info->flags & SHARED_REGION_STANDALONE) ||
- (total_size == 0)) {
+ if (sm_info->flags & SHARED_REGION_STANDALONE) {
/*
- * Two cases:
- * 1. we have a standalone and private shared region, so we
+ * We have a standalone and private shared region, so we
* don't really need to keep the information about each file
* and each mapping. Just deallocate it all.
- * 2. the total size of the mappings is 0, so nothing at all
- * was mapped. Let's not waste kernel resources to describe
- * nothing.
- *
* XXX we still have the hash table, though...
*/
- lsf_deallocate(entry, file_object, entry->base_address, sm_info,
+ lsf_deallocate(file_object, entry->base_address, sm_info,
FALSE);
}
vm_offset_t base_offset,
shared_region_task_mappings_t sm_info)
{
- lsf_deallocate(NULL, file_object, base_offset, sm_info, TRUE);
+ lsf_deallocate(file_object, base_offset, sm_info, TRUE);
}
/*
*/
static void
lsf_deallocate(
- load_struct_t *target_entry,
void *file_object,
vm_offset_t base_offset,
shared_region_task_mappings_t sm_info,
load_struct_t *entry;
loaded_mapping_t *map_ele;
loaded_mapping_t *back_ptr;
- kern_return_t kr;
- LSF_DEBUG(("lsf_deallocate(target=%p,file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
- target_entry, file_object, base_offset, sm_info, unload));
- entry = lsf_hash_delete(target_entry,
- file_object,
- base_offset,
- sm_info);
- if (entry) {
+ LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
+ file_object, base_offset, sm_info, unload));
+ entry = lsf_hash_delete(file_object, base_offset, sm_info);
+ if(entry) {
map_ele = entry->mappings;
while(map_ele != NULL) {
if (unload) {
region_entry = (vm_named_entry_t)
region_handle->ip_kobject;
- kr = vm_deallocate(region_entry->backing.map,
- (entry->base_address +
- map_ele->mapping_offset),
- map_ele->size);
- assert(kr == KERN_SUCCESS);
+ vm_deallocate(region_entry->backing.map,
+ (entry->base_address +
+ map_ele->mapping_offset),
+ map_ele->size);
}
back_ptr = map_ele;
map_ele = map_ele->next;
back_ptr, back_ptr->mapping_offset,
back_ptr->size));
zfree(lsf_zone, back_ptr);
- shared_file_available_hash_ele++;
+ shared_file_available_hash_ele++;
}
LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
zfree(lsf_zone, entry);
shared_file_available_hash_ele++;
}
- LSF_DEBUG(("lsf_deallocate: done\n"));
+ LSF_DEBUG(("lsf_unload: done\n"));
}
/* integer is from 1 to 100 and represents percent full */