+/*
+ * shared_region_make_private_np:
+ *
+ * This system call is for "dyld" only.
+ *
+ * It creates a private copy of the current process's "shared region" for
+ * split libraries. "dyld" uses this when the shared region is full or
+ * it needs to load a split library that conflicts with an already loaded one
+ * that this process doesn't need. "dyld" specifies a set of address ranges
+ * that it wants to keep in the now-private "shared region". These cover
+ * the set of split libraries that the process needs so far. The kernel needs
+ * to deallocate the rest of the shared region, so that it's available for
+ * more libraries for this process.
+ */
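+/*
+ * Illustration only, not part of this file's interface: a rough sketch of
+ * how "dyld" might issue this call from user space, assuming a user-level
+ * stub that mirrors the "rangeCount"/"ranges" arguments handled below and
+ * the usual srr_address/srr_size layout of struct shared_region_range_np.
+ * The addresses and sizes are made up.
+ *
+ *	struct shared_region_range_np keep[2];
+ *	keep[0].srr_address = 0x90000000ULL;	(TEXT of the libraries to keep)
+ *	keep[0].srr_size    = 0x00200000ULL;
+ *	keep[1].srr_address = 0xa0000000ULL;	(their DATA areas)
+ *	keep[1].srr_size    = 0x00010000ULL;
+ *	error = shared_region_make_private_np(2, keep);
+ */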
+int
+shared_region_make_private_np(
+ struct proc *p,
+ struct shared_region_make_private_np_args *uap,
+ __unused int *retvalp)
+{
+ int error;
+ kern_return_t kr;
+ boolean_t using_shared_regions;
+ user_addr_t user_ranges;
+ unsigned int range_count;
+ struct shared_region_range_np *ranges;
+ shared_region_mapping_t shared_region;
+ struct shared_region_task_mappings task_mapping_info;
+ shared_region_mapping_t next;
+
+ ranges = NULL;
+
+ range_count = uap->rangeCount;
+ user_ranges = uap->ranges;
+
+ /* allocate kernel space for the "ranges" */
+ if (range_count != 0) {
+ kr = kmem_alloc(kernel_map,
+ (vm_offset_t *) &ranges,
+ (vm_size_t) (range_count * sizeof (ranges[0])));
+ if (kr != KERN_SUCCESS) {
+ error = ENOMEM;
+ goto done;
+ }
+
+ /* copy "ranges" from user-space */
+ error = copyin(user_ranges,
+ ranges,
+ (range_count * sizeof (ranges[0])));
+ if (error) {
+ goto done;
+ }
+ }
+
+ if (p->p_flag & P_NOSHLIB) {
+ /* no split library has been mapped for this process so far */
+ using_shared_regions = FALSE;
+ } else {
+ /* this process has already mapped some split libraries */
+ using_shared_regions = TRUE;
+ }
+
+ /*
+ * Get a private copy of the current shared region.
+ * Do not chain it to the system-wide shared region, as we'll want
+ * to map other split libraries in place of the old ones. We want
+ * to completely detach from the system-wide shared region and go our
+ * own way after this point, not sharing anything with other processes.
+ */
+ error = clone_system_shared_regions(using_shared_regions,
+ FALSE, /* chain_regions */
+ ENV_DEFAULT_ROOT);
+ if (error) {
+ goto done;
+ }
+
+ /* get info on the newly allocated shared region */
+ vm_get_shared_region(current_task(), &shared_region);
+ task_mapping_info.self = (vm_offset_t) shared_region;
+ shared_region_mapping_info(shared_region,
+ &(task_mapping_info.text_region),
+ &(task_mapping_info.text_size),
+ &(task_mapping_info.data_region),
+ &(task_mapping_info.data_size),
+ &(task_mapping_info.region_mappings),
+ &(task_mapping_info.client_base),
+ &(task_mapping_info.alternate_base),
+ &(task_mapping_info.alternate_next),
+ &(task_mapping_info.fs_base),
+ &(task_mapping_info.system),
+ &(task_mapping_info.flags),
+ &next);
+
+ /*
+ * We now have our private copy of the shared region, as it was before
+ * the call to clone_system_shared_regions(). We now need to clean it
+ * up and keep only the memory areas described by the "ranges" array.
+ */
+ kr = shared_region_cleanup(range_count, ranges, &task_mapping_info);
+ switch (kr) {
+ case KERN_SUCCESS:
+ error = 0;
+ break;
+ default:
+ error = EINVAL;
+ goto done;
+ }
+
+done:
+ if (ranges != NULL) {
+ kmem_free(kernel_map,
+ (vm_offset_t) ranges,
+ range_count * sizeof (ranges[0]));
+ ranges = NULL;
+ }
+
+ return error;
+}
+
+
+/*
+ * shared_region_map_file_np:
+ *
+ * This system call is for "dyld" only.
+ *
+ * "dyld" wants to map parts of a split library in the shared region.
+ * We get a file descriptor on the split library to be mapped and a set
+ * of mapping instructions, describing which parts of the file to map into
+ * which areas of the shared segment and with what protection.
+ * The "shared region" is split into 2 areas:
+ * 0x90000000 - 0xa0000000 : read-only area (for TEXT and LINKEDIT sections),
+ * 0xa0000000 - 0xb0000000 : writable area (for DATA sections).
+ *
+ */
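+/*
+ * Illustration only: the shape of one mapping instruction as "dyld" might
+ * pass it in.  The sfm_address, sfm_size and sfm_max_prot fields are the
+ * ones consumed below; the sfm_file_offset and sfm_init_prot names are
+ * assumed to be their usual companions in struct shared_file_mapping_np.
+ * The numbers are made up.
+ *
+ *	struct shared_file_mapping_np m;
+ *	m.sfm_address     = 0x90020000ULL;	(where to map, in the TEXT area)
+ *	m.sfm_size        = 0x00004000ULL;	(how much to map)
+ *	m.sfm_file_offset = 0x00001000ULL;	(starting at this file offset)
+ *	m.sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE;
+ *	m.sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE;
+ */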
+int
+shared_region_map_file_np(
+ struct proc *p,
+ struct shared_region_map_file_np_args *uap,
+ __unused int *retvalp)
+{
+ int error;
+ kern_return_t kr;
+ int fd;
+ unsigned int mapping_count;
+ user_addr_t user_mappings; /* 64-bit */
+ user_addr_t user_slide_p; /* 64-bit */
+ struct shared_file_mapping_np *mappings;
+ struct fileproc *fp;
+ mach_vm_offset_t slide;
+ struct vnode *vp;
+ struct vfs_context context;
+ memory_object_control_t file_control;
+ memory_object_size_t file_size;
+ shared_region_mapping_t shared_region;
+ struct shared_region_task_mappings task_mapping_info;
+ shared_region_mapping_t next;
+ shared_region_mapping_t default_shared_region;
+ boolean_t using_default_region;
+ unsigned int j;
+ vm_prot_t max_prot;
+ mach_vm_offset_t base_offset, end_offset;
+ mach_vm_offset_t original_base_offset;
+ boolean_t mappings_in_segment;
+#define SFM_MAX_STACK 6
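+	/*
+	 * Mapping lists of up to SFM_MAX_STACK entries are kept in this
+	 * on-stack array; larger lists get a kmem_alloc'ed buffer below.
+	 */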
+ struct shared_file_mapping_np stack_mappings[SFM_MAX_STACK];
+
+ mappings = NULL;
+ mapping_count = 0;
+ fp = NULL;
+ vp = NULL;
+
+ /* get file descriptor for split library from arguments */
+ fd = uap->fd;
+
+ /* get file structure from file descriptor */
+ error = fp_lookup(p, fd, &fp, 0);
+ if (error) {
+ goto done;
+ }
+
+ /* make sure we're attempting to map a vnode */
+ if (fp->f_fglob->fg_type != DTYPE_VNODE) {
+ error = EINVAL;
+ goto done;
+ }
+
+ /* we need at least read permission on the file */
+ if (! (fp->f_fglob->fg_flag & FREAD)) {
+ error = EPERM;
+ goto done;
+ }
+
+ /* get vnode from file structure */
+ error = vnode_getwithref((vnode_t)fp->f_fglob->fg_data);
+ if (error) {
+ goto done;
+ }
+ vp = (struct vnode *) fp->f_fglob->fg_data;
+
+ /* make sure the vnode is a regular file */
+ if (vp->v_type != VREG) {
+ error = EINVAL;
+ goto done;
+ }
+
+ /* get vnode size */
+ {
+ off_t fs;
+
+ context.vc_proc = p;
+ context.vc_ucred = kauth_cred_get();
+ if ((error = vnode_size(vp, &fs, &context)) != 0)
+ goto done;
+ file_size = fs;
+ }
+
+ /*
+ * Get the list of mappings the caller wants us to establish.
+ */
+ mapping_count = uap->mappingCount; /* the number of mappings */
+ if (mapping_count == 0) {
+		error = 0;	/* no mappings: we're done! */
+ goto done;
+ } else if (mapping_count <= SFM_MAX_STACK) {
+ mappings = &stack_mappings[0];
+ } else {
+ kr = kmem_alloc(kernel_map,
+ (vm_offset_t *) &mappings,
+ (vm_size_t) (mapping_count *
+ sizeof (mappings[0])));
+ if (kr != KERN_SUCCESS) {
+ error = ENOMEM;
+ goto done;
+ }
+ }
+
+ user_mappings = uap->mappings; /* the mappings, in user space */
+ error = copyin(user_mappings,
+ mappings,
+ (mapping_count * sizeof (mappings[0])));
+ if (error != 0) {
+ goto done;
+ }
+
+ /*
+ * If the caller provides a "slide" pointer, it means they're OK
+ * with us moving the mappings around to make them fit.
+ */
+ user_slide_p = uap->slide_p;
+
+ /*
+ * Make each mapping address relative to the beginning of the
+ * shared region. Check that all mappings are in the shared region.
+ * Compute the maximum set of protections required to tell the
+ * buffer cache how we mapped the file (see call to ubc_map() below).
+ */
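+	/*
+	 * Worked example, assuming SHARED_TEXT_REGION_MASK covers the 256MB
+	 * region (0x0fffffff): a TEXT mapping at 0x90005000 (size 0x2000) and
+	 * a DATA mapping at 0xa0002000 (size 0x1000) are both "in segment";
+	 * subtracting GLOBAL_SHARED_TEXT_SEGMENT and masking leaves offsets
+	 * 0x5000 and 0x2000, so base_offset becomes 0x2000 and end_offset
+	 * 0x7000 after the page alignment below.
+	 */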
+ max_prot = VM_PROT_NONE;
+ base_offset = -1LL;
+ end_offset = 0;
+ mappings_in_segment = TRUE;
+ for (j = 0; j < mapping_count; j++) {
+ mach_vm_offset_t segment;
+ segment = (mappings[j].sfm_address &
+ GLOBAL_SHARED_SEGMENT_MASK);
+ if (segment != GLOBAL_SHARED_TEXT_SEGMENT &&
+ segment != GLOBAL_SHARED_DATA_SEGMENT) {
+ /* this mapping is not in the shared region... */
+ if (user_slide_p == NULL) {
+ /* ... and we can't slide it in: fail */
+ error = EINVAL;
+ goto done;
+ }
+ if (j == 0) {
+ /* expect all mappings to be outside */
+ mappings_in_segment = FALSE;
+ } else if (mappings_in_segment != FALSE) {
+ /* other mappings were not outside: fail */
+ error = EINVAL;
+ goto done;
+ }
+			/* we'll try and slide that mapping into the segments */
+ } else {
+ if (j == 0) {
+ /* expect all mappings to be inside */
+ mappings_in_segment = TRUE;
+ } else if (mappings_in_segment != TRUE) {
+ /* other mappings were not inside: fail */
+ error = EINVAL;
+ goto done;
+ }
+ /* get a relative offset inside the shared segments */
+ mappings[j].sfm_address -= GLOBAL_SHARED_TEXT_SEGMENT;
+ }
+ if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK)
+ < base_offset) {
+ base_offset = (mappings[j].sfm_address &
+ SHARED_TEXT_REGION_MASK);
+ }
+ if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK) +
+ mappings[j].sfm_size > end_offset) {
+ end_offset =
+ (mappings[j].sfm_address &
+ SHARED_TEXT_REGION_MASK) +
+ mappings[j].sfm_size;
+ }
+ max_prot |= mappings[j].sfm_max_prot;
+ }
+ /* Make all mappings relative to the base_offset */
+ base_offset = vm_map_trunc_page(base_offset);
+ end_offset = vm_map_round_page(end_offset);
+ for (j = 0; j < mapping_count; j++) {
+ mappings[j].sfm_address -= base_offset;
+ }
+ original_base_offset = base_offset;
+ if (mappings_in_segment == FALSE) {
+ /*
+ * We're trying to map a library that was not pre-bound to
+ * be in the shared segments. We want to try and slide it
+ * back into the shared segments but as far back as possible,
+ * so that it doesn't clash with pre-bound libraries. Set
+ * the base_offset to the end of the region, so that it can't
+ * possibly fit there and will have to be slid.
+ */
+ base_offset = SHARED_TEXT_REGION_SIZE - end_offset;
+ }
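+	/*
+	 * Note: we can only get here with a non-NULL "user_slide_p" (the
+	 * loop above fails otherwise), so pushing base_offset to the tail
+	 * of the region simply guarantees that these mappings will be slid.
+	 */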
+
+ /* get the file's memory object handle */
+ UBCINFOCHECK("shared_region_map_file_np", vp);
+ file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
+ if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
+ error = EINVAL;
+ goto done;
+ }
+
+ /*
+ * Get info about the current process's shared region.
+ * This might change if we decide we need to clone the shared region.
+ */
+ vm_get_shared_region(current_task(), &shared_region);
+ task_mapping_info.self = (vm_offset_t) shared_region;
+ shared_region_mapping_info(shared_region,
+ &(task_mapping_info.text_region),
+ &(task_mapping_info.text_size),
+ &(task_mapping_info.data_region),
+ &(task_mapping_info.data_size),
+ &(task_mapping_info.region_mappings),
+ &(task_mapping_info.client_base),
+ &(task_mapping_info.alternate_base),
+ &(task_mapping_info.alternate_next),
+ &(task_mapping_info.fs_base),
+ &(task_mapping_info.system),
+ &(task_mapping_info.flags),
+ &next);
+
+ /*
+ * Are we using the system's current shared region
+	 * for this environment?
+ */
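+	/*
+	 * lookup_default_shared_region() is expected to hand back a
+	 * referenced mapping; the shared_region_mapping_dealloc() below
+	 * only drops that reference once the comparison has been made.
+	 */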
+ default_shared_region =
+ lookup_default_shared_region(ENV_DEFAULT_ROOT,
+ task_mapping_info.system);
+ if (shared_region == default_shared_region) {
+ using_default_region = TRUE;
+ } else {
+ using_default_region = FALSE;
+ }
+ shared_region_mapping_dealloc(default_shared_region);
+
+ if (vp->v_mount != rootvnode->v_mount &&
+ using_default_region) {
+ /*
+ * The split library is not on the root filesystem. We don't
+		 * want to pollute the system-wide ("default") shared region
+ * with it.
+ * Reject the mapping. The caller (dyld) should "privatize"
+ * (via shared_region_make_private()) the shared region and
+ * try to establish the mapping privately for this process.
+ */
+ error = EXDEV;
+ goto done;
+ }