+shared_region_map_file_np(
+ struct proc *p,
+ struct shared_region_map_file_np_args *uap,
+ __unused int *retvalp)
+{
+ int error;
+ kern_return_t kr;
+ int fd;
+ unsigned int mapping_count;
+ user_addr_t user_mappings; /* 64-bit */
+ user_addr_t user_slide_p; /* 64-bit */
+ struct shared_file_mapping_np *mappings;
+ struct fileproc *fp;
+ mach_vm_offset_t slide;
+ struct vnode *vp;
+ struct vfs_context context;
+ memory_object_control_t file_control;
+ memory_object_size_t file_size;
+ shared_region_mapping_t shared_region;
+ struct shared_region_task_mappings task_mapping_info;
+ shared_region_mapping_t next;
+ shared_region_mapping_t default_shared_region;
+ boolean_t using_default_region;
+ unsigned int j;
+ vm_prot_t max_prot;
+ mach_vm_offset_t base_offset, end_offset;
+ mach_vm_offset_t original_base_offset;
+ boolean_t mappings_in_segment;
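+ /*
+ * Small mapping lists are copied into this on-stack buffer;
+ * larger lists are allocated with kmem_alloc() below.
+ */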
+#define SFM_MAX_STACK 6
+ struct shared_file_mapping_np stack_mappings[SFM_MAX_STACK];
+
+ mappings = NULL;
+ mapping_count = 0;
+ fp = NULL;
+ vp = NULL;
+
+ /* get file descriptor for split library from arguments */
+ fd = uap->fd;
+
+ /* get file structure from file descriptor */
+ error = fp_lookup(p, fd, &fp, 0);
+ if (error) {
+ goto done;
+ }
+
+ /* make sure we're attempting to map a vnode */
+ if (fp->f_fglob->fg_type != DTYPE_VNODE) {
+ error = EINVAL;
+ goto done;
+ }
+
+ /* we need at least read permission on the file */
+ if (!(fp->f_fglob->fg_flag & FREAD)) {
+ error = EPERM;
+ goto done;
+ }
+
+ /* get vnode from file structure */
+ error = vnode_getwithref((vnode_t)fp->f_fglob->fg_data);
+ if (error) {
+ goto done;
+ }
+ vp = (struct vnode *) fp->f_fglob->fg_data;
+
+ /* make sure the vnode is a regular file */
+ if (vp->v_type != VREG) {
+ error = EINVAL;
+ goto done;
+ }
+
+ /* get vnode size */
+ {
+ off_t fs;
+
+ context.vc_proc = p;
+ context.vc_ucred = kauth_cred_get();
+ if ((error = vnode_size(vp, &fs, &context)) != 0)
+ goto done;
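+ /* keep the size around: it's passed to map_shared_file() below */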
+ file_size = fs;
+ }
+
+ /*
+ * Get the list of mappings the caller wants us to establish.
+ */
+ mapping_count = uap->mappingCount; /* the number of mappings */
+ if (mapping_count == 0) {
+ error = 0; /* no mappings: we're done! */
+ goto done;
+ } else if (mapping_count <= SFM_MAX_STACK) {
+ mappings = &stack_mappings[0];
+ } else {
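+ /*
+ * mapping_count comes from user space; the allocation size
+ * below trusts it, so an oversized count is expected to
+ * simply make kmem_alloc() fail and return ENOMEM.
+ */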
+ kr = kmem_alloc(kernel_map,
+ (vm_offset_t *) &mappings,
+ (vm_size_t) (mapping_count *
+ sizeof (mappings[0])));
+ if (kr != KERN_SUCCESS) {
+ error = ENOMEM;
+ goto done;
+ }
+ }
+
+ user_mappings = uap->mappings; /* the mappings, in user space */
+ error = copyin(user_mappings,
+ mappings,
+ (mapping_count * sizeof (mappings[0])));
+ if (error != 0) {
+ goto done;
+ }
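+ /* from here on, we work on the kernel copy of the mapping list */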
+
+ /*
+ * If the caller provides a "slide" pointer, it means they're OK
+ * with us moving the mappings around to make them fit.
+ */
+ user_slide_p = uap->slide_p;
+
+ /*
+ * Make each mapping address relative to the beginning of the
+ * shared region. Check that all mappings are in the shared region.
+ * Compute the maximum set of protections required to tell the
+ * buffer cache how we mapped the file (see call to ubc_map() below).
+ */
+ max_prot = VM_PROT_NONE;
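+ /* -1 wraps around to the maximum offset, so any real mapping is lower */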
+ base_offset = -1LL;
+ end_offset = 0;
+ mappings_in_segment = TRUE;
+ for (j = 0; j < mapping_count; j++) {
+ mach_vm_offset_t segment;
+ segment = (mappings[j].sfm_address &
+ GLOBAL_SHARED_SEGMENT_MASK);
+ if (segment != GLOBAL_SHARED_TEXT_SEGMENT &&
+ segment != GLOBAL_SHARED_DATA_SEGMENT) {
+ /* this mapping is not in the shared region... */
+ if (user_slide_p == NULL) {
+ /* ... and we can't slide it in: fail */
+ error = EINVAL;
+ goto done;
+ }
+ if (j == 0) {
+ /* expect all mappings to be outside */
+ mappings_in_segment = FALSE;
+ } else if (mappings_in_segment != FALSE) {
+ /* other mappings were not outside: fail */
+ error = EINVAL;
+ goto done;
+ }
+ /* we'll try to slide this mapping into the shared segments */
+ } else {
+ if (j == 0) {
+ /* expect all mappings to be inside */
+ mappings_in_segment = TRUE;
+ } else if (mappings_in_segment != TRUE) {
+ /* other mappings were not inside: fail */
+ error = EINVAL;
+ goto done;
+ }
+ /* get a relative offset inside the shared segments */
+ mappings[j].sfm_address -= GLOBAL_SHARED_TEXT_SEGMENT;
+ }
+ if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK)
+ < base_offset) {
+ base_offset = (mappings[j].sfm_address &
+ SHARED_TEXT_REGION_MASK);
+ }
+ if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK) +
+ mappings[j].sfm_size > end_offset) {
+ end_offset =
+ (mappings[j].sfm_address &
+ SHARED_TEXT_REGION_MASK) +
+ mappings[j].sfm_size;
+ }
+ max_prot |= mappings[j].sfm_max_prot;
+ }
+ /* Make all mappings relative to the base_offset */
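+ /* page-align the range: truncate the base down, round the end up */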
+ base_offset = vm_map_trunc_page(base_offset);
+ end_offset = vm_map_round_page(end_offset);
+ for (j = 0; j < mapping_count; j++) {
+ mappings[j].sfm_address -= base_offset;
+ }
+ original_base_offset = base_offset;
+ if (mappings_in_segment == FALSE) {
+ /*
+ * We're trying to map a library that was not pre-bound to
+ * be in the shared segments. We want to try to slide it
+ * back into the shared segments but as far back as possible,
+ * so that it doesn't clash with pre-bound libraries. Set
+ * the base_offset to the end of the region, so that it can't
+ * possibly fit there and will have to be slid.
+ */
+ base_offset = SHARED_TEXT_REGION_SIZE - end_offset;
+ }
+
+ /* get the file's memory object handle */
+ UBCINFOCHECK("shared_region_map_file_np", vp);
+ file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
+ if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
+ error = EINVAL;
+ goto done;
+ }
+
+ /*
+ * Get info about the current process's shared region.
+ * This might change if we decide we need to clone the shared region.
+ */
+ vm_get_shared_region(current_task(), &shared_region);
+ task_mapping_info.self = (vm_offset_t) shared_region;
+ shared_region_mapping_info(shared_region,
+ &(task_mapping_info.text_region),
+ &(task_mapping_info.text_size),
+ &(task_mapping_info.data_region),
+ &(task_mapping_info.data_size),
+ &(task_mapping_info.region_mappings),
+ &(task_mapping_info.client_base),
+ &(task_mapping_info.alternate_base),
+ &(task_mapping_info.alternate_next),
+ &(task_mapping_info.fs_base),
+ &(task_mapping_info.system),
+ &(task_mapping_info.flags),
+ &next);
+
+ /*
+ * Are we using the system's current shared region
+ * for this environment?
+ */
+ default_shared_region =
+ lookup_default_shared_region(ENV_DEFAULT_ROOT,
+ task_mapping_info.system);
+ if (shared_region == default_shared_region) {
+ using_default_region = TRUE;
+ } else {
+ using_default_region = FALSE;
+ }
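+ /* lookup_default_shared_region() returned a reference; drop it here */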
+ shared_region_mapping_dealloc(default_shared_region);
+
+ if (vp->v_mount != rootvnode->v_mount &&
+ using_default_region) {
+ /*
+ * The split library is not on the root filesystem. We don't
+ * want to pollute the system-wide ("default") shared region
+ * with it.
+ * Reject the mapping. The caller (dyld) should "privatize"
+ * (via shared_region_make_private()) the shared region and
+ * try to establish the mapping privately for this process.
+ */
+ error = EXDEV;
+ goto done;
+ }
+
+ /*
+ * Map the split library.
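+ * Passing a NULL slide pointer tells map_shared_file() that
+ * the mappings may not be moved from base_offset.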
+ */
+ kr = map_shared_file(mapping_count,
+ mappings,
+ file_control,
+ file_size,
+ &task_mapping_info,
+ base_offset,
+ (user_slide_p) ? &slide : NULL);
+
+ switch (kr) {
+ case KERN_SUCCESS:
+ /*
+ * The mapping was successful. Let the buffer cache know
+ * that we've mapped that file with these protections. This
+ * prevents the vnode from getting recycled while it's mapped.
+ */
+ (void) ubc_map(vp, max_prot);
+ error = 0;
+ break;
+ case KERN_INVALID_ADDRESS:
+ error = EFAULT;
+ goto done;
+ case KERN_PROTECTION_FAILURE:
+ error = EPERM;
+ goto done;
+ case KERN_NO_SPACE:
+ error = ENOMEM;
+ goto done;
+ case KERN_FAILURE:
+ case KERN_INVALID_ARGUMENT:
+ default:
+ error = EINVAL;
+ goto done;
+ }
+
+ if (p->p_flag & P_NOSHLIB) {
+ /* signal that this process is now using split libraries */
+ p->p_flag &= ~P_NOSHLIB;
+ }
+
+ if (user_slide_p) {
+ /*
+ * The caller provided a pointer to a "slide" offset. Let
+ * them know by how much we slid the mappings.
+ */
+ if (mappings_in_segment == FALSE) {
+ /*
+ * We faked the base_offset earlier, so undo that
+ * and take into account the real base_offset.
+ */
+ slide += SHARED_TEXT_REGION_SIZE - end_offset;
+ slide -= original_base_offset;
+ /*
+ * The mappings were slid into the shared segments
+ * and "slide" is relative to the beginning of the
+ * shared segments. Adjust it to be absolute.
+ */
+ slide += GLOBAL_SHARED_TEXT_SEGMENT;
+ }
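+ /*
+ * If this copyout fails, the mappings stay established but
+ * the caller gets the error.
+ */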
+ error = copyout(&slide,
+ user_slide_p,
+ sizeof (int64_t));
+ }
+
+done:
+ if (vp != NULL) {
+ /*
+ * release the vnode...
+ * ubc_map() still holds it for us in the non-error case
+ */
+ (void) vnode_put(vp);
+ vp = NULL;
+ }
+ if (fp != NULL) {
+ /* release the file descriptor */
+ fp_drop(p, fd, fp, 0);
+ fp = NULL;
+ }
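+ /* free the mapping list only if it came from kmem_alloc() */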
+ if (mappings != NULL &&
+ mappings != &stack_mappings[0]) {
+ kmem_free(kernel_map,
+ (vm_offset_t) mappings,
+ mapping_count * sizeof (mappings[0]));
+ }
+ mappings = NULL;
+
+ return error;
+}
+
+int
+load_shared_file(struct proc *p, struct load_shared_file_args *uap,
+ __unused int *retval)