+int
+_shared_region_slide(uint32_t slide,
+ mach_vm_offset_t entry_start_address,
+ mach_vm_size_t entry_size,
+ mach_vm_offset_t slide_start,
+ mach_vm_size_t slide_size,
+ memory_object_control_t sr_file_control)
+{
+ void *slide_info_entry = NULL;
+ int error;
+
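+ /*
+ * Set up the shared region's slide info for the file mapping
+ * that is being slid by the requested amount.
+ */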
+ if((error = vm_shared_region_slide_init(slide_size, entry_start_address, entry_size, slide, sr_file_control))) {
+ printf("slide_info initialization failed with kr=%d\n", error);
+ goto done;
+ }
+
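+ /*
+ * Copy the slide info supplied by user space into the
+ * shared region's slide info buffer.
+ */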
+ slide_info_entry = vm_shared_region_get_slide_info_entry();
+ if (slide_info_entry == NULL) {
+ error = EFAULT;
+ } else {
+ error = copyin(slide_start,
+ slide_info_entry,
+ (vm_size_t)slide_size);
+ }
+ if (error) {
+ goto done;
+ }
+
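+ /*
+ * Sanity-check the copied slide info before it is used.
+ */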
+ if (vm_shared_region_slide_sanity_check() != KERN_SUCCESS) {
+ error = EFAULT;
+ printf("Sanity Check failed for slide_info\n");
+ } else {
+#if DEBUG
+ printf("Succesfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
+ (void*)(uintptr_t)entry_start_address,
+ (unsigned long)entry_size,
+ (unsigned long)slide_size);
+#endif
+ }
+done:
+ return error;
+}
+
+int
+shared_region_map_and_slide_np(
+ struct proc *p,
+ struct shared_region_map_and_slide_np_args *uap,
+ __unused int *retvalp)
+{
+ struct shared_file_mapping_np mapping_to_slide;
+ struct shared_file_mapping_np *mappings;
+ unsigned int mappings_count = uap->count;
+
+ memory_object_control_t sr_file_control;
+ kern_return_t kr = KERN_SUCCESS;
+ uint32_t slide = uap->slide;
+
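+ /*
+ * Small mapping requests are handled with this on-stack buffer;
+ * anything larger is rejected below.
+ */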
+#define SFM_MAX_STACK 8
+ struct shared_file_mapping_np stack_mappings[SFM_MAX_STACK];
+
+ if ((kr = vm_shared_region_sliding_valid(slide)) != KERN_SUCCESS) {
+ if (kr == KERN_INVALID_ARGUMENT) {
+ /*
+ * This will happen if we request sliding again
+ * with the same slide value that was used earlier
+ * for the very first sliding. We continue through
+ * to the mapping layer. This is so that we can be
+ * absolutely certain that the same mappings have
+ * been requested.
+ */
+ kr = KERN_SUCCESS;
+ } else {
+ goto done;
+ }
+ }
+
+ if (mappings_count == 0) {
+ SHARED_REGION_TRACE_INFO(
+ ("shared_region: %p [%d(%s)] map(): "
+ "no mappings\n",
+ current_thread(), p->p_pid, p->p_comm));
+ kr = KERN_SUCCESS; /* no mappings: we're done! */
+ goto done;
+ } else if (mappings_count <= SFM_MAX_STACK) {
+ mappings = &stack_mappings[0];
+ } else {
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: %p [%d(%s)] map(): "
+ "too many mappings (%d)\n",
+ current_thread(), p->p_pid, p->p_comm,
+ mappings_count));
+ kr = KERN_FAILURE;
+ goto done;
+ }
+
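+ /* Copy the mapping descriptors in from user space. */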
+ if ((kr = shared_region_copyin_mappings(p, uap->mappings, uap->count, mappings))) {
+ goto done;
+ }
+
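+ /*
+ * Establish the mappings in the shared region; on success this
+ * returns the mapped file's control and the mapping to be slid.
+ */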
+ kr = _shared_region_map(p, uap->fd, mappings_count, mappings, &sr_file_control, &mapping_to_slide);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
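+ /*
+ * If a slide was requested, apply it to the mapping returned
+ * by _shared_region_map() and undo the mappings on failure.
+ */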
+ if (slide) {
+ kr = _shared_region_slide(slide,
+ mapping_to_slide.sfm_file_offset,
+ mapping_to_slide.sfm_size,
+ uap->slide_start,
+ uap->slide_size,
+ sr_file_control);
+ if (kr != KERN_SUCCESS) {
+ vm_shared_region_undo_mappings(NULL, 0, mappings, mappings_count);
+ return kr;
+ }
+ }
+done:
+ return kr;
+}