+
+ if ((sr->sr_slid == TRUE) && slide) {
+ if (slide != vm_shared_region_get_slide_info(sr)->slide) {
+ printf("Only one shared region can be slid\n");
+ kr = KERN_FAILURE;
+ } else {
+ /*
+ * Request to slide with exactly the
+ * same slide value that has already
+ * been applied.
+ * This isn't technically wrong, but
+ * we don't want to slide again, so
+ * return KERN_INVALID_ARGUMENT.
+ */
+ kr = KERN_INVALID_ARGUMENT;
+ }
+ }
+ vm_shared_region_deallocate(sr);
+ return kr;
+}
+
+kern_return_t
+vm_shared_region_slide_mapping(
+ vm_shared_region_t sr,
+ mach_vm_size_t slide_info_size,
+ mach_vm_offset_t start,
+ mach_vm_size_t size,
+ mach_vm_offset_t slid_mapping,
+ uint32_t slide,
+ memory_object_control_t sr_file_control)
+{
+ kern_return_t kr;
+ vm_object_t object;
+ vm_shared_region_slide_info_t si;
+ vm_offset_t slide_info_entry;
+ vm_map_entry_t slid_entry, tmp_entry;
+ struct vm_map_entry tmp_entry_store;
+ memory_object_t sr_pager;
+ vm_map_t sr_map;
+ int vm_flags;
+ vm_map_kernel_flags_t vmk_flags;
+ vm_map_offset_t map_addr;
+
+ tmp_entry = VM_MAP_ENTRY_NULL;
+ sr_pager = MEMORY_OBJECT_NULL;
+ object = VM_OBJECT_NULL;
+ slide_info_entry = 0;
+
+ assert(sr->sr_slide_in_progress);
+ assert(!sr->sr_slid);
+
+ si = vm_shared_region_get_slide_info(sr);
+ assert(si->slide_object == VM_OBJECT_NULL);
+ assert(si->slide_info_entry == NULL);
+
+ if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+ if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
+ printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
+ return KERN_FAILURE;
+ }
+
+ kr = kmem_alloc(kernel_map,
+ (vm_offset_t *) &slide_info_entry,
+ (vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ object = memory_object_control_to_vm_object(sr_file_control);
+ if (object == VM_OBJECT_NULL || object->internal) {
+ object = VM_OBJECT_NULL;
+ kr = KERN_INVALID_ADDRESS;
+ goto done;
+ }
+
+ vm_object_lock(object);
+ vm_object_reference_locked(object); /* for si->slide_object */
+ object->object_is_shared_cache = TRUE;
+ vm_object_unlock(object);
+
+ si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
+ si->slide_info_size = slide_info_size;
+
+ assert(slid_mapping != (mach_vm_offset_t) -1);
+ si->slid_address = slid_mapping + sr->sr_base_address;
+ si->slide_object = object;
+ si->start = start;
+ si->end = si->start + size;
+ si->slide = slide;
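+ /*
+ * At this point "si" describes the slide: the [start, end) range
+ * within the shared cache object, the slide amount, and the user
+ * address of the slid copy.  The sliding pager created below is
+ * handed this structure.
+ */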
+
+ /* find the shared region's map entry to slide */
+ sr_map = vm_shared_region_vm_map(sr);
+ vm_map_lock_read(sr_map);
+ if (!vm_map_lookup_entry(sr_map,
+ slid_mapping,
+ &slid_entry)) {
+ /* no mapping there */
+ vm_map_unlock_read(sr_map);
+ kr = KERN_INVALID_ARGUMENT;
+ goto done;
+ }
+ /*
+ * We might want to clip the entry to cover only the portion that
+ * needs sliding (offsets si->start to si->end in the shared cache
+ * file at the bottom of the shadow chain).
+ * In practice, it seems to cover the entire DATA segment...
+ */
+ tmp_entry_store = *slid_entry;
+ tmp_entry = &tmp_entry_store;
+ slid_entry = VM_MAP_ENTRY_NULL;
+ /* extra ref to keep object alive while map is unlocked */
+ vm_object_reference(VME_OBJECT(tmp_entry));
+ vm_map_unlock_read(sr_map);
+
+ /* create a "shared_region" sliding pager */
+ sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry),
+ VME_OFFSET(tmp_entry),
+ si);
+ if (sr_pager == NULL) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto done;
+ }
+
+ /* map that pager over the portion of the mapping that needs sliding */
+ vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_overwrite_immutable = TRUE;
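+ /*
+ * The existing shared region mapping is replaced in place:
+ * VM_FLAGS_OVERWRITE maps over whatever is currently at map_addr,
+ * and vmkf_overwrite_immutable allows this even if that mapping
+ * was entered as immutable.
+ */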
+ map_addr = tmp_entry->vme_start;
+ kr = vm_map_enter_mem_object(sr_map,
+ &map_addr,
+ (tmp_entry->vme_end -
+ tmp_entry->vme_start),
+ (mach_vm_offset_t) 0,
+ vm_flags,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ (ipc_port_t)(uintptr_t) sr_pager,
+ 0,
+ TRUE,
+ tmp_entry->protection,
+ tmp_entry->max_protection,
+ tmp_entry->inheritance);
+ assertf(kr == KERN_SUCCESS, "kr = 0x%x\n", kr);
+ assertf(map_addr == tmp_entry->vme_start,
+ "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n",
+ (uint64_t)map_addr,
+ (uint64_t) tmp_entry->vme_start,
+ tmp_entry);
+
+ /* success! */
+ kr = KERN_SUCCESS;
+
+done:
+ if (sr_pager) {
+ /*
+ * Release the sr_pager reference obtained by
+ * shared_region_pager_setup().
+ * The mapping (if it succeeded) is now holding a reference on
+ * the memory object.
+ */
+ memory_object_deallocate(sr_pager);
+ sr_pager = MEMORY_OBJECT_NULL;
+ }
+ if (tmp_entry) {
+ /* release extra ref on tmp_entry's VM object */
+ vm_object_deallocate(VME_OBJECT(tmp_entry));
+ tmp_entry = VM_MAP_ENTRY_NULL;
+ }
+
+ if (kr != KERN_SUCCESS) {
+ /* cleanup */
+ if (slide_info_entry) {
+ kmem_free(kernel_map, slide_info_entry, slide_info_size);
+ slide_info_entry = 0;
+ }
+ if (si->slide_object) {
+ vm_object_deallocate(si->slide_object);
+ si->slide_object = VM_OBJECT_NULL;
+ }
+ }
+ return kr;
+}
+
+void*
+vm_shared_region_get_slide_info_entry(vm_shared_region_t sr)
+{
+ return (void*)sr->sr_slide_info.slide_info_entry;
+}
+
+static kern_return_t
+vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info)
+{
+ uint32_t pageIndex = 0;
+ uint16_t entryIndex = 0;
+ uint16_t *toc = NULL;
+
+ toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
+ for (; pageIndex < s_info->toc_count; pageIndex++) {
+ entryIndex = (uint16_t)(toc[pageIndex]);
+
+ if (entryIndex >= s_info->entry_count) {
+ printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
+ return KERN_FAILURE;
+ }
+ }
+ return KERN_SUCCESS;
+}
+
+static kern_return_t
+vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_info, mach_vm_size_t slide_info_size)
+{
+ if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
+ return KERN_FAILURE;
+ }
+
+ /* Ensure that the slide info doesn't reference any data outside of its bounds. */
+
+ uint32_t page_starts_count = s_info->page_starts_count;
+ uint32_t page_extras_count = s_info->page_extras_count;
+ mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
+ if (num_trailing_entries < page_starts_count) {
+ return KERN_FAILURE;
+ }
+
+ /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
+ mach_vm_size_t trailing_size = num_trailing_entries << 1;
+ if (trailing_size >> 1 != num_trailing_entries) {
+ return KERN_FAILURE;
+ }
+
+ mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
+ if (required_size < sizeof(*s_info)) {
+ return KERN_FAILURE;
+ }
+
+ if (required_size > slide_info_size) {
+ return KERN_FAILURE;
+ }
+
+ return KERN_SUCCESS;
+}
+
+static kern_return_t
+vm_shared_region_slide_sanity_check_v3(vm_shared_region_slide_info_entry_v3_t s_info, mach_vm_size_t slide_info_size)
+{
+ if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
+ printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SL 0x%llx != 0x%llx\n", (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE);
+ return KERN_FAILURE;
+ }
+
+ uint32_t page_starts_count = s_info->page_starts_count;
+ mach_vm_size_t num_trailing_entries = page_starts_count;
+ mach_vm_size_t trailing_size = num_trailing_entries << 1;
+ mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
+ if (required_size < sizeof(*s_info)) {
+ printf("vm_shared_region_slide_sanity_check_v3: required_size != sizeof(*s_info) 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)sizeof(*s_info));
+ return KERN_FAILURE;
+ }
+
+ if (required_size > slide_info_size) {
+ printf("vm_shared_region_slide_sanity_check_v3: required_size != slide_info_size 0x%llx != 0x%llx\n", (uint64_t)required_size, (uint64_t)slide_info_size);
+ return KERN_FAILURE;
+ }
+
+ return KERN_SUCCESS;
+}
+
+static kern_return_t
+vm_shared_region_slide_sanity_check_v4(vm_shared_region_slide_info_entry_v4_t s_info, mach_vm_size_t slide_info_size)
+{
+ if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) {
+ return KERN_FAILURE;
+ }
+
+ /* Ensure that the slide info doesn't reference any data outside of its bounds. */
+
+ uint32_t page_starts_count = s_info->page_starts_count;
+ uint32_t page_extras_count = s_info->page_extras_count;
+ mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count;
+ if (num_trailing_entries < page_starts_count) {
+ return KERN_FAILURE;
+ }
+
+ /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */
+ mach_vm_size_t trailing_size = num_trailing_entries << 1;
+ if (trailing_size >> 1 != num_trailing_entries) {
+ return KERN_FAILURE;
+ }
+
+ mach_vm_size_t required_size = sizeof(*s_info) + trailing_size;
+ if (required_size < sizeof(*s_info)) {
+ return KERN_FAILURE;
+ }
+
+ if (required_size > slide_info_size) {
+ return KERN_FAILURE;
+ }
+
+ return KERN_SUCCESS;
+}
+
+
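+ /*
+ * Validate the slide info that was copied in for this shared region.
+ * The info is first made read-only in the kernel map, then checked
+ * against the per-version layout.  On failure the slide state is
+ * torn down (slide info freed, slide object released).
+ */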
+kern_return_t
+vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
+{
+ vm_shared_region_slide_info_t si;
+ vm_shared_region_slide_info_entry_t s_info;
+ kern_return_t kr;
+
+ si = vm_shared_region_get_slide_info(sr);
+ s_info = si->slide_info_entry;
+
+ kr = mach_vm_protect(kernel_map,
+ (mach_vm_offset_t)(vm_offset_t)s_info,
+ (mach_vm_size_t) si->slide_info_size,
+ TRUE, VM_PROT_READ);
+ if (kr != KERN_SUCCESS) {
+ panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
+ }
+
+ if (s_info->version == 1) {
+ kr = vm_shared_region_slide_sanity_check_v1(&s_info->v1);
+ } else if (s_info->version == 2) {
+ kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size);
+ } else if (s_info->version == 3) {
+ kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, si->slide_info_size);
+ } else if (s_info->version == 4) {
+ kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, si->slide_info_size);
+ } else {
+ goto fail;
+ }
+ if (kr != KERN_SUCCESS) {
+ goto fail;
+ }
+
+ return KERN_SUCCESS;
+fail:
+ if (si->slide_info_entry != NULL) {
+ kmem_free(kernel_map,
+ (vm_offset_t) si->slide_info_entry,
+ (vm_size_t) si->slide_info_size);
+
+ vm_object_deallocate(si->slide_object);
+ si->slide_object = NULL;
+ si->start = 0;
+ si->end = 0;
+ si->slide = 0;
+ si->slide_info_entry = NULL;
+ si->slide_info_size = 0;
+ }
+ return KERN_FAILURE;
+}
+
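+ /*
+ * Version 1 slide info: a table of contents with one uint16_t per
+ * page, indexing into an array of per-page bitmaps.  Each bitmap
+ * carries one bit per 32-bit word of the page; a set bit means the
+ * 32-bit slide is added to that word.  For 64-bit tasks, a carry out
+ * of the low 32 bits is treated as a sliding failure.
+ */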
+static kern_return_t
+vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
+{
+ uint16_t *toc = NULL;
+ slide_info_entry_toc_t bitmap = NULL;
+ uint32_t i = 0, j = 0;
+ uint8_t b = 0;
+ uint32_t slide = si->slide;
+ int is_64 = task_has_64Bit_addr(current_task());
+
+ vm_shared_region_slide_info_entry_v1_t s_info = &si->slide_info_entry->v1;
+ toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
+
+ if (pageIndex >= s_info->toc_count) {
+ printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
+ } else {
+ uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
+ slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);
+
+ if (entryIndex >= s_info->entry_count) {
+ printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
+ } else {
+ bitmap = &slide_info_entries[entryIndex];
+
+ for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
+ b = bitmap->entry[i];
+ if (b != 0) {
+ for (j = 0; j < 8; ++j) {
+ if (b & (1 << j)) {
+ uint32_t *ptr_to_slide;
+ uint32_t old_value;
+
+ ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr) + (sizeof(uint32_t) * (i * 8 + j)));
+ old_value = *ptr_to_slide;
+ *ptr_to_slide += slide;
+ if (is_64 && *ptr_to_slide < old_value) {
+ /*
+ * We just slid the low 32 bits of a 64-bit pointer
+ * and it looks like there should have been a carry-over
+ * to the upper 32 bits.
+ * The sliding failed...
+ */
+ printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
+ i, j, b, slide, old_value, *ptr_to_slide);
+ return KERN_FAILURE;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return KERN_SUCCESS;
+}
+
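+ /*
+ * Version 2 slide info encodes each page as a chain of rebase
+ * locations.  Every location in the chain packs the distance to the
+ * next location in the bits selected by delta_mask and the value to
+ * rebase in the remaining bits; the delta field counts in 4-byte
+ * units, so delta_shift = ctz(delta_mask) - 2 turns it directly into
+ * a byte offset.  With an illustrative (not necessarily real) mask of
+ * 0x00000300 and a raw word of 0x00000234:
+ *   delta = (0x234 & 0x300) >> 6 = 8 bytes to the next location
+ *   value = 0x234 & ~0x300 = 0x34, then += value_add + slide_amount
+ * A delta of 0 ends the chain for the page.
+ */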
+static kern_return_t
+rebase_chain_32(
+ uint8_t *page_content,
+ uint16_t start_offset,
+ uint32_t slide_amount,
+ vm_shared_region_slide_info_entry_v2_t s_info)
+{
+ const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);
+
+ const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
+ const uint32_t value_mask = ~delta_mask;
+ const uint32_t value_add = (uint32_t)(s_info->value_add);
+ const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;
+
+ uint32_t page_offset = start_offset;
+ uint32_t delta = 1;
+
+ while (delta != 0 && page_offset <= last_page_offset) {
+ uint8_t *loc;
+ uint32_t value;
+
+ loc = page_content + page_offset;
+ memcpy(&value, loc, sizeof(value));
+ delta = (value & delta_mask) >> delta_shift;
+ value &= value_mask;
+
+ if (value != 0) {
+ value += value_add;
+ value += slide_amount;
+ }
+ memcpy(loc, &value, sizeof(value));
+ page_offset += delta;
+ }
+
+ /* If the offset went past the end of the page, then the slide data is invalid. */
+ if (page_offset > last_page_offset) {
+ return KERN_FAILURE;
+ }
+ return KERN_SUCCESS;
+}
+
+static kern_return_t
+rebase_chain_64(
+ uint8_t *page_content,
+ uint16_t start_offset,
+ uint32_t slide_amount,
+ vm_shared_region_slide_info_entry_v2_t s_info)
+{
+ const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t);
+
+ const uint64_t delta_mask = s_info->delta_mask;
+ const uint64_t value_mask = ~delta_mask;
+ const uint64_t value_add = s_info->value_add;
+ const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2;
+
+ uint32_t page_offset = start_offset;
+ uint32_t delta = 1;
+
+ while (delta != 0 && page_offset <= last_page_offset) {
+ uint8_t *loc;
+ uint64_t value;
+
+ loc = page_content + page_offset;
+ memcpy(&value, loc, sizeof(value));
+ delta = (uint32_t)((value & delta_mask) >> delta_shift);
+ value &= value_mask;
+
+ if (value != 0) {
+ value += value_add;
+ value += slide_amount;
+ }
+ memcpy(loc, &value, sizeof(value));
+ page_offset += delta;
+ }
+
+ if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) {
+ /* If a pointer straddling the page boundary needs to be adjusted, then
+ * add the slide to the lower half. The encoding guarantees that the upper
+ * half on the next page will need no masking.
+ *
+ * This assumes a little-endian machine and that the region being slid
+ * never crosses a 4 GB boundary. */
+
+ uint8_t *loc = page_content + page_offset;
+ uint32_t value;
+
+ memcpy(&value, loc, sizeof(value));
+ value += slide_amount;
+ memcpy(loc, &value, sizeof(value));
+ } else if (page_offset > last_page_offset) {
+ return KERN_FAILURE;
+ }
+
+ return KERN_SUCCESS;
+}
+
+static kern_return_t
+rebase_chain(
+ boolean_t is_64,
+ uint32_t pageIndex,
+ uint8_t *page_content,
+ uint16_t start_offset,
+ uint32_t slide_amount,
+ vm_shared_region_slide_info_entry_v2_t s_info)
+{
+ kern_return_t kr;
+ if (is_64) {
+ kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info);
+ } else {
+ kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info);
+ }
+
+ if (kr != KERN_SUCCESS) {
+ printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n",
+ pageIndex, start_offset, slide_amount);
+ }
+ return kr;
+}
+
+static kern_return_t
+vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
+{
+ vm_shared_region_slide_info_entry_v2_t s_info = &si->slide_info_entry->v2;
+ const uint32_t slide_amount = si->slide;
+
+ /* The high bits of the delta_mask field are nonzero precisely when the shared
+ * cache is 64-bit. */
+ const boolean_t is_64 = (s_info->delta_mask >> 32) != 0;
+
+ const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
+ const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);
+
+ uint8_t *page_content = (uint8_t *)vaddr;
+ uint16_t page_entry;
+
+ if (pageIndex >= s_info->page_starts_count) {
+ printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
+ pageIndex, s_info->page_starts_count);
+ return KERN_FAILURE;
+ }
+ page_entry = page_starts[pageIndex];
+
+ if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) {
+ return KERN_SUCCESS;
+ }
+
+ if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) {
+ uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE;
+ uint16_t info;
+
+ do {
+ uint16_t page_start_offset;
+ kern_return_t kr;
+
+ if (chain_index >= s_info->page_extras_count) {
+ printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
+ chain_index, s_info->page_extras_count);
+ return KERN_FAILURE;
+ }
+ info = page_extras[chain_index];
+ page_start_offset = (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
+
+ kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
+ if (kr != KERN_SUCCESS) {
+ return KERN_FAILURE;
+ }
+
+ chain_index++;
+ } while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END));
+ } else {
+ const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
+ kern_return_t kr;
+
+ kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info);
+ if (kr != KERN_SUCCESS) {
+ return KERN_FAILURE;
+ }
+ }
+
+ return KERN_SUCCESS;
+}
+
+
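+ /*
+ * Version 3 slide info (pointer-authentication aware): each
+ * page_starts entry gives the byte offset of the first rebase
+ * location in the page, and each 64-bit location encodes an 11-bit
+ * offset (in 8-byte strides) to the next one; a zero offset ends the
+ * chain.  Authenticated pointers carry a 32-bit offset from the
+ * shared cache base instead of a raw target value, as described in
+ * the format comment below.
+ */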
+static kern_return_t
+vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vaddr, __unused mach_vm_offset_t uservaddr, uint32_t pageIndex)
+{
+ vm_shared_region_slide_info_entry_v3_t s_info = &si->slide_info_entry->v3;
+ const uint32_t slide_amount = si->slide;
+
+ uint8_t *page_content = (uint8_t *)vaddr;
+ uint16_t page_entry;
+
+ if (pageIndex >= s_info->page_starts_count) {
+ printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
+ pageIndex, s_info->page_starts_count);
+ return KERN_FAILURE;
+ }
+ page_entry = s_info->page_starts[pageIndex];
+
+ if (page_entry == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) {
+ return KERN_SUCCESS;
+ }
+
+ uint8_t* rebaseLocation = page_content;
+ uint64_t delta = page_entry;
+ do {
+ rebaseLocation += delta;
+ uint64_t value;
+ memcpy(&value, rebaseLocation, sizeof(value));
+ delta = ((value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t);
+
+ // A pointer is one of :
+ // {
+ // uint64_t pointerValue : 51;
+ // uint64_t offsetToNextPointer : 11;
+ // uint64_t isBind : 1 = 0;
+ // uint64_t authenticated : 1 = 0;
+ // }
+ // {
+ // uint32_t offsetFromSharedCacheBase;
+ // uint16_t diversityData;
+ // uint16_t hasAddressDiversity : 1;
+ // uint16_t hasDKey : 1;
+ // uint16_t hasBKey : 1;
+ // uint16_t offsetToNextPointer : 11;
+ // uint16_t isBind : 1;
+ // uint16_t authenticated : 1 = 1;
+ // }
+
+ bool isBind = (value & (1ULL << 62)) != 0;
+ if (isBind) {
+ return KERN_FAILURE;
+ }
+
+ bool isAuthenticated = (value & (1ULL << 63)) != 0;
+
+ if (isAuthenticated) {
+ // The new value for a rebase is the low 32-bits of the threaded value plus the slide.
+ value = (value & 0xFFFFFFFF) + slide_amount;
+ // Add in the offset from the mach_header
+ const uint64_t value_add = s_info->value_add;
+ value += value_add;
+
+ } else {
+ // The new value for a rebase is the low 51-bits of the threaded value plus the slide.
+ // Regular pointer which needs to fit in 51-bits of value.
+ // C++ RTTI uses the top bit, so we'll allow the whole top-byte
+ // and the bottom 43-bits to be fit in to 51-bits.
+ uint64_t top8Bits = value & 0x0007F80000000000ULL;
+ uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
+ uint64_t targetValue = (top8Bits << 13) | bottom43Bits;
+ value = targetValue + slide_amount;
+ }
+
+ memcpy(rebaseLocation, &value, sizeof(value));
+ } while (delta != 0);
+
+ return KERN_SUCCESS;
+}
+
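+ /*
+ * Version 4 is the 32-bit-only variant of the v2 delta chain.  The
+ * difference is in how values are interpreted: small positive values
+ * (top 17 bits clear) are used as-is, small negative values (bits
+ * 15-29 all set) only get their top two bits restored, and everything
+ * else is rebased by value_add + slide_amount.
+ */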
+static kern_return_t
+rebase_chainv4(
+ uint8_t *page_content,
+ uint16_t start_offset,
+ uint32_t slide_amount,
+ vm_shared_region_slide_info_entry_v4_t s_info)
+{
+ const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t);
+
+ const uint32_t delta_mask = (uint32_t)(s_info->delta_mask);
+ const uint32_t value_mask = ~delta_mask;
+ const uint32_t value_add = (uint32_t)(s_info->value_add);
+ const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2;
+
+ uint32_t page_offset = start_offset;
+ uint32_t delta = 1;
+
+ while (delta != 0 && page_offset <= last_page_offset) {
+ uint8_t *loc;
+ uint32_t value;
+
+ loc = page_content + page_offset;
+ memcpy(&value, loc, sizeof(value));
+ delta = (value & delta_mask) >> delta_shift;
+ value &= value_mask;
+
+ if ((value & 0xFFFF8000) == 0) {
+ // small positive non-pointer, use as-is
+ } else if ((value & 0x3FFF8000) == 0x3FFF8000) {
+ // small negative non-pointer
+ value |= 0xC0000000;
+ } else {
+ // pointer that needs rebasing
+ value += value_add;
+ value += slide_amount;
+ }
+ memcpy(loc, &value, sizeof(value));
+ page_offset += delta;
+ }
+
+ /* If the offset went past the end of the page, then the slide data is invalid. */
+ if (page_offset > last_page_offset) {
+ return KERN_FAILURE;
+ }
+ return KERN_SUCCESS;
+}
+
+static kern_return_t
+vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
+{
+ vm_shared_region_slide_info_entry_v4_t s_info = &si->slide_info_entry->v4;
+ const uint32_t slide_amount = si->slide;
+
+ const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset);
+ const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset);
+
+ uint8_t *page_content = (uint8_t *)vaddr;
+ uint16_t page_entry;
+
+ if (pageIndex >= s_info->page_starts_count) {
+ printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n",
+ pageIndex, s_info->page_starts_count);
+ return KERN_FAILURE;
+ }
+ page_entry = page_starts[pageIndex];
+
+ if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) {
+ return KERN_SUCCESS;
+ }
+
+ if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) {
+ uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX;
+ uint16_t info;
+
+ do {
+ uint16_t page_start_offset;
+ kern_return_t kr;
+
+ if (chain_index >= s_info->page_extras_count) {
+ printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n",
+ chain_index, s_info->page_extras_count);
+ return KERN_FAILURE;
+ }
+ info = page_extras[chain_index];
+ page_start_offset = (info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
+
+ kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
+ if (kr != KERN_SUCCESS) {
+ return KERN_FAILURE;
+ }
+
+ chain_index++;
+ } while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END));
+ } else {
+ const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT;
+ kern_return_t kr;
+
+ kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info);
+ if (kr != KERN_SUCCESS) {
+ return KERN_FAILURE;
+ }
+ }
+
+ return KERN_SUCCESS;
+}
+
+
+
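+ /*
+ * Slide one page of the shared cache.  vaddr is the kernel-mapped
+ * address of the page contents to modify, uservaddr the corresponding
+ * user-space address (currently unused by the per-version helpers),
+ * and pageIndex the page's index within the slid range.  Dispatch on
+ * the version recorded in the slide info.
+ */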
+kern_return_t
+vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, mach_vm_offset_t uservaddr, uint32_t pageIndex)
+{
+ if (si->slide_info_entry->version == 1) {
+ return vm_shared_region_slide_page_v1(si, vaddr, pageIndex);
+ } else if (si->slide_info_entry->version == 2) {
+ return vm_shared_region_slide_page_v2(si, vaddr, pageIndex);
+ } else if (si->slide_info_entry->version == 3) {
+ return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex);
+ } else if (si->slide_info_entry->version == 4) {
+ return vm_shared_region_slide_page_v4(si, vaddr, pageIndex);
+ } else {
+ return KERN_FAILURE;
+ }
+}
+
+/******************************************************************************/
+/* Comm page support */
+/******************************************************************************/
+
+ipc_port_t commpage32_handle = IPC_PORT_NULL;
+ipc_port_t commpage64_handle = IPC_PORT_NULL;
+vm_named_entry_t commpage32_entry = NULL;
+vm_named_entry_t commpage64_entry = NULL;
+vm_map_t commpage32_map = VM_MAP_NULL;
+vm_map_t commpage64_map = VM_MAP_NULL;
+
+ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
+ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
+vm_named_entry_t commpage_text32_entry = NULL;
+vm_named_entry_t commpage_text64_entry = NULL;
+vm_map_t commpage_text32_map = VM_MAP_NULL;
+vm_map_t commpage_text64_map = VM_MAP_NULL;
+
+user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
+user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
+
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * Create a memory entry, VM submap and pmap for one commpage.
+ */
+static void
+_vm_commpage_init(
+ ipc_port_t *handlep,
+ vm_map_size_t size)
+{
+ kern_return_t kr;
+ vm_named_entry_t mem_entry;
+ vm_map_t new_map;
+
+ SHARED_REGION_TRACE_DEBUG(
+ ("commpage: -> _init(0x%llx)\n",
+ (long long)size));
+
+ kr = mach_memory_entry_allocate(&mem_entry,
+ handlep);
+ if (kr != KERN_SUCCESS) {
+ panic("_vm_commpage_init: could not allocate mem_entry");
+ }
+ new_map = vm_map_create(pmap_create(NULL, 0, 0), 0, size, TRUE);
+ if (new_map == VM_MAP_NULL) {