- rv = mach_vm_map(current_map(),          /* process map */
-     &attach_va,                          /* attach address */
-     map_size,                            /* segment size */
-     (mach_vm_offset_t)0,                 /* alignment mask */
-     (flags & MAP_FIXED)? VM_FLAGS_FIXED: VM_FLAGS_ANYWHERE,
+ mapped_size = 0;
+
+ /* first reserve enough space... */
+ rv = mach_vm_map_kernel(current_map(),   /* process map */
+     &attach_va,                          /* attach address */
+     map_size,                            /* segment size */
+     0,                                   /* alignment mask */
+     vm_flags,
+     VM_MAP_KERNEL_FLAGS_NONE,
+     VM_KERN_MEMORY_NONE,                 /* VM tag */
+     IPC_PORT_NULL,                       /* no backing object */
+     0,                                   /* offset */
+     FALSE,                               /* copy */
+     VM_PROT_NONE,                        /* current protection */
+     VM_PROT_NONE,                        /* maximum protection */
+     VM_INHERIT_NONE);                    /* inheritance */
+ if (rv != KERN_SUCCESS) {
+     goto out;
+ }
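+ /*
+  * The reservation has no backing object and VM_PROT_NONE, so the
+  * range is claimed but not yet usable; the loop below maps the
+  * actual shared memory over it chunk by chunk.
+  */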
+
+ shmmap_s->va = attach_va;
+
+ /* ... then map the shared memory over the reserved space */
+ for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
+     shm_handle != NULL;
+     shm_handle = shm_handle->shm_handle_next) {
+     vm_map_size_t chunk_size;
+
+     assert(mapped_size < map_size);
+     chunk_size = shm_handle->shm_handle_size;
+     if (chunk_size > map_size - mapped_size) {
+         /*
+          * Partial mapping of last chunk due to
+          * page size mismatch.
+          */
+         assert(vm_map_page_shift(current_map()) < PAGE_SHIFT);
+         assert(shm_handle->shm_handle_next == NULL);
+         chunk_size = map_size - mapped_size;
+     }
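+     /*
+      * Map this chunk over the VM_PROT_NONE placeholder at
+      * attach_va; VM_FLAGS_OVERWRITE replaces the reservation
+      * in place rather than failing on the existing entry.
+      */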
+     rv = vm_map_enter_mem_object(
+         current_map(),                   /* process map */
+         &attach_va,                      /* attach address */
+         chunk_size,                      /* size to map */
+         (mach_vm_offset_t)0,             /* alignment mask */
+         VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
+         VM_MAP_KERNEL_FLAGS_NONE,
+         VM_KERN_MEMORY_NONE,