-
-	if (start != VM_MIN_KERNEL_ADDRESS) {
-		vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
-		(void) vm_map_enter(kernel_map,
-				    &addr, start - VM_MIN_KERNEL_ADDRESS,
-				    (vm_offset_t) 0, TRUE,
-				    VM_OBJECT_NULL,
-				    (vm_object_offset_t) 0, FALSE,
-				    VM_PROT_DEFAULT, VM_PROT_ALL,
-				    VM_INHERIT_DEFAULT);
-	}
-
-	/*
-	 * Account for kernel memory (text, data, bss, vm shenanigans).
-	 * This may include inaccessible "holes" as determined by what
-	 * the machine-dependent init code includes in max_mem.
-	 */
-	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
-				+ vm_page_active_count
-				+ vm_page_inactive_count));
-}
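
The wire-count line removed above encodes a simple identity: every physical
page the VM system knows about that is not sitting on the free, active, or
inactive queue is counted as wired. A minimal worked sketch, with invented
counter values (only the identity itself comes from the code above):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical machine: 64 MB of RAM, 4 KB pages, so
     * atop_64(max_mem) == 16384. The queue counts are made up. */
    static uint32_t
    wire_count_example(void)
    {
        uint64_t total_pages    = 16384; /* atop_64(max_mem) */
        uint32_t free_count     = 12000; /* vm_page_free_count */
        uint32_t active_count   =  2000; /* vm_page_active_count */
        uint32_t inactive_count =  1000; /* vm_page_inactive_count */

        /* 16384 - (12000 + 2000 + 1000) = 1384 pages treated as
         * wired: kernel text, data, bss, and any inaccessible
         * holes included in max_mem. */
        return (uint32_t)(total_pages -
            (free_count + active_count + inactive_count));
    }

    int main(void) { assert(wire_count_example() == 1384); return 0; }
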
-
-
-/*
- *	kmem_io_object_trunc:
- *
- *	Truncate an object-type vm_map_copy_t.
- *	Called by the scatter/gather list network code to remove pages from
- *	the tail end of a packet. Also unwires the object's pages.
- */
-
-kern_return_t
-kmem_io_object_trunc(copy, new_size)
-	vm_map_copy_t	copy;		/* IN/OUT copy object */
-	register vm_size_t new_size;	/* IN new object size */
-{
-	register vm_size_t	offset, old_size;
-
-	assert(copy->type == VM_MAP_COPY_OBJECT);
-
-	old_size = (vm_size_t)round_page_64(copy->size);
-	copy->size = new_size;
-	new_size = round_page_32(new_size);
-
-	vm_object_lock(copy->cpy_object);
-	vm_object_page_remove(copy->cpy_object,
-		(vm_object_offset_t)new_size, (vm_object_offset_t)old_size);
-	for (offset = 0; offset < new_size; offset += PAGE_SIZE) {
-		register vm_page_t	mem;
-
-		if ((mem = vm_page_lookup(copy->cpy_object,
-				(vm_object_offset_t)offset)) == VM_PAGE_NULL)
-			panic("kmem_io_object_trunc: unable to find object page");
-
-		/*
-		 * Make sure these pages are marked dirty
-		 */
-		mem->dirty = TRUE;
-		vm_page_lock_queues();
-		vm_page_unwire(mem);
-		vm_page_unlock_queues();
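
The header comment describes the intended caller: the scatter/gather network
path sizes a copy object for a maximum-length packet, then trims it once the
true length is known. A hedged sketch of that call pattern follows; the
driver-side names are hypothetical, and only kmem_io_object_trunc() and the
VM types come from the code above:

    /* Hypothetical driver helper: DMA filled only actual_len bytes of
     * a packet buffer built for the maximum size, so drop and unwire
     * the tail pages past round_page(actual_len). */
    static void
    pkt_trim(vm_map_copy_t pkt_copy, vm_size_t actual_len)
    {
        kern_return_t kr = kmem_io_object_trunc(pkt_copy, actual_len);

        if (kr != KERN_SUCCESS)
            panic("pkt_trim: kmem_io_object_trunc failed (%d)", kr);
    }
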
+	if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
+		vm_map_offset_t map_addr;
+		kern_return_t kr;
+
+		map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
+		kr = vm_map_enter(kernel_map,
+			&map_addr,
+			(vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+			(vm_map_offset_t) 0,
+			VM_FLAGS_FIXED | VM_FLAGS_NO_PMAP_CHECK,
+			VM_OBJECT_NULL,
+			(vm_object_offset_t) 0, FALSE,
+			VM_PROT_NONE, VM_PROT_NONE,
+			VM_INHERIT_DEFAULT);
+
+		if (kr != KERN_SUCCESS) {
+			panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
+			      (uint64_t) start, (uint64_t) end,
+			      (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+			      (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+			      kr);
+		}
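
The replacement tightens the old behavior in two ways: the reserved range is
now entered with VM_PROT_NONE at a fixed address rather than mapped
VM_PROT_DEFAULT/VM_PROT_ALL, and the result is checked instead of being cast
to void. The net effect is a no-access placeholder covering virtual addresses
already handed out before kmem_init() ran. A hypothetical probe (not in the
source) illustrating the reservation, assuming the usual vm_map_enter()
semantics that a VM_FLAGS_FIXED request over an occupied range fails with
KERN_NO_SPACE:

    /* Sketch only: try to claim a page inside the range kmem_init()
     * just reserved; the placeholder entry should already own it. */
    static void
    probe_reservation(void)
    {
        vm_map_offset_t probe = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
        kern_return_t kr = vm_map_enter(kernel_map, &probe,
            (vm_map_size_t)PAGE_SIZE,
            (vm_map_offset_t)0,
            VM_FLAGS_FIXED,
            VM_OBJECT_NULL,
            (vm_object_offset_t)0, FALSE,
            VM_PROT_DEFAULT, VM_PROT_ALL,
            VM_INHERIT_DEFAULT);

        assert(kr == KERN_NO_SPACE); /* range already reserved */
    }
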