X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0b4e3aa066abc0728aacb4bbeb86f53f9737156e..36401178fd6817c043cc00b0c00c7f723e58efae:/osfmk/vm/memory_object.c

diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c
index 90de30229..c84e776c6 100644
--- a/osfmk/vm/memory_object.c
+++ b/osfmk/vm/memory_object.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
 *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
* - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ @@ -94,23 +100,18 @@ #include /* For pmap_clear_modify */ #include /* For kernel_map, vm_move */ #include /* For vm_map_pageable */ +#include /* Needed by some vm_page.h macros */ #if MACH_PAGEMAP #include #endif /* MACH_PAGEMAP */ +#include + memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL; -vm_size_t memory_manager_default_cluster = 0; decl_mutex_data(, memory_manager_default_lock) -/* - * Forward ref to file-local function: - */ -boolean_t -vm_object_update(vm_object_t, vm_object_offset_t, - vm_size_t, memory_object_return_t, int, vm_prot_t); - /* * Routine: memory_object_should_return_page @@ -132,7 +133,7 @@ vm_object_update(vm_object_t, vm_object_offset_t, #define memory_object_should_return_page(m, should_return) \ (should_return != MEMORY_OBJECT_RETURN_NONE && \ - (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_addr))) || \ + (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \ ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \ (should_return) == MEMORY_OBJECT_RETURN_ANYTHING)) @@ -188,42 +189,36 @@ memory_object_lock_page( * does not have any data. */ - if (m->absent || m->error || m->restart) - return(MEMORY_OBJECT_LOCK_RESULT_DONE); + if (m->absent || m->error || m->restart) { + if(m->error && should_flush) { + /* dump the page, pager wants us to */ + /* clean it up and there is no */ + /* relevant data to return */ + if(m->wire_count == 0) { + VM_PAGE_FREE(m); + return(MEMORY_OBJECT_LOCK_RESULT_DONE); + } + } else { + return(MEMORY_OBJECT_LOCK_RESULT_DONE); + } + } assert(!m->fictitious); - if (m->wire_count != 0) { - /* - * If no change would take place - * anyway, return successfully. - * - * No change means: - * Not flushing AND - * No change to page lock [2 checks] AND - * Should not return page - * - * XXX This doesn't handle sending a copy of a wired - * XXX page to the pager, but that will require some - * XXX significant surgery. - */ - if (!should_flush && - (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) && - ! memory_object_should_return_page(m, should_return)) { - - /* - * Restart page unlock requests, - * even though no change took place. - * [Memory managers may be expecting - * to see new requests.] - */ - m->unlock_request = VM_PROT_NONE; - PAGE_WAKEUP(m); + /* + * If the page is wired, just clean or return the page if needed. + * Wired pages don't get flushed or disconnected from the pmap. + */ - return(MEMORY_OBJECT_LOCK_RESULT_DONE); + if (m->wire_count != 0) { + if (memory_object_should_return_page(m, should_return)) { + if (m->dirty) + return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); + else + return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN); } - return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK); + return(MEMORY_OBJECT_LOCK_RESULT_DONE); } /* @@ -243,36 +238,14 @@ memory_object_lock_page( */ if (prot != VM_PROT_NO_CHANGE) { - if ((m->page_lock ^ prot) & prot) { - pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot); - } -#if 0 - /* code associated with the vestigial - * memory_object_data_unlock - */ - m->page_lock = prot; - m->lock_supplied = TRUE; - if (prot != VM_PROT_NONE) - m->unusual = TRUE; - else - m->unusual = FALSE; - - /* - * Restart any past unlock requests, even if no - * change resulted. If the manager explicitly - * requested no protection change, then it is assumed - * to be remembering past requests. 
- */ + pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot); - m->unlock_request = VM_PROT_NONE; -#endif /* 0 */ PAGE_WAKEUP(m); } /* * Handle page returning. */ - if (memory_object_should_return_page(m, should_return)) { /* @@ -284,12 +257,12 @@ memory_object_lock_page( * maps now. */ - vm_page_lock_queues(); + vm_page_lockspin_queues(); VM_PAGE_QUEUES_REMOVE(m); vm_page_unlock_queues(); if (!should_flush) - pmap_page_protect(m->phys_addr, VM_PROT_NONE); + pmap_disconnect(m->phys_page); if (m->dirty) return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); @@ -300,12 +273,9 @@ memory_object_lock_page( /* * Handle flushing */ - if (should_flush) { VM_PAGE_FREE(m); } else { - extern boolean_t vm_page_deactivate_hint; - /* * XXX Make clean but not flush a paging hint, * and deactivate the pages. This is a hack @@ -325,21 +295,33 @@ memory_object_lock_page( return(MEMORY_OBJECT_LOCK_RESULT_DONE); } -#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po) \ +#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po, ro, ioerr, iosync) \ MACRO_BEGIN \ \ - register int i; \ - register vm_page_t hp; \ - \ - vm_object_unlock(object); \ + register int upl_flags; \ + memory_object_t pager; \ + \ + if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) { \ + vm_object_paging_begin(object); \ + vm_object_unlock(object); \ \ - (void) memory_object_data_return(object->pager, \ - po, \ - data_cnt, \ - (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \ - !should_flush); \ + if (iosync) \ + upl_flags = UPL_MSYNC | UPL_IOSYNC; \ + else \ + upl_flags = UPL_MSYNC; \ + \ + (void) memory_object_data_return(pager, \ + po, \ + data_cnt, \ + ro, \ + ioerr, \ + (action) == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN,\ + !should_flush, \ + upl_flags); \ \ - vm_object_lock(object); \ + vm_object_lock(object); \ + vm_object_paging_end(object); \ + } \ MACRO_END /* @@ -372,13 +354,16 @@ memory_object_lock_request( memory_object_control_t control, memory_object_offset_t offset, memory_object_size_t size, + memory_object_offset_t * resid_offset, + int * io_errno, memory_object_return_t should_return, int flags, vm_prot_t prot) { vm_object_t object; - vm_object_offset_t original_offset = offset; - boolean_t should_flush=flags & MEMORY_OBJECT_DATA_FLUSH; + __unused boolean_t should_flush; + + should_flush = flags & MEMORY_OBJECT_DATA_FLUSH; XPR(XPR_MEMORY_OBJECT, "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n", @@ -395,7 +380,7 @@ memory_object_lock_request( if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) return (KERN_INVALID_ARGUMENT); - size = round_page(size); + size = round_page_64(size); /* * Lock the object, and acquire a paging reference to @@ -406,7 +391,7 @@ memory_object_lock_request( offset -= object->paging_offset; (void)vm_object_update(object, - offset, size, should_return, flags, prot); + offset, size, resid_offset, io_errno, should_return, flags, prot); vm_object_paging_end(object); vm_object_unlock(object); @@ -496,11 +481,13 @@ boolean_t vm_object_sync( vm_object_t object, vm_object_offset_t offset, - vm_size_t size, + vm_object_size_t size, boolean_t should_flush, - boolean_t should_return) + boolean_t should_return, + boolean_t should_iosync) { boolean_t rv; + int flags; XPR(XPR_VM_OBJECT, "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n", @@ -514,12 +501,19 @@ vm_object_sync( vm_object_lock(object); vm_object_paging_begin(object); - rv = vm_object_update(object, offset, size, + if (should_flush) + flags = MEMORY_OBJECT_DATA_FLUSH; + else + flags = 0; + + if 
(should_iosync) + flags |= MEMORY_OBJECT_IO_SYNC; + + rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL, (should_return) ? MEMORY_OBJECT_RETURN_ALL : MEMORY_OBJECT_RETURN_NONE, - (should_flush) ? - MEMORY_OBJECT_DATA_FLUSH : 0, + flags, VM_PROT_NO_CHANGE); @@ -528,6 +522,150 @@ vm_object_sync( return rv; } + + + +static int +vm_object_update_extent( + vm_object_t object, + vm_object_offset_t offset, + vm_object_offset_t offset_end, + vm_object_offset_t *offset_resid, + int *io_errno, + boolean_t should_flush, + memory_object_return_t should_return, + boolean_t should_iosync, + vm_prot_t prot) +{ + vm_page_t m; + int retval = 0; + vm_size_t data_cnt = 0; + vm_object_offset_t paging_offset = 0; + vm_object_offset_t last_offset = offset; + memory_object_lock_result_t page_lock_result; + memory_object_lock_result_t pageout_action; + + pageout_action = MEMORY_OBJECT_LOCK_RESULT_DONE; + + for (; + offset < offset_end && object->resident_page_count; + offset += PAGE_SIZE_64) { + + /* + * Limit the number of pages to be cleaned at once. + */ + if (data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) { + LIST_REQ_PAGEOUT_PAGES(object, data_cnt, + pageout_action, paging_offset, offset_resid, io_errno, should_iosync); + data_cnt = 0; + } + + while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { + page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot); + + XPR(XPR_MEMORY_OBJECT, + "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n", + (integer_t)object, offset, page_lock_result, 0, 0); + + switch (page_lock_result) + { + case MEMORY_OBJECT_LOCK_RESULT_DONE: + /* + * End of a cluster of dirty pages. + */ + if (data_cnt) { + LIST_REQ_PAGEOUT_PAGES(object, + data_cnt, pageout_action, + paging_offset, offset_resid, io_errno, should_iosync); + data_cnt = 0; + continue; + } + break; + + case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK: + /* + * Since it is necessary to block, + * clean any dirty pages now. + */ + if (data_cnt) { + LIST_REQ_PAGEOUT_PAGES(object, + data_cnt, pageout_action, + paging_offset, offset_resid, io_errno, should_iosync); + data_cnt = 0; + continue; + } + PAGE_SLEEP(object, m, THREAD_UNINT); + continue; + + case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN: + case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN: + /* + * The clean and return cases are similar. + * + * if this would form a discontiguous block, + * clean the old pages and start anew. + * + * Mark the page busy since we will unlock the + * object if we issue the LIST_REQ_PAGEOUT + */ + m->busy = TRUE; + if (data_cnt && + ((last_offset != offset) || (pageout_action != page_lock_result))) { + LIST_REQ_PAGEOUT_PAGES(object, + data_cnt, pageout_action, + paging_offset, offset_resid, io_errno, should_iosync); + data_cnt = 0; + } + m->busy = FALSE; + + if (m->cleaning) { + PAGE_SLEEP(object, m, THREAD_UNINT); + continue; + } + if (data_cnt == 0) { + pageout_action = page_lock_result; + paging_offset = offset; + } + data_cnt += PAGE_SIZE; + last_offset = offset + PAGE_SIZE_64; + + vm_page_lockspin_queues(); + /* + * Clean + */ + m->list_req_pending = TRUE; + m->cleaning = TRUE; + + if (should_flush) { + /* + * and add additional state + * for the flush + */ + m->busy = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + } + vm_page_unlock_queues(); + + retval = 1; + break; + } + break; + } + } + /* + * We have completed the scan for applicable pages. + * Clean any pages that have been saved. 
+ */ + if (data_cnt) { + LIST_REQ_PAGEOUT_PAGES(object, + data_cnt, pageout_action, paging_offset, offset_resid, io_errno, should_iosync); + } + return (retval); +} + + + /* * Routine: vm_object_update * Description: @@ -539,24 +677,28 @@ kern_return_t vm_object_update( register vm_object_t object, register vm_object_offset_t offset, - register vm_size_t size, + register vm_object_size_t size, + register vm_object_offset_t *resid_offset, + int *io_errno, memory_object_return_t should_return, int flags, - vm_prot_t prot) + vm_prot_t protection) { - register vm_page_t m; - vm_page_t holding_page; - vm_size_t original_size = size; - vm_object_offset_t paging_offset = 0; - vm_object_t copy_object; - vm_size_t data_cnt = 0; - vm_object_offset_t last_offset = offset; - memory_object_lock_result_t page_lock_result; - memory_object_lock_result_t pageout_action; + vm_object_t copy_object = VM_OBJECT_NULL; boolean_t data_returned = FALSE; boolean_t update_cow; - boolean_t should_flush = flags & MEMORY_OBJECT_DATA_FLUSH; - boolean_t pending_pageout = FALSE; + boolean_t should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE; + boolean_t should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE; + int num_of_extents; + int n; +#define MAX_EXTENTS 8 +#define EXTENT_SIZE (1024 * 1024 * 256) +#define RESIDENT_LIMIT (1024 * 32) + struct extent { + vm_object_offset_t e_base; + vm_object_offset_t e_min; + vm_object_offset_t e_max; + } extents[MAX_EXTENTS]; /* * To avoid blocking while scanning for pages, save @@ -580,89 +722,123 @@ vm_object_update( !(flags & MEMORY_OBJECT_DATA_PURGE))) || (flags & MEMORY_OBJECT_COPY_SYNC); + if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) { + int collisions = 0; + + while ((copy_object = object->copy) != VM_OBJECT_NULL) { + /* + * need to do a try here since we're swimming upstream + * against the normal lock ordering... however, we need + * to hold the object stable until we gain control of the + * copy object so we have to be careful how we approach this + */ + if (vm_object_lock_try(copy_object)) { + /* + * we 'won' the lock on the copy object... + * no need to hold the object lock any longer... + * take a real reference on the copy object because + * we're going to call vm_fault_page on it which may + * under certain conditions drop the lock and the paging + * reference we're about to take... the reference + * will keep the copy object from going away if that happens + */ + vm_object_unlock(object); + vm_object_reference_locked(copy_object); + break; + } + vm_object_unlock(object); + + collisions++; + mutex_pause(collisions); - if((((copy_object = object->copy) != NULL) && update_cow) || - (flags & MEMORY_OBJECT_DATA_SYNC)) { - vm_size_t i; - vm_size_t copy_size; - vm_object_offset_t copy_offset; + vm_object_lock(object); + } + } + if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) { + vm_map_size_t i; + vm_map_size_t copy_size; + vm_map_offset_t copy_offset; vm_prot_t prot; vm_page_t page; vm_page_t top_page; kern_return_t error = 0; + struct vm_object_fault_info fault_info; + + if (copy_object != VM_OBJECT_NULL) { + /* + * translate offset with respect to shadow's offset + */ + copy_offset = (offset >= copy_object->shadow_offset) ? 
+ (vm_map_offset_t)(offset - copy_object->shadow_offset) : + (vm_map_offset_t) 0; + + if (copy_offset > copy_object->size) + copy_offset = copy_object->size; + + /* + * clip size with respect to shadow offset + */ + if (offset >= copy_object->shadow_offset) { + copy_size = size; + } else if (size >= copy_object->shadow_offset - offset) { + copy_size = size - (copy_object->shadow_offset - offset); + } else { + copy_size = 0; + } + + if (copy_offset + copy_size > copy_object->size) { + if (copy_object->size >= copy_offset) { + copy_size = copy_object->size - copy_offset; + } else { + copy_size = 0; + } + } + copy_size+=copy_offset; - if(copy_object != NULL) { - /* translate offset with respect to shadow's offset */ - copy_offset = (offset >= copy_object->shadow_offset)? - offset - copy_object->shadow_offset : - (vm_object_offset_t) 0; - if(copy_offset > copy_object->size) - copy_offset = copy_object->size; - - /* clip size with respect to shadow offset */ - copy_size = (offset >= copy_object->shadow_offset) ? - size : size - (copy_object->shadow_offset - offset); - - if(copy_size <= 0) { - copy_size = 0; - } else { - copy_size = ((copy_offset + copy_size) - <= copy_object->size) ? - copy_size : copy_object->size - copy_offset; - } - /* check for a copy_offset which is beyond the end of */ - /* the copy_object */ - if(copy_size < 0) - copy_size = 0; - - copy_size+=offset; - - vm_object_unlock(object); - vm_object_lock(copy_object); } else { copy_object = object; copy_size = offset + size; copy_offset = offset; } + fault_info.interruptible = THREAD_UNINT; + fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; + fault_info.user_tag = 0; + fault_info.lo_offset = copy_offset; + fault_info.hi_offset = copy_size; + fault_info.no_cache = FALSE; vm_object_paging_begin(copy_object); - for (i=copy_offset; iobject, top_page); - PAGE_WAKEUP_DONE(page); - vm_page_lock_queues(); - if (!page->active && !page->inactive) - vm_page_activate(page); - vm_page_unlock_queues(); vm_object_lock(copy_object); vm_object_paging_begin(copy_object); - } else { - PAGE_WAKEUP_DONE(page); - vm_page_lock_queues(); - if (!page->active && !page->inactive) - vm_page_activate(page); - vm_page_unlock_queues(); } + vm_page_lock_queues(); + if (!page->active && !page->inactive) + vm_page_deactivate(page); + vm_page_unlock_queues(); + + PAGE_WAKEUP_DONE(page); break; case VM_FAULT_RETRY: prot = VM_PROT_WRITE|VM_PROT_READ; @@ -687,173 +863,166 @@ vm_object_update( vm_object_paging_begin(copy_object); goto RETRY_COW_OF_LOCK_REQUEST; case VM_FAULT_MEMORY_ERROR: + if (object != copy_object) + vm_object_deallocate(copy_object); vm_object_lock(object); goto BYPASS_COW_COPYIN; } } vm_object_paging_end(copy_object); - if(copy_object != object) { + } + if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) { + if (copy_object != VM_OBJECT_NULL && copy_object != object) { vm_object_unlock(copy_object); + vm_object_deallocate(copy_object); vm_object_lock(object); } + return KERN_SUCCESS; } - if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) { - return KERN_SUCCESS; - } - if(((copy_object = object->copy) != NULL) && - (flags & MEMORY_OBJECT_DATA_PURGE)) { - copy_object->shadow_severed = TRUE; - copy_object->shadowed = FALSE; - copy_object->shadow = NULL; - /* delete the ref the COW was holding on the target object */ - vm_object_deallocate(object); + if (copy_object != VM_OBJECT_NULL && copy_object != object) { + if ((flags & MEMORY_OBJECT_DATA_PURGE)) { + copy_object->shadow_severed = TRUE; + copy_object->shadowed = FALSE; 
+ copy_object->shadow = NULL; + /* + * delete the ref the COW was holding on the target object + */ + vm_object_deallocate(object); + } + vm_object_unlock(copy_object); + vm_object_deallocate(copy_object); + vm_object_lock(object); } BYPASS_COW_COPYIN: - for (; - size != 0; - size -= PAGE_SIZE, offset += PAGE_SIZE_64) - { - /* - * Limit the number of pages to be cleaned at once. - */ - if (pending_pageout && - data_cnt >= PAGE_SIZE * DATA_WRITE_MAX) - { - LIST_REQ_PAGEOUT_PAGES(object, data_cnt, - pageout_action, paging_offset); - data_cnt = 0; - pending_pageout = FALSE; - } - - while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { - page_lock_result = memory_object_lock_page(m, should_return, - should_flush, prot); - - XPR(XPR_MEMORY_OBJECT, - "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n", - (integer_t)object, offset, page_lock_result, 0, 0); - - switch (page_lock_result) - { - case MEMORY_OBJECT_LOCK_RESULT_DONE: - /* - * End of a cluster of dirty pages. - */ - if(pending_pageout) { - LIST_REQ_PAGEOUT_PAGES(object, - data_cnt, pageout_action, - paging_offset); - data_cnt = 0; - pending_pageout = FALSE; - continue; - } - break; + /* + * when we have a really large range to check relative + * to the number of actual resident pages, we'd like + * to use the resident page list to drive our checks + * however, the object lock will get dropped while processing + * the page which means the resident queue can change which + * means we can't walk the queue as we process the pages + * we also want to do the processing in offset order to allow + * 'runs' of pages to be collected if we're being told to + * flush to disk... the resident page queue is NOT ordered. + * + * a temporary solution (until we figure out how to deal with + * large address spaces more generically) is to pre-flight + * the resident page queue (if it's small enough) and develop + * a collection of extents (that encompass actual resident pages) + * to visit. This will at least allow us to deal with some of the + * more pathological cases in a more efficient manner. The current + * worst case (a single resident page at the end of an extremely large + * range) can take minutes to complete for ranges in the terrabyte + * category... since this routine is called when truncating a file, + * and we currently support files up to 16 Tbytes in size, this + * is not a theoretical problem + */ - case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK: - /* - * Since it is necessary to block, - * clean any dirty pages now. - */ - if(pending_pageout) { - LIST_REQ_PAGEOUT_PAGES(object, - data_cnt, pageout_action, - paging_offset); - pending_pageout = FALSE; - data_cnt = 0; - continue; - } + if ((object->resident_page_count < RESIDENT_LIMIT) && + (atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) { + vm_page_t next; + vm_object_offset_t start; + vm_object_offset_t end; + vm_object_size_t e_mask; + vm_page_t m; - PAGE_ASSERT_WAIT(m, THREAD_UNINT); - vm_object_unlock(object); - thread_block((void (*)(void))0); - vm_object_lock(object); - continue; + start = offset; + end = offset + size; + num_of_extents = 0; + e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1)); - case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN: - case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN: - /* - * The clean and return cases are similar. - * - */ + m = (vm_page_t) queue_first(&object->memq); - /* - * if this would form a discontiguous block, - * clean the old pages and start anew. 
- * - */ + while (!queue_end(&object->memq, (queue_entry_t) m)) { + next = (vm_page_t) queue_next(&m->listq); - /* - * Mark the page busy since we unlock the - * object below. - */ - m->busy = TRUE; - if (pending_pageout && - (last_offset != offset || - pageout_action != page_lock_result)) { - LIST_REQ_PAGEOUT_PAGES(object, - data_cnt, pageout_action, - paging_offset); - pending_pageout = FALSE; - data_cnt = 0; - } - m->busy = FALSE; - holding_page = VM_PAGE_NULL; - if(m->cleaning) { - PAGE_ASSERT_WAIT(m, THREAD_UNINT); - vm_object_unlock(object); - thread_block((void (*)(void))0); - continue; - } - if(!pending_pageout) { - pending_pageout = TRUE; - pageout_action = page_lock_result; - paging_offset = offset; - } - if (should_flush) { - vm_page_lock_queues(); - m->list_req_pending = TRUE; - m->cleaning = TRUE; - m->busy = TRUE; - m->pageout = TRUE; - vm_page_wire(m); - vm_page_unlock_queues(); - } else { - /* - * Clean but do not flush + if ((m->offset >= start) && (m->offset < end)) { + /* + * this is a page we're interested in + * try to fit it into a current extent */ - vm_page_lock_queues(); - m->list_req_pending = TRUE; - m->cleaning = TRUE; - vm_page_unlock_queues(); - + for (n = 0; n < num_of_extents; n++) { + if ((m->offset & e_mask) == extents[n].e_base) { + /* + * use (PAGE_SIZE - 1) to determine the + * max offset so that we don't wrap if + * we're at the last page of the space + */ + if (m->offset < extents[n].e_min) + extents[n].e_min = m->offset; + else if ((m->offset + (PAGE_SIZE - 1)) > extents[n].e_max) + extents[n].e_max = m->offset + (PAGE_SIZE - 1); + break; + } + } + if (n == num_of_extents) { + /* + * didn't find a current extent that can encompass + * this page + */ + if (n < MAX_EXTENTS) { + /* + * if we still have room, + * create a new extent + */ + extents[n].e_base = m->offset & e_mask; + extents[n].e_min = m->offset; + extents[n].e_max = m->offset + (PAGE_SIZE - 1); + + num_of_extents++; + } else { + /* + * no room to create a new extent... + * fall back to a single extent based + * on the min and max page offsets + * we find in the range we're interested in... + * first, look through the extent list and + * develop the overall min and max for the + * pages we've looked at up to this point + */ + for (n = 1; n < num_of_extents; n++) { + if (extents[n].e_min < extents[0].e_min) + extents[0].e_min = extents[n].e_min; + if (extents[n].e_max > extents[0].e_max) + extents[0].e_max = extents[n].e_max; + } + /* + * now setup to run through the remaining pages + * to determine the overall min and max + * offset for the specified range + */ + extents[0].e_base = 0; + e_mask = 0; + num_of_extents = 1; + + /* + * by continuing, we'll reprocess the + * page that forced us to abandon trying + * to develop multiple extents + */ + continue; + } + } } - vm_object_unlock(object); - - - data_cnt += PAGE_SIZE; - last_offset = offset + PAGE_SIZE_64; - data_returned = TRUE; - - vm_object_lock(object); - break; + m = next; } - break; - } - } + } else { + extents[0].e_min = offset; + extents[0].e_max = offset + (size - 1); - /* - * We have completed the scan for applicable pages. - * Clean any pages that have been saved. 
- */ - if (pending_pageout) { - LIST_REQ_PAGEOUT_PAGES(object, - data_cnt, pageout_action, paging_offset); + num_of_extents = 1; + } + for (n = 0; n < num_of_extents; n++) { + if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno, + should_flush, should_return, should_iosync, protection)) + data_returned = TRUE; } return (data_returned); } + /* * Routine: memory_object_synchronize_completed [user interface] * @@ -874,6 +1043,8 @@ memory_object_synchronize_completed( vm_object_t object; msync_req_t msr; + object = memory_object_control_to_vm_object(control); + XPR(XPR_MEMORY_OBJECT, "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n", (integer_t)object, offset, length, 0, 0); @@ -882,7 +1053,6 @@ memory_object_synchronize_completed( * Look for bogus arguments */ - object = memory_object_control_to_vm_object(control); if (object == VM_OBJECT_NULL) return (KERN_INVALID_ARGUMENT); @@ -918,7 +1088,6 @@ vm_object_set_attributes_common( boolean_t may_cache, memory_object_copy_strategy_t copy_strategy, boolean_t temporary, - vm_size_t cluster_size, boolean_t silent_overwrite, boolean_t advisory_pageout) { @@ -952,17 +1121,6 @@ vm_object_set_attributes_common( may_cache = TRUE; if (temporary) temporary = TRUE; - if (cluster_size != 0) { - int pages_per_cluster; - pages_per_cluster = atop(cluster_size); - /* - * Cluster size must be integral multiple of page size, - * and be a power of 2 number of pages. - */ - if ((cluster_size & (PAGE_SIZE-1)) || - ((pages_per_cluster-1) & pages_per_cluster)) - return KERN_INVALID_ARGUMENT; - } vm_object_lock(object); @@ -976,12 +1134,6 @@ vm_object_set_attributes_common( object->temporary = temporary; object->silent_overwrite = silent_overwrite; object->advisory_pageout = advisory_pageout; - if (cluster_size == 0) - cluster_size = PAGE_SIZE; - object->cluster_size = cluster_size; - - assert(cluster_size >= PAGE_SIZE && - cluster_size % PAGE_SIZE == 0); /* * Wake up anyone waiting for the ready attribute @@ -1018,7 +1170,6 @@ memory_object_change_attributes( boolean_t temporary; boolean_t may_cache; boolean_t invalidate; - vm_size_t cluster_size; memory_object_copy_strategy_t copy_strategy; boolean_t silent_overwrite; boolean_t advisory_pageout; @@ -1037,7 +1188,6 @@ memory_object_change_attributes( #if notyet invalidate = object->invalidate; #endif - cluster_size = object->cluster_size; vm_object_unlock(object); switch (flavor) { @@ -1090,7 +1240,6 @@ memory_object_change_attributes( perf = (memory_object_perf_info_t) attributes; may_cache = perf->may_cache; - cluster_size = round_page(perf->cluster_size); break; } @@ -1108,7 +1257,6 @@ memory_object_change_attributes( may_cache = attr->may_cache; copy_strategy = attr->copy_strategy; - cluster_size = page_size; break; } @@ -1126,7 +1274,6 @@ memory_object_change_attributes( copy_strategy = attr->copy_strategy; may_cache = attr->may_cache_object; - cluster_size = attr->cluster_size; temporary = attr->temporary; break; @@ -1155,7 +1302,6 @@ memory_object_change_attributes( may_cache, copy_strategy, temporary, - cluster_size, silent_overwrite, advisory_pageout)); } @@ -1232,7 +1378,7 @@ memory_object_get_attributes( } perf = (memory_object_perf_info_t) attributes; - perf->cluster_size = object->cluster_size; + perf->cluster_size = PAGE_SIZE; perf->may_cache = object->can_persist; *count = MEMORY_OBJECT_PERF_INFO_COUNT; @@ -1267,7 +1413,7 @@ memory_object_get_attributes( attr = (memory_object_attr_info_t) attributes; attr->copy_strategy = object->copy_strategy; - 
attr->cluster_size = object->cluster_size; + attr->cluster_size = PAGE_SIZE; attr->may_cache_object = object->can_persist; attr->temporary = object->temporary; @@ -1286,6 +1432,141 @@ memory_object_get_attributes( } +kern_return_t +memory_object_iopl_request( + ipc_port_t port, + memory_object_offset_t offset, + upl_size_t *upl_size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + int *flags) +{ + vm_object_t object; + kern_return_t ret; + int caller_flags; + + caller_flags = *flags; + + if (caller_flags & ~UPL_VALID_FLAGS) { + /* + * For forward compatibility's sake, + * reject any unknown flag. + */ + return KERN_INVALID_VALUE; + } + + if (ip_kotype(port) == IKOT_NAMED_ENTRY) { + vm_named_entry_t named_entry; + + named_entry = (vm_named_entry_t)port->ip_kobject; + /* a few checks to make sure user is obeying rules */ + if(*upl_size == 0) { + if(offset >= named_entry->size) + return(KERN_INVALID_RIGHT); + *upl_size = named_entry->size - offset; + } + if(caller_flags & UPL_COPYOUT_FROM) { + if((named_entry->protection & VM_PROT_READ) + != VM_PROT_READ) { + return(KERN_INVALID_RIGHT); + } + } else { + if((named_entry->protection & + (VM_PROT_READ | VM_PROT_WRITE)) + != (VM_PROT_READ | VM_PROT_WRITE)) { + return(KERN_INVALID_RIGHT); + } + } + if(named_entry->size < (offset + *upl_size)) + return(KERN_INVALID_ARGUMENT); + + /* the callers parameter offset is defined to be the */ + /* offset from beginning of named entry offset in object */ + offset = offset + named_entry->offset; + + if(named_entry->is_sub_map) + return (KERN_INVALID_ARGUMENT); + + named_entry_lock(named_entry); + + if (named_entry->is_pager) { + object = vm_object_enter(named_entry->backing.pager, + named_entry->offset + named_entry->size, + named_entry->internal, + FALSE, + FALSE); + if (object == VM_OBJECT_NULL) { + named_entry_unlock(named_entry); + return(KERN_INVALID_OBJECT); + } + + /* JMM - drop reference on pager here? */ + + /* create an extra reference for the named entry */ + vm_object_lock(object); + vm_object_reference_locked(object); + named_entry->backing.object = object; + named_entry->is_pager = FALSE; + named_entry_unlock(named_entry); + + /* wait for object to be ready */ + while (!object->pager_ready) { + vm_object_wait(object, + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); + vm_object_lock(object); + } + vm_object_unlock(object); + } else { + /* This is the case where we are going to map */ + /* an already mapped object. If the object is */ + /* not ready it is internal. An external */ + /* object cannot be mapped until it is ready */ + /* we can therefore avoid the ready check */ + /* in this case. 
*/ + object = named_entry->backing.object; + vm_object_reference(object); + named_entry_unlock(named_entry); + } + } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) { + memory_object_control_t control; + control = (memory_object_control_t) port; + if (control == NULL) + return (KERN_INVALID_ARGUMENT); + object = memory_object_control_to_vm_object(control); + if (object == VM_OBJECT_NULL) + return (KERN_INVALID_ARGUMENT); + vm_object_reference(object); + } else { + return KERN_INVALID_ARGUMENT; + } + if (object == VM_OBJECT_NULL) + return (KERN_INVALID_ARGUMENT); + + if (!object->private) { + if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE)) + *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE); + if (object->phys_contiguous) { + *flags = UPL_PHYS_CONTIG; + } else { + *flags = 0; + } + } else { + *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG; + } + + ret = vm_object_iopl_request(object, + offset, + *upl_size, + upl_ptr, + user_page_list, + page_list_count, + caller_flags); + vm_object_deallocate(object); + return ret; +} + /* * Routine: memory_object_upl_request [interface] * Purpose: @@ -1299,7 +1580,7 @@ kern_return_t memory_object_upl_request( memory_object_control_t control, memory_object_offset_t offset, - vm_size_t size, + upl_size_t size, upl_t *upl_ptr, upl_page_info_array_t user_page_list, unsigned int *page_list_count, @@ -1335,8 +1616,8 @@ kern_return_t memory_object_super_upl_request( memory_object_control_t control, memory_object_offset_t offset, - vm_size_t size, - vm_size_t super_cluster, + upl_size_t size, + upl_size_t super_cluster, upl_t *upl, upl_page_info_t *user_page_list, unsigned int *page_list_count, @@ -1358,6 +1639,27 @@ memory_object_super_upl_request( cntrl_flags); } +kern_return_t +memory_object_cluster_size(memory_object_control_t control, memory_object_offset_t *start, + vm_size_t *length, memory_object_fault_info_t fault_info) +{ + vm_object_t object; + + object = memory_object_control_to_vm_object(control); + + if (object == VM_OBJECT_NULL || object->paging_offset > *start) + return (KERN_INVALID_ARGUMENT); + + *start -= object->paging_offset; + + vm_object_cluster_size(object, (vm_object_offset_t *)start, length, (vm_object_fault_info_t)fault_info); + + *start += object->paging_offset; + + return (KERN_SUCCESS); +} + + int vm_stat_discard_cleared_reply = 0; int vm_stat_discard_cleared_unset = 0; int vm_stat_discard_cleared_too_late = 0; @@ -1376,11 +1678,12 @@ kern_return_t host_default_memory_manager( host_priv_t host_priv, memory_object_default_t *default_manager, - vm_size_t cluster_size) + __unused memory_object_cluster_size_t cluster_size) { memory_object_default_t current_manager; memory_object_default_t new_manager; memory_object_default_t returned_manager; + kern_return_t result = KERN_SUCCESS; if (host_priv == HOST_PRIV_NULL) return(KERN_INVALID_HOST); @@ -1390,14 +1693,26 @@ host_default_memory_manager( new_manager = *default_manager; mutex_lock(&memory_manager_default_lock); current_manager = memory_manager_default; + returned_manager = MEMORY_OBJECT_DEFAULT_NULL; if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) { /* * Retrieve the current value. */ - memory_object_default_reference(current_manager); returned_manager = current_manager; + memory_object_default_reference(returned_manager); } else { + + /* + * If this is the first non-null manager, start + * up the internal pager support. 
+ */ + if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) { + result = vm_pageout_internal_start(); + if (result != KERN_SUCCESS) + goto out; + } + /* * Retrieve the current value, * and replace it with the supplied value. @@ -1405,21 +1720,10 @@ host_default_memory_manager( * but we have to take a reference on the new * one. */ - returned_manager = current_manager; memory_manager_default = new_manager; memory_object_default_reference(new_manager); - if (cluster_size % PAGE_SIZE != 0) { -#if 0 - mutex_unlock(&memory_manager_default_lock); - return KERN_INVALID_ARGUMENT; -#else - cluster_size = round_page(cluster_size); -#endif - } - memory_manager_default_cluster = cluster_size; - /* * In case anyone's been waiting for a memory * manager to be established, wake them up. @@ -1427,11 +1731,11 @@ host_default_memory_manager( thread_wakeup((event_t) &memory_manager_default); } - + out: mutex_unlock(&memory_manager_default_lock); *default_manager = returned_manager; - return(KERN_SUCCESS); + return(result); } /* @@ -1443,21 +1747,22 @@ host_default_memory_manager( */ __private_extern__ memory_object_default_t -memory_manager_default_reference( - vm_size_t *cluster_size) +memory_manager_default_reference(void) { memory_object_default_t current_manager; mutex_lock(&memory_manager_default_lock); current_manager = memory_manager_default; while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) { - thread_sleep_mutex((event_t) &memory_manager_default, - &memory_manager_default_lock, THREAD_UNINT); - mutex_lock(&memory_manager_default_lock); + wait_result_t res; + + res = thread_sleep_mutex((event_t) &memory_manager_default, + &memory_manager_default_lock, + THREAD_UNINT); + assert(res == THREAD_AWAKENED); current_manager = memory_manager_default; } memory_object_default_reference(current_manager); - *cluster_size = memory_manager_default_cluster; mutex_unlock(&memory_manager_default_lock); return current_manager; @@ -1500,87 +1805,10 @@ __private_extern__ void memory_manager_default_init(void) { memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL; - mutex_init(&memory_manager_default_lock, ETAP_VM_MEMMAN); + mutex_init(&memory_manager_default_lock, 0); } -void -memory_object_deactivate_pages( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - boolean_t kill_page) -{ - vm_object_t orig_object; - int pages_moved = 0; - int pages_found = 0; - - /* - * entered with object lock held, acquire a paging reference to - * prevent the memory_object and control ports from - * being destroyed. 
- */ - orig_object = object; - - for (;;) { - register vm_page_t m; - vm_object_offset_t toffset; - vm_object_size_t tsize; - - vm_object_paging_begin(object); - vm_page_lock_queues(); - - for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) { - - if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) { - - pages_found++; - - if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) { - - m->reference = FALSE; - pmap_clear_reference(m->phys_addr); - - if ((kill_page) && (object->internal)) { - m->precious = FALSE; - m->dirty = FALSE; - pmap_clear_modify(m->phys_addr); - vm_external_state_clr(object->existence_map, offset); - } - VM_PAGE_QUEUES_REMOVE(m); - - queue_enter_first(&vm_page_queue_inactive, m, vm_page_t, pageq); - - m->inactive = TRUE; - if (!m->fictitious) - vm_page_inactive_count++; - - pages_moved++; - } - } - } - vm_page_unlock_queues(); - vm_object_paging_end(object); - - if (object->shadow) { - vm_object_t tmp_object; - - kill_page = 0; - - offset += object->shadow_offset; - - tmp_object = object->shadow; - vm_object_lock(tmp_object); - - if (object != orig_object) - vm_object_unlock(object); - object = tmp_object; - } else - break; - } - if (object != orig_object) - vm_object_unlock(object); -} /* Allow manipulation of individual page state. This is actually part of */ /* the UPL regimen but takes place on the object rather than on a UPL */ @@ -1590,114 +1818,85 @@ memory_object_page_op( memory_object_control_t control, memory_object_offset_t offset, int ops, - vm_offset_t *phys_entry, + ppnum_t *phys_entry, int *flags) { vm_object_t object; - vm_page_t dst_page; - object = memory_object_control_to_vm_object(control); if (object == VM_OBJECT_NULL) return (KERN_INVALID_ARGUMENT); - vm_object_lock(object); + return vm_object_page_op(object, offset, ops, phys_entry, flags); +} - if(ops & UPL_POP_PHYSICAL) { - if(object->phys_contiguous) { - if (phys_entry) { - *phys_entry = (vm_offset_t) - object->shadow_offset; - } - vm_object_unlock(object); - return KERN_SUCCESS; - } else { - vm_object_unlock(object); - return KERN_INVALID_OBJECT; - } - } +/* + * memory_object_range_op offers performance enhancement over + * memory_object_page_op for page_op functions which do not require page + * level state to be returned from the call. Page_op was created to provide + * a low-cost alternative to page manipulation via UPLs when only a single + * page was involved. The range_op call establishes the ability in the _op + * family of functions to work on multiple pages where the lack of page level + * state handling allows the caller to avoid the overhead of the upl structures. 
+ */ - while(TRUE) { - if(object->phys_contiguous) { - vm_object_unlock(object); - return KERN_INVALID_OBJECT; - } +kern_return_t +memory_object_range_op( + memory_object_control_t control, + memory_object_offset_t offset_beg, + memory_object_offset_t offset_end, + int ops, + int *range) +{ + vm_object_t object; - if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) { - vm_object_unlock(object); - return KERN_FAILURE; - } + object = memory_object_control_to_vm_object(control); + if (object == VM_OBJECT_NULL) + return (KERN_INVALID_ARGUMENT); - /* Sync up on getting the busy bit */ - if((dst_page->busy || dst_page->cleaning) && - (((ops & UPL_POP_SET) && - (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) { - /* someone else is playing with the page, we will */ - /* have to wait */ - PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT); - vm_object_unlock(object); - thread_block((void(*)(void))0); - vm_object_lock(object); - continue; - } + return vm_object_range_op(object, + offset_beg, + offset_end, + ops, + range); +} - if (ops & UPL_POP_DUMP) { - vm_page_lock_queues(); - vm_page_free(dst_page); - vm_page_unlock_queues(); - break; - } - if (flags) { - *flags = 0; +kern_return_t +memory_object_pages_resident( + memory_object_control_t control, + boolean_t * has_pages_resident) +{ + vm_object_t object; - /* Get the condition of flags before requested ops */ - /* are undertaken */ + *has_pages_resident = FALSE; - if(dst_page->dirty) *flags |= UPL_POP_DIRTY; - if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT; - if(dst_page->precious) *flags |= UPL_POP_PRECIOUS; - if(dst_page->absent) *flags |= UPL_POP_ABSENT; - if(dst_page->busy) *flags |= UPL_POP_BUSY; - } - if (phys_entry) - *phys_entry = dst_page->phys_addr; + object = memory_object_control_to_vm_object(control); + if (object == VM_OBJECT_NULL) + return (KERN_INVALID_ARGUMENT); + + if (object->resident_page_count) + *has_pages_resident = TRUE; - /* The caller should have made a call either contingent with */ - /* or prior to this call to set UPL_POP_BUSY */ - if(ops & UPL_POP_SET) { - /* The protection granted with this assert will */ - /* not be complete. If the caller violates the */ - /* convention and attempts to change page state */ - /* without first setting busy we may not see it */ - /* because the page may already be busy. However */ - /* if such violations occur we will assert sooner */ - /* or later. 
*/ - assert(dst_page->busy || (ops & UPL_POP_BUSY)); - if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE; - if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE; - if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE; - if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE; - if (ops & UPL_POP_BUSY) dst_page->busy = TRUE; - } + return (KERN_SUCCESS); +} - if(ops & UPL_POP_CLR) { - assert(dst_page->busy); - if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE; - if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE; - if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE; - if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE; - if (ops & UPL_POP_BUSY) { - dst_page->busy = FALSE; - PAGE_WAKEUP(dst_page); - } - } - break; - } +kern_return_t +memory_object_signed( + memory_object_control_t control, + boolean_t is_signed) +{ + vm_object_t object; + object = memory_object_control_to_vm_object(control); + if (object == VM_OBJECT_NULL) + return KERN_INVALID_ARGUMENT; + + vm_object_lock(object); + object->code_signed = is_signed; vm_object_unlock(object); + return KERN_SUCCESS; - } static zone_t mem_obj_control_zone; @@ -1719,8 +1918,10 @@ memory_object_control_allocate( memory_object_control_t control; control = (memory_object_control_t)zalloc(mem_obj_control_zone); - if (control != MEMORY_OBJECT_CONTROL_NULL) - control->object = object; + if (control != MEMORY_OBJECT_CONTROL_NULL) { + control->moc_object = object; + control->moc_ikot = IKOT_MEM_OBJ_CONTROL; /* fake ip_kotype */ + } return (control); } @@ -1729,24 +1930,25 @@ memory_object_control_collapse( memory_object_control_t control, vm_object_t object) { - assert((control->object != VM_OBJECT_NULL) && - (control->object != object)); - control->object = object; + assert((control->moc_object != VM_OBJECT_NULL) && + (control->moc_object != object)); + control->moc_object = object; } __private_extern__ vm_object_t memory_object_control_to_vm_object( memory_object_control_t control) { - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL || + control->moc_ikot != IKOT_MEM_OBJ_CONTROL) return VM_OBJECT_NULL; - return (control->object); + return (control->moc_object); } memory_object_control_t convert_port_to_mo_control( - mach_port_t port) + __unused mach_port_t port) { return MEMORY_OBJECT_CONTROL_NULL; } @@ -1754,14 +1956,14 @@ convert_port_to_mo_control( mach_port_t convert_mo_control_to_port( - memory_object_control_t control) + __unused memory_object_control_t control) { return MACH_PORT_NULL; } void memory_object_control_reference( - memory_object_control_t control) + __unused memory_object_control_t control) { return; } @@ -1775,15 +1977,15 @@ void memory_object_control_deallocate( memory_object_control_t control) { - zfree(mem_obj_control_zone, (vm_offset_t)control); + zfree(mem_obj_control_zone, control); } void memory_object_control_disable( memory_object_control_t control) { - assert(control->object != VM_OBJECT_NULL); - control->object = VM_OBJECT_NULL; + assert(control->moc_object != VM_OBJECT_NULL); + control->moc_object = VM_OBJECT_NULL; } void @@ -1802,7 +2004,7 @@ memory_object_default_deallocate( memory_object_t convert_port_to_memory_object( - mach_port_t port) + __unused mach_port_t port) { return (MEMORY_OBJECT_NULL); } @@ -1810,54 +2012,26 @@ convert_port_to_memory_object( mach_port_t convert_memory_object_to_port( - memory_object_t object) + __unused memory_object_t object) { return (MACH_PORT_NULL); } -#ifdef MACH_BSD -/* remove after component interface available */ -extern int 
vnode_pager_workaround; -extern int device_pager_workaround; -#endif - /* Routine memory_object_reference */ void memory_object_reference( memory_object_t memory_object) { -extern void dp_memory_object_reference(memory_object_t); - -#ifdef MACH_BSD - extern void vnode_pager_reference(memory_object_t); - extern void device_pager_reference(memory_object_t); - - if(memory_object->pager == &vnode_pager_workaround) { - vnode_pager_reference(memory_object); - } else if(memory_object->pager == &device_pager_workaround) { - device_pager_reference(memory_object); - } else -#endif - dp_memory_object_reference(memory_object); + (memory_object->mo_pager_ops->memory_object_reference)( + memory_object); } /* Routine memory_object_deallocate */ void memory_object_deallocate( memory_object_t memory_object) { -extern void dp_memory_object_deallocate(memory_object_t); - -#ifdef MACH_BSD - extern void vnode_pager_deallocate(memory_object_t); - extern void device_pager_deallocate(memory_object_t); - - if(memory_object->pager == &vnode_pager_workaround) { - vnode_pager_deallocate(memory_object); - } else if(memory_object->pager == &device_pager_workaround) { - device_pager_deallocate(memory_object); - } else -#endif - dp_memory_object_deallocate(memory_object); + (memory_object->mo_pager_ops->memory_object_deallocate)( + memory_object); } @@ -1866,33 +2040,13 @@ kern_return_t memory_object_init ( memory_object_t memory_object, memory_object_control_t memory_control, - vm_size_t memory_object_page_size + memory_object_cluster_size_t memory_object_page_size ) { -extern kern_return_t dp_memory_object_init(memory_object_t, - memory_object_control_t, - vm_size_t); -#ifdef MACH_BSD -extern kern_return_t vnode_pager_init(memory_object_t, - memory_object_control_t, - vm_size_t); -extern kern_return_t device_pager_init(memory_object_t, - memory_object_control_t, - vm_size_t); - - if(memory_object->pager == &vnode_pager_workaround) { - return vnode_pager_init(memory_object, - memory_control, - memory_object_page_size); - } else if(memory_object->pager == &device_pager_workaround) { - return device_pager_init(memory_object, - memory_control, - memory_object_page_size); - } else -#endif - return dp_memory_object_init(memory_object, - memory_control, - memory_object_page_size); + return (memory_object->mo_pager_ops->memory_object_init)( + memory_object, + memory_control, + memory_object_page_size); } /* Routine memory_object_terminate */ @@ -1901,19 +2055,8 @@ kern_return_t memory_object_terminate memory_object_t memory_object ) { -extern kern_return_t dp_memory_object_terminate(memory_object_t); - -#ifdef MACH_BSD -extern kern_return_t vnode_pager_terminate(memory_object_t); -extern kern_return_t device_pager_terminate(memory_object_t); - - if(memory_object->pager == &vnode_pager_workaround) { - return vnode_pager_terminate(memory_object); - } else if(memory_object->pager == &device_pager_workaround) { - return device_pager_terminate(memory_object); - } else -#endif - return dp_memory_object_terminate(memory_object); + return (memory_object->mo_pager_ops->memory_object_terminate)( + memory_object); } /* Routine memory_object_data_request */ @@ -1921,35 +2064,17 @@ kern_return_t memory_object_data_request ( memory_object_t memory_object, memory_object_offset_t offset, - vm_size_t length, - vm_prot_t desired_access + memory_object_cluster_size_t length, + vm_prot_t desired_access, + memory_object_fault_info_t fault_info ) { -extern kern_return_t dp_memory_object_data_request(memory_object_t, - memory_object_offset_t, 
vm_size_t, vm_prot_t); - -#ifdef MACH_BSD -extern kern_return_t vnode_pager_data_request(memory_object_t, - memory_object_offset_t, vm_size_t, vm_prot_t); -extern kern_return_t device_pager_data_request(memory_object_t, - memory_object_offset_t, vm_size_t, vm_prot_t); - - if (memory_object->pager == &vnode_pager_workaround) { - return vnode_pager_data_request(memory_object, - offset, - length, - desired_access); - } else if (memory_object->pager == &device_pager_workaround) { - return device_pager_data_request(memory_object, - offset, - length, - desired_access); - } else -#endif - return dp_memory_object_data_request(memory_object, - offset, - length, - desired_access); + return (memory_object->mo_pager_ops->memory_object_data_request)( + memory_object, + offset, + length, + desired_access, + fault_info); } /* Routine memory_object_data_return */ @@ -1958,46 +2083,22 @@ kern_return_t memory_object_data_return memory_object_t memory_object, memory_object_offset_t offset, vm_size_t size, + memory_object_offset_t *resid_offset, + int *io_error, boolean_t dirty, - boolean_t kernel_copy + boolean_t kernel_copy, + int upl_flags ) { - extern kern_return_t dp_memory_object_data_return(memory_object_t, - memory_object_offset_t, - vm_size_t, - boolean_t, - boolean_t); -#ifdef MACH_BSD - extern kern_return_t vnode_pager_data_return(memory_object_t, - memory_object_offset_t, - vm_size_t, - boolean_t, - boolean_t); - extern kern_return_t device_pager_data_return(memory_object_t, - memory_object_offset_t, - vm_size_t, - boolean_t, - boolean_t); - - if (memory_object->pager == &vnode_pager_workaround) { - return vnode_pager_data_return(memory_object, - offset, - size, - dirty, - kernel_copy); - } else if (memory_object->pager == &device_pager_workaround) { - return device_pager_data_return(memory_object, - offset, - size, - dirty, - kernel_copy); - } else -#endif - return dp_memory_object_data_return(memory_object, - offset, - size, - dirty, - kernel_copy); + return (memory_object->mo_pager_ops->memory_object_data_return)( + memory_object, + offset, + size, + resid_offset, + io_error, + dirty, + kernel_copy, + upl_flags); } /* Routine memory_object_data_initialize */ @@ -2008,31 +2109,10 @@ kern_return_t memory_object_data_initialize vm_size_t size ) { - - extern kern_return_t dp_memory_object_data_initialize(memory_object_t, - memory_object_offset_t, - vm_size_t); -#ifdef MACH_BSD - extern kern_return_t vnode_pager_data_initialize(memory_object_t, - memory_object_offset_t, - vm_size_t); - extern kern_return_t device_pager_data_initialize(memory_object_t, - memory_object_offset_t, - vm_size_t); - - if (memory_object->pager == &vnode_pager_workaround) { - return vnode_pager_data_initialize(memory_object, - offset, - size); - } else if (memory_object->pager == &device_pager_workaround) { - return device_pager_data_initialize(memory_object, - offset, - size); - } else -#endif - return dp_memory_object_data_initialize(memory_object, - offset, - size); + return (memory_object->mo_pager_ops->memory_object_data_initialize)( + memory_object, + offset, + size); } /* Routine memory_object_data_unlock */ @@ -2044,37 +2124,11 @@ kern_return_t memory_object_data_unlock vm_prot_t desired_access ) { - extern kern_return_t dp_memory_object_data_unlock(memory_object_t, - memory_object_offset_t, - vm_size_t, - vm_prot_t); -#ifdef MACH_BSD - extern kern_return_t vnode_pager_data_unlock(memory_object_t, - memory_object_offset_t, - vm_size_t, - vm_prot_t); - extern kern_return_t device_pager_data_unlock(memory_object_t, - 
memory_object_offset_t, - vm_size_t, - vm_prot_t); - - if (memory_object->pager == &vnode_pager_workaround) { - return vnode_pager_data_unlock(memory_object, - offset, - size, - desired_access); - } else if (memory_object->pager == &device_pager_workaround) { - return device_pager_data_unlock(memory_object, - offset, - size, - desired_access); - } else -#endif - return dp_memory_object_data_unlock(memory_object, - offset, - size, - desired_access); - + return (memory_object->mo_pager_ops->memory_object_data_unlock)( + memory_object, + offset, + size, + desired_access); } /* Routine memory_object_synchronize */ @@ -2086,39 +2140,11 @@ kern_return_t memory_object_synchronize vm_sync_t sync_flags ) { - extern kern_return_t dp_memory_object_data_synchronize(memory_object_t, - memory_object_offset_t, - vm_size_t, - vm_sync_t); -#ifdef MACH_BSD - extern kern_return_t vnode_pager_data_synchronize(memory_object_t, - memory_object_offset_t, - vm_size_t, - vm_sync_t); - extern kern_return_t device_pager_data_synchronize(memory_object_t, - memory_object_offset_t, - vm_size_t, - vm_sync_t); - - if (memory_object->pager == &vnode_pager_workaround) { - return vnode_pager_synchronize( - memory_object, - offset, - size, - sync_flags); - } else if (memory_object->pager == &device_pager_workaround) { - return device_pager_synchronize( - memory_object, - offset, - size, - sync_flags); - } else -#endif - return dp_memory_object_synchronize( - memory_object, - offset, - size, - sync_flags); + return (memory_object->mo_pager_ops->memory_object_synchronize)( + memory_object, + offset, + size, + sync_flags); } /* Routine memory_object_unmap */ @@ -2127,18 +2153,8 @@ kern_return_t memory_object_unmap memory_object_t memory_object ) { - extern kern_return_t dp_memory_object_unmap(memory_object_t); -#ifdef MACH_BSD - extern kern_return_t vnode_pager_unmap(memory_object_t); - extern kern_return_t device_pager_unmap(memory_object_t); - - if (memory_object->pager == &vnode_pager_workaround) { - return vnode_pager_unmap(memory_object); - } else if (memory_object->pager == &device_pager_workaround) { - return device_pager_unmap(memory_object); - } else -#endif - return dp_memory_object_unmap(memory_object); + return (memory_object->mo_pager_ops->memory_object_unmap)( + memory_object); } /* Routine memory_object_create */ @@ -2149,12 +2165,41 @@ kern_return_t memory_object_create memory_object_t *new_memory_object ) { -extern kern_return_t default_pager_memory_object_create(memory_object_default_t, - vm_size_t, - memory_object_t *); - return default_pager_memory_object_create(default_memory_manager, new_memory_object_size, new_memory_object); } +upl_t +convert_port_to_upl( + ipc_port_t port) +{ + upl_t upl; + + ip_lock(port); + if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) { + ip_unlock(port); + return (upl_t)NULL; + } + upl = (upl_t) port->ip_kobject; + ip_unlock(port); + upl_lock(upl); + upl->ref_count+=1; + upl_unlock(upl); + return upl; +} + +mach_port_t +convert_upl_to_port( + __unused upl_t upl) +{ + return MACH_PORT_NULL; +} + +__private_extern__ void +upl_no_senders( + __unused ipc_port_t port, + __unused mach_port_mscount_t mscount) +{ + return; +}
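
The most interesting addition above is the extent pre-flight in vm_object_update(): as its comment explains, a single resident page at the end of a multi-terabyte range used to take minutes to find by stepping PAGE_SIZE at a time, so the code now walks the (unordered) resident page queue once and coalesces the pages into at most MAX_EXTENTS ranges keyed by their 256MB-aligned base, collapsing to a single min/max extent when the slots run out. Below is a minimal user-space sketch of that clustering step; collect_extents() and its types are illustrative stand-ins, not kernel interfaces.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ      4096ULL
#define MAX_EXTENTS  8
#define EXTENT_SIZE  (256ULL * 1024 * 1024)

struct extent {
	uint64_t e_base;	/* EXTENT_SIZE-aligned base shared by the pages */
	uint64_t e_min;		/* lowest page offset placed in this extent */
	uint64_t e_max;		/* highest byte offset placed in this extent */
};

/* cluster the resident offsets that fall in [start, end) into extents */
static int
collect_extents(const uint64_t *pages, int npages,
		uint64_t start, uint64_t end, struct extent *ext)
{
	uint64_t e_mask = ~(EXTENT_SIZE - 1);
	int	 n, num = 0;

	for (int i = 0; i < npages; i++) {
		uint64_t off = pages[i];

		if (off < start || off >= end)
			continue;
		for (n = 0; n < num; n++) {
			if ((off & e_mask) == ext[n].e_base) {
				if (off < ext[n].e_min)
					ext[n].e_min = off;
				else if (off + (PAGE_SZ - 1) > ext[n].e_max)
					ext[n].e_max = off + (PAGE_SZ - 1);
				break;
			}
		}
		if (n < num)
			continue;	/* fit into an existing extent */
		if (num < MAX_EXTENTS) {
			/* room left: open a new extent for this base */
			ext[num].e_base = off & e_mask;
			ext[num].e_min  = off;
			ext[num].e_max  = off + (PAGE_SZ - 1);
			num++;
		} else {
			/*
			 * out of slots: collapse everything into one
			 * min/max extent, then reprocess this page
			 */
			for (n = 1; n < num; n++) {
				if (ext[n].e_min < ext[0].e_min)
					ext[0].e_min = ext[n].e_min;
				if (ext[n].e_max > ext[0].e_max)
					ext[0].e_max = ext[n].e_max;
			}
			ext[0].e_base = 0;
			e_mask = 0;
			num = 1;
			i--;
		}
	}
	return num;
}

int
main(void)
{
	uint64_t pages[] = { 0x1000, 0x3000, 0x40000000ULL, 0x40002000ULL };
	struct extent ext[MAX_EXTENTS];
	int n = collect_extents(pages,
	    (int)(sizeof(pages) / sizeof(pages[0])), 0, 1ULL << 44, ext);

	for (int i = 0; i < n; i++)
		printf("extent %d: [0x%llx, 0x%llx]\n", i,
		    (unsigned long long)ext[i].e_min,
		    (unsigned long long)ext[i].e_max);
	return 0;
}

Once the fallback collapses the table, e_mask is zeroed, so every later page matches extents[0].e_base and simply widens the single extent; that is why the overflowing page can just be reprocessed.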
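
Another new wrinkle is how vm_object_update() now captures object->copy: it takes the copy object's lock while already holding the object lock, i.e. "swimming upstream" against the normal lock ordering, so it loops on vm_object_lock_try() and backs off with mutex_pause() after each collision rather than blocking. A rough POSIX-threads rendering of that retry/backoff idiom, with hypothetical lock names:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t copy_lock   = PTHREAD_MUTEX_INITIALIZER;

/*
 * Take copy_lock while holding object_lock without deadlocking against
 * threads that acquire them in the opposite (normal) order: try-lock,
 * and on failure drop what we hold, pause, and start over.
 */
static void
lock_object_then_copy(void)
{
	int collisions = 0;

	pthread_mutex_lock(&object_lock);
	while (pthread_mutex_trylock(&copy_lock) != 0) {
		pthread_mutex_unlock(&object_lock);

		/* crude stand-in for mutex_pause(collisions) */
		collisions++;
		for (int spin = 0; spin < collisions; spin++)
			sched_yield();

		pthread_mutex_lock(&object_lock);
	}
	/* both locks held here */
	printf("won both locks after %d collision(s)\n", collisions);
	pthread_mutex_unlock(&copy_lock);
	pthread_mutex_unlock(&object_lock);
}

int
main(void)
{
	lock_object_then_copy();
	return 0;
}

In the kernel path the winner then drops the object lock instead and takes a real reference on the copy object before paging against it, but the try/drop/pause/retry skeleton is the same.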
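
Finally, the long tail of the diff retires the MACH_BSD workaround chains: instead of comparing memory_object->pager against vnode_pager_workaround or device_pager_workaround and calling the matching pager routine, every wrapper now makes one indirect call through the pager's mo_pager_ops vector, so new pagers plug in without touching this file. A toy model of that dispatch shape, using an invented pager_ops struct and pager (only the shape mirrors the kernel's ops vector):

#include <stdio.h>

typedef struct memory_object *memory_object_t;

/* per-pager operations vector; member names follow the diff */
struct pager_ops {
	void (*memory_object_reference)(memory_object_t);
	void (*memory_object_deallocate)(memory_object_t);
	const char *pager_name;
};

struct memory_object {
	const struct pager_ops *mo_pager_ops;
};

/* a hypothetical pager; the vnode, device and default pagers would
 * each export one table like this */
static void
toy_reference(memory_object_t mo)
{
	printf("%s: reference\n", mo->mo_pager_ops->pager_name);
}

static void
toy_deallocate(memory_object_t mo)
{
	printf("%s: deallocate\n", mo->mo_pager_ops->pager_name);
}

static const struct pager_ops toy_pager_ops = {
	.memory_object_reference  = toy_reference,
	.memory_object_deallocate = toy_deallocate,
	.pager_name               = "toy_pager",
};

/* each trampoline collapses to a single indirect call */
void
memory_object_reference(memory_object_t memory_object)
{
	(memory_object->mo_pager_ops->memory_object_reference)(memory_object);
}

void
memory_object_deallocate(memory_object_t memory_object)
{
	(memory_object->mo_pager_ops->memory_object_deallocate)(memory_object);
}

int
main(void)
{
	struct memory_object mo = { &toy_pager_ops };

	memory_object_reference(&mo);
	memory_object_deallocate(&mo);
	return 0;
}

In this scheme, registering a pager amounts to pointing mo_pager_ops at its ops table when the memory object is created.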