+
/*
* Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
*
vm_prot_t full_fault_type;
+
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_START,
vaddr,
0,
while (TRUE) {
m = vm_page_lookup(cur_object, cur_offset);
if (m != VM_PAGE_NULL) {
- if (m->busy)
- break;
+ if (m->busy) {
+ wait_result_t result;
+
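+ /*
+ * The page is busy (already in transit to or from the
+ * pager). Instead of dropping into the slow path, release
+ * every lock we hold, wait for the page, and retry the
+ * entire fault from the top.
+ */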
+ if (object != cur_object)
+ vm_object_unlock(object);
+
+ vm_map_unlock_read(map);
+ if (pmap_map != map)
+ vm_map_unlock(pmap_map);
+
+#if !VM_FAULT_STATIC_CONFIG
+ if (!vm_fault_interruptible)
+ interruptible = THREAD_UNINT;
+#endif
+ result = PAGE_ASSERT_WAIT(m, interruptible);
+ vm_object_unlock(cur_object);
+
+ if (result == THREAD_WAITING) {
+ result = thread_block(THREAD_CONTINUE_NULL);
+
+ counter(c_vm_fault_page_block_busy_kernel++);
+ }
+ if (result == THREAD_AWAKENED || result == THREAD_RESTART)
+ goto RetryFault;
+
+ kr = KERN_ABORTED;
+ goto done;
+ }
if (m->unusual && (m->error || m->restart || m->private
|| m->absent || (fault_type & m->page_lock))) {
- /*
+ /*
* Unusual case. Give up.
*/
break;
m->busy = TRUE;
vm_object_paging_begin(object);
- vm_object_unlock(object);
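+ /*
+ * note: the object is intentionally left locked here;
+ * FastPmapEnter now runs with the object lock held.
+ */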
FastPmapEnter:
/*
prot &= ~VM_PROT_WRITE;
#endif /* MACH_KDB */
#endif /* STATIC_CONFIG */
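+ /*
+ * If a delayed cache sync is still pending for this page,
+ * do it now and clear no_isync while the object lock is
+ * held (formerly the flag was cleared further down, after
+ * the lock had been dropped and retaken).
+ */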
- if (m->no_isync == TRUE)
+ if (m->no_isync == TRUE) {
pmap_sync_caches_phys(m->phys_addr);
+ m->no_isync = FALSE;
+ }
cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
if(caller_pmap) {
PMAP_ENTER(caller_pmap, caller_pmap_addr, m,
prot, cache_attr, wired);
} else {
PMAP_ENTER(pmap, vaddr, m,
prot, cache_attr, wired);
}
- {
- tws_hash_line_t line;
- task_t task;
- task = current_task();
- if((map != NULL) &&
- (task->dynamic_working_set != 0) &&
- !(object->private)) {
- kern_return_t kr;
- vm_object_t base_object;
- vm_object_offset_t base_offset;
- base_object = object;
- base_offset = cur_offset;
- while(base_object->shadow) {
- base_offset +=
- base_object->shadow_offset;
- base_object =
- base_object->shadow;
- }
- kr = tws_lookup((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- &line);
- if(kr == KERN_OPERATION_TIMED_OUT){
- write_startup_file = 1;
- } else if (kr != KERN_SUCCESS) {
- kr = tws_insert((tws_hash_t)
- task->dynamic_working_set,
- base_offset, base_object,
- vaddr, pmap_map);
- if(kr == KERN_NO_SPACE) {
- tws_expand_working_set(
- task->dynamic_working_set,
- TWS_HASH_LINE_COUNT,
- FALSE);
- }
- if(kr ==
- KERN_OPERATION_TIMED_OUT) {
- write_startup_file = 1;
- }
- }
- }
- }
/*
- * Grab the object lock to manipulate
+ * Grab the queues lock to manipulate
* the page queues. Change wiring
* case is obvious. In soft ref bits
* case activate page only if it fell
* off paging queues, otherwise just
* activate it if it's inactive.
*
* NOTE: original vm_fault code will
* move active page to back of active
* queue. This code doesn't.
*/
- vm_object_lock(object);
vm_page_lock_queues();
if (m->clustered) {
vm_pagein_cluster_used++;
m->clustered = FALSE;
}
- /*
- * we did the isync above (if needed)... we're clearing
- * the flag here to avoid holding a lock
- * while calling pmap functions, however
- * we need hold the object lock before
- * we can modify the flag
- */
- m->no_isync = FALSE;
m->reference = TRUE;
if (change_wiring) {
*/
PAGE_WAKEUP_DONE(m);
vm_object_paging_end(object);
+
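+ /*
+ * Dynamic working-set (TWS) bookkeeping, moved here from
+ * before the page-queue manipulation: record the faulting
+ * page in the task's working set, dropping the object
+ * lock around tws_expand_working_set().
+ */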
+ {
+ tws_hash_line_t line;
+ task_t task;
+
+ task = current_task();
+ if((map != NULL) &&
+ (task->dynamic_working_set != 0) &&
+ !(object->private)) {
+ kern_return_t kr;
+ vm_object_t base_object;
+ vm_object_offset_t base_offset;
+ base_object = object;
+ base_offset = cur_offset;
+ while(base_object->shadow) {
+ base_offset +=
+ base_object->shadow_offset;
+ base_object =
+ base_object->shadow;
+ }
+ kr = tws_lookup((tws_hash_t)
+ task->dynamic_working_set,
+ base_offset, base_object,
+ &line);
+ if(kr == KERN_OPERATION_TIMED_OUT){
+ write_startup_file = 1;
+ } else if (kr != KERN_SUCCESS) {
+ kr = tws_insert((tws_hash_t)
+ task->dynamic_working_set,
+ base_offset, base_object,
+ vaddr, pmap_map);
+ if(kr == KERN_NO_SPACE) {
+ vm_object_unlock(object);
+
+ tws_expand_working_set(
+ task->dynamic_working_set,
+ TWS_HASH_LINE_COUNT,
+ FALSE);
+
+ vm_object_lock(object);
+ }
+ if(kr ==
+ KERN_OPERATION_TIMED_OUT) {
+ write_startup_file = 1;
+ }
+ }
+ }
+ }
vm_object_unlock(object);
+
vm_map_unlock_read(map);
if(pmap_map != map)
vm_map_unlock(pmap_map);
if(write_startup_file)
tws_send_startup_info(current_task());
- if (funnel_set) {
+ if (funnel_set)
thread_funnel_set( curflock, TRUE);
- funnel_set = FALSE;
- }
+
thread_interrupt_level(interruptible_state);
+
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END,
vaddr,
type_of_fault & 0xff,
KERN_SUCCESS,
type_of_fault >> 8,
0);
+
return KERN_SUCCESS;
}
if (cur_object == object)
break;
-
/*
* This is now a shadow based copy on write
* fault -- it requires a copy up the shadow
if (m == VM_PAGE_NULL) {
break;
}
-
/*
* Now do the copy. Mark the source busy
* and take out paging references on both
vm_object_paging_end(object);
vm_object_collapse(object);
vm_object_paging_begin(object);
- vm_object_unlock(object);
goto FastPmapEnter;
}
/*
* Have to talk to the pager. Give up.
*/
-
break;
}
m->inactive = TRUE;
vm_page_inactive_count++;
vm_page_unlock_queues();
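+ /* FastPmapEnter is now entered with the object lock held */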
+ vm_object_lock(object);
+
goto FastPmapEnter;
}
vm_object_unlock(cur_object);
}
vm_map_unlock_read(map);
+
if(pmap_map != map)
vm_map_unlock(pmap_map);
if(m != VM_PAGE_NULL) {
old_copy_object = m->object->copy;
-
vm_object_unlock(m->object);
} else {
old_copy_object = VM_OBJECT_NULL;
}
cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
- vm_object_unlock(m->object);
if(caller_pmap) {
PMAP_ENTER(caller_pmap,
caller_pmap_addr, m,
prot, cache_attr, wired);
}
kr = tws_insert((tws_hash_t) task->dynamic_working_set,
base_offset, base_object,
vaddr, pmap_map);
if(kr == KERN_NO_SPACE) {
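+ /* as in the fast path, drop the object lock across the expand */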
+ vm_object_unlock(m->object);
tws_expand_working_set(
task->dynamic_working_set,
TWS_HASH_LINE_COUNT,
FALSE);
+ vm_object_lock(m->object);
}
if(kr == KERN_OPERATION_TIMED_OUT) {
write_startup_file = 1;
vm_map_entry_t entry;
vm_offset_t laddr;
vm_offset_t ldelta, hdelta;
+
/*
* do a pmap block mapping from the physical address
* in the object
* pageout daemon can find it.
*/
if(m != VM_PAGE_NULL) {
- vm_object_lock(m->object);
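+ /* m->object is already locked by the time we get here */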
vm_page_lock_queues();
if (change_wiring) {
kr,
type_of_fault >> 8,
0);
+
return(kr);
}
}
cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
- vm_object_unlock(object);
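+ /* PMAP_ENTER is now called with the object lock still held */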
PMAP_ENTER(pmap, pmap_addr, m, prot, cache_attr, TRUE);
- /*
- * Must relock object so that paging_in_progress can be cleared.
- */
- vm_object_lock(object);
-
/*
* Unlock everything, and return
*/