/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
+#include <mach/vm_statistics.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/zalloc.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
#include <ddb/tr.h>
+#include <machine/db_machdep.h>
#include <kern/xpr.h>
/* Internal prototypes
*/
vm_size_t vm_map_aggressive_enter_max; /* set by bootstrap */
+/* Skip acquiring locks if we're in the midst of a kernel core dump */
+extern unsigned int not_in_kdp;
+
void
vm_map_init(
void)
vm_map_steal_memory(
void)
{
- map_data_size = round_page(10 * sizeof(struct vm_map));
+ map_data_size = round_page_32(10 * sizeof(struct vm_map));
map_data = pmap_steal_memory(map_data_size);
#if 0
kentry_data_size =
- round_page(kentry_count * sizeof(struct vm_map_entry));
+ round_page_32(kentry_count * sizeof(struct vm_map_entry));
kentry_data = pmap_steal_memory(kentry_data_size);
}
result->max_offset = max;
result->wiring_required = FALSE;
result->no_zero_fill = FALSE;
+ result->mapped = FALSE;
result->wait_for_space = FALSE;
result->first_free = vm_map_to_entry(result);
result->hint = vm_map_to_entry(result);
entry = vm_map_to_entry(map);
next = entry->vme_next;
- while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) ||
- (trunc_page(next->vme_start) == trunc_page(entry->vme_start) &&
+ while (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_end) ||
+ (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_start) &&
next != vm_map_to_entry(map))) {
entry = next;
next = entry->vme_next;
UFF_map = (map); \
UFF_first_free = (new_first_free); \
UFF_next_entry = UFF_first_free->vme_next; \
- while (trunc_page(UFF_next_entry->vme_start) == \
- trunc_page(UFF_first_free->vme_end) || \
- (trunc_page(UFF_next_entry->vme_start) == \
- trunc_page(UFF_first_free->vme_start) && \
+ while (trunc_page_32(UFF_next_entry->vme_start) == \
+ trunc_page_32(UFF_first_free->vme_end) || \
+ (trunc_page_32(UFF_next_entry->vme_start) == \
+ trunc_page_32(UFF_first_free->vme_start) && \
UFF_next_entry != vm_map_to_entry(UFF_map))) { \
UFF_first_free = UFF_next_entry; \
UFF_next_entry = UFF_first_free->vme_next; \
(entry)->vme_prev->vme_next = (entry)->vme_next; \
MACRO_END
-/*
- * kernel_vm_map_reference:
- *
- * kernel internal export version for iokit and bsd components
- * in lieu of component interface semantics.
- *
- */
-void
-kernel_vm_map_reference(
- register vm_map_t map)
-{
- if (map == VM_MAP_NULL)
- return;
-
- mutex_lock(&map->s_lock);
-#if TASK_SWAPPER
- assert(map->res_count > 0);
- assert(map->ref_count >= map->res_count);
- map->res_count++;
-#endif
- map->ref_count++;
- mutex_unlock(&map->s_lock);
-}
-
#if MACH_ASSERT && TASK_SWAPPER
-/*
- * vm_map_reference:
- *
- * Adds valid reference and residence counts to the given map.
- * The map must be in memory (i.e. non-zero residence count).
- *
- */
-void
-vm_map_reference(
- register vm_map_t map)
-{
- if (map == VM_MAP_NULL)
- return;
-
- mutex_lock(&map->s_lock);
- assert(map->res_count > 0);
- assert(map->ref_count >= map->res_count);
- map->ref_count++;
- map->res_count++;
- mutex_unlock(&map->s_lock);
-}
-
/*
* vm_map_res_reference:
*
}
#endif /* MACH_ASSERT && TASK_SWAPPER */
-/*
- * vm_map_deallocate:
- *
- * Removes a reference from the specified map,
- * destroying it if no references remain.
- * The map should not be locked.
- */
-void
-vm_map_deallocate(
- register vm_map_t map)
-{
- unsigned int ref;
-
- if (map == VM_MAP_NULL)
- return;
-
- mutex_lock(&map->s_lock);
- ref = --map->ref_count;
- if (ref > 0) {
- vm_map_res_deallocate(map);
- mutex_unlock(&map->s_lock);
- return;
- }
- assert(map->ref_count == 0);
- mutex_unlock(&map->s_lock);
-
-#if TASK_SWAPPER
- /*
- * The map residence count isn't decremented here because
- * the vm_map_delete below will traverse the entire map,
- * deleting entries, and the residence counts on objects
- * and sharing maps will go away then.
- */
-#endif
-
- vm_map_destroy(map);
-}
-
/*
* vm_map_destroy:
*
map->max_offset, VM_MAP_NO_FLAGS);
vm_map_unlock(map);
- pmap_destroy(map->pmap);
+ if(map->pmap)
+ pmap_destroy(map->pmap);
zfree(vm_map_zone, (vm_offset_t) map);
}
* future lookups. Performs necessary interlocks.
*/
#define SAVE_HINT(map,value) \
+MACRO_BEGIN \
mutex_lock(&(map)->s_lock); \
(map)->hint = (value); \
- mutex_unlock(&(map)->s_lock);
+ mutex_unlock(&(map)->s_lock); \
+MACRO_END
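/*
 * Wrapping SAVE_HINT in MACRO_BEGIN/MACRO_END (a do { ... } while (0)
 * style construct) makes the lock/store/unlock sequence expand as a
 * single statement, so a caller such as
 *	if (not_in_kdp)
 *		SAVE_HINT(map, cur);
 * guards the whole sequence rather than only the first mutex_lock().
 */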
/*
* vm_map_lookup_entry: [ internal use only ]
* Start looking either from the head of the
* list, or from the hint.
*/
-
- mutex_lock(&map->s_lock);
+ if (not_in_kdp)
+ mutex_lock(&map->s_lock);
cur = map->hint;
- mutex_unlock(&map->s_lock);
+ if (not_in_kdp)
+ mutex_unlock(&map->s_lock);
if (cur == vm_map_to_entry(map))
cur = cur->vme_next;
*/
*entry = cur;
- SAVE_HINT(map, cur);
+ if (not_in_kdp)
+ SAVE_HINT(map, cur);
return(TRUE);
}
break;
cur = cur->vme_next;
}
*entry = cur->vme_prev;
- SAVE_HINT(map, *entry);
+ if (not_in_kdp)
+ SAVE_HINT(map, *entry);
return(FALSE);
}
vm_object_offset_t offset,
vm_prot_t protection)
{
+ unsigned int cache_attr;
+
+ if(map->pmap == 0)
+ return;
- vm_machine_attribute_val_t mv_cache_sync = MATTR_VAL_CACHE_SYNC;
-
while (addr < end_addr) {
register vm_page_t m;
printf("map: %x, addr: %x, object: %x, offset: %x\n",
map, addr, object, offset);
}
-
m->busy = TRUE;
+
+ if (m->no_isync == TRUE) {
+ pmap_sync_caches_phys(m->phys_page);
+ m->no_isync = FALSE;
+ }
+
+ cache_attr = ((unsigned int)object->wimg_bits) & VM_WIMG_MASK;
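/*
 * cache_attr carries the object's WIMG (caching/coherency) bits so
 * that PMAP_ENTER below installs the mapping with the right memory
 * attributes; pages not yet synchronized (no_isync) have their caches
 * flushed via pmap_sync_caches_phys() above.
 */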
vm_object_unlock(object);
- PMAP_ENTER(map->pmap, addr, m,
- protection, FALSE);
+ PMAP_ENTER(map->pmap, addr, m,
+ protection, cache_attr, FALSE);
- if (m->no_isync) {
- pmap_attribute(map->pmap,
- addr,
- PAGE_SIZE,
- MATTR_CACHE,
- &mv_cache_sync);
- }
vm_object_lock(object);
- m->no_isync = FALSE;
-
PAGE_WAKEUP_DONE(m);
vm_page_lock_queues();
if (!m->active && !m->inactive)
(entry->max_protection == max_protection) &&
(entry->behavior == VM_BEHAVIOR_DEFAULT) &&
(entry->in_transition == 0) &&
- ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) &&
+ ((alias == VM_MEMORY_REALLOC) || ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) &&
(entry->wired_count == 0)) { /* implies user_wired_count == 0 */
if (vm_object_coalesce(entry->object.vm_object,
VM_OBJECT_NULL,
if (VMCS_startaddr > VMCS_entry->vme_start) { \
if(entry->use_pmap) { \
vm_offset_t pmap_base_addr; \
- vm_offset_t pmap_end_addr; \
\
pmap_base_addr = 0xF0000000 & entry->vme_start; \
- pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
- pmap_unnest(map->pmap, pmap_base_addr, \
- (pmap_end_addr - pmap_base_addr) + 1); \
+ pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
entry->use_pmap = FALSE; \
+ } else if(entry->object.vm_object \
+ && !entry->is_sub_map \
+ && entry->object.vm_object->phys_contiguous) { \
+ pmap_remove(map->pmap, \
+ (addr64_t)(entry->vme_start), \
+ (addr64_t)(entry->vme_end)); \
} \
_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
} \
if (VMCE_endaddr < VMCE_entry->vme_end) { \
if(entry->use_pmap) { \
vm_offset_t pmap_base_addr; \
- vm_offset_t pmap_end_addr; \
\
pmap_base_addr = 0xF0000000 & entry->vme_start; \
- pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
- pmap_unnest(map->pmap, pmap_base_addr, \
- (pmap_end_addr - pmap_base_addr) + 1); \
+ pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
entry->use_pmap = FALSE; \
+ } else if(entry->object.vm_object \
+ && !entry->is_sub_map \
+ && entry->object.vm_object->phys_contiguous) { \
+ pmap_remove(map->pmap, \
+ (addr64_t)(entry->vme_start), \
+ (addr64_t)(entry->vme_end)); \
} \
_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
} \
vm_map_lock(map);
+ submap->mapped = TRUE;
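/*
 * The mapped flag records that this map is used as a submap.  Its
 * pages may then be mapped through a parent map's pmap, so later
 * teardown paths use vm_object_pmap_protect() with PMAP_NULL (which
 * affects every mapping of each physical page) rather than a plain
 * pmap_remove() on this map's own pmap.
 */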
+
VM_MAP_RANGE_CHECK(map, start, end);
if (vm_map_lookup_entry(map, start, &entry)) {
(object->copy == VM_OBJECT_NULL) &&
(object->shadow == VM_OBJECT_NULL) &&
(!object->pager_created)) {
- entry->offset = (vm_object_offset_t)offset;
- entry->object.vm_object = VM_OBJECT_NULL;
- vm_object_deallocate(object);
- entry->is_sub_map = TRUE;
- vm_map_reference(entry->object.sub_map = submap);
+ entry->offset = (vm_object_offset_t)offset;
+ entry->object.vm_object = VM_OBJECT_NULL;
+ vm_object_deallocate(object);
+ entry->is_sub_map = TRUE;
+ entry->object.sub_map = submap;
+ vm_map_reference(submap);
#ifndef i386
- if ((use_pmap) && (offset == 0)) {
- /* nest if platform code will allow */
- result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
- start, end - start);
- if(result)
- panic("pmap_nest failed!");
- entry->use_pmap = TRUE;
- }
+ if ((use_pmap) && (offset == 0)) {
+ /* nest if platform code will allow */
+ if(submap->pmap == NULL) {
+ submap->pmap = pmap_create((vm_size_t) 0);
+ if(submap->pmap == PMAP_NULL) {
+ return(KERN_NO_SPACE);
+ }
+ }
+ result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
+ (addr64_t)start, (addr64_t)start, (uint64_t)(end - start));
+ if(result)
+ panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result);
+ entry->use_pmap = TRUE;
+ }
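/*
 * The submap's pmap is created on demand so it can be nested into the
 * parent pmap; pmap_nest() is given the same start address for both
 * the parent and the nested range, since the ppc nesting code requires
 * submap address ranges to line up with the parent map.
 */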
#endif
#ifdef i386
- pmap_remove(map->pmap, start, end);
+ pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end);
#endif
- result = KERN_SUCCESS;
+ result = KERN_SUCCESS;
}
vm_map_unlock(map);
local_entry->use_pmap = FALSE;
local_entry = local_entry->vme_next;
}
- pmap_unnest(map->pmap, pmap_base_addr,
- (pmap_end_addr - pmap_base_addr) + 1);
+ pmap_unnest(map->pmap, (addr64_t)pmap_base_addr);
#endif
}
if (!(current->protection & VM_PROT_WRITE)) {
register vm_offset_t end,
register vm_prot_t access_type,
boolean_t user_wire,
- pmap_t map_pmap)
+ pmap_t map_pmap,
+ vm_offset_t pmap_addr)
{
register vm_map_entry_t entry;
struct vm_map_entry *first_entry, tmp_entry;
kern_return_t rc;
boolean_t need_wakeup;
boolean_t main_map = FALSE;
- boolean_t interruptible_state;
+ wait_interrupt_t interruptible_state;
thread_t cur_thread;
unsigned int last_timestamp;
vm_size_t size;
* block after informing other thread to wake us up.
*/
if (entry->in_transition) {
+ wait_result_t wait_result;
+
/*
* We have not clipped the entry. Make sure that
* the start address is in range so that the lookup
/*
* User wiring is interruptible
*/
- vm_map_entry_wait(map,
+ wait_result = vm_map_entry_wait(map,
(user_wire) ? THREAD_ABORTSAFE :
THREAD_UNINT);
- if (user_wire && cur_thread->wait_result ==
- THREAD_INTERRUPTED) {
+ if (user_wire && wait_result == THREAD_INTERRUPTED) {
/*
* undo the wirings we have done so far
* We do not clear the needs_wakeup flag,
* because we cannot tell if we were the
* only one waiting.
*/
+ vm_map_unlock(map);
vm_map_unwire(map, start, s, user_wire);
return(KERN_FAILURE);
}
- vm_map_lock(map);
/*
* Cannot avoid a lookup here. reset timestamp.
*/
if(entry->is_sub_map) {
vm_offset_t sub_start;
vm_offset_t sub_end;
+ vm_offset_t local_start;
vm_offset_t local_end;
pmap_t pmap;
vm_map_clip_start(map, entry, start);
vm_map_clip_end(map, entry, end);
- sub_start += entry->offset;
+ sub_start = entry->offset;
sub_end = entry->vme_end - entry->vme_start;
sub_end += entry->offset;
if(map_pmap == NULL) {
if(entry->use_pmap) {
pmap = entry->object.sub_map->pmap;
+ /* the ppc implementation requires that */
+ /* a submap's pmap address ranges line */
+ /* up with the parent map */
+#ifdef notdef
+ pmap_addr = sub_start;
+#endif
+ pmap_addr = start;
} else {
pmap = map->pmap;
+ pmap_addr = start;
}
if (entry->wired_count) {
if (entry->wired_count
entry->vme_start, user_wire);
return(KERN_FAILURE);
}
- if (!user_wire ||
- (entry->user_wired_count++ == 0))
+ if(user_wire)
+ entry->user_wired_count++;
+ if((!user_wire) ||
+ (entry->user_wired_count == 0))
entry->wired_count++;
entry = entry->vme_next;
continue;
vm_prot_t prot;
boolean_t wired;
vm_behavior_t behavior;
- vm_offset_t local_start;
vm_map_entry_t local_entry;
vm_map_version_t version;
vm_map_t lookup_map;
vm_map_lock_write_to_read(map);
if(vm_map_lookup_locked(
&lookup_map, local_start,
- VM_PROT_WRITE,
+ access_type,
&version, &object,
&offset, &prot, &wired,
&behavior, &offset_lo,
}
if(pmap_map != lookup_map)
vm_map_unlock(pmap_map);
- if(lookup_map != map) {
- vm_map_unlock(lookup_map);
- vm_map_lock(map);
- } else {
- vm_map_unlock(map);
- vm_map_lock(map);
- }
- last_timestamp =
- version.main_timestamp;
+ vm_map_unlock_read(lookup_map);
+ vm_map_lock(map);
vm_object_unlock(object);
- if (vm_map_lookup_entry(map,
+
+ if (!vm_map_lookup_entry(map,
local_start, &local_entry)) {
vm_map_unlock(map);
vm_map_unwire(map, start,
return(KERN_FAILURE);
}
/* did we have a change of type? */
- if (!local_entry->is_sub_map)
+ if (!local_entry->is_sub_map) {
+ last_timestamp = map->timestamp;
continue;
+ }
entry = local_entry;
if (user_wire)
entry->user_wired_count++;
+ if((!user_wire) ||
+ (entry->user_wired_count == 1))
entry->wired_count++;
entry->in_transition = TRUE;
entry->object.sub_map,
sub_start, sub_end,
access_type,
- user_wire, pmap);
+ user_wire, pmap, pmap_addr);
vm_map_lock(map);
- last_timestamp = map->timestamp;
}
} else {
+ local_start = entry->vme_start;
+ if (user_wire)
+ entry->user_wired_count++;
+ if((!user_wire) ||
+ (entry->user_wired_count == 1))
+ entry->wired_count++;
vm_map_unlock(map);
rc = vm_map_wire_nested(entry->object.sub_map,
sub_start, sub_end,
access_type,
- user_wire, pmap);
+ user_wire, map_pmap, pmap_addr);
vm_map_lock(map);
- last_timestamp = map->timestamp;
}
s = entry->vme_start;
e = entry->vme_end;
- if (last_timestamp+1 != map->timestamp) {
+
/*
* Find the entry again. It could have been clipped
* after we unlocked the map.
*/
- if (!vm_map_lookup_entry(map, local_end,
- &first_entry))
- panic("vm_map_wire: re-lookup failed");
-
- entry = first_entry;
- }
+ if (!vm_map_lookup_entry(map, local_start,
+ &first_entry))
+ panic("vm_map_wire: re-lookup failed");
+ entry = first_entry;
last_timestamp = map->timestamp;
while ((entry != vm_map_to_entry(map)) &&
need_wakeup = TRUE;
}
if (rc != KERN_SUCCESS) {/* from vm_*_wire */
- if(main_map) {
if (user_wire)
entry->user_wired_count--;
- entry->wired_count--;
- }
+ if ((!user_wire) ||
+ (entry->user_wired_count == 0))
+ entry->wired_count--;
}
entry = entry->vme_next;
}
* If this entry is already wired then increment
* the appropriate wire reference count.
*/
- if (entry->wired_count && main_map) {
+ if (entry->wired_count) {
/* sanity check: wired_count is a short */
if (entry->wired_count >= MAX_WIRE_COUNT)
panic("vm_map_wire: too many wirings");
*/
vm_map_clip_start(map, entry, start);
vm_map_clip_end(map, entry, end);
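/*
 * wired_count is bumped for kernel wirings, and for a user wiring only
 * the first time (when user_wired_count reaches 1); repeated user
 * wirings are tracked solely in user_wired_count.
 */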
- if (!user_wire || (entry->user_wired_count++ == 0))
+ if (user_wire)
+ entry->user_wired_count++;
+ if ((!user_wire) || (entry->user_wired_count == 1))
entry->wired_count++;
entry = entry->vme_next;
assert(entry->wired_count == 0 && entry->user_wired_count == 0);
- if(main_map) {
- if (user_wire)
- entry->user_wired_count++;
+ if (user_wire)
+ entry->user_wired_count++;
+ if ((!user_wire) || (entry->user_wired_count == 1))
entry->wired_count++;
- }
entry->in_transition = TRUE;
*/
vm_map_unlock(map);
- if (!user_wire && cur_thread != THREAD_NULL) {
- interruptible_state = cur_thread->interruptible;
- cur_thread->interruptible = FALSE;
- }
-
+ if (!user_wire && cur_thread != THREAD_NULL)
+ interruptible_state = thread_interrupt_level(THREAD_UNINT);
+
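/*
 * vm_fault_wire() faults in and wires each page of tmp_entry; the
 * pmap_addr argument gives the starting address of the range in the
 * pmap being wired (needed when wiring through a nested submap).
 * Kernel-initiated wiring was made non-interruptible above via
 * thread_interrupt_level(THREAD_UNINT).
 */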
if(map_pmap)
- rc = vm_fault_wire(map, &tmp_entry, map_pmap);
+ rc = vm_fault_wire(map,
+ &tmp_entry, map_pmap, pmap_addr);
else
- rc = vm_fault_wire(map, &tmp_entry, map->pmap);
+ rc = vm_fault_wire(map,
+ &tmp_entry, map->pmap,
+ tmp_entry.vme_start);
if (!user_wire && cur_thread != THREAD_NULL)
- cur_thread->interruptible = interruptible_state;
+ thread_interrupt_level(interruptible_state);
vm_map_lock(map);
need_wakeup = TRUE;
}
if (rc != KERN_SUCCESS) { /* from vm_*_wire */
- if(main_map) {
- if (user_wire)
- entry->user_wired_count--;
+ if (user_wire)
+ entry->user_wired_count--;
+ if ((!user_wire) ||
+ (entry->user_wired_count == 0))
entry->wired_count--;
- }
}
entry = entry->vme_next;
}
mapping_prealloc(end - start);
#endif
kret = vm_map_wire_nested(map, start, end, access_type,
- user_wire, (pmap_t)NULL);
+ user_wire, (pmap_t)NULL, 0);
#ifdef ppc
mapping_relpre();
#endif
register vm_offset_t start,
register vm_offset_t end,
boolean_t user_wire,
- pmap_t map_pmap)
+ pmap_t map_pmap,
+ vm_offset_t pmap_addr)
{
register vm_map_entry_t entry;
struct vm_map_entry *first_entry, tmp_entry;
if(map_pmap == NULL) {
if(entry->use_pmap) {
pmap = entry->object.sub_map->pmap;
+ pmap_addr = sub_start;
} else {
pmap = map->pmap;
+ pmap_addr = start;
}
if (entry->wired_count == 0 ||
(user_wire && entry->user_wired_count == 0)) {
*/
vm_map_unlock(map);
vm_map_unwire_nested(entry->object.sub_map,
- sub_start, sub_end, user_wire, pmap);
+ sub_start, sub_end, user_wire, pmap, pmap_addr);
vm_map_lock(map);
if (last_timestamp+1 != map->timestamp) {
continue;
} else {
vm_map_unlock(map);
- vm_map_unwire_nested(entry->object.sub_map,
- sub_start, sub_end, user_wire, pmap);
+ vm_map_unwire_nested(entry->object.sub_map,
+ sub_start, sub_end, user_wire, map_pmap,
+ pmap_addr);
vm_map_lock(map);
if (last_timestamp+1 != map->timestamp) {
}
- if (main_map && (entry->wired_count == 0 ||
- (user_wire && entry->user_wired_count == 0))) {
+ if ((entry->wired_count == 0) ||
+ (user_wire && entry->user_wired_count == 0)) {
if (!user_wire)
panic("vm_map_unwire: entry is unwired");
continue;
}
- if(main_map) {
- if (!user_wire || (--entry->user_wired_count == 0))
+ if (!user_wire || (--entry->user_wired_count == 0))
entry->wired_count--;
- if (entry->wired_count != 0) {
+ if (entry->wired_count != 0) {
entry = entry->vme_next;
continue;
- }
}
entry->in_transition = TRUE;
*/
vm_map_unlock(map);
if(map_pmap) {
- vm_fault_unwire(map, &tmp_entry, FALSE, map_pmap);
+ vm_fault_unwire(map,
+ &tmp_entry, FALSE, map_pmap, pmap_addr);
} else {
- vm_fault_unwire(map, &tmp_entry, FALSE, map->pmap);
+ vm_fault_unwire(map,
+ &tmp_entry, FALSE, map->pmap,
+ tmp_entry.vme_start);
}
vm_map_lock(map);
register vm_offset_t end,
boolean_t user_wire)
{
- return vm_map_unwire_nested(map, start, end, user_wire, (pmap_t)NULL);
+ return vm_map_unwire_nested(map, start, end,
+ user_wire, (pmap_t)NULL, 0);
}
entry->object.sub_map,
entry->offset);
} else {
- pmap_remove(map->pmap, start, start + remove_size);
+
+ if((map->mapped) && (map->ref_count)
+ && (entry->object.vm_object != NULL)) {
+ vm_object_pmap_protect(
+ entry->object.vm_object,
+ entry->offset,
+ remove_size,
+ PMAP_NULL,
+ entry->vme_start,
+ VM_PROT_NONE);
+ } else {
+ pmap_remove(map->pmap,
+ (addr64_t)start,
+ (addr64_t)(start + remove_size));
+ }
}
}
entry->object.sub_map,
entry->offset);
} else {
- pmap_remove(map->pmap,
- (start + entry->vme_start) - offset,
- ((start + entry->vme_start) - offset) + remove_size);
+ if((map->mapped) && (map->ref_count)
+ && (entry->object.vm_object != NULL)) {
+ vm_object_pmap_protect(
+ entry->object.vm_object,
+ entry->offset,
+ remove_size,
+ PMAP_NULL,
+ entry->vme_start,
+ VM_PROT_NONE);
+ } else {
+ pmap_remove(map->pmap,
+ (addr64_t)((start + entry->vme_start)
+ - offset),
+ (addr64_t)(((start + entry->vme_start)
+ - offset) + remove_size));
+ }
}
entry = entry->vme_next;
}
vm_map_clip_end(map, entry, end);
if (entry->in_transition) {
+ wait_result_t wait_result;
+
/*
* Another thread is wiring/unwiring this entry.
* Let the other thread know we are waiting.
need_wakeup = FALSE;
}
- vm_map_entry_wait(map, interruptible);
+ wait_result = vm_map_entry_wait(map, interruptible);
if (interruptible &&
- current_thread()->wait_result == THREAD_INTERRUPTED)
+ wait_result == THREAD_INTERRUPTED) {
/*
* We do not clear the needs_wakeup flag,
* since we cannot tell if we were the only one.
*/
+ vm_map_unlock(map);
return KERN_ABORTED;
-
- vm_map_lock(map);
- /*
- * Cannot avoid a lookup here. reset timestamp.
- */
- last_timestamp = map->timestamp;
+ }
/*
* The entry could have been clipped or it
entry = first_entry;
SAVE_HINT(map, entry->vme_prev);
}
+ last_timestamp = map->timestamp;
continue;
} /* end in_transition */
* error.
*/
if (flags & VM_MAP_REMOVE_WAIT_FOR_KWIRE) {
+ wait_result_t wait_result;
s = entry->vme_start;
entry->needs_wakeup = TRUE;
- vm_map_entry_wait(map, interruptible);
+ wait_result = vm_map_entry_wait(map,
+ interruptible);
if (interruptible &&
- current_thread()->wait_result ==
- THREAD_INTERRUPTED)
+ wait_result == THREAD_INTERRUPTED) {
/*
* We do not clear the
* needs_wakeup flag, since we
* cannot tell if we were the
* only one.
*/
+ vm_map_unlock(map);
return KERN_ABORTED;
-
- vm_map_lock(map);
- /*
- * Cannot avoid a lookup here. reset
- * timestamp.
- */
- last_timestamp = map->timestamp;
+ }
/*
* The entry could have been clipped or
entry = first_entry;
SAVE_HINT(map, entry->vme_prev);
}
+ last_timestamp = map->timestamp;
continue;
}
else {
vm_map_unlock(map);
vm_fault_unwire(map, &tmp_entry,
tmp_entry.object.vm_object == kernel_object,
- map->pmap);
+ map->pmap, tmp_entry.vme_start);
vm_map_lock(map);
if (last_timestamp+1 != map->timestamp) {
if(entry->is_sub_map) {
if(entry->use_pmap) {
#ifndef i386
- pmap_unnest(map->pmap, entry->vme_start,
- entry->vme_end - entry->vme_start);
+ pmap_unnest(map->pmap, (addr64_t)entry->vme_start);
#endif
+ if((map->mapped) && (map->ref_count)) {
+ /* clean up parent map/maps */
+ vm_map_submap_pmap_clean(
+ map, entry->vme_start,
+ entry->vme_end,
+ entry->object.sub_map,
+ entry->offset);
+ }
} else {
vm_map_submap_pmap_clean(
map, entry->vme_start, entry->vme_end,
entry->offset);
}
} else {
- pmap_remove(map->pmap,
- entry->vme_start, entry->vme_end);
+ object = entry->object.vm_object;
+ if((map->mapped) && (map->ref_count)) {
+ vm_object_pmap_protect(
+ object, entry->offset,
+ entry->vme_end - entry->vme_start,
+ PMAP_NULL,
+ entry->vme_start,
+ VM_PROT_NONE);
+ } else if(object != NULL) {
+ if ((object->shadow != NULL) ||
+ (object->phys_contiguous) ||
+ (object->resident_page_count >
+ atop((entry->vme_end - entry->vme_start)/4))) {
+ pmap_remove(map->pmap,
+ (addr64_t)(entry->vme_start),
+ (addr64_t)(entry->vme_end));
+ } else {
+ vm_page_t p;
+ vm_object_offset_t start_off;
+ vm_object_offset_t end_off;
+ start_off = entry->offset;
+ end_off = start_off +
+ (entry->vme_end - entry->vme_start);
+ vm_object_lock(object);
+ queue_iterate(&object->memq,
+ p, vm_page_t, listq) {
+ if ((!p->fictitious) &&
+ (p->offset >= start_off) &&
+ (p->offset < end_off)) {
+ vm_offset_t start;
+ start = entry->vme_start;
+ start += p->offset - start_off;
+ pmap_remove(
+ map->pmap, start,
+ start + PAGE_SIZE);
+ }
+ }
+ vm_object_unlock(object);
+ }
+ }
}
}
register boolean_t flags)
{
register kern_return_t result;
+ boolean_t funnel_set = FALSE;
+ funnel_t *curflock;
+ thread_t cur_thread;
+ cur_thread = current_thread();
+
+ if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
+ funnel_set = TRUE;
+ curflock = cur_thread->funnel_lock;
+ thread_funnel_set( curflock , FALSE);
+ }
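/*
 * If the calling thread holds a funnel it is dropped for the duration
 * of the removal and reacquired afterwards, presumably so the funnel
 * is not held while vm_map_delete() may block.
 */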
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
result = vm_map_delete(map, start, end, flags);
vm_map_unlock(map);
-
+ if (funnel_set) {
+ thread_funnel_set( curflock, TRUE);
+ funnel_set = FALSE;
+ }
return(result);
}
* splitting entries in strange ways.
*/
- dst_end = round_page(dst_addr + dst_size);
+ dst_end = round_page_32(dst_addr + dst_size);
+ vm_map_lock(dst_map);
start_pass_1:
- vm_map_lock(dst_map);
if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
- vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
+ vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));
for (entry = tmp_entry;;) {
vm_map_entry_t next;
!page_aligned (dst_addr))
{
aligned = FALSE;
- dst_end = round_page(dst_addr + copy->size);
+ dst_end = round_page_32(dst_addr + copy->size);
} else {
dst_end = dst_addr + copy->size;
}
-start_pass_1:
vm_map_lock(dst_map);
+
+start_pass_1:
if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
- vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
+ vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));
for (entry = tmp_entry;;) {
vm_map_entry_t next = entry->vme_next;
entry->needs_wakeup = TRUE;
vm_map_entry_wait(dst_map, THREAD_UNINT);
- vm_map_lock(dst_map);
if(!vm_map_lookup_entry(dst_map, base_addr,
&tmp_entry)) {
vm_map_unlock(dst_map);
break;
}
}
- vm_map_clip_start(dst_map, tmp_entry, trunc_page(base_addr));
+ vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(base_addr));
entry = tmp_entry;
} /* while */
if (old_object != VM_OBJECT_NULL) {
if(entry->is_sub_map) {
- if(entry->use_pmap) {
+ if(entry->use_pmap) {
#ifndef i386
- pmap_unnest(dst_map->pmap,
- entry->vme_start,
- entry->vme_end - entry->vme_start);
+ pmap_unnest(dst_map->pmap,
+ entry->vme_start,
+ entry->vme_end
+ - entry->vme_start);
#endif
- } else {
- vm_map_submap_pmap_clean(
- dst_map, entry->vme_start,
- entry->vme_end,
- entry->object.sub_map,
- entry->offset);
- }
- vm_map_deallocate(
+ if(dst_map->mapped) {
+ /* clean up parent */
+ /* map/maps */
+ vm_map_submap_pmap_clean(
+ dst_map, entry->vme_start,
+ entry->vme_end,
+ entry->object.sub_map,
+ entry->offset);
+ }
+ } else {
+ vm_map_submap_pmap_clean(
+ dst_map, entry->vme_start,
+ entry->vme_end,
+ entry->object.sub_map,
+ entry->offset);
+ }
+ vm_map_deallocate(
entry->object.sub_map);
- } else {
- vm_object_pmap_protect(
- old_object,
- old_offset,
- size,
- pmap,
- tmp_entry->vme_start,
- VM_PROT_NONE);
-
+ } else {
+ if(dst_map->mapped) {
+ vm_object_pmap_protect(
+ entry->object.vm_object,
+ entry->offset,
+ entry->vme_end
+ - entry->vme_start,
+ PMAP_NULL,
+ entry->vme_start,
+ VM_PROT_NONE);
+ } else {
+ pmap_remove(dst_map->pmap,
+ (addr64_t)(entry->vme_start),
+ (addr64_t)(entry->vme_end));
+ }
vm_object_deallocate(old_object);
- }
+ }
}
entry->is_sub_map = FALSE;
/* a pc to execute it. */
/* No isync here */
- PMAP_ENTER(pmap, va, m,
- prot, FALSE);
+ PMAP_ENTER(pmap, va, m, prot,
+ ((unsigned int)
+ (m->object->wimg_bits))
+ & VM_WIMG_MASK,
+ FALSE);
vm_object_lock(object);
vm_page_lock_queues();
vm_object_reference(dst_object);
- version.main_timestamp = dst_map->timestamp;
+ /* account for unlock bumping up timestamp */
+ version.main_timestamp = dst_map->timestamp + 1;
vm_map_unlock(dst_map);
start += copy_size;
vm_map_lock(dst_map);
- if ((version.main_timestamp + 1) == dst_map->timestamp) {
+ if (version.main_timestamp == dst_map->timestamp) {
/* We can safely use saved tmp_entry value */
vm_map_clip_end(dst_map, tmp_entry, start);
VM_MAP_REMOVE_INTERRUPTIBLE;
}
if (src_destroy) {
- (void) vm_map_remove(src_map, trunc_page(src_addr),
- round_page(src_addr + len),
+ (void) vm_map_remove(src_map, trunc_page_32(src_addr),
+ round_page_32(src_addr + len),
flags);
}
*copy_result = copy;
*addr = 0;
kr = vm_map_enter(map,
addr,
- round_page(copy->size),
+ round_page_32(copy->size),
(vm_offset_t) 0,
TRUE,
VM_OBJECT_NULL,
vm_object_offset_t offset;
offset = trunc_page_64(copy->offset);
- size = round_page(copy->size +
+ size = round_page_32(copy->size +
(vm_size_t)(copy->offset - offset));
*dst_addr = 0;
kr = vm_map_enter(dst_map, dst_addr, size,
*/
vm_copy_start = trunc_page_64(copy->offset);
- size = round_page((vm_size_t)copy->offset + copy->size)
+ size = round_page_32((vm_size_t)copy->offset + copy->size)
- vm_copy_start;
StartAgain: ;
m->busy = TRUE;
vm_object_unlock(object);
- PMAP_ENTER(dst_map->pmap, va, m,
- entry->protection, TRUE);
+ PMAP_ENTER(dst_map->pmap, va, m, entry->protection,
+ ((unsigned int)
+ (m->object->wimg_bits))
+ & VM_WIMG_MASK,
+ TRUE);
vm_object_lock(object);
PAGE_WAKEUP_DONE(m);
prot &= ~VM_PROT_WRITE;
PMAP_ENTER(dst_map->pmap, va,
- m, prot, FALSE);
+ m, prot,
+ ((unsigned int)
+ (m->object->wimg_bits))
+ & VM_WIMG_MASK,
+ FALSE);
vm_object_lock(object);
vm_page_lock_queues();
* Compute start and end of region
*/
- src_start = trunc_page(src_addr);
- src_end = round_page(src_addr + len);
+ src_start = trunc_page_32(src_addr);
+ src_end = round_page_32(src_addr + len);
XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0);
#define RETURN(x) \
MACRO_BEGIN \
vm_map_unlock(src_map); \
+ if(src_map != base_map) \
+ vm_map_deallocate(src_map); \
if (new_entry != VM_MAP_ENTRY_NULL) \
vm_map_copy_entry_dispose(copy,new_entry); \
vm_map_copy_discard(copy); \
\
for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
parent_maps=parent_maps->next; \
+ if (ptr->parent_map != base_map) \
+ vm_map_deallocate(ptr->parent_map); \
kfree((vm_offset_t)ptr, sizeof(submap_map_t)); \
} \
} \
src_end = src_start + submap_len;
src_map = tmp_entry->object.sub_map;
vm_map_lock(src_map);
+ /* keep an outstanding reference on all maps in */
+ /* the parent tree except the base map */
+ vm_map_reference(src_map);
vm_map_unlock(ptr->parent_map);
if (!vm_map_lookup_entry(
src_map, src_start, &tmp_entry))
}
if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) &&
(tmp_entry->object.vm_object->phys_contiguous)) {
- /* This is not, cannot be supported for now */
- /* we need a description of the caching mode */
- /* reflected in the object before we can */
- /* support copyin, and then the support will */
- /* be for direct copy */
+ /* This is not supported for now. In the future */
+ /* we will need to detect the phys_contiguous */
+ /* condition and then upgrade copy_slowly */
+ /* to do a physical copy from the device- */
+ /* memory based object. We can piggy-back */
+ /* off of the was_wired boolean to set up */
+ /* the proper handling */
RETURN(KERN_PROTECTION_FAILURE);
}
/*
XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
src_object, new_entry, new_entry->object.vm_object,
was_wired, 0);
- if (!was_wired &&
- vm_object_copy_quickly(
+ if ((src_object == VM_OBJECT_NULL ||
+ (!was_wired && !map_share && !tmp_entry->is_shared)) &&
+ vm_object_copy_quickly(
&new_entry->object.vm_object,
src_offset,
src_size,
*/
if (src_needs_copy && !tmp_entry->needs_copy) {
- if (tmp_entry->is_shared ||
- tmp_entry->object.vm_object->true_share ||
- map_share) {
- vm_map_unlock(src_map);
- new_entry->object.vm_object =
- vm_object_copy_delayed(
- src_object,
- src_offset,
- src_size);
- /* dec ref gained in copy_quickly */
- vm_object_lock(src_object);
- src_object->ref_count--;
- assert(src_object->ref_count > 0);
- vm_object_res_deallocate(src_object);
- vm_object_unlock(src_object);
- vm_map_lock(src_map);
- /*
- * it turns out that we have
- * finished our copy. No matter
- * what the state of the map
- * we will lock it again here
- * knowing that if there is
- * additional data to copy
- * it will be checked at
- * the top of the loop
- *
- * Don't do timestamp check
- */
-
- } else {
- vm_object_pmap_protect(
- src_object,
- src_offset,
- src_size,
- (src_entry->is_shared ?
- PMAP_NULL
- : src_map->pmap),
- src_entry->vme_start,
- src_entry->protection &
- ~VM_PROT_WRITE);
-
- tmp_entry->needs_copy = TRUE;
- }
+ vm_object_pmap_protect(
+ src_object,
+ src_offset,
+ src_size,
+ (src_entry->is_shared ?
+ PMAP_NULL
+ : src_map->pmap),
+ src_entry->vme_start,
+ src_entry->protection &
+ ~VM_PROT_WRITE);
+ tmp_entry->needs_copy = TRUE;
}
/*
goto CopySuccessful;
}
- new_entry->needs_copy = FALSE;
-
/*
* Take an object reference, so that we may
* release the map lock(s).
*/
version.main_timestamp = src_map->timestamp;
- vm_map_unlock(src_map);
+ vm_map_unlock(src_map); /* Increments timestamp once! */
/*
* Perform the copy
*/
if (was_wired) {
+ CopySlowly:
vm_object_lock(src_object);
result = vm_object_copy_slowly(
src_object,
&new_entry->object.vm_object);
new_entry->offset = 0;
new_entry->needs_copy = FALSE;
+
+ }
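/*
 * Shared symmetric-copy objects take the delayed-copy (copy-on-write)
 * path below; if vm_object_copy_delayed() cannot build a copy object,
 * the code falls back to the slow physical copy via the CopySlowly
 * label above.
 */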
+ else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
+ (tmp_entry->is_shared || map_share)) {
+ vm_object_t new_object;
+
+ vm_object_lock(src_object);
+ new_object = vm_object_copy_delayed(
+ src_object,
+ src_offset,
+ src_size);
+ if (new_object == VM_OBJECT_NULL)
+ goto CopySlowly;
+
+ new_entry->object.vm_object = new_object;
+ new_entry->needs_copy = TRUE;
+ result = KERN_SUCCESS;
+
} else {
result = vm_object_copy_strategically(src_object,
src_offset,
&new_entry_needs_copy);
new_entry->needs_copy = new_entry_needs_copy;
-
}
if (result != KERN_SUCCESS &&
* changed while the copy was being made.
*/
- vm_map_lock(src_map); /* Increments timestamp once! */
+ vm_map_lock(src_map);
if ((version.main_timestamp + 1) == src_map->timestamp)
goto VerificationSuccessful;
ptr = parent_maps;
assert(ptr != NULL);
parent_maps = parent_maps->next;
- vm_map_lock(ptr->parent_map);
vm_map_unlock(src_map);
+ vm_map_deallocate(src_map);
+ vm_map_lock(ptr->parent_map);
src_map = ptr->parent_map;
src_start = ptr->base_start;
src_end = ptr->base_end;
*/
if (src_destroy) {
(void) vm_map_delete(src_map,
- trunc_page(src_addr),
+ trunc_page_32(src_addr),
src_end,
(src_map == kernel_map) ?
VM_MAP_REMOVE_KUNWIRE :
if(old_entry->use_pmap) {
result = pmap_nest(new_map->pmap,
(old_entry->object.sub_map)->pmap,
- old_entry->vme_start,
- old_entry->vme_end - old_entry->vme_start);
+ (addr64_t)old_entry->vme_start,
+ (addr64_t)old_entry->vme_start,
+ (uint64_t)(old_entry->vme_end - old_entry->vme_start));
if(result)
panic("vm_map_fork_share: pmap_nest failed!");
}
* to remove write permission.
*/
-/* CDY FIX this! page_protect! */
if (!old_entry->needs_copy &&
(old_entry->protection & VM_PROT_WRITE)) {
- if(old_entry->is_sub_map && old_entry->use_pmap) {
- pmap_protect(old_entry->object.sub_map->pmap,
- old_entry->vme_start,
- old_entry->vme_end,
- old_entry->protection & ~VM_PROT_WRITE);
+ if(old_map->mapped) {
+ vm_object_pmap_protect(
+ old_entry->object.vm_object,
+ old_entry->offset,
+ (old_entry->vme_end -
+ old_entry->vme_start),
+ PMAP_NULL,
+ old_entry->vme_start,
+ old_entry->protection & ~VM_PROT_WRITE);
} else {
- pmap_protect(vm_map_pmap(old_map),
+ pmap_protect(old_map->pmap,
old_entry->vme_start,
old_entry->vme_end,
old_entry->protection & ~VM_PROT_WRITE);
*/
vm_map_lock(old_map);
if (!vm_map_lookup_entry(old_map, start, &last) ||
- last->max_protection & VM_PROT_READ ==
- VM_PROT_NONE) {
+ (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
last = last->vme_next;
}
*old_entry_p = last;
if(old_entry->is_sub_map)
break;
- if (old_entry->wired_count != 0) {
+ if ((old_entry->wired_count != 0) ||
+ ((old_entry->object.vm_object != NULL) &&
+ (old_entry->object.vm_object->true_share))) {
goto slow_vm_map_fork_copy;
}
(old_entry->vme_end -
old_entry->vme_start),
((old_entry->is_shared
- || old_entry->is_sub_map)
+ || old_map->mapped)
? PMAP_NULL :
old_map->pmap),
old_entry->vme_start,
submap_entry->offset,
submap_entry->vme_end -
submap_entry->vme_start,
- submap_entry->is_shared ?
+ (submap_entry->is_shared
+ || map->mapped) ?
PMAP_NULL : map->pmap,
submap_entry->vme_start,
submap_entry->protection &
recurse_count = *nesting_depth;
LOOKUP_NEXT_BASE_ENTRY:
- vm_map_lock_read(map);
+ if (not_in_kdp)
+ vm_map_lock_read(map);
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
- vm_map_unlock_read(map);
- return(KERN_INVALID_ADDRESS);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
+ return(KERN_INVALID_ADDRESS);
}
} else {
entry = tmp_entry;
while(entry->is_sub_map && recurse_count) {
recurse_count--;
- vm_map_lock_read(entry->object.sub_map);
+ if (not_in_kdp)
+ vm_map_lock_read(entry->object.sub_map);
if(entry == base_entry) {
}
submap = entry->object.sub_map;
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
map = submap;
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
if ((entry = tmp_entry->vme_next)
== vm_map_to_entry(map)) {
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
map = base_map;
start = base_next;
recurse_count = 0;
}
if(base_next <=
(base_addr += (entry->vme_start - start))) {
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
map = base_map;
start = base_next;
recurse_count = 0;
}
base_addr += entry->vme_start;
if(base_addr >= base_next) {
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
map = base_map;
start = base_next;
recurse_count = 0;
extended.pages_dirtied = 0;
extended.external_pager = 0;
extended.shadow_depth = 0;
-
+
+ if (not_in_kdp)
if(!entry->is_sub_map) {
vm_region_walk(entry, &extended, entry->offset,
entry->vme_end - start, map, start);
submap_info->pages_dirtied = extended.pages_dirtied;
submap_info->external_pager = extended.external_pager;
submap_info->shadow_depth = extended.shadow_depth;
-
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
return(KERN_SUCCESS);
}
register struct vm_object *obj, *tmp_obj;
register int ref_count;
- if (entry->object.vm_object == 0) {
+ if (entry->object.vm_object == 0 || entry->is_sub_map) {
top->share_mode = SM_EMPTY;
top->ref_count = 0;
top->obj_id = 0;
return;
}
- if (entry->is_sub_map)
- vm_region_top_walk((vm_map_entry_t)entry->object.sub_map, top);
- else {
+ {
obj = entry->object.vm_object;
vm_object_lock(obj);
register int ref_count;
void vm_region_look_for_page();
- if ((entry->object.vm_object == 0) ||
+ if ((entry->object.vm_object == 0) ||
+ (entry->is_sub_map) ||
(entry->object.vm_object->phys_contiguous)) {
extended->share_mode = SM_EMPTY;
extended->ref_count = 0;
return;
}
- if (entry->is_sub_map)
- vm_region_walk((vm_map_entry_t)entry->object.sub_map, extended, offset + entry->offset,
- range, map, va);
- else {
+ {
obj = entry->object.vm_object;
vm_object_lock(obj);
if (shadow && (max_refcnt == 1))
extended->pages_shared_now_private++;
- if (p->dirty || pmap_is_modified(p->phys_addr))
+ if (!p->fictitious &&
+ (p->dirty || pmap_is_modified(p->phys_page)))
extended->pages_dirtied++;
extended->pages_resident++;
if(object != caller_object)
- vm_object_unlock(object);
+ vm_object_unlock(object);
return;
}
extended->pages_swapped_out++;
if(object != caller_object)
- vm_object_unlock(object);
+ vm_object_unlock(object);
return;
}
}
if (shadow) {
- vm_object_lock(shadow);
+ vm_object_lock(shadow);
if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
ref_count--;
max_refcnt = ref_count;
if(object != caller_object)
- vm_object_unlock(object);
+ vm_object_unlock(object);
object = shadow;
shadow = object->shadow;
continue;
}
if(object != caller_object)
- vm_object_unlock(object);
+ vm_object_unlock(object);
break;
}
}
return(0);
if (entry->is_sub_map)
- ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
+ return(0);
else {
ref_count = 0;
if (chk_obj == object)
ref_count++;
if (tmp_obj = chk_obj->shadow)
- vm_object_lock(tmp_obj);
+ vm_object_lock(tmp_obj);
vm_object_unlock(chk_obj);
-
+
chk_obj = tmp_obj;
}
}
vm_machine_attribute_val_t* value) /* IN/OUT */
{
kern_return_t ret;
-
+ vm_size_t sync_size;
+ vm_offset_t start;
+ vm_map_entry_t entry;
+
if (address < vm_map_min(map) ||
(address + size) > vm_map_max(map))
return KERN_INVALID_ADDRESS;
vm_map_lock(map);
+
+ if (attribute != MATTR_CACHE) {
+ /* If we don't have to find physical addresses, we */
+ /* don't have to do an explicit traversal here. */
+ ret = pmap_attribute(map->pmap,
+ address, size, attribute, value);
+ vm_map_unlock(map);
+ return ret;
+ }
+
+ /* Get the starting address */
+ start = trunc_page_32(address);
+ /* Figure how much memory we need to flush (in page increments) */
+ sync_size = round_page_32(start + size) - start;
+
- ret = pmap_attribute(map->pmap, address, size, attribute, value);
+ ret = KERN_SUCCESS; /* Assume it all worked */
+
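/*
 * MATTR_CACHE requests are resolved by walking the range an entry at
 * a time: submap entries recurse, while object-backed entries sync
 * each resident page with pmap_attribute_cache_sync(), following the
 * shadow chain when a page is not resident in the topmost object.
 */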
+ while(sync_size) {
+ if (vm_map_lookup_entry(map, start, &entry)) {
+ vm_size_t sub_size;
+ if((entry->vme_end - start) > sync_size) {
+ sub_size = sync_size;
+ sync_size = 0;
+ } else {
+ sub_size = entry->vme_end - start;
+ sync_size -= sub_size;
+ }
+ if(entry->is_sub_map) {
+ vm_map_machine_attribute(
+ entry->object.sub_map,
+ (start - entry->vme_start)
+ + entry->offset,
+ sub_size,
+ attribute, value);
+ } else {
+ if(entry->object.vm_object) {
+ vm_page_t m;
+ vm_object_t object;
+ vm_object_t base_object;
+ vm_object_offset_t offset;
+ vm_object_offset_t base_offset;
+ vm_size_t range;
+ range = sub_size;
+ offset = (start - entry->vme_start)
+ + entry->offset;
+ base_offset = offset;
+ object = entry->object.vm_object;
+ base_object = object;
+ while(range) {
+ m = vm_page_lookup(
+ object, offset);
+ if(m && !m->fictitious) {
+
+ ret =
+ pmap_attribute_cache_sync(
+ m->phys_page,
+ PAGE_SIZE,
+ attribute, value);
+ } else if (object->shadow) {
+ offset = offset +
+ object->shadow_offset;
+ object = object->shadow;
+ continue;
+ }
+ range -= PAGE_SIZE;
+ /* Bump to the next page */
+ base_offset += PAGE_SIZE;
+ offset = base_offset;
+ object = base_object;
+
+ }
+ }
+ }
+ start += sub_size;
+ } else {
+ vm_map_unlock(map);
+ return KERN_FAILURE;
+ }
+
+ }
vm_map_unlock(map);
case VM_BEHAVIOR_SEQUENTIAL:
case VM_BEHAVIOR_RSEQNTL:
break;
+ case VM_BEHAVIOR_WILLNEED:
+ case VM_BEHAVIOR_DONTNEED:
+ new_behavior = VM_BEHAVIOR_DEFAULT;
+ break;
default:
return(KERN_INVALID_ARGUMENT);
}
vm_map_links_print(
struct vm_map_links *links)
{
- iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n",
+ iprintf("prev = %08X next = %08X start = %08X end = %08X\n",
links->prev,
links->next,
links->start,
struct vm_map_header *header)
{
vm_map_links_print(&header->links);
- iprintf("nentries=0x%x, %sentries_pageable\n",
+ iprintf("nentries = %08X, %sentries_pageable\n",
header->nentries,
(header->entries_pageable ? "" : "!"));
}
extern int db_indent;
int shadows;
- iprintf("map entry 0x%x:\n", entry);
+ iprintf("map entry %08X\n", entry);
db_indent += 2;
static char *inheritance_name[4] = { "share", "copy", "none", "?"};
static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };
- iprintf("map entry 0x%x:\n", entry);
+ iprintf("map entry %08X n", entry);
db_indent += 2;
vm_map_links_print(&entry->links);
- iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n",
+ iprintf("start = %08X end = %08X, prot=%x/%x/%s\n",
entry->vme_start,
entry->vme_end,
entry->protection,
entry->max_protection,
inheritance_name[(entry->inheritance & 0x3)]);
- iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n",
+ iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
behavior_name[(entry->behavior & 0x3)],
entry->wired_count,
entry->user_wired_count);
(entry->needs_wakeup ? "" : "!"));
if (entry->is_sub_map) {
- iprintf("submap=0x%x, offset=0x%x\n",
+ iprintf("submap = %08X - offset=%08X\n",
entry->object.sub_map,
entry->offset);
} else {
- iprintf("object=0x%x, offset=0x%x, ",
+ iprintf("object=%08X, offset=%08X, ",
entry->object.vm_object,
entry->offset);
printf("%sis_shared, %sneeds_copy\n",
register vm_map_entry_t entry;
extern int db_indent;
- iprintf("task map 0x%x:\n", map);
+ iprintf("task map %08X\n", map);
db_indent += 2;
*/
void
vm_map_print(
- register vm_map_t map)
+ db_addr_t inmap)
{
register vm_map_entry_t entry;
+ vm_map_t map;
extern int db_indent;
char *swstate;
- iprintf("task map 0x%x:\n", map);
+ map = (vm_map_t)inmap; /* Make sure we have the right type */
+
+ iprintf("task map %08X\n", map);
db_indent += 2;
vm_map_header_print(&map->hdr);
- iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n",
+ iprintf("pmap = %08X, size = %08X, ref = %d, hint = %08X, first_free = %08X\n",
map->pmap,
map->size,
map->ref_count,
map->hint,
map->first_free);
- iprintf("%swait_for_space, %swiring_required, timestamp=%d\n",
+ iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
(map->wait_for_space ? "" : "!"),
(map->wiring_required ? "" : "!"),
map->timestamp);
swstate = "????";
break;
}
- iprintf("res=%d, sw_state=%s\n", map->res_count, swstate);
+ iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
#endif /* TASK_SWAPPER */
for (entry = vm_map_first_entry(map);
void
vm_map_copy_print(
- vm_map_copy_t copy)
+ db_addr_t incopy)
{
extern int db_indent;
+ vm_map_copy_t copy;
int i, npages;
vm_map_entry_t entry;
+ copy = (vm_map_copy_t)incopy; /* Make sure we have the right type */
+
printf("copy object 0x%x\n", copy);
db_indent += 2;
*/
vm_size_t
db_vm_map_total_size(
- vm_map_t map)
+ db_addr_t inmap)
{
vm_map_entry_t entry;
vm_size_t total;
+ vm_map_t map;
+
+ map = (vm_map_t)inmap; /* Make sure we have the right type */
total = 0;
for (entry = vm_map_first_entry(map);
boolean_t new_entry_needs_copy;
assert(map != VM_MAP_NULL);
- assert(size != 0 && size == round_page(size));
+ assert(size != 0 && size == round_page_32(size));
assert(inheritance == VM_INHERIT_NONE ||
inheritance == VM_INHERIT_COPY ||
inheritance == VM_INHERIT_SHARE);
/*
* Compute start and end of region.
*/
- src_start = trunc_page(addr);
- src_end = round_page(src_start + size);
+ src_start = trunc_page_32(addr);
+ src_end = round_page_32(src_start + size);
/*
* Initialize map_header.
if (!src_entry->needs_copy &&
(src_entry->protection & VM_PROT_WRITE)) {
- pmap_protect(vm_map_pmap(map),
- src_entry->vme_start,
- src_entry->vme_end,
- src_entry->protection &
+ if(map->mapped) {
+ vm_object_pmap_protect(
+ src_entry->object.vm_object,
+ src_entry->offset,
+ entry_size,
+ PMAP_NULL,
+ src_entry->vme_start,
+ src_entry->protection &
+ ~VM_PROT_WRITE);
+ } else {
+ pmap_protect(vm_map_pmap(map),
+ src_entry->vme_start,
+ src_entry->vme_end,
+ src_entry->protection &
~VM_PROT_WRITE);
+ }
}
object = src_entry->object.vm_object;
vm_object_pmap_protect(object,
offset,
entry_size,
- (src_entry->is_shared ?
+ ((src_entry->is_shared
+ || map->mapped) ?
PMAP_NULL : map->pmap),
src_entry->vme_start,
src_entry->protection &
* verification, and unlock the map.
*/
version.main_timestamp = map->timestamp;
- vm_map_unlock(map);
+ vm_map_unlock(map); /* Increments timestamp once! */
/*
* Perform the copy.
* changed while the copy was being made.
*/
- vm_map_lock(map); /* Increments timestamp once! */
+ vm_map_lock(map);
if (version.main_timestamp + 1 != map->timestamp) {
/*
* Simple version comparison failed.
return KERN_INVALID_ARGUMENT;
}
- size = round_page(size);
+ size = round_page_32(size);
result = vm_remap_extract(src_map, memory_address,
size, copy, &map_header,
* Allocate/check a range of free virtual address
* space for the target
*/
- *address = trunc_page(*address);
+ *address = trunc_page_32(*address);
vm_map_lock(target_map);
result = vm_remap_range_allocate(target_map, address, size,
mask, anywhere, &insp_entry);
vm_map_unlock(map);
return(TRUE);
}
+
+/*
+ * This routine is obsolete, but included for backward
+ * compatibility for older drivers.
+ */
+void
+kernel_vm_map_reference(
+ vm_map_t map)
+{
+ vm_map_reference(map);
+}
+
+/*
+ * vm_map_reference:
+ *
+ * Most code internal to the osfmk will go through a
+ * macro defining this. This is always here for the
+ * use of other kernel components.
+ */
+#undef vm_map_reference
+void
+vm_map_reference(
+ register vm_map_t map)
+{
+ if (map == VM_MAP_NULL)
+ return;
+
+ mutex_lock(&map->s_lock);
+#if TASK_SWAPPER
+ assert(map->res_count > 0);
+ assert(map->ref_count >= map->res_count);
+ map->res_count++;
+#endif
+ map->ref_count++;
+ mutex_unlock(&map->s_lock);
+}
+
+/*
+ * vm_map_deallocate:
+ *
+ * Removes a reference from the specified map,
+ * destroying it if no references remain.
+ * The map should not be locked.
+ */
+void
+vm_map_deallocate(
+ register vm_map_t map)
+{
+ unsigned int ref;
+
+ if (map == VM_MAP_NULL)
+ return;
+
+ mutex_lock(&map->s_lock);
+ ref = --map->ref_count;
+ if (ref > 0) {
+ vm_map_res_deallocate(map);
+ mutex_unlock(&map->s_lock);
+ return;
+ }
+ assert(map->ref_count == 0);
+ mutex_unlock(&map->s_lock);
+
+#if TASK_SWAPPER
+ /*
+ * The map residence count isn't decremented here because
+ * the vm_map_delete below will traverse the entire map,
+ * deleting entries, and the residence counts on objects
+ * and sharing maps will go away then.
+ */
+#endif
+
+ vm_map_destroy(map);
+}