*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
+#include <mach/vm_statistics.h>
#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/zalloc.h>
#include <mach/vm_map_server.h>
#include <mach/mach_host_server.h>
#include <ddb/tr.h>
+#include <machine/db_machdep.h>
#include <kern/xpr.h>
/* Internal prototypes
*/
vm_size_t vm_map_aggressive_enter_max; /* set by bootstrap */
+/* Skip acquiring locks if we're in the midst of a kernel core dump */
+extern unsigned int not_in_kdp;
+
void
vm_map_init(
void)
vm_map_steal_memory(
void)
{
- map_data_size = round_page(10 * sizeof(struct vm_map));
+ map_data_size = round_page_32(10 * sizeof(struct vm_map));
map_data = pmap_steal_memory(map_data_size);
#if 0
kentry_data_size =
- round_page(kentry_count * sizeof(struct vm_map_entry));
+ round_page_32(kentry_count * sizeof(struct vm_map_entry));
kentry_data = pmap_steal_memory(kentry_data_size);
}
entry = vm_map_to_entry(map);
next = entry->vme_next;
- while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) ||
- (trunc_page(next->vme_start) == trunc_page(entry->vme_start) &&
+ while (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_end) ||
+ (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_start) &&
next != vm_map_to_entry(map))) {
entry = next;
next = entry->vme_next;
UFF_map = (map); \
UFF_first_free = (new_first_free); \
UFF_next_entry = UFF_first_free->vme_next; \
- while (trunc_page(UFF_next_entry->vme_start) == \
- trunc_page(UFF_first_free->vme_end) || \
- (trunc_page(UFF_next_entry->vme_start) == \
- trunc_page(UFF_first_free->vme_start) && \
+ while (trunc_page_32(UFF_next_entry->vme_start) == \
+ trunc_page_32(UFF_first_free->vme_end) || \
+ (trunc_page_32(UFF_next_entry->vme_start) == \
+ trunc_page_32(UFF_first_free->vme_start) && \
UFF_next_entry != vm_map_to_entry(UFF_map))) { \
UFF_first_free = UFF_next_entry; \
UFF_next_entry = UFF_first_free->vme_next; \
map->max_offset, VM_MAP_NO_FLAGS);
vm_map_unlock(map);
- pmap_destroy(map->pmap);
+ if(map->pmap)
+ pmap_destroy(map->pmap);
zfree(vm_map_zone, (vm_offset_t) map);
}
* future lookups. Performs necessary interlocks.
*/
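+/*
+ *	MACRO_BEGIN/MACRO_END keep the expansion a single statement, so
+ *	SAVE_HINT stays correct after an unbraced "if (not_in_kdp)".
+ */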
#define SAVE_HINT(map,value) \
+MACRO_BEGIN \
mutex_lock(&(map)->s_lock); \
(map)->hint = (value); \
- mutex_unlock(&(map)->s_lock);
+ mutex_unlock(&(map)->s_lock); \
+MACRO_END
/*
* vm_map_lookup_entry: [ internal use only ]
* Start looking either from the head of the
* list, or from the hint.
*/
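+	/*
+	 *	not_in_kdp is cleared while a kernel core dump is in progress;
+	 *	skip the hint lock there, since the dump path must not block
+	 *	on a mutex the interrupted thread may already hold.
+	 */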
-
- mutex_lock(&map->s_lock);
+ if (not_in_kdp)
+ mutex_lock(&map->s_lock);
cur = map->hint;
- mutex_unlock(&map->s_lock);
+ if (not_in_kdp)
+ mutex_unlock(&map->s_lock);
if (cur == vm_map_to_entry(map))
cur = cur->vme_next;
*/
*entry = cur;
- SAVE_HINT(map, cur);
+ if (not_in_kdp)
+ SAVE_HINT(map, cur);
return(TRUE);
}
break;
cur = cur->vme_next;
}
*entry = cur->vme_prev;
- SAVE_HINT(map, *entry);
+ if (not_in_kdp)
+ SAVE_HINT(map, *entry);
return(FALSE);
}
{
unsigned int cache_attr;
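+	/* a map with no pmap has nothing to pre-enter mappings into */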
+ if(map->pmap == 0)
+ return;
+
while (addr < end_addr) {
register vm_page_t m;
m->busy = TRUE;
if (m->no_isync == TRUE) {
- pmap_sync_caches_phys(m->phys_addr);
+ pmap_sync_caches_phys(m->phys_page);
m->no_isync = FALSE;
}
(entry->max_protection == max_protection) &&
(entry->behavior == VM_BEHAVIOR_DEFAULT) &&
(entry->in_transition == 0) &&
- ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) &&
+ ((alias == VM_MEMORY_REALLOC) || ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) &&
(entry->wired_count == 0)) { /* implies user_wired_count == 0 */
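+			/*
+			 * The NO_COALESCE_LIMIT size cap above is waived for regions
+			 * tagged VM_MEMORY_REALLOC, so large realloc'd regions can
+			 * still coalesce with the preceding entry.
+			 */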
if (vm_object_coalesce(entry->object.vm_object,
VM_OBJECT_NULL,
vm_offset_t pmap_base_addr; \
\
pmap_base_addr = 0xF0000000 & entry->vme_start; \
- pmap_unnest(map->pmap, pmap_base_addr, \
- 0x10000000); \
+ pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
entry->use_pmap = FALSE; \
} else if(entry->object.vm_object \
&& !entry->is_sub_map \
&& entry->object.vm_object->phys_contiguous) { \
pmap_remove(map->pmap, \
- entry->vme_start, entry->vme_end); \
+ (addr64_t)(entry->vme_start), \
+ (addr64_t)(entry->vme_end)); \
} \
_vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
} \
vm_offset_t pmap_base_addr; \
\
pmap_base_addr = 0xF0000000 & entry->vme_start; \
- pmap_unnest(map->pmap, pmap_base_addr, \
- 0x10000000); \
+ pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
entry->use_pmap = FALSE; \
} else if(entry->object.vm_object \
&& !entry->is_sub_map \
&& entry->object.vm_object->phys_contiguous) { \
pmap_remove(map->pmap, \
- entry->vme_start, entry->vme_end); \
+ (addr64_t)(entry->vme_start), \
+ (addr64_t)(entry->vme_end)); \
} \
_vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
} \
(object->copy == VM_OBJECT_NULL) &&
(object->shadow == VM_OBJECT_NULL) &&
(!object->pager_created)) {
- entry->offset = (vm_object_offset_t)offset;
- entry->object.vm_object = VM_OBJECT_NULL;
- vm_object_deallocate(object);
- entry->is_sub_map = TRUE;
- vm_map_reference(entry->object.sub_map = submap);
+ entry->offset = (vm_object_offset_t)offset;
+ entry->object.vm_object = VM_OBJECT_NULL;
+ vm_object_deallocate(object);
+ entry->is_sub_map = TRUE;
+ entry->object.sub_map = submap;
+ vm_map_reference(submap);
#ifndef i386
- if ((use_pmap) && (offset == 0)) {
- /* nest if platform code will allow */
- result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
- start, end - start);
- if(result)
- panic("pmap_nest failed!");
- entry->use_pmap = TRUE;
- }
+ if ((use_pmap) && (offset == 0)) {
+ /* nest if platform code will allow */
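+			/* the submap may not have a pmap yet; create one before it can be nested */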
+ if(submap->pmap == NULL) {
+ submap->pmap = pmap_create((vm_size_t) 0);
+ if(submap->pmap == PMAP_NULL) {
+ return(KERN_NO_SPACE);
+ }
+ }
+ result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
+ (addr64_t)start, (addr64_t)start, (uint64_t)(end - start));
+ if(result)
+ panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result);
+ entry->use_pmap = TRUE;
+ }
#endif
#ifdef i386
- pmap_remove(map->pmap, start, end);
+ pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end);
#endif
- result = KERN_SUCCESS;
+ result = KERN_SUCCESS;
}
vm_map_unlock(map);
local_entry->use_pmap = FALSE;
local_entry = local_entry->vme_next;
}
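+		/* this map's pmap no longer shares the nested segment; its entries must be mapped privately from here on */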
- pmap_unnest(map->pmap, pmap_base_addr,
- (pmap_end_addr - pmap_base_addr) + 1);
+ pmap_unnest(map->pmap, (addr64_t)pmap_base_addr);
#endif
}
if (!(current->protection & VM_PROT_WRITE)) {
rc = vm_map_wire_nested(entry->object.sub_map,
sub_start, sub_end,
access_type,
- user_wire, pmap, pmap_addr);
+ user_wire, map_pmap, pmap_addr);
vm_map_lock(map);
}
s = entry->vme_start;
continue;
} else {
vm_map_unlock(map);
- vm_map_unwire_nested(entry->object.sub_map,
- sub_start, sub_end, user_wire, pmap, pmap_addr);
+ vm_map_unwire_nested(entry->object.sub_map,
+ sub_start, sub_end, user_wire, map_pmap,
+ pmap_addr);
vm_map_lock(map);
if (last_timestamp+1 != map->timestamp) {
VM_PROT_NONE);
} else {
pmap_remove(map->pmap,
- start, start + remove_size);
+ (addr64_t)start,
+ (addr64_t)(start + remove_size));
}
}
}
VM_PROT_NONE);
} else {
pmap_remove(map->pmap,
- (start + entry->vme_start) - offset,
- ((start + entry->vme_start)
- - offset) + remove_size);
+ (addr64_t)((start + entry->vme_start)
+ - offset),
+ (addr64_t)(((start + entry->vme_start)
+ - offset) + remove_size));
}
}
entry = entry->vme_next;
if(entry->is_sub_map) {
if(entry->use_pmap) {
#ifndef i386
- pmap_unnest(map->pmap, entry->vme_start,
- entry->vme_end - entry->vme_start);
+ pmap_unnest(map->pmap, (addr64_t)entry->vme_start);
#endif
if((map->mapped) && (map->ref_count)) {
/* clean up parent map/maps */
entry->offset);
}
} else {
- if((map->mapped) && (map->ref_count)) {
- vm_object_pmap_protect(
- entry->object.vm_object,
- entry->offset,
- entry->vme_end - entry->vme_start,
- PMAP_NULL,
- entry->vme_start,
- VM_PROT_NONE);
- } else {
+ object = entry->object.vm_object;
+ if((map->mapped) && (map->ref_count)) {
+ vm_object_pmap_protect(
+ object, entry->offset,
+ entry->vme_end - entry->vme_start,
+ PMAP_NULL,
+ entry->vme_start,
+ VM_PROT_NONE);
+ } else if(object != NULL) {
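+				/*
+				 * Choose between a bulk pmap_remove over the whole range and
+				 * a walk of the object's resident pages: the per-page walk is
+				 * used only when the object is unshadowed, not physically
+				 * contiguous, and has at most a quarter of the range resident,
+				 * since then scanning the page list is cheaper than having
+				 * pmap_remove sweep every page of the range.
+				 */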
+ if ((object->shadow != NULL) ||
+ (object->phys_contiguous) ||
+ (object->resident_page_count >
+ atop((entry->vme_end - entry->vme_start)/4))) {
pmap_remove(map->pmap,
- entry->vme_start,
- entry->vme_end);
+ (addr64_t)(entry->vme_start),
+ (addr64_t)(entry->vme_end));
+ } else {
+ vm_page_t p;
+ vm_object_offset_t start_off;
+ vm_object_offset_t end_off;
+ start_off = entry->offset;
+ end_off = start_off +
+ (entry->vme_end - entry->vme_start);
+ vm_object_lock(object);
+ queue_iterate(&object->memq,
+ p, vm_page_t, listq) {
+ if ((!p->fictitious) &&
+ (p->offset >= start_off) &&
+ (p->offset < end_off)) {
+ vm_offset_t start;
+ start = entry->vme_start;
+ start += p->offset - start_off;
+ pmap_remove(
+ map->pmap, start,
+ start + PAGE_SIZE);
+ }
}
+ vm_object_unlock(object);
+ }
+ }
}
}
* splitting entries in strange ways.
*/
- dst_end = round_page(dst_addr + dst_size);
+ dst_end = round_page_32(dst_addr + dst_size);
vm_map_lock(dst_map);
start_pass_1:
return(KERN_INVALID_ADDRESS);
}
- vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
+ vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));
for (entry = tmp_entry;;) {
vm_map_entry_t next;
!page_aligned (dst_addr))
{
aligned = FALSE;
- dst_end = round_page(dst_addr + copy->size);
+ dst_end = round_page_32(dst_addr + copy->size);
} else {
dst_end = dst_addr + copy->size;
}
vm_map_unlock(dst_map);
return(KERN_INVALID_ADDRESS);
}
- vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
+ vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));
for (entry = tmp_entry;;) {
vm_map_entry_t next = entry->vme_next;
break;
}
}
- vm_map_clip_start(dst_map, tmp_entry, trunc_page(base_addr));
+ vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(base_addr));
entry = tmp_entry;
} /* while */
entry->vme_start,
VM_PROT_NONE);
} else {
- pmap_remove(dst_map->pmap,
- entry->vme_start,
- entry->vme_end);
+ pmap_remove(dst_map->pmap,
+ (addr64_t)(entry->vme_start),
+ (addr64_t)(entry->vme_end));
}
vm_object_deallocate(old_object);
}
/* No isync here */
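+			/* map the page with the cache attributes (WIMG bits) its object specifies */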
PMAP_ENTER(pmap, va, m, prot,
- VM_WIMG_USE_DEFAULT, FALSE);
+ ((unsigned int)
+ (m->object->wimg_bits))
+ & VM_WIMG_MASK,
+ FALSE);
vm_object_lock(object);
vm_page_lock_queues();
VM_MAP_REMOVE_INTERRUPTIBLE;
}
if (src_destroy) {
- (void) vm_map_remove(src_map, trunc_page(src_addr),
- round_page(src_addr + len),
+ (void) vm_map_remove(src_map, trunc_page_32(src_addr),
+ round_page_32(src_addr + len),
flags);
}
*copy_result = copy;
*addr = 0;
kr = vm_map_enter(map,
addr,
- round_page(copy->size),
+ round_page_32(copy->size),
(vm_offset_t) 0,
TRUE,
VM_OBJECT_NULL,
vm_object_offset_t offset;
offset = trunc_page_64(copy->offset);
- size = round_page(copy->size +
+ size = round_page_32(copy->size +
(vm_size_t)(copy->offset - offset));
*dst_addr = 0;
kr = vm_map_enter(dst_map, dst_addr, size,
*/
vm_copy_start = trunc_page_64(copy->offset);
- size = round_page((vm_size_t)copy->offset + copy->size)
+ size = round_page_32((vm_size_t)copy->offset + copy->size)
- vm_copy_start;
StartAgain: ;
vm_object_unlock(object);
PMAP_ENTER(dst_map->pmap, va, m, entry->protection,
- VM_WIMG_USE_DEFAULT, TRUE);
+ ((unsigned int)
+ (m->object->wimg_bits))
+ & VM_WIMG_MASK,
+ TRUE);
vm_object_lock(object);
PAGE_WAKEUP_DONE(m);
PMAP_ENTER(dst_map->pmap, va,
m, prot,
- VM_WIMG_USE_DEFAULT,
+ ((unsigned int)
+ (m->object->wimg_bits))
+ & VM_WIMG_MASK,
FALSE);
vm_object_lock(object);
* Compute start and end of region
*/
- src_start = trunc_page(src_addr);
- src_end = round_page(src_addr + len);
+ src_start = trunc_page_32(src_addr);
+ src_end = round_page_32(src_addr + len);
XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0);
src_entry = tmp_entry;
}
if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) &&
- ((tmp_entry->object.vm_object->wimg_bits != VM_WIMG_DEFAULT) ||
- (tmp_entry->object.vm_object->phys_contiguous))) {
- /* This is not, cannot be supported for now */
- /* we need a description of the caching mode */
- /* reflected in the object before we can */
- /* support copyin, and then the support will */
- /* be for direct copy */
+ (tmp_entry->object.vm_object->phys_contiguous)) {
+		/* This is not supported for now. In the future */
+		/* we will need to detect the phys_contig */
+		/* condition and then upgrade copy_slowly */
+		/* to do a physical copy from the device-memory */
+		/* based object. We can piggy-back off of */
+		/* the was_wired boolean to set up the */
+		/* proper handling. */
RETURN(KERN_PROTECTION_FAILURE);
}
/*
XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
src_object, new_entry, new_entry->object.vm_object,
was_wired, 0);
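+	/*
+	 * The symmetric "quick" copy is attempted only when there is no
+	 * source object, or the entry is neither wired nor shared; wired
+	 * and shared entries take the slow or delayed copy paths below.
+	 */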
- if (!was_wired &&
- vm_object_copy_quickly(
+ if ((src_object == VM_OBJECT_NULL ||
+ (!was_wired && !map_share && !tmp_entry->is_shared)) &&
+ vm_object_copy_quickly(
&new_entry->object.vm_object,
src_offset,
src_size,
*/
if (src_needs_copy && !tmp_entry->needs_copy) {
- if (tmp_entry->is_shared ||
- tmp_entry->object.vm_object->true_share ||
- map_share) {
- vm_map_unlock(src_map);
- new_entry->object.vm_object =
- vm_object_copy_delayed(
- src_object,
- src_offset,
- src_size);
- /* dec ref gained in copy_quickly */
- vm_object_lock(src_object);
- src_object->ref_count--;
- assert(src_object->ref_count > 0);
- vm_object_res_deallocate(src_object);
- vm_object_unlock(src_object);
- vm_map_lock(src_map);
- /*
- * it turns out that we have
- * finished our copy. No matter
- * what the state of the map
- * we will lock it again here
- * knowing that if there is
- * additional data to copy
- * it will be checked at
- * the top of the loop
- *
- * Don't do timestamp check
- */
-
- } else {
- vm_object_pmap_protect(
- src_object,
- src_offset,
- src_size,
- (src_entry->is_shared ?
- PMAP_NULL
- : src_map->pmap),
- src_entry->vme_start,
- src_entry->protection &
- ~VM_PROT_WRITE);
-
- tmp_entry->needs_copy = TRUE;
- }
+ vm_object_pmap_protect(
+ src_object,
+ src_offset,
+ src_size,
+ (src_entry->is_shared ?
+ PMAP_NULL
+ : src_map->pmap),
+ src_entry->vme_start,
+ src_entry->protection &
+ ~VM_PROT_WRITE);
+ tmp_entry->needs_copy = TRUE;
}
/*
goto CopySuccessful;
}
- new_entry->needs_copy = FALSE;
-
/*
* Take an object reference, so that we may
* release the map lock(s).
*/
if (was_wired) {
+ CopySlowly:
vm_object_lock(src_object);
result = vm_object_copy_slowly(
src_object,
&new_entry->object.vm_object);
new_entry->offset = 0;
new_entry->needs_copy = FALSE;
+
+ }
+ else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
+ (tmp_entry->is_shared || map_share)) {
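+		/*
+		 * A shared, symmetric-copy-strategy object gets a delayed
+		 * (copy-on-write) copy; if one cannot be created, fall back
+		 * to the physical copy at CopySlowly.
+		 */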
+ vm_object_t new_object;
+
+ vm_object_lock(src_object);
+ new_object = vm_object_copy_delayed(
+ src_object,
+ src_offset,
+ src_size);
+ if (new_object == VM_OBJECT_NULL)
+ goto CopySlowly;
+
+ new_entry->object.vm_object = new_object;
+ new_entry->needs_copy = TRUE;
+ result = KERN_SUCCESS;
+
} else {
result = vm_object_copy_strategically(src_object,
src_offset,
&new_entry_needs_copy);
new_entry->needs_copy = new_entry_needs_copy;
-
}
if (result != KERN_SUCCESS &&
*/
if (src_destroy) {
(void) vm_map_delete(src_map,
- trunc_page(src_addr),
+ trunc_page_32(src_addr),
src_end,
(src_map == kernel_map) ?
VM_MAP_REMOVE_KUNWIRE :
if(old_entry->use_pmap) {
result = pmap_nest(new_map->pmap,
(old_entry->object.sub_map)->pmap,
- old_entry->vme_start,
- old_entry->vme_end - old_entry->vme_start);
+ (addr64_t)old_entry->vme_start,
+ (addr64_t)old_entry->vme_start,
+ (uint64_t)(old_entry->vme_end - old_entry->vme_start));
if(result)
panic("vm_map_fork_share: pmap_nest failed!");
}
*/
vm_map_lock(old_map);
if (!vm_map_lookup_entry(old_map, start, &last) ||
- last->max_protection & VM_PROT_READ ==
- VM_PROT_NONE) {
+ (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
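+		/* "==" binds tighter than "&", so the protection mask must be parenthesized */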
last = last->vme_next;
}
*old_entry_p = last;
recurse_count = *nesting_depth;
LOOKUP_NEXT_BASE_ENTRY:
- vm_map_lock_read(map);
+ if (not_in_kdp)
+ vm_map_lock_read(map);
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
- vm_map_unlock_read(map);
- return(KERN_INVALID_ADDRESS);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
+ return(KERN_INVALID_ADDRESS);
}
} else {
entry = tmp_entry;
while(entry->is_sub_map && recurse_count) {
recurse_count--;
- vm_map_lock_read(entry->object.sub_map);
+ if (not_in_kdp)
+ vm_map_lock_read(entry->object.sub_map);
if(entry == base_entry) {
}
submap = entry->object.sub_map;
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
map = submap;
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
if ((entry = tmp_entry->vme_next)
== vm_map_to_entry(map)) {
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
map = base_map;
start = base_next;
recurse_count = 0;
}
if(base_next <=
(base_addr += (entry->vme_start - start))) {
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
map = base_map;
start = base_next;
recurse_count = 0;
}
base_addr += entry->vme_start;
if(base_addr >= base_next) {
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
map = base_map;
start = base_next;
recurse_count = 0;
extended.pages_dirtied = 0;
extended.external_pager = 0;
extended.shadow_depth = 0;
-
+
+ if (not_in_kdp)
if(!entry->is_sub_map) {
vm_region_walk(entry, &extended, entry->offset,
entry->vme_end - start, map, start);
submap_info->pages_dirtied = extended.pages_dirtied;
submap_info->external_pager = extended.external_pager;
submap_info->shadow_depth = extended.shadow_depth;
-
- vm_map_unlock_read(map);
+ if (not_in_kdp)
+ vm_map_unlock_read(map);
return(KERN_SUCCESS);
}
if (shadow && (max_refcnt == 1))
extended->pages_shared_now_private++;
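+		/* fictitious pages have no physical page behind them, so skip the pmap modified-bit probe */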
- if (p->dirty || pmap_is_modified(p->phys_addr))
+ if (!p->fictitious &&
+ (p->dirty || pmap_is_modified(p->phys_page)))
extended->pages_dirtied++;
extended->pages_resident++;
if(object != caller_object)
- vm_object_unlock(object);
+ vm_object_unlock(object);
return;
}
extended->pages_swapped_out++;
if(object != caller_object)
- vm_object_unlock(object);
+ vm_object_unlock(object);
return;
}
}
if (shadow) {
- vm_object_lock(shadow);
+ vm_object_lock(shadow);
if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
ref_count--;
max_refcnt = ref_count;
if(object != caller_object)
- vm_object_unlock(object);
+ vm_object_unlock(object);
object = shadow;
shadow = object->shadow;
continue;
}
if(object != caller_object)
- vm_object_unlock(object);
+ vm_object_unlock(object);
break;
}
}
return(0);
if (entry->is_sub_map)
- ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
+ return(0);
else {
ref_count = 0;
if (chk_obj == object)
ref_count++;
if (tmp_obj = chk_obj->shadow)
- vm_object_lock(tmp_obj);
+ vm_object_lock(tmp_obj);
vm_object_unlock(chk_obj);
-
+
chk_obj = tmp_obj;
}
}
}
/* Get the starting address */
- start = trunc_page(address);
+ start = trunc_page_32(address);
/* Figure how much memory we need to flush (in page increments) */
- sync_size = round_page(start + size) - start;
+ sync_size = round_page_32(start + size) - start;
ret = KERN_SUCCESS; /* Assume it all worked */
ret =
pmap_attribute_cache_sync(
- m->phys_addr,
+ m->phys_page,
PAGE_SIZE,
attribute, value);
} else if (object->shadow) {
vm_map_links_print(
struct vm_map_links *links)
{
- iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n",
+ iprintf("prev = %08X next = %08X start = %08X end = %08X\n",
links->prev,
links->next,
links->start,
struct vm_map_header *header)
{
vm_map_links_print(&header->links);
- iprintf("nentries=0x%x, %sentries_pageable\n",
+ iprintf("nentries = %08X, %sentries_pageable\n",
header->nentries,
(header->entries_pageable ? "" : "!"));
}
extern int db_indent;
int shadows;
- iprintf("map entry 0x%x:\n", entry);
+ iprintf("map entry %08X\n", entry);
db_indent += 2;
static char *inheritance_name[4] = { "share", "copy", "none", "?"};
static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };
- iprintf("map entry 0x%x:\n", entry);
+	iprintf("map entry %08X\n", entry);
db_indent += 2;
vm_map_links_print(&entry->links);
- iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n",
+ iprintf("start = %08X end = %08X, prot=%x/%x/%s\n",
entry->vme_start,
entry->vme_end,
entry->protection,
entry->max_protection,
inheritance_name[(entry->inheritance & 0x3)]);
- iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n",
+ iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
behavior_name[(entry->behavior & 0x3)],
entry->wired_count,
entry->user_wired_count);
(entry->needs_wakeup ? "" : "!"));
if (entry->is_sub_map) {
- iprintf("submap=0x%x, offset=0x%x\n",
+ iprintf("submap = %08X - offset=%08X\n",
entry->object.sub_map,
entry->offset);
} else {
- iprintf("object=0x%x, offset=0x%x, ",
+ iprintf("object=%08X, offset=%08X, ",
entry->object.vm_object,
entry->offset);
printf("%sis_shared, %sneeds_copy\n",
register vm_map_entry_t entry;
extern int db_indent;
- iprintf("task map 0x%x:\n", map);
+ iprintf("task map %08X\n", map);
db_indent += 2;
*/
void
vm_map_print(
- register vm_map_t map)
+ db_addr_t inmap)
{
register vm_map_entry_t entry;
+ vm_map_t map;
extern int db_indent;
char *swstate;
- iprintf("task map 0x%x:\n", map);
+ map = (vm_map_t)inmap; /* Make sure we have the right type */
+
+ iprintf("task map %08X\n", map);
db_indent += 2;
vm_map_header_print(&map->hdr);
- iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n",
+ iprintf("pmap = %08X, size = %08X, ref = %d, hint = %08X, first_free = %08X\n",
map->pmap,
map->size,
map->ref_count,
map->hint,
map->first_free);
- iprintf("%swait_for_space, %swiring_required, timestamp=%d\n",
+ iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
(map->wait_for_space ? "" : "!"),
(map->wiring_required ? "" : "!"),
map->timestamp);
swstate = "????";
break;
}
- iprintf("res=%d, sw_state=%s\n", map->res_count, swstate);
+ iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
#endif /* TASK_SWAPPER */
for (entry = vm_map_first_entry(map);
void
vm_map_copy_print(
- vm_map_copy_t copy)
+ db_addr_t incopy)
{
extern int db_indent;
+ vm_map_copy_t copy;
int i, npages;
vm_map_entry_t entry;
+ copy = (vm_map_copy_t)incopy; /* Make sure we have the right type */
+
printf("copy object 0x%x\n", copy);
db_indent += 2;
*/
vm_size_t
db_vm_map_total_size(
- vm_map_t map)
+ db_addr_t inmap)
{
vm_map_entry_t entry;
vm_size_t total;
+ vm_map_t map;
+
+ map = (vm_map_t)inmap; /* Make sure we have the right type */
total = 0;
for (entry = vm_map_first_entry(map);
boolean_t new_entry_needs_copy;
assert(map != VM_MAP_NULL);
- assert(size != 0 && size == round_page(size));
+ assert(size != 0 && size == round_page_32(size));
assert(inheritance == VM_INHERIT_NONE ||
inheritance == VM_INHERIT_COPY ||
inheritance == VM_INHERIT_SHARE);
/*
* Compute start and end of region.
*/
- src_start = trunc_page(addr);
- src_end = round_page(src_start + size);
+ src_start = trunc_page_32(addr);
+ src_end = round_page_32(src_start + size);
/*
* Initialize map_header.
return KERN_INVALID_ARGUMENT;
}
- size = round_page(size);
+ size = round_page_32(size);
result = vm_remap_extract(src_map, memory_address,
size, copy, &map_header,
* Allocate/check a range of free virtual address
* space for the target
*/
- *address = trunc_page(*address);
+ *address = trunc_page_32(*address);
vm_map_lock(target_map);
result = vm_remap_range_allocate(target_map, address, size,
mask, anywhere, &insp_entry);