/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/thread_act.h>

#include <kern/assert.h>
#include <kern/host.h>
#include <kern/ledger.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
/* BSD VM COMPONENT INTERFACES */

int
mach_get_map_nentries(
    vm_map_t map)
{
    return map->hdr.nentries;
}

vm_offset_t
mach_get_vm_start(vm_map_t map)
{
    return vm_map_first_entry(map)->vme_start;
}

vm_offset_t
mach_get_vm_end(vm_map_t map)
{
    return vm_map_last_entry(map)->vme_end;
}
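/*
 * Illustrative usage sketch (editorial note; caller and the reconstructed
 * name mach_get_map_nentries() are assumptions, not verified against a
 * particular XNU release).  These accessors let BSD-side code size up a
 * Mach VM map without reaching into vm_map internals, e.g.:
 *
 *	vm_map_t map = current_map();
 *	int nentries = mach_get_map_nentries(map);
 *	vm_offset_t span = mach_get_vm_end(map) - mach_get_vm_start(map);
 */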
const struct memory_object_pager_ops vnode_pager_ops = {
    .memory_object_reference = vnode_pager_reference,
    .memory_object_deallocate = vnode_pager_deallocate,
    .memory_object_init = vnode_pager_init,
    .memory_object_terminate = vnode_pager_terminate,
    .memory_object_data_request = vnode_pager_data_request,
    .memory_object_data_return = vnode_pager_data_return,
    .memory_object_data_initialize = vnode_pager_data_initialize,
    .memory_object_data_unlock = vnode_pager_data_unlock,
    .memory_object_synchronize = vnode_pager_synchronize,
    .memory_object_map = vnode_pager_map,
    .memory_object_last_unmap = vnode_pager_last_unmap,
    .memory_object_data_reclaim = NULL,
    .memory_object_pager_name = "vnode pager"
};
typedef struct vnode_pager {
    /* mandatory generic header */
    struct memory_object vn_pgr_hdr;

    /* pager-specific data */
    struct os_refcnt    ref_count;
    struct vnode        *vnode_handle;  /* vnode handle */
} *vnode_pager_t;
kern_return_t vnode_pager_cluster_read(vnode_pager_t, vm_object_offset_t, vm_object_offset_t, uint32_t, vm_size_t);	/* forward */

void vnode_pager_cluster_write(vnode_pager_t, vm_object_offset_t, vm_size_t, vm_object_offset_t *, int *, int);	/* forward */

vnode_pager_t vnode_object_create(struct vnode *);	/* forward */

vnode_pager_t vnode_pager_lookup(memory_object_t);	/* forward */

struct vnode *vnode_pager_lookup_vnode(memory_object_t);	/* forward */
ZONE_DECLARE(vnode_pager_zone, "vnode pager structures",
    sizeof(struct vnode_pager), ZC_NOENCRYPT);

#define VNODE_PAGER_NULL  ((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT  1
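/*
 * Worked example (editorial note): with CLUSTER_SHIFT == 1 and a 4K page
 * (PAGE_SHIFT == 12), the commented-out cluster sizing in
 * vnode_pager_init() below, (1 << (CLUSTER_SHIFT + PAGE_SHIFT)), would
 * yield 1 << 13 = 8192 bytes, i.e. two-page clusters; the code as written
 * requests single-page clusters instead, 1 << 12 = 4096 bytes.
 */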
#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL     0xffffffff
#define PAGER_INIT    0x00000001
#define PAGER_PAGEIN  0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
extern int proc_resetpcontrol(int);


extern int uiomove64(addr64_t, int, void *);

#define MAX_RUN  32  /* max pages gathered per pass below; value from the upstream source */
kern_return_t
memory_object_control_uiomove(
    memory_object_control_t control,
    memory_object_offset_t  offset,
    void        *uio,
    int         start_offset,
    int         io_requested,
    int         mark_dirty,
    int         take_reference)
{
    vm_object_t object;
    vm_page_t   dst_page;
    int         xsize;
    int         retval = 0;
    int         cur_run;
    int         cur_needed;
    int         i;
    int         orig_offset;
    vm_page_t   page_run[MAX_RUN];
    int         dirty_count;    /* keeps track of number of pages dirtied as part of this uiomove */
    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL) {
        return 0;
    }
    assert(!object->internal);

    vm_object_lock(object);

    if (mark_dirty && object->copy != VM_OBJECT_NULL) {
        /*
         * We can't modify the pages without honoring
         * copy-on-write obligations first, so fall off
         * this optimized path and fall back to the regular
         * path.
         */
        vm_object_unlock(object);
        return 0;
    }
    orig_offset = start_offset;

    dirty_count = 0;

    while (io_requested && retval == 0) {
        cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

        if (cur_needed > MAX_RUN) {
            cur_needed = MAX_RUN;
        }

        for (cur_run = 0; cur_run < cur_needed;) {
            if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
                break;
            }

            if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
                /*
                 * someone else is playing with the page... if we've
                 * already collected pages into this run, go ahead
                 * and process now, we can't block on this
                 * page while holding other pages in the BUSY state
                 * otherwise we will wait
                 */
                if (cur_run) {
                    break;
                }
                PAGE_SLEEP(object, dst_page, THREAD_UNINT);
                continue;
            }
            if (dst_page->vmp_laundry) {
                vm_pageout_steal_laundry(dst_page, FALSE);
            }

            if (mark_dirty) {
                if (dst_page->vmp_dirty == FALSE) {
                    dirty_count++;
                }
                SET_PAGE_DIRTY(dst_page, FALSE);
                if (dst_page->vmp_cs_validated &&
                    !dst_page->vmp_cs_tainted) {
                    /*
                     * CODE SIGNING:
                     * We're modifying a code-signed
                     * page: force revalidate
                     */
                    dst_page->vmp_cs_validated = VMP_CS_ALL_FALSE;

                    VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);

                    pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
                }
            }
            dst_page->vmp_busy = TRUE;

            page_run[cur_run++] = dst_page;

            offset += PAGE_SIZE_64;
        }
        if (cur_run == 0) {
            /*
             * we hit a 'hole' in the cache or
             * a page we don't want to try to handle,
             * so bail at this point
             * we'll unlock the object below
             */
            break;
        }
        vm_object_unlock(object);

        for (i = 0; i < cur_run; i++) {
            dst_page = page_run[i];

            if ((xsize = PAGE_SIZE - start_offset) > io_requested) {
                xsize = io_requested;
            }

            if ((retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio))) {
                break;
            }

            io_requested -= xsize;
            start_offset = 0;
        }
        vm_object_lock(object);

        /*
         * if we have more than 1 page to work on
         * in the current run, or the original request
         * started at offset 0 of the page, or we're
         * processing multiple batches, we will move
         * the pages to the tail of the inactive queue
         * to implement an LRU for read/write accesses
         *
         * the check for orig_offset == 0 is there to
         * mitigate the cost of small (< page_size) requests
         * to the same page (this way we only move it once)
         */
        if (take_reference && (cur_run > 1 || orig_offset == 0)) {
            vm_page_lockspin_queues();

            for (i = 0; i < cur_run; i++) {
                vm_page_lru(page_run[i]);
            }

            vm_page_unlock_queues();
        }
        for (i = 0; i < cur_run; i++) {
            dst_page = page_run[i];

            /*
             * someone is explicitly referencing this page...
             * update clustered and speculative state
             */
            if (dst_page->vmp_clustered) {
                VM_PAGE_CONSUME_CLUSTERED(dst_page);
            }

            PAGE_WAKEUP_DONE(dst_page);
        }
        orig_offset = 0;
    }

    task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));

    vm_object_unlock(object);

    return retval;
}
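/*
 * Illustrative sketch (assumed caller, not part of this file): the cluster
 * layer's copy paths are the expected consumers of
 * memory_object_control_uiomove(), copying directly between cached pages
 * and a uio without issuing I/O; names and offsets below are hypothetical:
 *
 *	memory_object_control_t control = ubc_getobject(vp, UBC_FLAGS_NONE);
 *	retval = memory_object_control_uiomove(control, uio_offset(uio),
 *	    uio, start_offset, io_resid, mark_dirty, take_reference);
 */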
/*
 *	Routine:	vnode_pager_setup
 *	Function:
 *		Set up a pager for the given vnode.
 */
memory_object_t
vnode_pager_setup(
    struct vnode        *vp,
    __unused memory_object_t   pager)
{
    vnode_pager_t   vnode_object;

    vnode_object = vnode_object_create(vp);
    if (vnode_object == VNODE_PAGER_NULL) {
        panic("vnode_pager_setup: vnode_object_create() failed");
    }
    return (memory_object_t)vnode_object;
}
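/*
 * Editorial note (assumption, not verified here): UBC is the expected
 * caller; when a vnode's ubc_info is initialized, something along these
 * lines associates the pager with the vnode:
 *
 *	memory_object_t pager = vnode_pager_setup(vp, MEMORY_OBJECT_NULL);
 */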
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
    vnode_pager_t   vnode_object;
    kern_return_t   kr;
    memory_object_attr_info_data_t  attributes;

    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    vnode_object = vnode_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    vnode_object->vn_pgr_hdr.mo_control = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = TRUE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS) {
        panic("vnode_pager_init: memory_object_change_attributes() failed");
    }

    return KERN_SUCCESS;
}
kern_return_t
vnode_pager_data_return(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    memory_object_cluster_size_t    data_cnt,
    memory_object_offset_t  *resid_offset,
    int                     *io_error,
    __unused boolean_t      dirty,
    __unused boolean_t      kernel_copy,
    int                     upl_flags)
{
    vnode_pager_t   vnode_object;

    assertf(page_aligned(offset), "offset 0x%llx\n", offset);

    vnode_object = vnode_pager_lookup(mem_obj);

    vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

    return KERN_SUCCESS;
}
kern_return_t
vnode_pager_data_initialize(
    __unused memory_object_t               mem_obj,
    __unused memory_object_offset_t        offset,
    __unused memory_object_cluster_size_t  data_cnt)
{
    panic("vnode_pager_data_initialize");
    return KERN_FAILURE;
}
kern_return_t
vnode_pager_data_unlock(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t   size,
    __unused vm_prot_t              desired_access)
{
    return KERN_FAILURE;
}
void
vnode_pager_dirtied(
    memory_object_t     mem_obj,
    vm_object_offset_t  s_offset,
    vm_object_offset_t  e_offset)
{
    vnode_pager_t   vnode_object;

    if (mem_obj && mem_obj->mo_pager_ops == &vnode_pager_ops) {
        vnode_object = vnode_pager_lookup(mem_obj);
        vnode_pager_was_dirtied(vnode_object->vnode_handle, s_offset, e_offset);
    }
}
kern_return_t
vnode_pager_get_isinuse(
    memory_object_t mem_obj,
    uint32_t        *isinuse)
{
    vnode_pager_t   vnode_object;

    if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
        *isinuse = 1;
        return KERN_INVALID_ARGUMENT;
    }

    vnode_object = vnode_pager_lookup(mem_obj);

    *isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
    return KERN_SUCCESS;
}
kern_return_t
vnode_pager_get_throttle_io_limit(
    memory_object_t mem_obj,
    uint32_t        *limit)
{
    vnode_pager_t   vnode_object;

    if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
        return KERN_INVALID_ARGUMENT;
    }

    vnode_object = vnode_pager_lookup(mem_obj);

    (void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
    return KERN_SUCCESS;
}
kern_return_t
vnode_pager_get_isSSD(
    memory_object_t mem_obj,
    boolean_t       *isSSD)
{
    vnode_pager_t   vnode_object;

    if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
        return KERN_INVALID_ARGUMENT;
    }

    vnode_object = vnode_pager_lookup(mem_obj);

    *isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
    return KERN_SUCCESS;
}
kern_return_t
vnode_pager_get_object_size(
    memory_object_t         mem_obj,
    memory_object_offset_t  *length)
{
    vnode_pager_t   vnode_object;

    if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
        *length = 0;
        return KERN_INVALID_ARGUMENT;
    }

    vnode_object = vnode_pager_lookup(mem_obj);

    *length = vnode_pager_get_filesize(vnode_object->vnode_handle);
    return KERN_SUCCESS;
}
kern_return_t
vnode_pager_get_object_name(
    memory_object_t mem_obj,
    char            *pathname,
    vm_size_t       pathname_len,
    char            *filename,
    vm_size_t       filename_len,
    boolean_t       *truncated_path_p)
{
    vnode_pager_t   vnode_object;

    if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
        return KERN_INVALID_ARGUMENT;
    }

    vnode_object = vnode_pager_lookup(mem_obj);

    return vnode_pager_get_name(vnode_object->vnode_handle,
               pathname,
               pathname_len,
               filename,
               filename_len,
               truncated_path_p);
}
kern_return_t
vnode_pager_get_object_mtime(
    memory_object_t mem_obj,
    struct timespec *mtime,
    struct timespec *cs_mtime)
{
    vnode_pager_t   vnode_object;

    if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
        return KERN_INVALID_ARGUMENT;
    }

    vnode_object = vnode_pager_lookup(mem_obj);

    return vnode_pager_get_mtime(vnode_object->vnode_handle,
               mtime,
               cs_mtime);
}
#if CHECK_CS_VALIDATION_BITMAP
kern_return_t
vnode_pager_cs_check_validation_bitmap(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    int                     optype)
{
    vnode_pager_t   vnode_object;

    if (mem_obj == MEMORY_OBJECT_NULL ||
        mem_obj->mo_pager_ops != &vnode_pager_ops) {
        return KERN_INVALID_ARGUMENT;
    }

    vnode_object = vnode_pager_lookup(mem_obj);
    return ubc_cs_check_validation_bitmap(vnode_object->vnode_handle, offset, optype);
}
#endif /* CHECK_CS_VALIDATION_BITMAP */
kern_return_t
vnode_pager_data_request(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    __unused memory_object_cluster_size_t   length,
    __unused vm_prot_t      desired_access,
    memory_object_fault_info_t      fault_info)
{
    vnode_pager_t           vnode_object;
    memory_object_offset_t  base_offset;
    vm_size_t               size;
    uint32_t                io_streaming = 0;

    assertf(page_aligned(offset), "offset 0x%llx\n", offset);

    vnode_object = vnode_pager_lookup(mem_obj);

    size = MAX_UPL_TRANSFER_BYTES;
    base_offset = offset;

    if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
        &base_offset, &size, &io_streaming,
        fault_info) != KERN_SUCCESS) {
        size = PAGE_SIZE;
    }

    assert(offset >= base_offset &&
        offset < base_offset + size);

    return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}
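/*
 * Worked example (editorial note, 4K page size assumed): a fault at
 * offset 0x3000 starts with base_offset = 0x3000 and size =
 * MAX_UPL_TRANSFER_BYTES; memory_object_cluster_size() may widen the
 * request, e.g. to base_offset = 0x0, size = 0x10000, and the assert
 * simply checks that the faulting offset still lies inside
 * [base_offset, base_offset + size).
 */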
void
vnode_pager_reference(
    memory_object_t     mem_obj)
{
    vnode_pager_t   vnode_object;

    vnode_object = vnode_pager_lookup(mem_obj);
    os_ref_retain(&vnode_object->ref_count);
}
void
vnode_pager_deallocate(
    memory_object_t     mem_obj)
{
    vnode_pager_t   vnode_object;

    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

    vnode_object = vnode_pager_lookup(mem_obj);

    if (os_ref_release(&vnode_object->ref_count) == 0) {
        if (vnode_object->vnode_handle != NULL) {
            vnode_pager_vrele(vnode_object->vnode_handle);
        }
        zfree(vnode_pager_zone, vnode_object);
    }
}
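/*
 * Editorial note: os_ref_release() returns the new reference count, so a
 * result of 0 above means this caller just dropped the final reference
 * and is solely responsible for releasing the vnode and freeing the
 * pager structure.
 */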
kern_return_t
vnode_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}
kern_return_t
vnode_pager_synchronize(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t   length,
    __unused vm_sync_t              sync_flags)
{
    panic("vnode_pager_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}
kern_return_t
vnode_pager_map(
    memory_object_t mem_obj,
    vm_prot_t       prot)
{
    vnode_pager_t   vnode_object;
    int             ret;
    kern_return_t   kr;

    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

    vnode_object = vnode_pager_lookup(mem_obj);

    ret = ubc_map(vnode_object->vnode_handle, prot);

    if (ret != 0) {
        kr = KERN_FAILURE;
    } else {
        kr = KERN_SUCCESS;
    }

    return kr;
}
kern_return_t
vnode_pager_last_unmap(
    memory_object_t mem_obj)
{
    vnode_pager_t   vnode_object;

    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));

    vnode_object = vnode_pager_lookup(mem_obj);

    ubc_unmap(vnode_object->vnode_handle);
    return KERN_SUCCESS;
}
void
vnode_pager_cluster_write(
    vnode_pager_t       vnode_object,
    vm_object_offset_t  offset,
    vm_size_t           cnt,
    vm_object_offset_t  *resid_offset,
    int                 *io_error,
    int                 upl_flags)
{
    vm_size_t   size;
    int         errno;

    if (upl_flags & UPL_MSYNC) {
        upl_flags |= UPL_VNODE_PAGER;

        if ((upl_flags & UPL_IOSYNC) && io_error) {
            upl_flags |= UPL_KEEPCACHED;
        }

        while (cnt) {
            size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

            assert((upl_size_t) size == size);
            vnode_pageout(vnode_object->vnode_handle,
                NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

            if ((upl_flags & UPL_KEEPCACHED)) {
                if ((*io_error = errno)) {
                    break;
                }
            }
            cnt -= size;
            offset += size;
        }
        if (resid_offset) {
            *resid_offset = offset;
        }
    } else {
        vm_object_offset_t      vnode_size;
        vm_object_offset_t      base_offset;

        /*
         * this is the pageout path
         */
        vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

        if (vnode_size > (offset + PAGE_SIZE)) {
            /*
             * preset the maximum size of the cluster
             * and put us on a nice cluster boundary...
             * and then clip the size to insure we
             * don't request past the end of the underlying file
             */
            size = MAX_UPL_TRANSFER_BYTES;
            base_offset = offset & ~((signed)(size - 1));

            if ((base_offset + size) > vnode_size) {
                size = round_page(((vm_size_t)(vnode_size - base_offset)));
            }
        } else {
            /*
             * we've been requested to page out a page beyond the current
             * end of the 'file'... don't try to cluster in this case...
             * we still need to send this page through because it might
             * be marked precious and the underlying filesystem may need
             * to do something with it (besides page it out)...
             */
            base_offset = offset;
            size = PAGE_SIZE;
        }
        assert((upl_size_t) size == size);
        vnode_pageout(vnode_object->vnode_handle,
            NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
            (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
    }
}
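/*
 * Worked example (editorial note): with MAX_UPL_TRANSFER_BYTES at, say,
 * 256KB (0x40000), "offset & ~((signed)(size - 1))" rounds a pageout at
 * offset 0x4B000 down to base_offset 0x40000, so the cluster covers
 * [0x40000, 0x80000) and the dirty page sits at upl offset
 * offset - base_offset = 0xB000.
 */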
kern_return_t
vnode_pager_cluster_read(
    vnode_pager_t       vnode_object,
    vm_object_offset_t  base_offset,
    vm_object_offset_t  offset,
    uint32_t            io_streaming,
    vm_size_t           cnt)
{
    int     local_error = 0;
    int     kret;
    int     flags = 0;

    assert(!(cnt & PAGE_MASK));

    if (io_streaming) {
        flags |= UPL_IOSTREAMING;
    }

    assert((upl_size_t) cnt == cnt);
    kret = vnode_pagein(vnode_object->vnode_handle,
        (upl_t) NULL,
        (upl_offset_t) (offset - base_offset),
        base_offset,
        (upl_size_t) cnt,
        flags,
        &local_error);
/*
 *	if(kret == PAGER_ABSENT) {
 *	Need to work out the defs here, 1 corresponds to PAGER_ABSENT
 *	defined in bsd/vm/vm_pager.h  However, we should not be including
 *	that file here it is a layering violation.
 */
    if (kret == 1) {
        int             uplflags;
        upl_t           upl = NULL;
        unsigned int    count = 0;
        kern_return_t   kr;

        uplflags = (UPL_NO_SYNC |
            UPL_CLEAN_IN_PLACE |
            UPL_SET_INTERNAL);
        count = 0;
        assert((upl_size_t) cnt == cnt);
        kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
            base_offset, (upl_size_t) cnt,
            &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
        if (kr == KERN_SUCCESS) {
            upl_abort(upl, 0);
            upl_deallocate(upl);
        } else {
            /*
             * We couldn't gather the page list, probably
             * because the memory object doesn't have a link
             * to a VM object anymore (forced unmount, for
             * example).  Just return an error to the vm_fault()
             * path and let it handle it.
             */
        }

        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
vnode_pager_t
vnode_object_create(
    struct vnode *vp)
{
    vnode_pager_t   vnode_object;

    vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
    if (vnode_object == VNODE_PAGER_NULL) {
        return VNODE_PAGER_NULL;
    }

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter.  We need to make sure that
     * vm_map does not see this object as a named entry port.  So,
     * we reserve the first word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
    vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    os_ref_init(&vnode_object->ref_count, NULL);
    vnode_object->vnode_handle = vp;

    return vnode_object;
}
vnode_pager_t
vnode_pager_lookup(
    memory_object_t  name)
{
    vnode_pager_t   vnode_object;

    vnode_object = (vnode_pager_t)name;
    assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
    return vnode_object;
}
struct vnode *
vnode_pager_lookup_vnode(
    memory_object_t  name)
{
    vnode_pager_t   vnode_object;

    vnode_object = (vnode_pager_t)name;
    if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) {
        return vnode_object->vnode_handle;
    } else {
        return NULL;
    }
}
/*********************** proc_info implementation *************/

#include <sys/bsdtask_info.h>
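/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * proc_info syscall layer is the expected consumer of the fill_*
 * routines below, roughly:
 *
 *	struct proc_regioninfo_internal pinfo;
 *	uintptr_t vnodeaddr = 0;
 *	uint32_t vid = 0;
 *	if (fill_procregioninfo(task, address, &pinfo, &vnodeaddr, &vid)) {
 *		// copy pinfo out to the proc_pidinfo() caller
 *	}
 */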
static int fill_vnodeinfoforaddr(vm_map_entry_t entry, uintptr_t *vnodeaddr, uint32_t *vid);
int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
    vm_map_t map;
    vm_map_offset_t address = (vm_map_offset_t)arg;
    vm_map_entry_t  tmp_entry;
    vm_map_entry_t  entry;
    vm_map_offset_t start;
    vm_region_extended_info_data_t extended;
    vm_region_top_info_data_t top;
    boolean_t do_region_footprint;
    int effective_page_shift, effective_page_size;

    task_lock(task);
    map = task->map;
    if (map == VM_MAP_NULL) {
        task_unlock(task);
        return 0;
    }

    effective_page_shift = vm_self_region_page_shift(map);
    effective_page_size = (1 << effective_page_shift);

    vm_map_reference(map);
    task_unlock(task);

    do_region_footprint = task_self_region_footprint();

    vm_map_lock_read(map);

    start = address;

    if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
        if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
            if (do_region_footprint &&
                address == tmp_entry->vme_end) {
                ledger_amount_t ledger_resident;
                ledger_amount_t ledger_compressed;

                /*
                 * This request is right after the last valid
                 * memory region; instead of reporting the
                 * end of the address space, report a fake
                 * memory region to account for non-volatile
                 * purgeable and/or ledger-tagged memory
                 * owned by this task.
                 */
                task_ledgers_footprint(task->ledger,
                    &ledger_resident,
                    &ledger_compressed);
                if (ledger_resident + ledger_compressed == 0) {
                    /* nothing to report */
                    vm_map_unlock_read(map);
                    vm_map_deallocate(map);
                    return 0;
                }

                /* provide fake region for purgeable */
                pinfo->pri_offset = address;
                pinfo->pri_protection = VM_PROT_DEFAULT;
                pinfo->pri_max_protection = VM_PROT_DEFAULT;
                pinfo->pri_inheritance = VM_INHERIT_NONE;
                pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
                pinfo->pri_user_wired_count = 0;
                pinfo->pri_user_tag = -1;
                pinfo->pri_pages_resident =
                    (uint32_t) (ledger_resident / effective_page_size);
                pinfo->pri_pages_shared_now_private = 0;
                pinfo->pri_pages_swapped_out =
                    (uint32_t) (ledger_compressed / effective_page_size);
                pinfo->pri_pages_dirtied =
                    (uint32_t) (ledger_resident / effective_page_size);
                pinfo->pri_ref_count = 1;
                pinfo->pri_shadow_depth = 0;
                pinfo->pri_share_mode = SM_PRIVATE;
                pinfo->pri_private_pages_resident =
                    (uint32_t) (ledger_resident / effective_page_size);
                pinfo->pri_shared_pages_resident = 0;
                pinfo->pri_obj_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
                pinfo->pri_address = address;
                pinfo->pri_size =
                    (uint64_t) (ledger_resident + ledger_compressed);
                pinfo->pri_depth = 0;

                vm_map_unlock_read(map);
                vm_map_deallocate(map);
                return 1;
            }
            vm_map_unlock_read(map);
            vm_map_deallocate(map);
            return 0;
        }
    } else {
        entry = tmp_entry;
    }

    start = entry->vme_start;

    pinfo->pri_offset = VME_OFFSET(entry);
    pinfo->pri_protection = entry->protection;
    pinfo->pri_max_protection = entry->max_protection;
    pinfo->pri_inheritance = entry->inheritance;
    pinfo->pri_behavior = entry->behavior;
    pinfo->pri_user_wired_count = entry->user_wired_count;
    pinfo->pri_user_tag = VME_ALIAS(entry);

    if (entry->is_sub_map) {
        pinfo->pri_flags |= PROC_REGION_SUBMAP;
    } else {
        if (entry->is_shared) {
            pinfo->pri_flags |= PROC_REGION_SHARED;
        }
    }

    extended.protection = entry->protection;
    extended.user_tag = VME_ALIAS(entry);
    extended.pages_resident = 0;
    extended.pages_swapped_out = 0;
    extended.pages_shared_now_private = 0;
    extended.pages_dirtied = 0;
    extended.external_pager = 0;
    extended.shadow_depth = 0;

    vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);

    if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED) {
        extended.share_mode = SM_PRIVATE;
    }

    top.private_pages_resident = 0;
    top.shared_pages_resident = 0;
    vm_map_region_top_walk(entry, &top);

    pinfo->pri_pages_resident = extended.pages_resident;
    pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
    pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
    pinfo->pri_pages_dirtied = extended.pages_dirtied;
    pinfo->pri_ref_count = extended.ref_count;
    pinfo->pri_shadow_depth = extended.shadow_depth;
    pinfo->pri_share_mode = extended.share_mode;

    pinfo->pri_private_pages_resident = top.private_pages_resident;
    pinfo->pri_shared_pages_resident = top.shared_pages_resident;
    pinfo->pri_obj_id = top.obj_id;

    pinfo->pri_address = (uint64_t)start;
    pinfo->pri_size = (uint64_t)(entry->vme_end - start);
    pinfo->pri_depth = 0;

    if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
        *vnodeaddr = (uintptr_t)0;

        if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
            vm_map_unlock_read(map);
            vm_map_deallocate(map);
            return 1;
        }
    }

    vm_map_unlock_read(map);
    vm_map_deallocate(map);
    return 1;
}
int
fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
    vm_map_t map;
    vm_map_offset_t address = (vm_map_offset_t)arg;
    vm_map_entry_t  tmp_entry;
    vm_map_entry_t  entry;

    task_lock(task);
    map = task->map;
    if (map == VM_MAP_NULL) {
        task_unlock(task);
        return 0;
    }
    vm_map_reference(map);
    task_unlock(task);

    vm_map_lock_read(map);

    if (!vm_map_lookup_entry(map, address, &tmp_entry)) {
        if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
            vm_map_unlock_read(map);
            vm_map_deallocate(map);
            return 0;
        }
    } else {
        entry = tmp_entry;
    }

    while (entry != vm_map_to_entry(map)) {
        *vnodeaddr = 0;
        *vid = 0;

        if (entry->is_sub_map == 0) {
            if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
                pinfo->pri_offset = VME_OFFSET(entry);
                pinfo->pri_protection = entry->protection;
                pinfo->pri_max_protection = entry->max_protection;
                pinfo->pri_inheritance = entry->inheritance;
                pinfo->pri_behavior = entry->behavior;
                pinfo->pri_user_wired_count = entry->user_wired_count;
                pinfo->pri_user_tag = VME_ALIAS(entry);

                if (entry->is_shared) {
                    pinfo->pri_flags |= PROC_REGION_SHARED;
                }

                pinfo->pri_pages_resident = 0;
                pinfo->pri_pages_shared_now_private = 0;
                pinfo->pri_pages_swapped_out = 0;
                pinfo->pri_pages_dirtied = 0;
                pinfo->pri_ref_count = 0;
                pinfo->pri_shadow_depth = 0;
                pinfo->pri_share_mode = 0;

                pinfo->pri_private_pages_resident = 0;
                pinfo->pri_shared_pages_resident = 0;
                pinfo->pri_obj_id = 0;

                pinfo->pri_address = (uint64_t)entry->vme_start;
                pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
                pinfo->pri_depth = 0;

                vm_map_unlock_read(map);
                vm_map_deallocate(map);
                return 1;
            }
        }

        /* Keep searching for a vnode-backed mapping */
        entry = entry->vme_next;
    }

    vm_map_unlock_read(map);
    vm_map_deallocate(map);
    return 0;
}
int
find_region_details(task_t task, vm_map_offset_t offset,
    uintptr_t *vnodeaddr, uint32_t *vid,
    uint64_t *start, uint64_t *len)
{
    vm_map_t map;
    vm_map_entry_t  tmp_entry, entry;
    int rc = 0;

    task_lock(task);
    map = task->map;
    if (map == VM_MAP_NULL) {
        task_unlock(task);
        return 0;
    }
    vm_map_reference(map);
    task_unlock(task);

    vm_map_lock_read(map);
    if (!vm_map_lookup_entry(map, offset, &tmp_entry)) {
        if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
            goto ret;
        }
    } else {
        entry = tmp_entry;
    }

    while (entry != vm_map_to_entry(map)) {
        *vnodeaddr = 0;
        *vid = 0;
        *start = 0;
        *len = 0;

        if (entry->is_sub_map == 0) {
            if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
                *start = entry->vme_start;
                *len = entry->vme_end - entry->vme_start;
                rc = 1;
                goto ret;
            }
        }

        entry = entry->vme_next;
    }

ret:
    vm_map_unlock_read(map);
    vm_map_deallocate(map);
    return rc;
}
static int
fill_vnodeinfoforaddr(
    vm_map_entry_t  entry,
    uintptr_t       *vnodeaddr,
    uint32_t        *vid)
{
    vm_object_t     top_object, object;
    memory_object_t memory_object;
    memory_object_pager_ops_t pager_ops;
    kern_return_t   kr;
    int             shadow_depth;

    if (entry->is_sub_map) {
        return 0;
    } else {
        /*
         * The last object in the shadow chain has the
         * relevant pager information.
         */
        top_object = VME_OBJECT(entry);
        if (top_object == VM_OBJECT_NULL) {
            object = VM_OBJECT_NULL;
            shadow_depth = 0;
        } else {
            vm_object_lock(top_object);
            /* hand-over-hand locking down the shadow chain */
            for (object = top_object, shadow_depth = 0;
                object->shadow != VM_OBJECT_NULL;
                object = object->shadow, shadow_depth++) {
                vm_object_lock(object->shadow);
                vm_object_unlock(object);
            }
        }
    }

    if (object == VM_OBJECT_NULL) {
        return 0;
    } else if (object->internal) {
        vm_object_unlock(object);
        return 0;
    } else if (!object->pager_ready ||
        object->terminating ||
        !object->alive) {
        vm_object_unlock(object);
        return 0;
    } else {
        memory_object = object->pager;
        pager_ops = memory_object->mo_pager_ops;
        if (pager_ops == &vnode_pager_ops) {
            kr = vnode_pager_get_object_vnode(
                memory_object,
                vnodeaddr, vid);
            if (kr != KERN_SUCCESS) {
                vm_object_unlock(object);
                return 0;
            }
        } else {
            vm_object_unlock(object);
            return 0;
        }
    }

    vm_object_unlock(object);
    return 1;
}
kern_return_t
vnode_pager_get_object_vnode(
    memory_object_t mem_obj,
    uintptr_t       *vnodeaddr,
    uint32_t        *vid)
{
    vnode_pager_t   vnode_object;

    vnode_object = vnode_pager_lookup(mem_obj);
    if (vnode_object->vnode_handle) {
        *vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
        *vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

        return KERN_SUCCESS;
    }

    return KERN_FAILURE;
}
#if CONFIG_IOSCHED
kern_return_t
vnode_pager_get_object_devvp(
    memory_object_t mem_obj,
    uintptr_t       *devvp)
{
    struct vnode    *vp;
    uint32_t        vid;

    if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS) {
        return KERN_FAILURE;
    }
    *devvp = (uintptr_t)vnode_mountdevvp(vp);
    if (*devvp) {
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}
#endif /* CONFIG_IOSCHED */
/*
 * Find the underlying vnode object for the given vm_map_entry.  If found, return with the
 * object locked, otherwise return NULL with nothing locked.
 */
vm_object_t
find_vnode_object(
    vm_map_entry_t  entry)
{
    vm_object_t                 top_object, object;
    memory_object_t             memory_object;
    memory_object_pager_ops_t   pager_ops;

    if (!entry->is_sub_map) {
        /*
         * The last object in the shadow chain has the
         * relevant pager information.
         */
        top_object = VME_OBJECT(entry);

        if (top_object) {
            vm_object_lock(top_object);

            for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
                vm_object_lock(object->shadow);
                vm_object_unlock(object);
            }

            if (object && !object->internal && object->pager_ready && !object->terminating &&
                object->alive) {
                memory_object = object->pager;
                pager_ops = memory_object->mo_pager_ops;

                /*
                 * If this object points to the vnode_pager_ops, then we found what we're
                 * looking for.  Otherwise, this vm_map_entry doesn't have an underlying
                 * vnode and so we fall through to the bottom and return NULL.
                 */
                if (pager_ops == &vnode_pager_ops) {
                    return object;  /* we return with the object locked */
                }
            }

            vm_object_unlock(object);
        }
    }

    return VM_OBJECT_NULL;
}
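/*
 * Editorial note: the "return with the object locked" contract above means
 * a caller is expected to do its work and then drop the lock itself,
 * along these lines (sketch, not part of this file):
 *
 *	vm_object_t obj = find_vnode_object(entry);
 *	if (obj != VM_OBJECT_NULL) {
 *		... inspect obj->pager ...
 *		vm_object_unlock(obj);
 *	}
 */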