/* apple/xnu (xnu-6153.41.3): osfmk/vm/bsd_vm.c */
/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/assert.h>
#include <kern/host.h>
#include <kern/ledger.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>


/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);

/*
 * Return the number of entries in the given vm_map.
 */
int
get_map_nentries(
	vm_map_t map)
{
	return map->hdr.nentries;
}

mach_vm_offset_t
mach_get_vm_start(vm_map_t map)
{
	return vm_map_first_entry(map)->vme_start;
}

mach_vm_offset_t
mach_get_vm_end(vm_map_t map)
{
	return vm_map_last_entry(map)->vme_end;
}

/*
 * BSD VNODE PAGER
 */

const struct memory_object_pager_ops vnode_pager_ops = {
	.memory_object_reference = vnode_pager_reference,
	.memory_object_deallocate = vnode_pager_deallocate,
	.memory_object_init = vnode_pager_init,
	.memory_object_terminate = vnode_pager_terminate,
	.memory_object_data_request = vnode_pager_data_request,
	.memory_object_data_return = vnode_pager_data_return,
	.memory_object_data_initialize = vnode_pager_data_initialize,
	.memory_object_data_unlock = vnode_pager_data_unlock,
	.memory_object_synchronize = vnode_pager_synchronize,
	.memory_object_map = vnode_pager_map,
	.memory_object_last_unmap = vnode_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_pager_name = "vnode pager"
};

typedef struct vnode_pager {
	/* mandatory generic header */
	struct memory_object vn_pgr_hdr;

	/* pager-specific */
	struct os_refcnt	ref_count;
	struct vnode		*vnode_handle;	/* vnode handle */
} *vnode_pager_t;
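
/*
 * Lifecycle: vnode_object_create() allocates a vnode_pager from
 * vnode_pager_zone with ref_count initialized to 1; vnode_pager_reference()
 * and vnode_pager_deallocate() retain and release it, and the final release
 * vreles the vnode handle and returns the structure to the zone.
 */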


kern_return_t
vnode_pager_cluster_read(	/* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_object_offset_t,
	uint32_t,
	vm_size_t);

void
vnode_pager_cluster_write(	/* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t,
	vm_object_offset_t *,
	int *,
	int);


vnode_pager_t
vnode_object_create(	/* forward */
	struct vnode *);

vnode_pager_t
vnode_pager_lookup(	/* forward */
	memory_object_t);

struct vnode *
vnode_pager_lookup_vnode(	/* forward */
	memory_object_t);

zone_t	vnode_pager_zone;


#define VNODE_PAGER_NULL	((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT	1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define MAX_VNODE	10000


#if DEBUG
int	pagerdebug = 0;

#define PAGER_ALL	0xffffffff
#define PAGER_INIT	0x00000001
#define PAGER_PAGEIN	0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL) == LEVEL) {printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
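
/*
 * Usage sketch: on DEBUG builds, set pagerdebug to a mask such as PAGER_ALL
 * to enable the trace printfs below; the extra parentheses carry the whole
 * printf argument list, e.g.
 *
 *	PAGER_DEBUG(PAGER_INIT, ("vnode_pager_init: %p\n", mem_obj));
 */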

extern int proc_resetpcontrol(int);


extern int uiomove64(addr64_t, int, void *);
#define MAX_RUN	32

int
memory_object_control_uiomove(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	void		*uio,
	int		start_offset,
	int		io_requested,
	int		mark_dirty,
	int		take_reference)
{
	vm_object_t	object;
	vm_page_t	dst_page;
	int		xsize;
	int		retval = 0;
	int		cur_run;
	int		cur_needed;
	int		i;
	int		orig_offset;
	vm_page_t	page_run[MAX_RUN];
	int		dirty_count;	/* keeps track of number of pages dirtied as part of this uiomove */

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return 0;
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so fall off
		 * this optimized path and fall back to the regular
		 * path.
		 */
		vm_object_unlock(object);
		return 0;
	}
	orig_offset = start_offset;

	dirty_count = 0;
	while (io_requested && retval == 0) {
		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN) {
			cur_needed = MAX_RUN;
		}

		for (cur_run = 0; cur_run < cur_needed;) {
			if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
				break;
			}

			if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
				/*
				 * someone else is playing with the page... if we've
				 * already collected pages into this run, go ahead
				 * and process them now since we can't block on this
				 * page while holding other pages in the BUSY state;
				 * otherwise, wait for the page to come free
				 */
				if (cur_run) {
					break;
				}
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);
				continue;
			}
			if (dst_page->vmp_laundry) {
				vm_pageout_steal_laundry(dst_page, FALSE);
			}

			if (mark_dirty) {
				if (dst_page->vmp_dirty == FALSE) {
					dirty_count++;
				}
				SET_PAGE_DIRTY(dst_page, FALSE);
				if (dst_page->vmp_cs_validated &&
				    !dst_page->vmp_cs_tainted) {
					/*
					 * CODE SIGNING:
					 * We're modifying a code-signed
					 * page: force revalidate
					 */
					dst_page->vmp_cs_validated = FALSE;

					VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);

					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
				}
			}
			dst_page->vmp_busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0) {
			/*
			 * we hit a 'hole' in the cache or
			 * a page we don't want to try to handle,
			 * so bail at this point
			 * we'll unlock the object below
			 */
			break;
		}
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested) {
				xsize = io_requested;
			}

			if ((retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio))) {
				break;
			}

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		/*
		 * if we have more than 1 page to work on
		 * in the current run, or the original request
		 * started at offset 0 of the page, or we're
		 * processing multiple batches, we will move
		 * the pages to the tail of the inactive queue
		 * to implement an LRU for read/write accesses
		 *
		 * the check for orig_offset == 0 is there to
		 * mitigate the cost of small (< page_size) requests
		 * to the same page (this way we only move it once)
		 */
		if (take_reference && (cur_run > 1 || orig_offset == 0)) {
			vm_page_lockspin_queues();

			for (i = 0; i < cur_run; i++) {
				vm_page_lru(page_run[i]);
			}

			vm_page_unlock_queues();
		}
		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			/*
			 * someone is explicitly referencing this page...
			 * update clustered and speculative state
			 */
			if (dst_page->vmp_clustered) {
				VM_PAGE_CONSUME_CLUSTERED(dst_page);
			}

			PAGE_WAKEUP_DONE(dst_page);
		}
		orig_offset = 0;
	}
	if (object->pager) {
		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
	}
	vm_object_unlock(object);
	return retval;
}
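
/*
 * In outline, memory_object_control_uiomove() moves data between a uio and
 * the resident pages of a file's VM object in batches: it collects up to
 * MAX_RUN consecutive resident pages (marking each busy), drops the object
 * lock, copies each page's contents with uiomove64() from its physical
 * address, then retakes the lock, optionally ages the pages onto the
 * inactive queue (vm_page_lru), and wakes any waiters before starting the
 * next batch. A hole in the cache ends the copy early; any uiomove64()
 * error becomes the return value.
 */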


/*
 * One-time initialization: create the zone that backs vnode_pager
 * structures and bootstrap the other pagers that live alongside the
 * vnode pager.
 */
void
vnode_pager_bootstrap(void)
{
	vm_size_t	size;

	size = (vm_size_t) sizeof(struct vnode_pager);
	vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE * size,
	    PAGE_SIZE, "vnode pager structures");
	zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE);


#if CONFIG_CODE_DECRYPTION
	apple_protect_pager_bootstrap();
#endif /* CONFIG_CODE_DECRYPTION */
	swapfile_pager_bootstrap();
#if __arm64__
	fourk_pager_bootstrap();
#endif /* __arm64__ */
	shared_region_pager_bootstrap();

	return;
}

/*
 * Create a memory object (pager) for the given vnode.
 */
memory_object_t
vnode_pager_setup(
	struct vnode	*vp,
	__unused memory_object_t	pager)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL) {
		panic("vnode_pager_setup: vnode_object_create() failed");
	}
	return (memory_object_t)vnode_object;
}

/*
 * Attach the memory object to its control port and set its paging
 * attributes.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
	vnode_pager_t	vnode_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;


	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	vnode_object->vn_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("vnode_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}
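
/*
 * The attributes above give the vnode pager delayed-copy semantics
 * (MEMORY_OBJECT_COPY_DELAY), a one-page cluster size, and a cacheable,
 * temporary object; the commented-out line shows a CLUSTER_SHIFT-based
 * cluster size that is not enabled here.
 */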

/*
 * Page out (return) dirty data by handing it to the cluster writer.
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	data_cnt,
	memory_object_offset_t		*resid_offset,
	int				*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	int				upl_flags)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
	__unused memory_object_t		mem_obj,
	__unused memory_object_offset_t		offset,
	__unused memory_object_cluster_size_t	data_cnt)
{
	panic("vnode_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

void
vnode_pager_dirtied(
	memory_object_t		mem_obj,
	vm_object_offset_t	s_offset,
	vm_object_offset_t	e_offset)
{
	vnode_pager_t	vnode_object;

	if (mem_obj && mem_obj->mo_pager_ops == &vnode_pager_ops) {
		vnode_object = vnode_pager_lookup(mem_obj);
		vnode_pager_was_dirtied(vnode_object->vnode_handle, s_offset, e_offset);
	}
}
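
/*
 * The accessors that follow share one pattern: validate that the memory
 * object really is a vnode pager by checking mo_pager_ops against
 * vnode_pager_ops, then forward the query to the vnode/ubc layer through
 * the pager's vnode_handle.
 */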

kern_return_t
vnode_pager_get_isinuse(
	memory_object_t	mem_obj,
	uint32_t	*isinuse)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*isinuse = 1;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_throttle_io_limit(
	memory_object_t	mem_obj,
	uint32_t	*limit)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	(void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_isSSD(
	memory_object_t	mem_obj,
	boolean_t	*isSSD)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_size(
	memory_object_t		mem_obj,
	memory_object_offset_t	*length)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*length = 0;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_name(
	memory_object_t	mem_obj,
	char		*pathname,
	vm_size_t	pathname_len,
	char		*filename,
	vm_size_t	filename_len,
	boolean_t	*truncated_path_p)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_name(vnode_object->vnode_handle,
	           pathname,
	           pathname_len,
	           filename,
	           filename_len,
	           truncated_path_p);
}

kern_return_t
vnode_pager_get_object_mtime(
	memory_object_t	mem_obj,
	struct timespec	*mtime,
	struct timespec	*cs_mtime)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_mtime(vnode_object->vnode_handle,
	           mtime,
	           cs_mtime);
}

#if CHECK_CS_VALIDATION_BITMAP
kern_return_t
vnode_pager_cs_check_validation_bitmap(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	int			optype)
{
	vnode_pager_t	vnode_object;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);
	return ubc_cs_check_validation_bitmap(vnode_object->vnode_handle, offset, optype);
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

/*
 * Handle a page-in request: ask the VM layer for a good cluster around the
 * faulting offset, then read it in through the cluster reader.
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	__unused memory_object_cluster_size_t	length,
	__unused vm_prot_t		desired_access,
	memory_object_fault_info_t	fault_info)
{
	vnode_pager_t		vnode_object;
	memory_object_offset_t	base_offset;
	vm_size_t		size;
	uint32_t		io_streaming = 0;

	vnode_object = vnode_pager_lookup(mem_obj);

	size = MAX_UPL_TRANSFER_BYTES;
	base_offset = offset;

	if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
	    &base_offset, &size, &io_streaming,
	    fault_info) != KERN_SUCCESS) {
		size = PAGE_SIZE;
	}

	assert(offset >= base_offset &&
	    offset < base_offset + size);

	return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}

/*
 * Take an additional reference on the pager.
 */
void
vnode_pager_reference(
	memory_object_t	mem_obj)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	os_ref_retain(&vnode_object->ref_count);
}

/*
 * Release a reference; on the last release, drop the vnode and free the
 * pager structure.
 */
void
vnode_pager_deallocate(
	memory_object_t	mem_obj)
{
	vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (os_ref_release(&vnode_object->ref_count) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
}

/*
 * Termination is a no-op for the vnode pager.
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * memory_object_synchronize is no longer supported, so this should never
 * be called.
 */
kern_return_t
vnode_pager_synchronize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	length,
	__unused vm_sync_t		sync_flags)
{
	panic("vnode_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

/*
 * A mapping of this object is being established: notify the ubc layer
 * via ubc_map().
 */
kern_return_t
vnode_pager_map(
	memory_object_t	mem_obj,
	vm_prot_t	prot)
{
	vnode_pager_t	vnode_object;
	int		ret;
	kern_return_t	kr;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

	vnode_object = vnode_pager_lookup(mem_obj);

	ret = ubc_map(vnode_object->vnode_handle, prot);

	if (ret != 0) {
		kr = KERN_FAILURE;
	} else {
		kr = KERN_SUCCESS;
	}

	return kr;
}

kern_return_t
vnode_pager_last_unmap(
	memory_object_t	mem_obj)
{
	vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}



/*
 * Write data back to the file, either synchronously on behalf of an msync
 * (UPL_MSYNC) or as a clustered pageout.
 */
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt,
	vm_object_offset_t	*resid_offset,
	int			*io_error,
	int			upl_flags)
{
	vm_size_t	size;
	int		errno;

	if (upl_flags & UPL_MSYNC) {
		upl_flags |= UPL_VNODE_PAGER;

		if ((upl_flags & UPL_IOSYNC) && io_error) {
			upl_flags |= UPL_KEEPCACHED;
		}

		while (cnt) {
			size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

			assert((upl_size_t) size == size);
			vnode_pageout(vnode_object->vnode_handle,
			    NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

			if ((upl_flags & UPL_KEEPCACHED)) {
				if ((*io_error = errno)) {
					break;
				}
			}
			cnt -= size;
			offset += size;
		}
		if (resid_offset) {
			*resid_offset = offset;
		}
	} else {
		vm_object_offset_t	vnode_size;
		vm_object_offset_t	base_offset;

		/*
		 * this is the pageout path
		 */
		vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

		if (vnode_size > (offset + PAGE_SIZE)) {
			/*
			 * preset the maximum size of the cluster
			 * and put us on a nice cluster boundary...
			 * and then clip the size to ensure we
			 * don't request past the end of the underlying file
			 */
			size = MAX_UPL_TRANSFER_BYTES;
			base_offset = offset & ~((signed)(size - 1));

			if ((base_offset + size) > vnode_size) {
				size = round_page(((vm_size_t)(vnode_size - base_offset)));
			}
		} else {
			/*
			 * we've been requested to page out a page beyond the current
			 * end of the 'file'... don't try to cluster in this case...
			 * we still need to send this page through because it might
			 * be marked precious and the underlying filesystem may need
			 * to do something with it (besides page it out)...
			 */
			base_offset = offset;
			size = PAGE_SIZE;
		}
		assert((upl_size_t) size == size);
		vnode_pageout(vnode_object->vnode_handle,
		    NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
		    (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
	}
}
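
/*
 * A worked example of the pageout-path alignment above, assuming a
 * hypothetical MAX_UPL_TRANSFER_BYTES of 256KB (0x40000): for
 * offset = 0x41000, base_offset = 0x41000 & ~0x3ffff = 0x40000, so the
 * cluster covers [0x40000, 0x80000) and the dirty page sits at
 * upl_offset 0x1000 within it; if the file ends at 0x50000, size is
 * clipped to round_page(0x50000 - 0x40000) = 0x10000.
 */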


/*
 * Read a cluster of pages in from the file via vnode_pagein(); on a
 * pager-absent error, build and abort a UPL so the VM layer sees the
 * failure cleanly.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	base_offset,
	vm_object_offset_t	offset,
	uint32_t		io_streaming,
	vm_size_t		cnt)
{
	int		local_error = 0;
	int		kret;
	int		flags = 0;

	assert(!(cnt & PAGE_MASK));

	if (io_streaming) {
		flags |= UPL_IOSTREAMING;
	}

	assert((upl_size_t) cnt == cnt);
	kret = vnode_pagein(vnode_object->vnode_handle,
	    (upl_t) NULL,
	    (upl_offset_t) (offset - base_offset),
	    base_offset,
	    (upl_size_t) cnt,
	    flags,
	    &local_error);
/*
 * if(kret == PAGER_ABSENT) {
 *	Need to work out the defs here, 1 corresponds to PAGER_ABSENT
 *	defined in bsd/vm/vm_pager.h However, we should not be including
 *	that file here it is a layering violation.
 */
	if (kret == 1) {
		int		uplflags;
		upl_t		upl = NULL;
		unsigned int	count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC |
		    UPL_CLEAN_IN_PLACE |
		    UPL_SET_INTERNAL);
		count = 0;
		assert((upl_size_t) cnt == cnt);
		kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
		    base_offset, (upl_size_t) cnt,
		    &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
		if (kr == KERN_SUCCESS) {
			upl_abort(upl, 0);
			upl_deallocate(upl);
		} else {
			/*
			 * We couldn't gather the page list, probably
			 * because the memory object doesn't have a link
			 * to a VM object anymore (forced unmount, for
			 * example). Just return an error to the vm_fault()
			 * path and let it handle it.
			 */
		}

		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

/*
 * Allocate and initialize a vnode_pager for the given vnode.
 */
vnode_pager_t
vnode_object_create(
	struct vnode *vp)
{
	vnode_pager_t	vnode_object;

	vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
	if (vnode_object == VNODE_PAGER_NULL) {
		return VNODE_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
	vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	os_ref_init(&vnode_object->ref_count, NULL);
	vnode_object->vnode_handle = vp;

	return vnode_object;
}

/*
 * Convert a memory object back to its vnode_pager; asserts that the
 * object really is a vnode pager.
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t	name)
{
	vnode_pager_t	vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
	return vnode_object;
}


struct vnode *
vnode_pager_lookup_vnode(
	memory_object_t	name)
{
	vnode_pager_t	vnode_object;
	vnode_object = (vnode_pager_t)name;
	if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) {
		return vnode_object->vnode_handle;
	} else {
		return NULL;
	}
}

/*********************** proc_info implementation *************/

#include <sys/bsdtask_info.h>

static int fill_vnodeinfoforaddr(vm_map_entry_t entry, uintptr_t * vnodeaddr, uint32_t * vid);

int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t	map;
	vm_map_offset_t	address = (vm_map_offset_t)arg;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	vm_map_offset_t	start;
	vm_region_extended_info_data_t	extended;
	vm_region_top_info_data_t	top;
	boolean_t	do_region_footprint;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	do_region_footprint = task_self_region_footprint();

	vm_map_lock_read(map);

	start = address;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			if (do_region_footprint &&
			    address == tmp_entry->vme_end) {
				ledger_amount_t	ledger_resident;
				ledger_amount_t	ledger_compressed;

				/*
				 * This request is right after the last valid
				 * memory region; instead of reporting the
				 * end of the address space, report a fake
				 * memory region to account for non-volatile
				 * purgeable and/or ledger-tagged memory
				 * owned by this task.
				 */
				task_ledgers_footprint(task->ledger,
				    &ledger_resident,
				    &ledger_compressed);
				if (ledger_resident + ledger_compressed == 0) {
					/* nothing to report */
					vm_map_unlock_read(map);
					vm_map_deallocate(map);
					return 0;
				}

				/* provide fake region for purgeable */
				pinfo->pri_offset = address;
				pinfo->pri_protection = VM_PROT_DEFAULT;
				pinfo->pri_max_protection = VM_PROT_DEFAULT;
				pinfo->pri_inheritance = VM_INHERIT_NONE;
				pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
				pinfo->pri_user_wired_count = 0;
				pinfo->pri_user_tag = -1;
				pinfo->pri_pages_resident =
				    (uint32_t) (ledger_resident / PAGE_SIZE);
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out =
				    (uint32_t) (ledger_compressed / PAGE_SIZE);
				pinfo->pri_pages_dirtied =
				    (uint32_t) (ledger_resident / PAGE_SIZE);
				pinfo->pri_ref_count = 1;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = SM_PRIVATE;
				pinfo->pri_private_pages_resident =
				    (uint32_t) (ledger_resident / PAGE_SIZE);
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile);
				pinfo->pri_address = address;
				pinfo->pri_size =
				    (uint64_t) (ledger_resident + ledger_compressed);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = VME_OFFSET(entry);
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = VME_ALIAS(entry);

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared) {
			pinfo->pri_flags |= PROC_REGION_SHARED;
		}
	}


	extended.protection = entry->protection;
	extended.user_tag = VME_ALIAS(entry);
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);

	if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED) {
		extended.share_mode = SM_PRIVATE;
	}

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);


	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uintptr_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 1;
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 1;
}

int
fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t	map;
	vm_map_offset_t	address = (vm_map_offset_t)arg;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	if (!vm_map_lookup_entry(map, address, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
				pinfo->pri_offset = VME_OFFSET(entry);
				pinfo->pri_protection = entry->protection;
				pinfo->pri_max_protection = entry->max_protection;
				pinfo->pri_inheritance = entry->inheritance;
				pinfo->pri_behavior = entry->behavior;
				pinfo->pri_user_wired_count = entry->user_wired_count;
				pinfo->pri_user_tag = VME_ALIAS(entry);

				if (entry->is_shared) {
					pinfo->pri_flags |= PROC_REGION_SHARED;
				}

				pinfo->pri_pages_resident = 0;
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out = 0;
				pinfo->pri_pages_dirtied = 0;
				pinfo->pri_ref_count = 0;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = 0;

				pinfo->pri_private_pages_resident = 0;
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = 0;

				pinfo->pri_address = (uint64_t)entry->vme_start;
				pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
		}

		/* Keep searching for a vnode-backed mapping */
		entry = entry->vme_next;
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 0;
}

int
find_region_details(task_t task, vm_map_offset_t offset,
    uintptr_t *vnodeaddr, uint32_t *vid,
    uint64_t *start, uint64_t *len)
{
	vm_map_t	map;
	vm_map_entry_t	tmp_entry, entry;
	int		rc = 0;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, offset, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			rc = 0;
			goto ret;
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;
		*start = 0;
		*len = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
				*start = entry->vme_start;
				*len = entry->vme_end - entry->vme_start;
				rc = 1;
				goto ret;
			}
		}

		entry = entry->vme_next;
	}

ret:
	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return rc;
}

static int
fill_vnodeinfoforaddr(
	vm_map_entry_t	entry,
	uintptr_t	*vnodeaddr,
	uint32_t	*vid)
{
	vm_object_t	top_object, object;
	memory_object_t	memory_object;
	memory_object_pager_ops_t	pager_ops;
	kern_return_t	kr;
	int		shadow_depth;


	if (entry->is_sub_map) {
		return 0;
	} else {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */
		top_object = VME_OBJECT(entry);
		if (top_object == VM_OBJECT_NULL) {
			object = VM_OBJECT_NULL;
			shadow_depth = 0;
		} else {
			vm_object_lock(top_object);
			for (object = top_object, shadow_depth = 0;
			    object->shadow != VM_OBJECT_NULL;
			    object = object->shadow, shadow_depth++) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}
		}
	}

	if (object == VM_OBJECT_NULL) {
		return 0;
	} else if (object->internal) {
		vm_object_unlock(object);
		return 0;
	} else if (!object->pager_ready ||
	    object->terminating ||
	    !object->alive) {
		vm_object_unlock(object);
		return 0;
	} else {
		memory_object = object->pager;
		pager_ops = memory_object->mo_pager_ops;
		if (pager_ops == &vnode_pager_ops) {
			kr = vnode_pager_get_object_vnode(
				memory_object,
				vnodeaddr, vid);
			if (kr != KERN_SUCCESS) {
				vm_object_unlock(object);
				return 0;
			}
		} else {
			vm_object_unlock(object);
			return 0;
		}
	}
	vm_object_unlock(object);
	return 1;
}
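
/*
 * The shadow-chain walk above uses hand-over-hand locking: each shadow
 * object is locked before the lock on its parent is dropped, so the chain
 * cannot be torn down underneath the traversal, and exactly one object
 * lock (the final object's) is held when the loop exits.
 */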

kern_return_t
vnode_pager_get_object_vnode(
	memory_object_t	mem_obj,
	uintptr_t	*vnodeaddr,
	uint32_t	*vid)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	if (vnode_object->vnode_handle) {
		*vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
		*vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

		return KERN_SUCCESS;
	}

	return KERN_FAILURE;
}

#if CONFIG_IOSCHED
kern_return_t
vnode_pager_get_object_devvp(
	memory_object_t	mem_obj,
	uintptr_t	*devvp)
{
	struct vnode	*vp;
	uint32_t	vid;

	if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS) {
		return KERN_FAILURE;
	}
	*devvp = (uintptr_t)vnode_mountdevvp(vp);
	if (*devvp) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
#endif

/*
 * Find the underlying vnode object for the given vm_map_entry. If found,
 * return with the object locked; otherwise return VM_OBJECT_NULL with
 * nothing locked.
 */
vm_object_t
find_vnode_object(
	vm_map_entry_t	entry
	)
{
	vm_object_t	top_object, object;
	memory_object_t	memory_object;
	memory_object_pager_ops_t	pager_ops;

	if (!entry->is_sub_map) {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */

		top_object = VME_OBJECT(entry);

		if (top_object) {
			vm_object_lock(top_object);

			for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}

			if (object && !object->internal && object->pager_ready && !object->terminating &&
			    object->alive) {
				memory_object = object->pager;
				pager_ops = memory_object->mo_pager_ops;

				/*
				 * If this object points to the vnode_pager_ops, then we found what we're
				 * looking for. Otherwise, this vm_map_entry doesn't have an underlying
				 * vnode and so we fall through to the bottom and return NULL.
				 */

				if (pager_ops == &vnode_pager_ops) {
					return object;	/* we return with the object locked */
				}
			}

			vm_object_unlock(object);
		}
	}

	return VM_OBJECT_NULL;
}
b0d623f7 1434}