/* [apple/xnu.git] osfmk/vm/bsd_vm.c (xnu-4570.71.2) */
/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/assert.h>
#include <kern/host.h>
#include <kern/ledger.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>


/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);

/*
 *
 */
int
get_map_nentries(
	vm_map_t map)
{
	return(map->hdr.nentries);
}

mach_vm_offset_t
mach_get_vm_start(vm_map_t map)
{
	return( vm_map_first_entry(map)->vme_start);
}

mach_vm_offset_t
mach_get_vm_end(vm_map_t map)
{
	return( vm_map_last_entry(map)->vme_end);
}

/*
 * BSD VNODE PAGER
 */

const struct memory_object_pager_ops vnode_pager_ops = {
	vnode_pager_reference,
	vnode_pager_deallocate,
	vnode_pager_init,
	vnode_pager_terminate,
	vnode_pager_data_request,
	vnode_pager_data_return,
	vnode_pager_data_initialize,
	vnode_pager_data_unlock,
	vnode_pager_synchronize,
	vnode_pager_map,
	vnode_pager_last_unmap,
	NULL, /* data_reclaim */
	"vnode pager"
};

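/*
 * A vnode pager wraps a BSD vnode behind the Mach memory_object interface:
 * the generic header identifies it to the VM layer, and vnode_handle points
 * at the vnode that actually backs the object.
 */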
typedef struct vnode_pager {
	/* mandatory generic header */
	struct memory_object	vn_pgr_hdr;

	/* pager-specific */
	unsigned int		ref_count;	/* reference count */
	struct vnode		*vnode_handle;	/* vnode handle */
} *vnode_pager_t;


kern_return_t
vnode_pager_cluster_read(	/* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_object_offset_t,
	uint32_t,
	vm_size_t);

void
vnode_pager_cluster_write(	/* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t,
	vm_object_offset_t *,
	int *,
	int);


vnode_pager_t
vnode_object_create(	/* forward */
	struct vnode *);

vnode_pager_t
vnode_pager_lookup(	/* forward */
	memory_object_t);

struct vnode *
vnode_pager_lookup_vnode(	/* forward */
	memory_object_t);

zone_t	vnode_pager_zone;


#define VNODE_PAGER_NULL	((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT	1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define MAX_VNODE	10000


#if DEBUG
int pagerdebug=0;

#define PAGER_ALL		0xffffffff
#define PAGER_INIT		0x00000001
#define PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

extern int proc_resetpcontrol(int);

#if DEVELOPMENT || DEBUG
extern unsigned long vm_cs_validated_resets;
#endif


extern int	uiomove64(addr64_t, int, void *);
#define	MAX_RUN	32

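/*
 * Copy data between the resident pages of a file-backed VM object and a
 * BSD uio.  Pages are gathered in runs of up to MAX_RUN, marked busy while
 * uiomove64() copies to or from their physical addresses, optionally dirtied
 * (forcing code-signing revalidation of modified signed pages), and then
 * aged to the tail of the inactive queue to implement an LRU for read/write
 * accesses.
 */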
int
memory_object_control_uiomove(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	void			*uio,
	int			start_offset,
	int			io_requested,
	int			mark_dirty,
	int			take_reference)
{
	vm_object_t		object;
	vm_page_t		dst_page;
	int			xsize;
	int			retval = 0;
	int			cur_run;
	int			cur_needed;
	int			i;
	int			orig_offset;
	vm_page_t		page_run[MAX_RUN];
	int			dirty_count;	/* keeps track of number of pages dirtied as part of this uiomove */

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return (0);
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so fall off
		 * this optimized path and fall back to the regular
		 * path.
		 */
		vm_object_unlock(object);
		return 0;
	}
	orig_offset = start_offset;

	dirty_count = 0;
	while (io_requested && retval == 0) {

		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN)
			cur_needed = MAX_RUN;

		for (cur_run = 0; cur_run < cur_needed; ) {

			if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
				break;


			if (dst_page->busy || dst_page->cleaning) {
				/*
				 * someone else is playing with the page... if we've
				 * already collected pages into this run, go ahead
				 * and process now, we can't block on this
				 * page while holding other pages in the BUSY state
				 * otherwise we will wait
				 */
				if (cur_run)
					break;
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);
				continue;
			}
			if (dst_page->laundry)
				vm_pageout_steal_laundry(dst_page, FALSE);

			if (mark_dirty) {
				if (dst_page->dirty == FALSE)
					dirty_count++;
				SET_PAGE_DIRTY(dst_page, FALSE);
				if (dst_page->cs_validated &&
				    !dst_page->cs_tainted) {
					/*
					 * CODE SIGNING:
					 * We're modifying a code-signed
					 * page: force revalidate
					 */
					dst_page->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
					vm_cs_validated_resets++;
#endif
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
				}
			}
			dst_page->busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0)
			/*
			 * we hit a 'hole' in the cache or
			 * a page we don't want to try to handle,
			 * so bail at this point
			 * we'll unlock the object below
			 */
			break;
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {

			dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested)
				xsize = io_requested;

			if ( (retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio)) )
				break;

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		/*
		 * if we have more than 1 page to work on
		 * in the current run, or the original request
		 * started at offset 0 of the page, or we're
		 * processing multiple batches, we will move
		 * the pages to the tail of the inactive queue
		 * to implement an LRU for read/write accesses
		 *
		 * the check for orig_offset == 0 is there to
		 * mitigate the cost of small (< page_size) requests
		 * to the same page (this way we only move it once)
		 */
		if (take_reference && (cur_run > 1 || orig_offset == 0)) {

			vm_page_lockspin_queues();

			for (i = 0; i < cur_run; i++)
				vm_page_lru(page_run[i]);

			vm_page_unlock_queues();
		}
		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			/*
			 * someone is explicitly referencing this page...
			 * update clustered and speculative state
			 *
			 */
			if (dst_page->clustered)
				VM_PAGE_CONSUME_CLUSTERED(dst_page);

			PAGE_WAKEUP_DONE(dst_page);
		}
		orig_offset = 0;
	}
	if (object->pager)
		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
	vm_object_unlock(object);
	return (retval);
}


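/*
 * One-time initialization: carve out the zone that backs vnode_pager
 * structures and bootstrap the auxiliary pagers compiled into this
 * configuration (apple_protect, swapfile and, on arm64, the fourk pager).
 */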
/*
 *
 */
void
vnode_pager_bootstrap(void)
{
	vm_size_t	size;

	size = (vm_size_t) sizeof(struct vnode_pager);
	vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
				 PAGE_SIZE, "vnode pager structures");
	zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE);


#if CONFIG_CODE_DECRYPTION
	apple_protect_pager_bootstrap();
#endif /* CONFIG_CODE_DECRYPTION */
	swapfile_pager_bootstrap();
#if __arm64__
	fourk_pager_bootstrap();
#endif /* __arm64__ */
	return;
}

/*
 *
 */
memory_object_t
vnode_pager_setup(
	struct vnode	*vp,
	__unused memory_object_t	pager)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL)
		panic("vnode_pager_setup: vnode_object_create() failed");
	return((memory_object_t)vnode_object);
}

/*
 *
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
		memory_object_control_t control,
#if !DEBUG
		 __unused
#endif
		 memory_object_cluster_size_t pg_size)
{
	vnode_pager_t	vnode_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;


	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	vnode_object->vn_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("vnode_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}

/*
 *
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	data_cnt,
	memory_object_offset_t	*resid_offset,
	int			*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	int			upl_flags)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	data_cnt)
{
	panic("vnode_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_get_isinuse(
	memory_object_t		mem_obj,
	uint32_t		*isinuse)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*isinuse = 1;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_throttle_io_limit(
	memory_object_t		mem_obj,
	uint32_t		*limit)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	(void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_isSSD(
	memory_object_t		mem_obj,
	boolean_t		*isSSD)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	*isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_size(
	memory_object_t		mem_obj,
	memory_object_offset_t	*length)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*length = 0;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_name(
	memory_object_t		mem_obj,
	char			*pathname,
	vm_size_t		pathname_len,
	char			*filename,
	vm_size_t		filename_len,
	boolean_t		*truncated_path_p)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_name(vnode_object->vnode_handle,
				    pathname,
				    pathname_len,
				    filename,
				    filename_len,
				    truncated_path_p);
}

kern_return_t
vnode_pager_get_object_mtime(
	memory_object_t		mem_obj,
	struct timespec		*mtime,
	struct timespec		*cs_mtime)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_mtime(vnode_object->vnode_handle,
				     mtime,
				     cs_mtime);
}

#if CHECK_CS_VALIDATION_BITMAP
kern_return_t
vnode_pager_cs_check_validation_bitmap(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	int			optype)
{
	vnode_pager_t	vnode_object;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);
	return ubc_cs_check_validation_bitmap( vnode_object->vnode_handle, offset, optype );
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

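/*
 * Page-in entry point: the VM layer needs data at 'offset'.  Ask the memory
 * object layer for a sensible cluster around the faulting offset (falling
 * back to a single page) and hand it to vnode_pager_cluster_read().
 */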
/*
 *
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	length,
	__unused vm_prot_t	desired_access,
	memory_object_fault_info_t	fault_info)
{
	vnode_pager_t		vnode_object;
	memory_object_offset_t	base_offset;
	vm_size_t		size;
	uint32_t		io_streaming = 0;

	vnode_object = vnode_pager_lookup(mem_obj);

	size = MAX_UPL_TRANSFER_BYTES;
	base_offset = offset;

	if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
				       &base_offset, &size, &io_streaming,
				       fault_info) != KERN_SUCCESS)
		size = PAGE_SIZE;

	assert(offset >= base_offset &&
	       offset < base_offset + size);

	return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}

/*
 *
 */
void
vnode_pager_reference(
	memory_object_t		mem_obj)
{
	vnode_pager_t	vnode_object;
	unsigned int	new_ref_count;

	vnode_object = vnode_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 *
 */
void
vnode_pager_deallocate(
	memory_object_t		mem_obj)
{
	vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
	return;
}

/*
 *
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

	return(KERN_SUCCESS);
}

/*
 *
 */
kern_return_t
vnode_pager_synchronize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	length,
	__unused vm_sync_t		sync_flags)
{
	panic("vnode_pager_synchronize: memory_object_synchronize no longer supported\n");
	return (KERN_FAILURE);
}

/*
 *
 */
kern_return_t
vnode_pager_map(
	memory_object_t		mem_obj,
	vm_prot_t		prot)
{
	vnode_pager_t	vnode_object;
	int		ret;
	kern_return_t	kr;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

	vnode_object = vnode_pager_lookup(mem_obj);

	ret = ubc_map(vnode_object->vnode_handle, prot);

	if (ret != 0) {
		kr = KERN_FAILURE;
	} else {
		kr = KERN_SUCCESS;
	}

	return kr;
}

kern_return_t
vnode_pager_last_unmap(
	memory_object_t		mem_obj)
{
	vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}



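/*
 * Page-out side of the pager.  For msync-driven requests (UPL_MSYNC) the
 * whole range is pushed through vnode_pageout() in MAX_UPL_TRANSFER_BYTES
 * chunks, optionally collecting per-chunk errors for the caller.  For the
 * normal pageout path, a cluster is built around the target page, clipped
 * to the end of the file, and written in a single vnode_pageout() call.
 */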
/*
 *
 */
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt,
	vm_object_offset_t	*resid_offset,
	int			*io_error,
	int			upl_flags)
{
	vm_size_t	size;
	int		errno;

	if (upl_flags & UPL_MSYNC) {

		upl_flags |= UPL_VNODE_PAGER;

		if ( (upl_flags & UPL_IOSYNC) && io_error)
			upl_flags |= UPL_KEEPCACHED;

		while (cnt) {
			size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

			assert((upl_size_t) size == size);
			vnode_pageout(vnode_object->vnode_handle,
				      NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

			if ( (upl_flags & UPL_KEEPCACHED) ) {
				if ( (*io_error = errno) )
					break;
			}
			cnt    -= size;
			offset += size;
		}
		if (resid_offset)
			*resid_offset = offset;

	} else {
		vm_object_offset_t	vnode_size;
		vm_object_offset_t	base_offset;

		/*
		 * this is the pageout path
		 */
		vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

		if (vnode_size > (offset + PAGE_SIZE)) {
			/*
			 * preset the maximum size of the cluster
			 * and put us on a nice cluster boundary...
			 * and then clip the size to insure we
			 * don't request past the end of the underlying file
			 */
			size = MAX_UPL_TRANSFER_BYTES;
			base_offset = offset & ~((signed)(size - 1));

			if ((base_offset + size) > vnode_size)
				size = round_page(((vm_size_t)(vnode_size - base_offset)));
		} else {
			/*
			 * we've been requested to page out a page beyond the current
			 * end of the 'file'... don't try to cluster in this case...
			 * we still need to send this page through because it might
			 * be marked precious and the underlying filesystem may need
			 * to do something with it (besides page it out)...
			 */
			base_offset = offset;
			size = PAGE_SIZE;
		}
		assert((upl_size_t) size == size);
		vnode_pageout(vnode_object->vnode_handle,
			      NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
			      (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
	}
}


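/*
 * Page-in helper: issue the read through vnode_pagein().  If vnode_pagein()
 * reports the pager is absent (kret == 1, PAGER_ABSENT in the BSD layer),
 * gather the requested range into a UPL and abort it so the pages are not
 * left busy, then return KERN_FAILURE and let the fault path handle it.
 */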
/*
 *
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	base_offset,
	vm_object_offset_t	offset,
	uint32_t		io_streaming,
	vm_size_t		cnt)
{
	int		local_error = 0;
	int		kret;
	int		flags = 0;

	assert(! (cnt & PAGE_MASK));

	if (io_streaming)
		flags |= UPL_IOSTREAMING;

	assert((upl_size_t) cnt == cnt);
	kret = vnode_pagein(vnode_object->vnode_handle,
			    (upl_t) NULL,
			    (upl_offset_t) (offset - base_offset),
			    base_offset,
			    (upl_size_t) cnt,
			    flags,
			    &local_error);
/*
	if(kret == PAGER_ABSENT) {
	Need to work out the defs here, 1 corresponds to PAGER_ABSENT
	defined in bsd/vm/vm_pager.h However, we should not be including
	that file here it is a layering violation.
*/
	if (kret == 1) {
		int	uplflags;
		upl_t	upl = NULL;
		unsigned int	count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC |
			    UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL);
		count = 0;
		assert((upl_size_t) cnt == cnt);
		kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
					       base_offset, (upl_size_t) cnt,
					       &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
		if (kr == KERN_SUCCESS) {
			upl_abort(upl, 0);
			upl_deallocate(upl);
		} else {
			/*
			 * We couldn't gather the page list, probably
			 * because the memory object doesn't have a link
			 * to a VM object anymore (forced unmount, for
			 * example). Just return an error to the vm_fault()
			 * path and let it handle it.
			 */
		}

		return KERN_FAILURE;
	}

	return KERN_SUCCESS;

}

/*
 *
 */
vnode_pager_t
vnode_object_create(
	struct vnode *vp)
{
	vnode_pager_t	vnode_object;

	vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
	if (vnode_object == VNODE_PAGER_NULL)
		return(VNODE_PAGER_NULL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
	vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	vnode_object->ref_count = 1;
	vnode_object->vnode_handle = vp;

	return(vnode_object);
}

/*
 *
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t	name)
{
	vnode_pager_t	vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
	return (vnode_object);
}


struct vnode *
vnode_pager_lookup_vnode(
	memory_object_t	name)
{
	vnode_pager_t	vnode_object;
	vnode_object = (vnode_pager_t)name;
	if(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops)
		return (vnode_object->vnode_handle);
	else
		return NULL;
}

/*********************** proc_info implementation *************/

#include <sys/bsdtask_info.h>

static int fill_vnodeinfoforaddr( vm_map_entry_t entry, uintptr_t * vnodeaddr, uint32_t * vid);


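/*
 * Fill in a proc_regioninfo_internal for the map entry containing (or
 * following) 'arg' in the task's address map.  Returns 1 when a region was
 * found and described, 0 otherwise; when requested, the backing vnode
 * address and vid are returned as well.  With per-task footprint reporting
 * enabled, a query just past the last entry reports a fake region that
 * accounts for non-volatile purgeable memory owned by the task.
 */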
int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{

	vm_map_t	map;
	vm_map_offset_t	address = (vm_map_offset_t )arg;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	vm_map_offset_t	start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t top;
	boolean_t do_region_footprint;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL)
	{
		task_unlock(task);
		return(0);
	}
	vm_map_reference(map);
	task_unlock(task);

	do_region_footprint = task_self_region_footprint();

	vm_map_lock_read(map);

	start = address;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			if (do_region_footprint &&
			    address == tmp_entry->vme_end) {
				ledger_amount_t	nonvol, nonvol_compressed;

				/*
				 * This request is right after the last valid
				 * memory region; instead of reporting the
				 * end of the address space, report a fake
				 * memory region to account for non-volatile
				 * purgeable memory owned by this task.
				 */

				ledger_get_balance(
					task->ledger,
					task_ledgers.purgeable_nonvolatile,
					&nonvol);
				ledger_get_balance(
					task->ledger,
					task_ledgers.purgeable_nonvolatile_compressed,
					&nonvol_compressed);
				if (nonvol + nonvol_compressed == 0) {
					/* nothing to report */
					vm_map_unlock_read(map);
					vm_map_deallocate(map);
					return 0;
				}
				/* provide fake region for purgeable */
				pinfo->pri_offset = address;
				pinfo->pri_protection = VM_PROT_DEFAULT;
				pinfo->pri_max_protection = VM_PROT_DEFAULT;
				pinfo->pri_inheritance = VM_INHERIT_NONE;
				pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
				pinfo->pri_user_wired_count = 0;
				pinfo->pri_user_tag = -1;
				pinfo->pri_pages_resident =
					(uint32_t) (nonvol / PAGE_SIZE);
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out =
					(uint32_t) (nonvol_compressed / PAGE_SIZE);
				pinfo->pri_pages_dirtied =
					(uint32_t) (nonvol / PAGE_SIZE);
				pinfo->pri_ref_count = 1;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = SM_PRIVATE;
				pinfo->pri_private_pages_resident =
					(uint32_t) (nonvol / PAGE_SIZE);
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile);
				pinfo->pri_address = address;
				pinfo->pri_size =
					(uint64_t) (nonvol + nonvol_compressed);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = VME_OFFSET(entry);
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = VME_ALIAS(entry);

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared)
			pinfo->pri_flags |= PROC_REGION_SHARED;
	}


	extended.protection = entry->protection;
	extended.user_tag = VME_ALIAS(entry);
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);

	if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED)
		extended.share_mode = SM_PRIVATE;

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);


	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uintptr_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) ==0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(1);
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return(1);
}

int
fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{

	vm_map_t	map;
	vm_map_offset_t	address = (vm_map_offset_t )arg;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL)
	{
		task_unlock(task);
		return(0);
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	if (!vm_map_lookup_entry(map, address, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(0);
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {

				pinfo->pri_offset = VME_OFFSET(entry);
				pinfo->pri_protection = entry->protection;
				pinfo->pri_max_protection = entry->max_protection;
				pinfo->pri_inheritance = entry->inheritance;
				pinfo->pri_behavior = entry->behavior;
				pinfo->pri_user_wired_count = entry->user_wired_count;
				pinfo->pri_user_tag = VME_ALIAS(entry);

				if (entry->is_shared)
					pinfo->pri_flags |= PROC_REGION_SHARED;

				pinfo->pri_pages_resident = 0;
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out = 0;
				pinfo->pri_pages_dirtied = 0;
				pinfo->pri_ref_count = 0;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = 0;

				pinfo->pri_private_pages_resident = 0;
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = 0;

				pinfo->pri_address = (uint64_t)entry->vme_start;
				pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return(1);
			}
		}

		/* Keep searching for a vnode-backed mapping */
		entry = entry->vme_next;
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return(0);
}

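/*
 * Walk the map entry's shadow chain down to the object that actually has
 * the pager.  If that object is backed by the vnode pager, return the
 * vnode's address and vid through the out parameters and return 1;
 * otherwise return 0.
 */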
static int
fill_vnodeinfoforaddr(
	vm_map_entry_t	entry,
	uintptr_t	*vnodeaddr,
	uint32_t	*vid)
{
	vm_object_t	top_object, object;
	memory_object_t	memory_object;
	memory_object_pager_ops_t pager_ops;
	kern_return_t	kr;
	int		shadow_depth;


	if (entry->is_sub_map) {
		return(0);
	} else {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */
		top_object = VME_OBJECT(entry);
		if (top_object == VM_OBJECT_NULL) {
			object = VM_OBJECT_NULL;
			shadow_depth = 0;
		} else {
			vm_object_lock(top_object);
			for (object = top_object, shadow_depth = 0;
			     object->shadow != VM_OBJECT_NULL;
			     object = object->shadow, shadow_depth++) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}
		}
	}

	if (object == VM_OBJECT_NULL) {
		return(0);
	} else if (object->internal) {
		vm_object_unlock(object);
		return(0);
	} else if (! object->pager_ready ||
		   object->terminating ||
		   ! object->alive) {
		vm_object_unlock(object);
		return(0);
	} else {
		memory_object = object->pager;
		pager_ops = memory_object->mo_pager_ops;
		if (pager_ops == &vnode_pager_ops) {
			kr = vnode_pager_get_object_vnode(
				memory_object,
				vnodeaddr, vid);
			if (kr != KERN_SUCCESS) {
				vm_object_unlock(object);
				return(0);
			}
		} else {
			vm_object_unlock(object);
			return(0);
		}
	}
	vm_object_unlock(object);
	return(1);
}

kern_return_t
vnode_pager_get_object_vnode (
	memory_object_t	mem_obj,
	uintptr_t	*vnodeaddr,
	uint32_t	*vid)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	if (vnode_object->vnode_handle) {
		*vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
		*vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

		return(KERN_SUCCESS);
	}

	return(KERN_FAILURE);
}

#if CONFIG_IOSCHED
kern_return_t
vnode_pager_get_object_devvp(
	memory_object_t	mem_obj,
	uintptr_t	*devvp)
{
	struct vnode	*vp;
	uint32_t	vid;

	if(vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS)
		return (KERN_FAILURE);
	*devvp = (uintptr_t)vnode_mountdevvp(vp);
	if (*devvp)
		return (KERN_SUCCESS);
	return (KERN_FAILURE);
}
#endif

/*
 * Find the underlying vnode object for the given vm_map_entry.  If found, return with the
 * object locked, otherwise return NULL with nothing locked.
 */

vm_object_t
find_vnode_object(
	vm_map_entry_t	entry
)
{
	vm_object_t			top_object, object;
	memory_object_t			memory_object;
	memory_object_pager_ops_t	pager_ops;

	if (!entry->is_sub_map) {

		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */

		top_object = VME_OBJECT(entry);

		if (top_object) {
			vm_object_lock(top_object);

			for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}

			if (object && !object->internal && object->pager_ready && !object->terminating &&
			    object->alive) {
				memory_object = object->pager;
				pager_ops = memory_object->mo_pager_ops;

				/*
				 * If this object points to the vnode_pager_ops, then we found what we're
				 * looking for.  Otherwise, this vm_map_entry doesn't have an underlying
				 * vnode and so we fall through to the bottom and return NULL.
				 */

				if (pager_ops == &vnode_pager_ops)
					return object;	/* we return with the object locked */
			}

			vm_object_unlock(object);
		}

	}

	return(VM_OBJECT_NULL);
}