/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/assert.h>
#include <kern/host.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);

/*
 * Return the number of entries in the given map.
 */
int
get_map_nentries(
	vm_map_t map)
{
	return(map->hdr.nentries);
}

mach_vm_offset_t
mach_get_vm_start(vm_map_t map)
{
	return( vm_map_first_entry(map)->vme_start);
}

mach_vm_offset_t
mach_get_vm_end(vm_map_t map)
{
	return( vm_map_last_entry(map)->vme_end);
}

/*
 * BSD VNODE PAGER
 */

const struct memory_object_pager_ops vnode_pager_ops = {
	vnode_pager_reference,
	vnode_pager_deallocate,
	vnode_pager_init,
	vnode_pager_terminate,
	vnode_pager_data_request,
	vnode_pager_data_return,
	vnode_pager_data_initialize,
	vnode_pager_data_unlock,
	vnode_pager_synchronize,
	vnode_pager_map,
	vnode_pager_last_unmap,
	NULL, /* data_reclaim */
	"vnode pager"
};
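
/*
 * Note: vnode_pager_ops is the dispatch table the generic VM layer uses
 * for every vnode-backed memory object.  An illustrative sketch of how a
 * call reaches this pager (field names assumed to be the
 * memory_object_pager_ops members declared in mach/memory_object_types.h;
 * the real call sites live in osfmk/vm/memory_object.c):
 *
 *	memory_object_t mo = ...;	// pager for some mapped file
 *	mo->mo_pager_ops->memory_object_data_request(mo, offset, length,
 *						     prot, fault_info);
 *	// -> vnode_pager_data_request() below
 */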

typedef struct vnode_pager {
	struct ipc_object_header	pager_header;	/* fake ip_kotype() */
	memory_object_pager_ops_t	pager_ops;	/* == &vnode_pager_ops */
	unsigned int			ref_count;	/* reference count */
	memory_object_control_t		control_handle;	/* mem object control handle */
	struct vnode			*vnode_handle;	/* vnode handle */
} *vnode_pager_t;


#define pager_ikot pager_header.io_bits

ipc_port_t
trigger_name_to_port(		/* forward */
	mach_port_t);

kern_return_t
vnode_pager_cluster_read(	/* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_object_offset_t,
	uint32_t,
	vm_size_t);

void
vnode_pager_cluster_write(	/* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t,
	vm_object_offset_t *,
	int *,
	int);


vnode_pager_t
vnode_object_create(		/* forward */
	struct vnode *);

vnode_pager_t
vnode_pager_lookup(		/* forward */
	memory_object_t);

zone_t	vnode_pager_zone;


#define	VNODE_PAGER_NULL	((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define	CLUSTER_SHIFT	1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define	MAX_VNODE	10000


#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL	0xffffffff
#define	PAGER_INIT	0x00000001
#define	PAGER_PAGEIN	0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL) == LEVEL) {printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
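
/*
 * Usage sketch for PAGER_DEBUG (DEBUG kernels only; the mask assignment
 * shown here is hypothetical, as pagerdebug would normally be poked from
 * a debugger):
 *
 *	pagerdebug = PAGER_INIT | PAGER_PAGEIN;
 *	PAGER_DEBUG(PAGER_INIT, ("vnode_pager_init: %p\n", mem_obj));
 *
 * The second argument is a complete, parenthesized printf argument list,
 * which is why call sites below pass ("...", args) rather than "...", args.
 */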

extern int proc_resetpcontrol(int);

#if DEVELOPMENT || DEBUG
extern unsigned long vm_cs_validated_resets;
#endif

/*
 *	Routine:	mach_macx_triggers
 *	Function:
 *		Syscall interface to set the callbacks for low and
 *		high water marks.
 */
int
mach_macx_triggers(
	struct macx_triggers_args *args)
{
	int	hi_water = args->hi_water;
	int	low_water = args->low_water;
	int	flags = args->flags;
	mach_port_t	trigger_name = args->alert_port;
	kern_return_t	kr;
	memory_object_default_t	default_pager;
	ipc_port_t	trigger_port;

	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(),
					 &default_pager, 0);
	if (kr != KERN_SUCCESS) {
		return EINVAL;
	}

	if (((flags & SWAP_ENCRYPT_ON) && (flags & SWAP_ENCRYPT_OFF)) ||
	    ((flags & SWAP_COMPACT_ENABLE) && (flags & SWAP_COMPACT_DISABLE))) {
		/* can't have it both ways */
		return EINVAL;
	}

	if (default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}

	if (flags & SWAP_ENCRYPT_ON) {
		/* ENCRYPTED SWAP: tell default_pager to encrypt */
		default_pager_triggers(default_pager,
				       0, 0,
				       SWAP_ENCRYPT_ON,
				       IP_NULL);
	} else if (flags & SWAP_ENCRYPT_OFF) {
		/* ENCRYPTED SWAP: tell default_pager not to encrypt */
		default_pager_triggers(default_pager,
				       0, 0,
				       SWAP_ENCRYPT_OFF,
				       IP_NULL);
	}

	if (flags & USE_EMERGENCY_SWAP_FILE_FIRST) {
		/*
		 * Time to switch to the emergency segment.
		 */
		return default_pager_triggers(default_pager,
					      0, 0,
					      USE_EMERGENCY_SWAP_FILE_FIRST,
					      IP_NULL);
	}

	if (flags & SWAP_FILE_CREATION_ERROR) {
		/*
		 * For some reason, the dynamic pager failed to create a swap file.
		 */
		trigger_port = trigger_name_to_port(trigger_name);
		if (trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		ip_unlock(trigger_port);
		default_pager_triggers(default_pager,
				       0, 0,
				       SWAP_FILE_CREATION_ERROR,
				       trigger_port);
	}

	if (flags & HI_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if (trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		ip_unlock(trigger_port);
		default_pager_triggers(default_pager,
				       hi_water, low_water,
				       HI_WAT_ALERT, trigger_port);
	}

	if (flags & LO_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if (trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		ip_unlock(trigger_port);
		default_pager_triggers(default_pager,
				       hi_water, low_water,
				       LO_WAT_ALERT, trigger_port);
	}


	if (flags & PROC_RESUME) {

		/*
		 * For this call, hi_water is used to pass in the pid of the process
		 * we want to resume or unthrottle.  This is of course restricted to
		 * the superuser (checked inside of proc_resetpcontrol).
		 */

		return proc_resetpcontrol(hi_water);
	}

	/*
	 * Set the scheduling priority and policy for the current thread;
	 * it is assumed, for the time being, that the thread setting the
	 * alert is the same one that will be servicing it.
	 *
	 * XXX This does not belong in the kernel XXX
	 */
	if (flags & HI_WAT_ALERT) {
		thread_precedence_policy_data_t	pre;
		thread_extended_policy_data_t	ext;

		ext.timeshare = FALSE;
		pre.importance = INT32_MAX;

		thread_policy_set(current_thread(),
				  THREAD_EXTENDED_POLICY,
				  (thread_policy_t)&ext,
				  THREAD_EXTENDED_POLICY_COUNT);

		thread_policy_set(current_thread(),
				  THREAD_PRECEDENCE_POLICY,
				  (thread_policy_t)&pre,
				  THREAD_PRECEDENCE_POLICY_COUNT);

		current_thread()->options |= TH_OPT_VMPRIV;
	}

	if (flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE)) {
		return macx_backing_store_compaction(flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE));
	}

	return 0;
}

/*
 * Translate a trigger port name in the caller's IPC space into the
 * underlying port.  On success the port is returned locked and active;
 * on failure NULL is returned.
 */
ipc_port_t
trigger_name_to_port(
	mach_port_t	trigger_name)
{
	ipc_port_t	trigger_port;
	ipc_space_t	space;

	if (trigger_name == 0)
		return (NULL);

	space = current_space();
	if (ipc_port_translate_receive(space, CAST_MACH_PORT_TO_NAME(trigger_name),
				       &trigger_port) != KERN_SUCCESS)
		return (NULL);
	return trigger_port;
}
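
/*
 * Caller contract for trigger_name_to_port(), mirroring its uses in
 * mach_macx_triggers() above: the port comes back locked and active, so
 * the caller must consume that lock, typically by minting a send right
 * before unlocking:
 *
 *	trigger_port = trigger_name_to_port(trigger_name);
 *	if (trigger_port == NULL)
 *		return EINVAL;
 *	ipc_port_make_send_locked(trigger_port);
 *	ip_unlock(trigger_port);
 */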


extern int	uiomove64(addr64_t, int, void *);
#define	MAX_RUN	32

int
memory_object_control_uiomove(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	void			*uio,
	int			start_offset,
	int			io_requested,
	int			mark_dirty,
	int			take_reference)
{
	vm_object_t		object;
	vm_page_t		dst_page;
	int			xsize;
	int			retval = 0;
	int			cur_run;
	int			cur_needed;
	int			i;
	int			orig_offset;
	vm_page_t		page_run[MAX_RUN];

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return (0);
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so fall off
		 * this optimized path and fall back to the regular
		 * path.
		 */
		vm_object_unlock(object);
		return 0;
	}
	orig_offset = start_offset;

	while (io_requested && retval == 0) {

		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN)
			cur_needed = MAX_RUN;

		for (cur_run = 0; cur_run < cur_needed; ) {

			if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
				break;


			if (dst_page->busy || dst_page->cleaning) {
				/*
				 * someone else is playing with the page... if we've
				 * already collected pages into this run, go ahead
				 * and process them now, since we can't block on this
				 * page while holding other pages in the BUSY state;
				 * otherwise, wait here for the page to come free
				 */
				if (cur_run)
					break;
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);
				continue;
			}
			if (dst_page->laundry) {
				dst_page->pageout = FALSE;

				vm_pageout_steal_laundry(dst_page, FALSE);
			}
			/*
			 * this routine is only called when copying
			 * to/from real files... no need to consider
			 * encrypted swap pages
			 */
			assert(!dst_page->encrypted);

			if (mark_dirty) {
				SET_PAGE_DIRTY(dst_page, FALSE);
				if (dst_page->cs_validated &&
				    !dst_page->cs_tainted) {
					/*
					 * CODE SIGNING:
					 * We're modifying a code-signed
					 * page: force revalidate
					 */
					dst_page->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
					vm_cs_validated_resets++;
#endif
					pmap_disconnect(dst_page->phys_page);
				}
			}
			dst_page->busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0)
			/*
			 * we hit a 'hole' in the cache or
			 * a page we don't want to try to handle,
			 * so bail at this point;
			 * we'll unlock the object below
			 */
			break;
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {

			dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested)
				xsize = io_requested;

			if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << PAGE_SHIFT) + start_offset), xsize, uio)) )
				break;

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		/*
		 * if we have more than 1 page to work on
		 * in the current run, or the original request
		 * started at offset 0 of the page, or we're
		 * processing multiple batches, we will move
		 * the pages to the tail of the inactive queue
		 * to implement an LRU for read/write accesses
		 *
		 * the check for orig_offset == 0 is there to
		 * mitigate the cost of small (< page_size) requests
		 * to the same page (this way we only move it once)
		 */
		if (take_reference && (cur_run > 1 || orig_offset == 0)) {

			vm_page_lockspin_queues();

			for (i = 0; i < cur_run; i++)
				vm_page_lru(page_run[i]);

			vm_page_unlock_queues();
		}
		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			/*
			 * someone is explicitly referencing this page...
			 * update clustered and speculative state
			 */
			if (dst_page->clustered)
				VM_PAGE_CONSUME_CLUSTERED(dst_page);

			PAGE_WAKEUP_DONE(dst_page);
		}
		orig_offset = 0;
	}
	vm_object_unlock(object);

	return (retval);
}
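
/*
 * Shape of memory_object_control_uiomove() above, as an illustrative
 * pseudo-C sketch (not compiled code):
 *
 *	while (io_requested && !error) {
 *		// object locked: gather up to MAX_RUN resident pages,
 *		// marking each one busy so it can't be stolen
 *		// object unlocked: uiomove64() each physical page
 *		// object relocked: optionally LRU the run, then
 *		// PAGE_WAKEUP_DONE() each page to clear busy
 *	}
 *
 * Marking the pages busy (rather than holding the object lock) is what
 * lets the copy itself run without blocking other operations on the
 * object.
 */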


/*
 * Create the zone from which vnode pager structures are allocated and
 * bootstrap the other file-backed pagers.
 */
void
vnode_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct vnode_pager);
	vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
				 PAGE_SIZE, "vnode pager structures");
	zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE);
	zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE);


#if CONFIG_CODE_DECRYPTION
	apple_protect_pager_bootstrap();
#endif /* CONFIG_CODE_DECRYPTION */
	swapfile_pager_bootstrap();
	return;
}

/*
 * Create and return a memory object to back the given vnode.
 */
memory_object_t
vnode_pager_setup(
	struct vnode	*vp,
	__unused memory_object_t	pager)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL)
		panic("vnode_pager_setup: vnode_object_create() failed");
	return((memory_object_t)vnode_object);
}

/*
 * First contact from the VM layer: record the memory object control
 * handle and set the object's caching attributes.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
		 memory_object_control_t control,
#if !DEBUG
		 __unused
#endif
		 memory_object_cluster_size_t pg_size)
{
	vnode_pager_t	vnode_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;


	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	vnode_object->control_handle = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("vnode_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}
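
/*
 * A note on the attributes chosen above (a reading of the flags, not
 * normative documentation): MEMORY_OBJECT_COPY_DELAY selects delayed
 * (copy-on-write) copy semantics for mapped files, and may_cache_object
 * lets clean pages linger after the last mapping goes away.  The
 * commented-out CLUSTER_SHIFT line is the dynamic cluster sizing that
 * the TODO next to the CLUSTER_SHIFT definition at the top of this file
 * refers to.
 */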

/*
 * Page out: hand the specified range of dirty data back to the file
 * via the cluster write path.
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	data_cnt,
	memory_object_offset_t	*resid_offset,
	int			*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	int			upl_flags)
{
	register vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
	__unused memory_object_t		mem_obj,
	__unused memory_object_offset_t		offset,
	__unused memory_object_cluster_size_t	data_cnt)
{
	panic("vnode_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_get_isinuse(
	memory_object_t		mem_obj,
	uint32_t		*isinuse)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*isinuse = 1;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_throttle_io_limit(
	memory_object_t		mem_obj,
	uint32_t		*limit)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	(void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_isSSD(
	memory_object_t		mem_obj,
	boolean_t		*isSSD)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	*isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_size(
	memory_object_t		mem_obj,
	memory_object_offset_t	*length)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*length = 0;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_name(
	memory_object_t		mem_obj,
	char			*pathname,
	vm_size_t		pathname_len,
	char			*filename,
	vm_size_t		filename_len,
	boolean_t		*truncated_path_p)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_name(vnode_object->vnode_handle,
				    pathname,
				    pathname_len,
				    filename,
				    filename_len,
				    truncated_path_p);
}

kern_return_t
vnode_pager_get_object_mtime(
	memory_object_t		mem_obj,
	struct timespec		*mtime,
	struct timespec		*cs_mtime)
{
	vnode_pager_t	vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_mtime(vnode_object->vnode_handle,
				     mtime,
				     cs_mtime);
}

kern_return_t
vnode_pager_get_object_cs_blobs(
	memory_object_t	mem_obj,
	void		**blobs)
{
	vnode_pager_t	vnode_object;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_cs_blobs(vnode_object->vnode_handle,
					blobs);
}

#if CHECK_CS_VALIDATION_BITMAP
kern_return_t
vnode_pager_cs_check_validation_bitmap(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	int			optype)
{
	vnode_pager_t	vnode_object;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);
	return ubc_cs_check_validation_bitmap( vnode_object->vnode_handle, offset, optype );
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

/*
 * Page in: the fault path asks for data; size a cluster around the
 * faulting offset and read it in.
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	length,
	__unused vm_prot_t	desired_access,
	memory_object_fault_info_t	fault_info)
{
	vnode_pager_t		vnode_object;
	memory_object_offset_t	base_offset;
	vm_size_t		size;
	uint32_t		io_streaming = 0;

	vnode_object = vnode_pager_lookup(mem_obj);

	size = MAX_UPL_TRANSFER_BYTES;
	base_offset = offset;

	if (memory_object_cluster_size(vnode_object->control_handle, &base_offset, &size, &io_streaming, fault_info) != KERN_SUCCESS)
		size = PAGE_SIZE;

	assert(offset >= base_offset &&
	       offset < base_offset + size);

	return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}

/*
 * Take an additional reference on the pager.
 */
void
vnode_pager_reference(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;
	unsigned int		new_ref_count;

	vnode_object = vnode_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 * Release a reference; on the last one, drop the vnode and free the pager.
 */
void
vnode_pager_deallocate(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
	return;
}
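
/*
 * Pager lifecycle sketch: vnode_object_create() hands back a pager with
 * ref_count == 1; vnode_pager_reference() bumps the count atomically, and
 * the final vnode_pager_deallocate() both releases the vnode (via
 * vnode_pager_vrele()) and frees the structure back to vnode_pager_zone.
 * The assert(new_ref_count > 1) in vnode_pager_reference() catches a
 * reference taken on a pager whose count had already dropped to zero.
 */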

/*
 * No per-object teardown is needed here; the real cleanup happens when
 * the last reference goes away in vnode_pager_deallocate().
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

	return(KERN_SUCCESS);
}

/*
 * Report the given range as synchronized back to the VM layer.
 */
kern_return_t
vnode_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	sync_flags)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_synchronize_completed(vnode_object->control_handle, offset, length);

	return (KERN_SUCCESS);
}

/*
 * Called when the object is mapped: let UBC know the vnode is in use.
 */
kern_return_t
vnode_pager_map(
	memory_object_t		mem_obj,
	vm_prot_t		prot)
{
	vnode_pager_t		vnode_object;
	int			ret;
	kern_return_t		kr;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

	vnode_object = vnode_pager_lookup(mem_obj);

	ret = ubc_map(vnode_object->vnode_handle, prot);

	if (ret != 0) {
		kr = KERN_FAILURE;
	} else {
		kr = KERN_SUCCESS;
	}

	return kr;
}

kern_return_t
vnode_pager_last_unmap(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}



/*
 * Push dirty pages out to the file, either for an msync (UPL_MSYNC) or
 * on behalf of the pageout path.
 */
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt,
	vm_object_offset_t	*resid_offset,
	int			*io_error,
	int			upl_flags)
{
	vm_size_t	size;
	int		errno;

	if (upl_flags & UPL_MSYNC) {

		upl_flags |= UPL_VNODE_PAGER;

		if ( (upl_flags & UPL_IOSYNC) && io_error)
			upl_flags |= UPL_KEEPCACHED;

		while (cnt) {
			size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

			assert((upl_size_t) size == size);
			vnode_pageout(vnode_object->vnode_handle,
				      NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

			if ( (upl_flags & UPL_KEEPCACHED) ) {
				if ( (*io_error = errno) )
					break;
			}
			cnt    -= size;
			offset += size;
		}
		if (resid_offset)
			*resid_offset = offset;

	} else {
		vm_object_offset_t	vnode_size;
		vm_object_offset_t	base_offset;

		/*
		 * this is the pageout path
		 */
		vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

		if (vnode_size > (offset + PAGE_SIZE)) {
			/*
			 * preset the maximum size of the cluster
			 * and put us on a nice cluster boundary...
			 * and then clip the size to ensure we
			 * don't request past the end of the underlying file
			 */
			size = MAX_UPL_TRANSFER_BYTES;
			base_offset = offset & ~((signed)(size - 1));

			if ((base_offset + size) > vnode_size)
				size = round_page(((vm_size_t)(vnode_size - base_offset)));
		} else {
			/*
			 * we've been requested to page out a page beyond the current
			 * end of the 'file'... don't try to cluster in this case...
			 * we still need to send this page through because it might
			 * be marked precious and the underlying filesystem may need
			 * to do something with it (besides page it out)...
			 */
			base_offset = offset;
			size = PAGE_SIZE;
		}
		assert((upl_size_t) size == size);
		vnode_pageout(vnode_object->vnode_handle,
			      NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
			      (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
	}
}
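
/*
 * Worked example of the pageout-path clustering above, assuming 4K pages
 * and a 256K MAX_UPL_TRANSFER_BYTES (illustrative values; the real
 * constant depends on configuration): paging out offset 0x43000 of a
 * 0x50000-byte file gives
 *
 *	base_offset = 0x43000 & ~(0x40000 - 1)      = 0x40000
 *	size        = round_page(0x50000 - 0x40000) = 0x10000
 *
 * i.e. the cluster is aligned down to a 256K boundary and then clipped
 * so it never extends past the end of the file.
 */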


/*
 * Read a cluster of pages in from the file; if the filesystem can't
 * satisfy the request, clean up any pages the VM set up for it and
 * return failure so vm_fault() can handle it.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	base_offset,
	vm_object_offset_t	offset,
	uint32_t		io_streaming,
	vm_size_t		cnt)
{
	int		local_error = 0;
	int		kret;
	int		flags = 0;

	assert(! (cnt & PAGE_MASK));

	if (io_streaming)
		flags |= UPL_IOSTREAMING;

	assert((upl_size_t) cnt == cnt);
	kret = vnode_pagein(vnode_object->vnode_handle,
			    (upl_t) NULL,
			    (upl_offset_t) (offset - base_offset),
			    base_offset,
			    (upl_size_t) cnt,
			    flags,
			    &local_error);
/*
	if (kret == PAGER_ABSENT) {
	Need to work out the defs here: 1 corresponds to PAGER_ABSENT as
	defined in bsd/vm/vm_pager.h; however, we should not include that
	file here, as that would be a layering violation.
*/
	if (kret == 1) {
		int		uplflags;
		upl_t		upl = NULL;
		unsigned int	count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC |
			    UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL);
		count = 0;
		assert((upl_size_t) cnt == cnt);
		kr = memory_object_upl_request(vnode_object->control_handle,
					       base_offset, (upl_size_t) cnt,
					       &upl, NULL, &count, uplflags);
		if (kr == KERN_SUCCESS) {
			upl_abort(upl, 0);
			upl_deallocate(upl);
		} else {
			/*
			 * We couldn't gather the page list, probably
			 * because the memory object doesn't have a link
			 * to a VM object anymore (forced unmount, for
			 * example).  Just return an error to the vm_fault()
			 * path and let it handle it.
			 */
		}

		return KERN_FAILURE;
	}

	return KERN_SUCCESS;

}


/*
 * Ask the VM to reclaim cached memory objects backed by the vnode pager.
 */
void
vnode_pager_release_from_cache(
	int	*cnt)
{
	memory_object_free_from_cache(
			&realhost, &vnode_pager_ops, cnt);
}

/*
 * Allocate and initialize a vnode pager structure for the given vnode.
 */
vnode_pager_t
vnode_object_create(
	struct vnode *vp)
{
	register vnode_pager_t	vnode_object;

	vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
	if (vnode_object == VNODE_PAGER_NULL)
		return(VNODE_PAGER_NULL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->pager_ops = &vnode_pager_ops;
	vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->ref_count = 1;
	vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
	vnode_object->vnode_handle = vp;

	return(vnode_object);
}
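
/*
 * The fake-kotype trick above relies on pager_header being the first
 * field of struct vnode_pager (see the typedef near the top of this
 * file): pager_ikot is #defined to pager_header.io_bits, so code that
 * inspects the pointer as if it were an IPC object sees
 * IKOT_MEMORY_OBJECT and treats it as a memory object rather than a
 * named entry port.
 */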

/*
 * Convert a memory_object_t back into the vnode_pager_t it really is;
 * the assert sanity-checks the cast.
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t	name)
{
	vnode_pager_t	vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->pager_ops == &vnode_pager_ops);
	return (vnode_object);
}


/*********************** proc_info implementation *************/

#include <sys/bsdtask_info.h>

static int fill_vnodeinfoforaddr( vm_map_entry_t entry, uintptr_t * vnodeaddr, uint32_t * vid);


int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{

	vm_map_t map;
	vm_map_offset_t	address = (vm_map_offset_t )arg;
	vm_map_entry_t		tmp_entry;
	vm_map_entry_t		entry;
	vm_map_offset_t		start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t top;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL)
	{
		task_unlock(task);
		return(0);
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	start = address;
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(0);
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = VME_OFFSET(entry);
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = VME_ALIAS(entry);

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared)
			pinfo->pri_flags |= PROC_REGION_SHARED;
	}


	extended.protection = entry->protection;
	extended.user_tag = VME_ALIAS(entry);
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended);

	if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED)
		extended.share_mode = SM_PRIVATE;

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);


	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uintptr_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(1);
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return(1);
}

int
fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{

	vm_map_t map;
	vm_map_offset_t	address = (vm_map_offset_t )arg;
	vm_map_entry_t		tmp_entry;
	vm_map_entry_t		entry;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL)
	{
		task_unlock(task);
		return(0);
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	if (!vm_map_lookup_entry(map, address, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(0);
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {

				pinfo->pri_offset = VME_OFFSET(entry);
				pinfo->pri_protection = entry->protection;
				pinfo->pri_max_protection = entry->max_protection;
				pinfo->pri_inheritance = entry->inheritance;
				pinfo->pri_behavior = entry->behavior;
				pinfo->pri_user_wired_count = entry->user_wired_count;
				pinfo->pri_user_tag = VME_ALIAS(entry);

				if (entry->is_shared)
					pinfo->pri_flags |= PROC_REGION_SHARED;

				pinfo->pri_pages_resident = 0;
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out = 0;
				pinfo->pri_pages_dirtied = 0;
				pinfo->pri_ref_count = 0;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = 0;

				pinfo->pri_private_pages_resident = 0;
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = 0;

				pinfo->pri_address = (uint64_t)entry->vme_start;
				pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return(1);
			}
		}

		/* Keep searching for a vnode-backed mapping */
		entry = entry->vme_next;
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return(0);
}

static int
fill_vnodeinfoforaddr(
	vm_map_entry_t		entry,
	uintptr_t		*vnodeaddr,
	uint32_t		*vid)
{
	vm_object_t	top_object, object;
	memory_object_t	memory_object;
	memory_object_pager_ops_t pager_ops;
	kern_return_t	kr;
	int		shadow_depth;


	if (entry->is_sub_map) {
		return(0);
	} else {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */
		top_object = VME_OBJECT(entry);
		if (top_object == VM_OBJECT_NULL) {
			object = VM_OBJECT_NULL;
			shadow_depth = 0;
		} else {
			vm_object_lock(top_object);
			for (object = top_object, shadow_depth = 0;
			     object->shadow != VM_OBJECT_NULL;
			     object = object->shadow, shadow_depth++) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}
		}
	}

	if (object == VM_OBJECT_NULL) {
		return(0);
	} else if (object->internal) {
		vm_object_unlock(object);
		return(0);
	} else if (! object->pager_ready ||
		   object->terminating ||
		   ! object->alive) {
		vm_object_unlock(object);
		return(0);
	} else {
		memory_object = object->pager;
		pager_ops = memory_object->mo_pager_ops;
		if (pager_ops == &vnode_pager_ops) {
			kr = vnode_pager_get_object_vnode(
				memory_object,
				vnodeaddr, vid);
			if (kr != KERN_SUCCESS) {
				vm_object_unlock(object);
				return(0);
			}
		} else {
			vm_object_unlock(object);
			return(0);
		}
	}
	vm_object_unlock(object);
	return(1);
}
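
/*
 * The shadow-chain walk in fill_vnodeinfoforaddr() (and in
 * find_vnode_object() below) uses hand-over-hand locking: each shadow
 * object is locked before the lock on its parent is dropped, so the
 * chain can't be torn down underneath the walker.  Only the bottom
 * object's lock is still held when the pager check runs, and every exit
 * path drops it before returning.
 */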

kern_return_t
vnode_pager_get_object_vnode (
	memory_object_t		mem_obj,
	uintptr_t		*vnodeaddr,
	uint32_t		*vid)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	if (vnode_object->vnode_handle) {
		*vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
		*vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

		return(KERN_SUCCESS);
	}

	return(KERN_FAILURE);
}

#if CONFIG_IOSCHED
kern_return_t
vnode_pager_get_object_devvp(
	memory_object_t		mem_obj,
	uintptr_t		*devvp)
{
	struct vnode	*vp;
	uint32_t	vid;

	if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS)
		return (KERN_FAILURE);
	*devvp = (uintptr_t)vnode_mountdevvp(vp);
	if (*devvp)
		return (KERN_SUCCESS);
	return (KERN_FAILURE);
}
#endif

/*
 * Find the underlying vnode object for the given vm_map_entry.  If found, return with the
 * object locked, otherwise return NULL with nothing locked.
 */

vm_object_t
find_vnode_object(
	vm_map_entry_t	entry
)
{
	vm_object_t			top_object, object;
	memory_object_t			memory_object;
	memory_object_pager_ops_t	pager_ops;

	if (!entry->is_sub_map) {

		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */

		top_object = VME_OBJECT(entry);

		if (top_object) {
			vm_object_lock(top_object);

			for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}

			if (object && !object->internal && object->pager_ready && !object->terminating &&
			    object->alive) {
				memory_object = object->pager;
				pager_ops = memory_object->mo_pager_ops;

				/*
				 * If this object points to the vnode_pager_ops, then we found what we're
				 * looking for.  Otherwise, this vm_map_entry doesn't have an underlying
				 * vnode and so we fall through to the bottom and return NULL.
				 */

				if (pager_ops == &vnode_pager_ops)
					return object;		/* we return with the object locked */
			}

			vm_object_unlock(object);
		}

	}

	return(VM_OBJECT_NULL);
}