1/*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: vm/memory_object.c
60 * Author: Michael Wayne Young
61 *
62 * External memory management interface control functions.
63 */
64
65/*
66 * Interface dependencies:
67 */
68
69#include <mach/std_types.h> /* For pointer_t */
70#include <mach/mach_types.h>
71
72#include <mach/mig.h>
73#include <mach/kern_return.h>
74#include <mach/memory_object.h>
75#include <mach/memory_object_default.h>
76#include <mach/memory_object_control_server.h>
77#include <mach/host_priv_server.h>
78#include <mach/boolean.h>
79#include <mach/vm_prot.h>
80#include <mach/message.h>
81
82/*
83 * Implementation dependencies:
84 */
85#include <string.h> /* For memcpy() */
86
87#include <kern/host.h>
88#include <kern/thread.h> /* For current_thread() */
89#include <kern/ipc_mig.h>
90#include <kern/misc_protos.h>
91
92#include <vm/vm_object.h>
93#include <vm/vm_fault.h>
94#include <vm/memory_object.h>
95#include <vm/vm_page.h>
96#include <vm/vm_pageout.h>
97#include <vm/pmap.h> /* For pmap_clear_modify */
98#include <vm/vm_kern.h> /* For kernel_map, vm_move */
99#include <vm/vm_map.h> /* For vm_map_pageable */
100#include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */
101#include <vm/vm_shared_region.h>
102
103#include <vm/vm_external.h>
104
105#include <vm/vm_protos.h>
106
107memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
108LCK_MTX_EARLY_DECLARE(memory_manager_default_lock, &vm_object_lck_grp);
109
110
111/*
112 * Routine: memory_object_should_return_page
113 *
114 * Description:
115 * Determine whether the given page should be returned,
116 * based on the page's state and on the given return policy.
117 *
118 * We should return the page if one of the following is true:
119 *
120 * 1. Page is dirty and should_return is not RETURN_NONE.
121 * 2. Page is precious and should_return is RETURN_ALL.
122 * 3. Should_return is RETURN_ANYTHING.
123 *
124 * As a side effect, m->vmp_dirty will be made consistent
125 * with pmap_is_modified(m), if should_return is not
126 * MEMORY_OBJECT_RETURN_NONE.
127 */
128
129#define memory_object_should_return_page(m, should_return) \
130 (should_return != MEMORY_OBJECT_RETURN_NONE && \
131 (((m)->vmp_dirty || ((m)->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m)))) || \
132 ((m)->vmp_precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
133 (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
134
135typedef int memory_object_lock_result_t;
136
137#define MEMORY_OBJECT_LOCK_RESULT_DONE 0
138#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
139#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 2
140#define MEMORY_OBJECT_LOCK_RESULT_MUST_FREE 3
141
142memory_object_lock_result_t memory_object_lock_page(
143 vm_page_t m,
144 memory_object_return_t should_return,
145 boolean_t should_flush,
146 vm_prot_t prot);
147
148/*
149 * Routine: memory_object_lock_page
150 *
151 * Description:
152 * Perform the appropriate lock operations on the
153 * given page. See the description of
154 * "memory_object_lock_request" for the meanings
155 * of the arguments.
156 *
157 * Returns an indication that the operation
158 * completed, blocked, or that the page must
159 * be cleaned.
160 */
161memory_object_lock_result_t
162memory_object_lock_page(
163 vm_page_t m,
164 memory_object_return_t should_return,
165 boolean_t should_flush,
166 vm_prot_t prot)
167{
168 if (m->vmp_busy || m->vmp_cleaning) {
169 return MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK;
170 }
171
172 if (m->vmp_laundry) {
173 vm_pageout_steal_laundry(m, FALSE);
174 }
175
176 /*
177 * Don't worry about pages for which the kernel
178 * does not have any data.
179 */
180 if (m->vmp_absent || m->vmp_error || m->vmp_restart) {
181 if (m->vmp_error && should_flush && !VM_PAGE_WIRED(m)) {
182 /*
183 * dump the page, pager wants us to
184 * clean it up and there is no
185 * relevant data to return
186 */
187 return MEMORY_OBJECT_LOCK_RESULT_MUST_FREE;
188 }
189 return MEMORY_OBJECT_LOCK_RESULT_DONE;
190 }
191 assert(!m->vmp_fictitious);
192
193 if (VM_PAGE_WIRED(m)) {
194 /*
195 * The page is wired... just clean or return the page if needed.
196 * Wired pages don't get flushed or disconnected from the pmap.
197 */
198 if (memory_object_should_return_page(m, should_return)) {
199 return MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN;
200 }
201
202 return MEMORY_OBJECT_LOCK_RESULT_DONE;
203 }
204
205 if (should_flush) {
206 /*
207 * must do the pmap_disconnect before determining the
208 * need to return the page... otherwise it's possible
209 * for the page to go from the clean to the dirty state
210 * after we've made our decision
211 */
212 if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
213 SET_PAGE_DIRTY(m, FALSE);
214 }
215 } else {
216 /*
217 * If we are decreasing permission, do it now;
218 * let the fault handler take care of increases
219 * (pmap_page_protect may not increase protection).
220 */
221 if (prot != VM_PROT_NO_CHANGE) {
222 pmap_page_protect(VM_PAGE_GET_PHYS_PAGE(m), VM_PROT_ALL & ~prot);
223 }
224 }
225 /*
226 * Handle returning dirty or precious pages
227 */
228 if (memory_object_should_return_page(m, should_return)) {
229 /*
230 * we used to do a pmap_disconnect here in support
231 * of memory_object_lock_request, but that routine
232 * no longer requires this... in any event, in
233 * our world, it would turn into a big noop since
234 * we don't lock the page in any way and as soon
235 * as we drop the object lock, the page can be
236 * faulted back into an address space
237 *
238 * if (!should_flush)
239 * pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
240 */
241 return MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN;
242 }
243
244 /*
245 * Handle flushing clean pages
246 */
247 if (should_flush) {
248 return MEMORY_OBJECT_LOCK_RESULT_MUST_FREE;
249 }
250
251 /*
252 * we used to deactivate clean pages at this point,
253 * but we do not believe that an msync should change
254 * the 'age' of a page in the cache... here is the
255 * original comment and code concerning this...
256 *
257 * XXX Make clean but not flush a paging hint,
258 * and deactivate the pages. This is a hack
259 * because it overloads flush/clean with
260 * implementation-dependent meaning. This only
261 * happens to pages that are already clean.
262 *
263 * if (vm_page_deactivate_hint && (should_return != MEMORY_OBJECT_RETURN_NONE))
264 * return (MEMORY_OBJECT_LOCK_RESULT_MUST_DEACTIVATE);
265 */
266
267 return MEMORY_OBJECT_LOCK_RESULT_DONE;
268}
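/*
 * Illustrative sketch (not part of the original source): how a caller of
 * memory_object_lock_page() might dispatch on the four lock results. The
 * real consumer is vm_object_update_extent() below; this is only a compact
 * restatement of that dispatch, kept out of the build with #if 0.
 */
#if 0 /* example only */
static void
example_dispatch_lock_result(memory_object_lock_result_t result)
{
	switch (result) {
	case MEMORY_OBJECT_LOCK_RESULT_DONE:
		/* nothing further to do for this page */
		break;
	case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
		/* page is busy or being cleaned: sleep on it, then retry */
		break;
	case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
		/* dirty or precious: add the page to the run pushed to the pager */
		break;
	case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
		/* no useful data to keep (or flush requested): free the page */
		break;
	}
}
#endif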
269
270
271
272/*
273 * Routine: memory_object_lock_request [user interface]
274 *
275 * Description:
276 * Control use of the data associated with the given
277 * memory object. For each page in the given range,
278 * perform the following operations, in order:
279 * 1) restrict access to the page (disallow
280 * forms specified by "prot");
281 * 2) return data to the manager (if "should_return"
282 * is RETURN_DIRTY and the page is dirty, or
283 * "should_return" is RETURN_ALL and the page
284 * is either dirty or precious); and,
285 * 3) flush the cached copy (if "should_flush"
286 * is asserted).
287 * The set of pages is defined by a starting offset
288 * ("offset") and size ("size"). Only pages with the
289 * same page alignment as the starting offset are
290 * considered.
291 *
292 * A single acknowledgement is sent (to the "reply_to"
293 * port) when these actions are complete. If successful,
294 * the naked send right for reply_to is consumed.
295 */
296
297kern_return_t
298memory_object_lock_request(
299 memory_object_control_t control,
300 memory_object_offset_t offset,
301 memory_object_size_t size,
302 memory_object_offset_t * resid_offset,
303 int * io_errno,
304 memory_object_return_t should_return,
305 int flags,
306 vm_prot_t prot)
307{
308 vm_object_t object;
309
310 /*
311 * Check for bogus arguments.
312 */
313 object = memory_object_control_to_vm_object(control);
314 if (object == VM_OBJECT_NULL) {
315 return KERN_INVALID_ARGUMENT;
316 }
317
318 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
319 return KERN_INVALID_ARGUMENT;
320 }
321
322 size = round_page_64(size);
323
324 /*
325 * Lock the object, and acquire a paging reference to
326 * prevent the memory_object reference from being released.
327 */
328 vm_object_lock(object);
329 vm_object_paging_begin(object);
330
331 if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
332 if ((should_return != MEMORY_OBJECT_RETURN_NONE) || offset || object->copy) {
333 flags &= ~MEMORY_OBJECT_DATA_FLUSH_ALL;
334 flags |= MEMORY_OBJECT_DATA_FLUSH;
335 }
336 }
337 offset -= object->paging_offset;
338
339 if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
340 vm_object_reap_pages(object, REAP_DATA_FLUSH);
341 } else {
342 (void)vm_object_update(object, offset, size, resid_offset,
343 io_errno, should_return, flags, prot);
344 }
345
346 vm_object_paging_end(object);
347 vm_object_unlock(object);
348
349 return KERN_SUCCESS;
350}
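/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * in-kernel caller asking the object behind "control" to return its dirty
 * pages to the pager and then flush the cached copies for one range. The
 * example_ name, the control handle and the range are assumptions; the
 * argument order follows memory_object_lock_request() above.
 */
#if 0 /* example only */
static kern_return_t
example_flush_range(
	memory_object_control_t control,
	memory_object_offset_t  offset,
	memory_object_size_t    size)
{
	memory_object_offset_t resid_offset = 0;
	int io_errno = 0;

	return memory_object_lock_request(control,
	           offset,
	           size,
	           &resid_offset,               /* first offset not handled, if any */
	           &io_errno,                   /* pager I/O error, if any */
	           MEMORY_OBJECT_RETURN_DIRTY,  /* return dirty pages */
	           MEMORY_OBJECT_DATA_FLUSH,    /* then flush the cached copies */
	           VM_PROT_NO_CHANGE);          /* leave page protections alone */
}
#endif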
351
352/*
353 * memory_object_release_name: [interface]
354 *
355 * Enforces name semantics on memory_object reference count decrement.
356 * This routine should not be called unless the caller holds a name
357 * reference gained through the memory_object_named_create or the
358 * memory_object_rename call.
359 * If the TERMINATE_IDLE flag is set, the call will return if the
360 * reference count is not 1, i.e. the object is idle with the only
361 * remaining reference being the name.
362 * If the decision is made to proceed, the name flag is set to
363 * false and the reference count is decremented. If the RESPECT_CACHE
364 * flag is set and the reference count has gone to zero, the
365 * memory_object is checked to see if it is cacheable; otherwise, when
366 * the reference count is zero, it is simply terminated.
367 */
368
369kern_return_t
370memory_object_release_name(
371 memory_object_control_t control,
372 int flags)
373{
374 vm_object_t object;
375
376 object = memory_object_control_to_vm_object(control);
377 if (object == VM_OBJECT_NULL) {
378 return KERN_INVALID_ARGUMENT;
379 }
380
381 return vm_object_release_name(object, flags);
382}
383
384
385
386/*
387 * Routine: memory_object_destroy [user interface]
388 * Purpose:
389 * Shut down a memory object, despite the
390 * presence of address map (or other) references
391 * to the vm_object.
392 */
393kern_return_t
394memory_object_destroy(
395 memory_object_control_t control,
396 kern_return_t reason)
397{
398 vm_object_t object;
399
400 object = memory_object_control_to_vm_object(control);
401 if (object == VM_OBJECT_NULL) {
402 return KERN_INVALID_ARGUMENT;
403 }
404
405 return vm_object_destroy(object, reason);
406}
407
408/*
409 * Routine: vm_object_sync
410 *
411 * Kernel internal function to synch out pages in a given
412 * range within an object to its memory manager. Much the
413 * same as memory_object_lock_request but page protection
414 * is not changed.
415 *
416 * If the should_flush and should_return flags are true, pages
417 * are flushed; that is, dirty and precious pages are written to
418 * the memory manager and then discarded. If should_return
419 * is false, only precious pages are returned to the memory
420 * manager.
421 *
422 * If should_flush is false and should_return is true, the memory
423 * manager's copy of the pages is updated. If should_return
424 * is also false, only the precious pages are updated. This
425 * last option is of limited utility.
426 *
427 * Returns:
428 * FALSE if no pages were returned to the pager
429 * TRUE otherwise.
430 */
431
432boolean_t
433vm_object_sync(
434 vm_object_t object,
435 vm_object_offset_t offset,
436 vm_object_size_t size,
437 boolean_t should_flush,
438 boolean_t should_return,
439 boolean_t should_iosync)
440{
441 boolean_t rv;
442 int flags;
443
444 /*
445 * Lock the object, and acquire a paging reference to
446 * prevent the memory_object and control ports from
447 * being destroyed.
448 */
449 vm_object_lock(object);
450 vm_object_paging_begin(object);
451
452 if (should_flush) {
453 flags = MEMORY_OBJECT_DATA_FLUSH;
454 /*
455 * This flush is from an msync(), not a truncate(), so the
456 * contents of the file are not affected.
457 * MEMORY_OBJECT_DATA_NO_CHANGE lets vm_object_update() know
458 * that the data is not changed and that there's no need to
459 * push the old contents to a copy object.
460 */
461 flags |= MEMORY_OBJECT_DATA_NO_CHANGE;
462 } else {
463 flags = 0;
464 }
465
466 if (should_iosync) {
467 flags |= MEMORY_OBJECT_IO_SYNC;
468 }
469
470 rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL,
471 (should_return) ?
472 MEMORY_OBJECT_RETURN_ALL :
473 MEMORY_OBJECT_RETURN_NONE,
474 flags,
475 VM_PROT_NO_CHANGE);
476
477
478 vm_object_paging_end(object);
479 vm_object_unlock(object);
480 return rv;
481}
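/*
 * Illustrative sketch (not part of the original source): an msync()-style
 * writeback of one object range through vm_object_sync(). The object and
 * range are assumptions; passing should_iosync == TRUE makes the pageout
 * synchronous with respect to the pager.
 */
#if 0 /* example only */
static boolean_t
example_msync_object(
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_object_size_t   size)
{
	/* return dirty/precious pages, do not flush them, wait for the I/O */
	return vm_object_sync(object, offset, size,
	           FALSE,       /* should_flush */
	           TRUE,        /* should_return */
	           TRUE);       /* should_iosync */
}
#endif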
482
483
484
485#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, po, ro, ioerr, iosync) \
486MACRO_BEGIN \
487 \
488 int upl_flags; \
489 memory_object_t pager; \
490 \
491 if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) { \
492 vm_object_paging_begin(object); \
493 vm_object_unlock(object); \
494 \
495 if (iosync) \
496 upl_flags = UPL_MSYNC | UPL_IOSYNC; \
497 else \
498 upl_flags = UPL_MSYNC; \
499 \
500 (void) memory_object_data_return(pager, \
501 po, \
502 (memory_object_cluster_size_t)data_cnt, \
503 ro, \
504 ioerr, \
505 FALSE, \
506 FALSE, \
507 upl_flags); \
508 \
509 vm_object_lock(object); \
510 vm_object_paging_end(object); \
511 } \
512MACRO_END
513
514extern struct vnode *
515vnode_pager_lookup_vnode(memory_object_t);
516
517static int
518vm_object_update_extent(
519 vm_object_t object,
520 vm_object_offset_t offset,
521 vm_object_offset_t offset_end,
522 vm_object_offset_t *offset_resid,
523 int *io_errno,
524 boolean_t should_flush,
525 memory_object_return_t should_return,
526 boolean_t should_iosync,
527 vm_prot_t prot)
528{
529 vm_page_t m;
530 int retval = 0;
531 vm_object_offset_t paging_offset = 0;
532 vm_object_offset_t next_offset = offset;
533 memory_object_lock_result_t page_lock_result;
534 memory_object_cluster_size_t data_cnt = 0;
535 struct vm_page_delayed_work dw_array;
536 struct vm_page_delayed_work *dwp, *dwp_start;
537 bool dwp_finish_ctx = TRUE;
538 int dw_count;
539 int dw_limit;
540 int dirty_count;
541
542 dwp_start = dwp = NULL;
543 dw_count = 0;
544 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
545 dwp_start = vm_page_delayed_work_get_ctx();
546 if (dwp_start == NULL) {
547 dwp_start = &dw_array;
548 dw_limit = 1;
549 dwp_finish_ctx = FALSE;
550 }
551 dwp = dwp_start;
552
553 dirty_count = 0;
554
555 for (;
556 offset < offset_end && object->resident_page_count;
557 offset += PAGE_SIZE_64) {
558 /*
559 * Limit the number of pages to be cleaned at once to a contiguous
560 * run, or at most MAX_UPL_TRANSFER_BYTES
561 */
562 if (data_cnt) {
563 if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {
564 if (dw_count) {
565 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
566 dwp = dwp_start;
567 dw_count = 0;
568 }
569 LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
570 paging_offset, offset_resid, io_errno, should_iosync);
571 data_cnt = 0;
572 }
573 }
574 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
575 dwp->dw_mask = 0;
576
577 page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
578
579 if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) {
580 /*
581 * End of a run of dirty/precious pages.
582 */
583 if (dw_count) {
584 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
585 dwp = dwp_start;
586 dw_count = 0;
587 }
588 LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
589 paging_offset, offset_resid, io_errno, should_iosync);
590 /*
591 * LIST_REQ_PAGEOUT_PAGES will drop the object lock which will
592 * allow the state of page 'm' to change... we need to re-lookup
593 * the current offset
594 */
595 data_cnt = 0;
596 continue;
597 }
598
599 switch (page_lock_result) {
600 case MEMORY_OBJECT_LOCK_RESULT_DONE:
601 break;
602
603 case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
604 if (m->vmp_dirty == TRUE) {
605 dirty_count++;
606 }
607 dwp->dw_mask |= DW_vm_page_free;
608 break;
609
610 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
611 PAGE_SLEEP(object, m, THREAD_UNINT);
612 continue;
613
614 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
615 if (data_cnt == 0) {
616 paging_offset = offset;
617 }
618
619 data_cnt += PAGE_SIZE;
620 next_offset = offset + PAGE_SIZE_64;
621
622 /*
623 * wired pages shouldn't be flushed and
624 * since they aren't on any queue,
625 * no need to remove them
626 */
627 if (!VM_PAGE_WIRED(m)) {
628 if (should_flush) {
629 /*
630 * add additional state for the flush
631 */
632 m->vmp_free_when_done = TRUE;
633 }
634 /*
635 * we used to remove the page from the queues at this
636 * point, but we do not believe that an msync
637 * should cause the 'age' of a page to be changed
638 *
639 * else
640 * dwp->dw_mask |= DW_VM_PAGE_QUEUES_REMOVE;
641 */
642 }
643 retval = 1;
644 break;
645 }
646 if (dwp->dw_mask) {
647 VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
648
649 if (dw_count >= dw_limit) {
650 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
651 dwp = dwp_start;
652 dw_count = 0;
653 }
654 }
655 break;
656 }
657 }
658
659 if (object->pager) {
660 task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED, vnode_pager_lookup_vnode(object->pager));
661 }
662 /*
663 * We have completed the scan for applicable pages.
664 * Clean any pages that have been saved.
665 */
666 if (dw_count) {
667 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
668 }
669
670 if (data_cnt) {
671 LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
672 paging_offset, offset_resid, io_errno, should_iosync);
673 }
674
675 if (dwp_start && dwp_finish_ctx) {
676 vm_page_delayed_work_finish_ctx(dwp_start);
677 dwp_start = dwp = NULL;
678 }
679
680 return retval;
681}
682
683
684
685/*
686 * Routine: vm_object_update
687 * Description:
688 * Work function for memory_object_lock_request() and vm_object_sync().
689 *
690 * Called with object locked and paging ref taken.
691 */
692kern_return_t
693vm_object_update(
694 vm_object_t object,
695 vm_object_offset_t offset,
696 vm_object_size_t size,
697 vm_object_offset_t *resid_offset,
698 int *io_errno,
699 memory_object_return_t should_return,
700 int flags,
701 vm_prot_t protection)
702{
703 vm_object_t copy_object = VM_OBJECT_NULL;
704 boolean_t data_returned = FALSE;
705 boolean_t update_cow;
706 boolean_t should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE;
707 boolean_t should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE;
708 vm_fault_return_t result;
709 int num_of_extents;
710 int n;
711#define MAX_EXTENTS 8
712#define EXTENT_SIZE (1024 * 1024 * 256)
713#define RESIDENT_LIMIT (1024 * 32)
714 struct extent {
715 vm_object_offset_t e_base;
716 vm_object_offset_t e_min;
717 vm_object_offset_t e_max;
718 } extents[MAX_EXTENTS];
719
720 /*
721 * To avoid blocking while scanning for pages, save
722 * dirty pages to be cleaned all at once.
723 *
724 * XXXO A similar strategy could be used to limit the
725 * number of times that a scan must be restarted for
726 * other reasons. Those pages that would require blocking
727 * could be temporarily collected in another list, or
728 * their offsets could be recorded in a small array.
729 */
730
731 /*
732 * XXX NOTE: May want to consider converting this to a page list
733 * XXX vm_map_copy interface. Need to understand object
734 * XXX coalescing implications before doing so.
735 */
736
737 update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
738 && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
739 !(flags & MEMORY_OBJECT_DATA_PURGE)))
740 || (flags & MEMORY_OBJECT_COPY_SYNC);
741
742 if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) {
743 int collisions = 0;
744
745 while ((copy_object = object->copy) != VM_OBJECT_NULL) {
746 /*
747 * need to do a try here since we're swimming upstream
748 * against the normal lock ordering... however, we need
749 * to hold the object stable until we gain control of the
750 * copy object so we have to be careful how we approach this
751 */
752 if (vm_object_lock_try(copy_object)) {
753 /*
754 * we 'won' the lock on the copy object...
755 * no need to hold the object lock any longer...
756 * take a real reference on the copy object because
757 * we're going to call vm_fault_page on it which may
758 * under certain conditions drop the lock and the paging
759 * reference we're about to take... the reference
760 * will keep the copy object from going away if that happens
761 */
762 vm_object_unlock(object);
763 vm_object_reference_locked(copy_object);
764 break;
765 }
766 vm_object_unlock(object);
767
768 collisions++;
769 mutex_pause(collisions);
770
771 vm_object_lock(object);
772 }
773 }
774 if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) {
775 vm_object_offset_t i;
776 vm_object_size_t copy_size;
777 vm_object_offset_t copy_offset;
778 vm_prot_t prot;
779 vm_page_t page;
780 vm_page_t top_page;
781 kern_return_t error = 0;
782 struct vm_object_fault_info fault_info = {};
783
784 if (copy_object != VM_OBJECT_NULL) {
785 /*
786 * translate offset with respect to shadow's offset
787 */
788 copy_offset = (offset >= copy_object->vo_shadow_offset) ?
789 (offset - copy_object->vo_shadow_offset) : 0;
790
791 if (copy_offset > copy_object->vo_size) {
792 copy_offset = copy_object->vo_size;
793 }
794
795 /*
796 * clip size with respect to shadow offset
797 */
798 if (offset >= copy_object->vo_shadow_offset) {
799 copy_size = size;
800 } else if (size >= copy_object->vo_shadow_offset - offset) {
801 copy_size = (size - (copy_object->vo_shadow_offset - offset));
802 } else {
803 copy_size = 0;
804 }
805
806 if (copy_offset + copy_size > copy_object->vo_size) {
807 if (copy_object->vo_size >= copy_offset) {
808 copy_size = copy_object->vo_size - copy_offset;
809 } else {
810 copy_size = 0;
811 }
812 }
813 copy_size += copy_offset;
814 } else {
815 copy_object = object;
816
817 copy_size = offset + size;
818 copy_offset = offset;
819 }
820 fault_info.interruptible = THREAD_UNINT;
821 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
822 fault_info.lo_offset = copy_offset;
823 fault_info.hi_offset = copy_size;
824 fault_info.stealth = TRUE;
825 assert(fault_info.cs_bypass == FALSE);
826 assert(fault_info.pmap_cs_associated == FALSE);
827
828 vm_object_paging_begin(copy_object);
829
830 for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
831RETRY_COW_OF_LOCK_REQUEST:
832 fault_info.cluster_size = (vm_size_t) (copy_size - i);
833 assert(fault_info.cluster_size == copy_size - i);
834
835 prot = VM_PROT_WRITE | VM_PROT_READ;
836 page = VM_PAGE_NULL;
837 result = vm_fault_page(copy_object, i,
838 VM_PROT_WRITE | VM_PROT_READ,
839 FALSE,
840 FALSE, /* page not looked up */
841 &prot,
842 &page,
843 &top_page,
844 (int *)0,
845 &error,
846 FALSE,
847 FALSE, &fault_info);
848
849 switch (result) {
850 case VM_FAULT_SUCCESS:
851 if (top_page) {
852 vm_fault_cleanup(
853 VM_PAGE_OBJECT(page), top_page);
854 vm_object_lock(copy_object);
855 vm_object_paging_begin(copy_object);
856 }
857 if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
858 vm_page_lockspin_queues();
859
860 if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
861 vm_page_deactivate(page);
862 }
863 vm_page_unlock_queues();
864 }
865 PAGE_WAKEUP_DONE(page);
866 break;
867 case VM_FAULT_RETRY:
868 prot = VM_PROT_WRITE | VM_PROT_READ;
869 vm_object_lock(copy_object);
870 vm_object_paging_begin(copy_object);
871 goto RETRY_COW_OF_LOCK_REQUEST;
872 case VM_FAULT_INTERRUPTED:
873 prot = VM_PROT_WRITE | VM_PROT_READ;
874 vm_object_lock(copy_object);
875 vm_object_paging_begin(copy_object);
876 goto RETRY_COW_OF_LOCK_REQUEST;
877 case VM_FAULT_MEMORY_SHORTAGE:
878 VM_PAGE_WAIT();
879 prot = VM_PROT_WRITE | VM_PROT_READ;
880 vm_object_lock(copy_object);
881 vm_object_paging_begin(copy_object);
882 goto RETRY_COW_OF_LOCK_REQUEST;
883 case VM_FAULT_SUCCESS_NO_VM_PAGE:
884 /* success but no VM page: fail */
885 vm_object_paging_end(copy_object);
886 vm_object_unlock(copy_object);
887 OS_FALLTHROUGH;
888 case VM_FAULT_MEMORY_ERROR:
889 if (object != copy_object) {
890 vm_object_deallocate(copy_object);
891 }
892 vm_object_lock(object);
893 goto BYPASS_COW_COPYIN;
894 default:
895 panic("vm_object_update: unexpected error 0x%x"
896 " from vm_fault_page()\n", result);
897 }
898 }
899 vm_object_paging_end(copy_object);
900 }
901 if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
902 if (copy_object != VM_OBJECT_NULL && copy_object != object) {
903 vm_object_unlock(copy_object);
904 vm_object_deallocate(copy_object);
905 vm_object_lock(object);
906 }
907 return KERN_SUCCESS;
908 }
909 if (copy_object != VM_OBJECT_NULL && copy_object != object) {
910 if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
911 vm_object_lock_assert_exclusive(copy_object);
912 copy_object->shadow_severed = TRUE;
913 copy_object->shadowed = FALSE;
914 copy_object->shadow = NULL;
915 /*
916 * delete the ref the COW was holding on the target object
917 */
918 vm_object_deallocate(object);
919 }
920 vm_object_unlock(copy_object);
921 vm_object_deallocate(copy_object);
922 vm_object_lock(object);
923 }
924BYPASS_COW_COPYIN:
925
926 /*
927 * when we have a really large range to check relative
928 * to the number of actual resident pages, we'd like
929 * to use the resident page list to drive our checks
930 * however, the object lock will get dropped while processing
931 * the page which means the resident queue can change which
932 * means we can't walk the queue as we process the pages
933 * we also want to do the processing in offset order to allow
934 * 'runs' of pages to be collected if we're being told to
935 * flush to disk... the resident page queue is NOT ordered.
936 *
937 * a temporary solution (until we figure out how to deal with
938 * large address spaces more generically) is to pre-flight
939 * the resident page queue (if it's small enough) and develop
940 * a collection of extents (that encompass actual resident pages)
941 * to visit. This will at least allow us to deal with some of the
942 * more pathological cases in a more efficient manner. The current
943 * worst case (a single resident page at the end of an extremely large
944 * range) can take minutes to complete for ranges in the terabyte
945 * category... since this routine is called when truncating a file,
946 * and we currently support files up to 16 Tbytes in size, this
947 * is not a theoretical problem
948 */
949
950 if ((object->resident_page_count < RESIDENT_LIMIT) &&
951 (atop_64(size) > (unsigned)(object->resident_page_count / (8 * MAX_EXTENTS)))) {
952 vm_page_t next;
953 vm_object_offset_t start;
954 vm_object_offset_t end;
955 vm_object_size_t e_mask;
956 vm_page_t m;
957
958 start = offset;
959 end = offset + size;
960 num_of_extents = 0;
961 e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));
962
963 m = (vm_page_t) vm_page_queue_first(&object->memq);
964
965 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) m)) {
966 next = (vm_page_t) vm_page_queue_next(&m->vmp_listq);
967
968 if ((m->vmp_offset >= start) && (m->vmp_offset < end)) {
969 /*
970 * this is a page we're interested in
971 * try to fit it into a current extent
972 */
973 for (n = 0; n < num_of_extents; n++) {
974 if ((m->vmp_offset & e_mask) == extents[n].e_base) {
975 /*
976 * use (PAGE_SIZE - 1) to determine the
977 * max offset so that we don't wrap if
978 * we're at the last page of the space
979 */
980 if (m->vmp_offset < extents[n].e_min) {
981 extents[n].e_min = m->vmp_offset;
982 } else if ((m->vmp_offset + (PAGE_SIZE - 1)) > extents[n].e_max) {
983 extents[n].e_max = m->vmp_offset + (PAGE_SIZE - 1);
984 }
985 break;
986 }
987 }
988 if (n == num_of_extents) {
989 /*
990 * didn't find a current extent that can encompass
991 * this page
992 */
993 if (n < MAX_EXTENTS) {
994 /*
995 * if we still have room,
996 * create a new extent
997 */
998 extents[n].e_base = m->vmp_offset & e_mask;
999 extents[n].e_min = m->vmp_offset;
1000 extents[n].e_max = m->vmp_offset + (PAGE_SIZE - 1);
1001
1002 num_of_extents++;
1003 } else {
1004 /*
1005 * no room to create a new extent...
1006 * fall back to a single extent based
1007 * on the min and max page offsets
1008 * we find in the range we're interested in...
1009 * first, look through the extent list and
1010 * develop the overall min and max for the
1011 * pages we've looked at up to this point
1012 */
1013 for (n = 1; n < num_of_extents; n++) {
1014 if (extents[n].e_min < extents[0].e_min) {
1015 extents[0].e_min = extents[n].e_min;
1016 }
1017 if (extents[n].e_max > extents[0].e_max) {
1018 extents[0].e_max = extents[n].e_max;
1019 }
1020 }
1021 /*
1022 * now setup to run through the remaining pages
1023 * to determine the overall min and max
1024 * offset for the specified range
1025 */
1026 extents[0].e_base = 0;
1027 e_mask = 0;
1028 num_of_extents = 1;
1029
1030 /*
1031 * by continuing, we'll reprocess the
1032 * page that forced us to abandon trying
1033 * to develop multiple extents
1034 */
1035 continue;
1036 }
1037 }
1038 }
1039 m = next;
1040 }
1041 } else {
1042 extents[0].e_min = offset;
1043 extents[0].e_max = offset + (size - 1);
1044
1045 num_of_extents = 1;
1046 }
1047 for (n = 0; n < num_of_extents; n++) {
1048 if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
1049 should_flush, should_return, should_iosync, protection)) {
1050 data_returned = TRUE;
1051 }
1052 }
1053 return data_returned;
1054}
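/*
 * Illustrative sketch (not part of the original source): how the extent
 * pre-flight above buckets a resident page. With EXTENT_SIZE = 256MB,
 * e_mask clears the low 28 bits of an offset, so all pages sharing the
 * same 256MB-aligned base can be folded into one extent entry.
 */
#if 0 /* example only */
static vm_object_offset_t
example_extent_base(vm_object_offset_t page_offset)
{
	vm_object_size_t e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));

	return page_offset & e_mask;    /* 256MB-aligned extent base */
}
#endif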
1055
1056
1057static kern_return_t
1058vm_object_set_attributes_common(
1059 vm_object_t object,
1060 boolean_t may_cache,
1061 memory_object_copy_strategy_t copy_strategy)
1062{
1063 boolean_t object_became_ready;
1064
1065 if (object == VM_OBJECT_NULL) {
1066 return KERN_INVALID_ARGUMENT;
1067 }
1068
1069 /*
1070 * Verify the attributes of importance
1071 */
1072
1073 switch (copy_strategy) {
1074 case MEMORY_OBJECT_COPY_NONE:
1075 case MEMORY_OBJECT_COPY_DELAY:
1076 break;
1077 default:
1078 return KERN_INVALID_ARGUMENT;
1079 }
1080
1081 if (may_cache) {
1082 may_cache = TRUE;
1083 }
1084
1085 vm_object_lock(object);
1086
1087 /*
1088 * Copy the attributes
1089 */
1090 assert(!object->internal);
1091 object_became_ready = !object->pager_ready;
1092 object->copy_strategy = copy_strategy;
1093 object->can_persist = may_cache;
1094
1095 /*
1096 * Wake up anyone waiting for the ready attribute
1097 * to become asserted.
1098 */
1099
1100 if (object_became_ready) {
1101 object->pager_ready = TRUE;
1102 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
1103 }
1104
1105 vm_object_unlock(object);
1106
1107 return KERN_SUCCESS;
1108}
1109
1110
1111kern_return_t
1112memory_object_synchronize_completed(
1113 __unused memory_object_control_t control,
1114 __unused memory_object_offset_t offset,
1115 __unused memory_object_size_t length)
1116{
1117 panic("memory_object_synchronize_completed no longer supported\n");
1118 return KERN_FAILURE;
1119}
1120
1121
1122/*
1123 * Set the memory object attribute as provided.
1124 *
1125 * XXX This routine cannot be completed until the vm_msync, clean
1126 * in place, and cluster work is completed. See ifdef notyet
1127 * below and note that vm_object_set_attributes_common()
1128 * may have to be expanded.
1129 */
1130kern_return_t
1131memory_object_change_attributes(
1132 memory_object_control_t control,
1133 memory_object_flavor_t flavor,
1134 memory_object_info_t attributes,
1135 mach_msg_type_number_t count)
1136{
1137 vm_object_t object;
1138 kern_return_t result = KERN_SUCCESS;
1139 boolean_t may_cache;
1140 boolean_t invalidate;
1141 memory_object_copy_strategy_t copy_strategy;
1142
1143 object = memory_object_control_to_vm_object(control);
1144 if (object == VM_OBJECT_NULL) {
1145 return KERN_INVALID_ARGUMENT;
1146 }
1147
1148 vm_object_lock(object);
1149
1150 may_cache = object->can_persist;
1151 copy_strategy = object->copy_strategy;
1152#if notyet
1153 invalidate = object->invalidate;
1154#endif
1155 vm_object_unlock(object);
1156
1157 switch (flavor) {
1158 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1159 {
1160 old_memory_object_behave_info_t behave;
1161
1162 if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1163 result = KERN_INVALID_ARGUMENT;
1164 break;
1165 }
1166
1167 behave = (old_memory_object_behave_info_t) attributes;
1168
1169 invalidate = behave->invalidate;
1170 copy_strategy = behave->copy_strategy;
1171
1172 break;
1173 }
1174
1175 case MEMORY_OBJECT_BEHAVIOR_INFO:
1176 {
1177 memory_object_behave_info_t behave;
1178
1179 if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1180 result = KERN_INVALID_ARGUMENT;
1181 break;
1182 }
1183
1184 behave = (memory_object_behave_info_t) attributes;
1185
1186 invalidate = behave->invalidate;
1187 copy_strategy = behave->copy_strategy;
1188 break;
1189 }
1190
1191 case MEMORY_OBJECT_PERFORMANCE_INFO:
1192 {
1193 memory_object_perf_info_t perf;
1194
1195 if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
1196 result = KERN_INVALID_ARGUMENT;
1197 break;
1198 }
1199
1200 perf = (memory_object_perf_info_t) attributes;
1201
1202 may_cache = perf->may_cache;
1203
1204 break;
1205 }
1206
1207 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1208 {
1209 old_memory_object_attr_info_t attr;
1210
1211 if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1212 result = KERN_INVALID_ARGUMENT;
1213 break;
1214 }
1215
1216 attr = (old_memory_object_attr_info_t) attributes;
1217
1218 may_cache = attr->may_cache;
1219 copy_strategy = attr->copy_strategy;
1220
1221 break;
1222 }
1223
1224 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1225 {
1226 memory_object_attr_info_t attr;
1227
1228 if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
1229 result = KERN_INVALID_ARGUMENT;
1230 break;
1231 }
1232
1233 attr = (memory_object_attr_info_t) attributes;
1234
1235 copy_strategy = attr->copy_strategy;
1236 may_cache = attr->may_cache_object;
1237
1238 break;
1239 }
1240
1241 default:
1242 result = KERN_INVALID_ARGUMENT;
1243 break;
1244 }
1245
1246 if (result != KERN_SUCCESS) {
1247 return result;
1248 }
1249
1250 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
1251 copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1252 }
1253
1254 /*
1255 * XXX may_cache may become a tri-valued variable to handle
1256 * XXX uncache if not in use.
1257 */
1258 return vm_object_set_attributes_common(object,
1259 may_cache,
1260 copy_strategy);
1261}
1262
1263kern_return_t
1264memory_object_get_attributes(
1265 memory_object_control_t control,
1266 memory_object_flavor_t flavor,
1267 memory_object_info_t attributes, /* pointer to OUT array */
1268 mach_msg_type_number_t *count) /* IN/OUT */
1269{
1270 kern_return_t ret = KERN_SUCCESS;
1271 vm_object_t object;
1272
1273 object = memory_object_control_to_vm_object(control);
1274 if (object == VM_OBJECT_NULL) {
1275 return KERN_INVALID_ARGUMENT;
1276 }
1277
1278 vm_object_lock(object);
1279
1280 switch (flavor) {
1281 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1282 {
1283 old_memory_object_behave_info_t behave;
1284
1285 if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1286 ret = KERN_INVALID_ARGUMENT;
1287 break;
1288 }
1289
1290 behave = (old_memory_object_behave_info_t) attributes;
1291 behave->copy_strategy = object->copy_strategy;
1292 behave->temporary = FALSE;
1293#if notyet /* remove when vm_msync complies and clean in place fini */
1294 behave->invalidate = object->invalidate;
1295#else
1296 behave->invalidate = FALSE;
1297#endif
1298
1299 *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1300 break;
1301 }
1302
1303 case MEMORY_OBJECT_BEHAVIOR_INFO:
1304 {
1305 memory_object_behave_info_t behave;
1306
1307 if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1308 ret = KERN_INVALID_ARGUMENT;
1309 break;
1310 }
1311
1312 behave = (memory_object_behave_info_t) attributes;
1313 behave->copy_strategy = object->copy_strategy;
1314 behave->temporary = FALSE;
1315#if notyet /* remove when vm_msync complies and clean in place fini */
1316 behave->invalidate = object->invalidate;
1317#else
1318 behave->invalidate = FALSE;
1319#endif
1320 behave->advisory_pageout = FALSE;
1321 behave->silent_overwrite = FALSE;
1322 *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1323 break;
1324 }
1325
1326 case MEMORY_OBJECT_PERFORMANCE_INFO:
1327 {
1328 memory_object_perf_info_t perf;
1329
1330 if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
1331 ret = KERN_INVALID_ARGUMENT;
1332 break;
1333 }
1334
1335 perf = (memory_object_perf_info_t) attributes;
1336 perf->cluster_size = PAGE_SIZE;
1337 perf->may_cache = object->can_persist;
1338
1339 *count = MEMORY_OBJECT_PERF_INFO_COUNT;
1340 break;
1341 }
1342
1343 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1344 {
1345 old_memory_object_attr_info_t attr;
1346
1347 if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1348 ret = KERN_INVALID_ARGUMENT;
1349 break;
1350 }
1351
1352 attr = (old_memory_object_attr_info_t) attributes;
1353 attr->may_cache = object->can_persist;
1354 attr->copy_strategy = object->copy_strategy;
1355
1356 *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
1357 break;
1358 }
1359
1360 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1361 {
1362 memory_object_attr_info_t attr;
1363
1364 if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
1365 ret = KERN_INVALID_ARGUMENT;
1366 break;
1367 }
1368
1369 attr = (memory_object_attr_info_t) attributes;
1370 attr->copy_strategy = object->copy_strategy;
1371 attr->cluster_size = PAGE_SIZE;
1372 attr->may_cache_object = object->can_persist;
1373 attr->temporary = FALSE;
1374
1375 *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
1376 break;
1377 }
1378
1379 default:
1380 ret = KERN_INVALID_ARGUMENT;
1381 break;
1382 }
1383
1384 vm_object_unlock(object);
1385
1386 return ret;
1387}
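/*
 * Illustrative sketch (not part of the original source): querying the
 * MEMORY_OBJECT_PERFORMANCE_INFO flavor through a control handle. The
 * control handle is an assumption; the flavor, count constant and the
 * perf-info fields are the ones used by memory_object_get_attributes()
 * above.
 */
#if 0 /* example only */
static boolean_t
example_object_may_cache(memory_object_control_t control)
{
	memory_object_perf_info_data_t perf;
	mach_msg_type_number_t count = MEMORY_OBJECT_PERF_INFO_COUNT;

	if (memory_object_get_attributes(control,
	        MEMORY_OBJECT_PERFORMANCE_INFO,
	        (memory_object_info_t)&perf,
	        &count) != KERN_SUCCESS) {
		return FALSE;
	}
	return perf.may_cache;
}
#endif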
1388
1389
1390kern_return_t
1391memory_object_iopl_request(
1392 ipc_port_t port,
1393 memory_object_offset_t offset,
1394 upl_size_t *upl_size,
1395 upl_t *upl_ptr,
1396 upl_page_info_array_t user_page_list,
1397 unsigned int *page_list_count,
1398 upl_control_flags_t *flags,
1399 vm_tag_t tag)
1400{
1401 vm_object_t object;
1402 kern_return_t ret;
1403 upl_control_flags_t caller_flags;
1404
1405 caller_flags = *flags;
1406
1407 if (caller_flags & ~UPL_VALID_FLAGS) {
1408 /*
1409 * For forward compatibility's sake,
1410 * reject any unknown flag.
1411 */
1412 return KERN_INVALID_VALUE;
1413 }
1414
1415 if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
1416 vm_named_entry_t named_entry;
1417
1418 named_entry = (vm_named_entry_t) ip_get_kobject(port);
1419 /* a few checks to make sure user is obeying rules */
1420 if (*upl_size == 0) {
1421 if (offset >= named_entry->size) {
1422 return KERN_INVALID_RIGHT;
1423 }
1424 *upl_size = (upl_size_t)(named_entry->size - offset);
1425 if (*upl_size != named_entry->size - offset) {
1426 return KERN_INVALID_ARGUMENT;
1427 }
1428 }
1429 if (caller_flags & UPL_COPYOUT_FROM) {
1430 if ((named_entry->protection & VM_PROT_READ)
1431 != VM_PROT_READ) {
1432 return KERN_INVALID_RIGHT;
1433 }
1434 } else {
1435 if ((named_entry->protection &
1436 (VM_PROT_READ | VM_PROT_WRITE))
1437 != (VM_PROT_READ | VM_PROT_WRITE)) {
1438 return KERN_INVALID_RIGHT;
1439 }
1440 }
1441 if (named_entry->size < (offset + *upl_size)) {
1442 return KERN_INVALID_ARGUMENT;
1443 }
1444
1445 /* the caller's offset is relative to the beginning of the named entry; */
1446 /* convert it to an offset within the backing object */
1447 offset = offset + named_entry->offset;
1448 offset += named_entry->data_offset;
1449
1450 if (named_entry->is_sub_map ||
1451 named_entry->is_copy) {
1452 return KERN_INVALID_ARGUMENT;
1453 }
1454 if (!named_entry->is_object) {
1455 return KERN_INVALID_ARGUMENT;
1456 }
1457
1458 named_entry_lock(named_entry);
1459
1460 object = vm_named_entry_to_vm_object(named_entry);
1461 assert(object != VM_OBJECT_NULL);
1462 vm_object_reference(object);
1463 named_entry_unlock(named_entry);
1464 } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
1465 panic("unexpected IKOT_MEM_OBJ_CONTROL: %p", port);
1466 } else {
1467 return KERN_INVALID_ARGUMENT;
1468 }
1469 if (object == VM_OBJECT_NULL) {
1470 return KERN_INVALID_ARGUMENT;
1471 }
1472
1473 if (!object->private) {
1474 if (object->phys_contiguous) {
1475 *flags = UPL_PHYS_CONTIG;
1476 } else {
1477 *flags = 0;
1478 }
1479 } else {
1480 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
1481 }
1482
1483 ret = vm_object_iopl_request(object,
1484 offset,
1485 *upl_size,
1486 upl_ptr,
1487 user_page_list,
1488 page_list_count,
1489 caller_flags,
1490 tag);
1491 vm_object_deallocate(object);
1492 return ret;
1493}
1494
1495/*
1496 * Routine: memory_object_upl_request [interface]
1497 * Purpose:
1498 * Cause the population of a portion of a vm_object.
1499 * Depending on the nature of the request, the pages
1500 * returned may contain valid data or be uninitialized.
1501 *
1502 */
1503
1504kern_return_t
1505memory_object_upl_request(
1506 memory_object_control_t control,
1507 memory_object_offset_t offset,
1508 upl_size_t size,
1509 upl_t *upl_ptr,
1510 upl_page_info_array_t user_page_list,
1511 unsigned int *page_list_count,
1512 int cntrl_flags,
1513 int tag)
1514{
1515 vm_object_t object;
1516 vm_tag_t vmtag = (vm_tag_t)tag;
1517 assert(vmtag == tag);
1518
1519 object = memory_object_control_to_vm_object(control);
1520 if (object == VM_OBJECT_NULL) {
1521 return KERN_TERMINATED;
1522 }
1523
1524 return vm_object_upl_request(object,
1525 offset,
1526 size,
1527 upl_ptr,
1528 user_page_list,
1529 page_list_count,
1530 (upl_control_flags_t)(unsigned int) cntrl_flags,
1531 vmtag);
1532}
1533
1534/*
1535 * Routine: memory_object_super_upl_request [interface]
1536 * Purpose:
1537 * Cause the population of a portion of a vm_object
1538 * in much the same way as memory_object_upl_request.
1539 * Depending on the nature of the request, the pages
1540 * returned may contain valid data or be uninitialized.
1541 * However, the region may be expanded up to the super
1542 * cluster size provided.
1543 */
1544
1545kern_return_t
1546memory_object_super_upl_request(
1547 memory_object_control_t control,
1548 memory_object_offset_t offset,
1549 upl_size_t size,
1550 upl_size_t super_cluster,
1551 upl_t *upl,
1552 upl_page_info_t *user_page_list,
1553 unsigned int *page_list_count,
1554 int cntrl_flags,
1555 int tag)
1556{
1557 vm_object_t object;
1558 vm_tag_t vmtag = (vm_tag_t)tag;
1559 assert(vmtag == tag);
1560
1561 object = memory_object_control_to_vm_object(control);
1562 if (object == VM_OBJECT_NULL) {
1563 return KERN_INVALID_ARGUMENT;
1564 }
1565
1566 return vm_object_super_upl_request(object,
1567 offset,
1568 size,
1569 super_cluster,
1570 upl,
1571 user_page_list,
1572 page_list_count,
1573 (upl_control_flags_t)(unsigned int) cntrl_flags,
1574 vmtag);
1575}
1576
1577kern_return_t
1578memory_object_cluster_size(
1579 memory_object_control_t control,
1580 memory_object_offset_t *start,
1581 vm_size_t *length,
1582 uint32_t *io_streaming,
1583 memory_object_fault_info_t mo_fault_info)
1584{
1585 vm_object_t object;
1586 vm_object_fault_info_t fault_info;
1587
1588 object = memory_object_control_to_vm_object(control);
1589
1590 if (object == VM_OBJECT_NULL || object->paging_offset > *start) {
1591 return KERN_INVALID_ARGUMENT;
1592 }
1593
1594 *start -= object->paging_offset;
1595
1596 fault_info = (vm_object_fault_info_t)(uintptr_t) mo_fault_info;
1597 vm_object_cluster_size(object,
1598 (vm_object_offset_t *)start,
1599 length,
1600 fault_info,
1601 io_streaming);
1602
1603 *start += object->paging_offset;
1604
1605 return KERN_SUCCESS;
1606}
1607
1608
1609/*
1610 * Routine: host_default_memory_manager [interface]
1611 * Purpose:
1612 * set/get the default memory manager port. The cluster size
1613 * argument is no longer used.
1614 *
1615 * If successful, consumes the supplied naked send right.
1616 */
1617kern_return_t
1618host_default_memory_manager(
1619 host_priv_t host_priv,
1620 memory_object_default_t *default_manager,
1621 __unused memory_object_cluster_size_t cluster_size)
1622{
1623 memory_object_default_t current_manager;
1624 memory_object_default_t new_manager;
1625 memory_object_default_t returned_manager;
1626 kern_return_t result = KERN_SUCCESS;
1627
1628 if (host_priv == HOST_PRIV_NULL) {
1629 return KERN_INVALID_HOST;
1630 }
1631
1632 new_manager = *default_manager;
1633 lck_mtx_lock(&memory_manager_default_lock);
1634 current_manager = memory_manager_default;
1635 returned_manager = MEMORY_OBJECT_DEFAULT_NULL;
1636
1637 if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1638 /*
1639 * Retrieve the current value.
1640 */
1641 returned_manager = current_manager;
1642 memory_object_default_reference(returned_manager);
1643 } else {
1644 /*
1645 * Only allow the kernel to change the value.
1646 */
1647 extern task_t kernel_task;
1648 if (current_task() != kernel_task) {
1649 result = KERN_NO_ACCESS;
1650 goto out;
1651 }
1652
1653 /*
1654 * If this is the first non-null manager, start
1655 * up the internal pager support.
1656 */
1657 if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1658 result = vm_pageout_internal_start();
1659 if (result != KERN_SUCCESS) {
1660 goto out;
1661 }
1662 }
1663
1664 /*
1665 * Retrieve the current value,
1666 * and replace it with the supplied value.
1667 * We return the old reference to the caller
1668 * but we have to take a reference on the new
1669 * one.
1670 */
1671 returned_manager = current_manager;
1672 memory_manager_default = new_manager;
1673 memory_object_default_reference(new_manager);
1674
1675 /*
1676 * In case anyone's been waiting for a memory
1677 * manager to be established, wake them up.
1678 */
1679
1680 thread_wakeup((event_t) &memory_manager_default);
1681
1682 /*
1683 * Now that we have a default pager for anonymous memory,
1684 * reactivate all the throttled pages (i.e. dirty pages with
1685 * no pager).
1686 */
1687 if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1688 vm_page_reactivate_all_throttled();
1689 }
1690 }
1691out:
1692 lck_mtx_unlock(&memory_manager_default_lock);
1693
1694 *default_manager = returned_manager;
1695 return result;
1696}
1697
1698/*
1699 * Routine: memory_manager_default_reference
1700 * Purpose:
1701 * Returns a naked send right for the default
1702 * memory manager. The returned right is always
1703 * valid (not IP_NULL or IP_DEAD).
1704 */
1705
1706__private_extern__ memory_object_default_t
1707memory_manager_default_reference(void)
1708{
1709 memory_object_default_t current_manager;
1710
1711 lck_mtx_lock(&memory_manager_default_lock);
1712 current_manager = memory_manager_default;
1713 while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1714 wait_result_t res;
1715
1716 res = lck_mtx_sleep(&memory_manager_default_lock,
1717 LCK_SLEEP_DEFAULT,
1718 (event_t) &memory_manager_default,
1719 THREAD_UNINT);
1720 assert(res == THREAD_AWAKENED);
1721 current_manager = memory_manager_default;
1722 }
1723 memory_object_default_reference(current_manager);
1724 lck_mtx_unlock(&memory_manager_default_lock);
1725
1726 return current_manager;
1727}
1728
1729/*
1730 * Routine: memory_manager_default_check
1731 *
1732 * Purpose:
1733 * Check whether a default memory manager has been set
1734 * up yet, or not. Returns KERN_SUCCESS if dmm exists,
1735 * and KERN_FAILURE if dmm does not exist.
1736 *
1737 * If there is no default memory manager, log an error,
1738 * but only the first time.
1739 *
1740 */
1741__private_extern__ kern_return_t
1742memory_manager_default_check(void)
1743{
1744 memory_object_default_t current;
1745
1746 lck_mtx_lock(&memory_manager_default_lock);
1747 current = memory_manager_default;
1748 if (current == MEMORY_OBJECT_DEFAULT_NULL) {
1749 static boolean_t logged; /* initialized to 0 */
1750 boolean_t complain = !logged;
1751 logged = TRUE;
1752 lck_mtx_unlock(&memory_manager_default_lock);
1753 if (complain) {
1754 printf("Warning: No default memory manager\n");
1755 }
1756 return KERN_FAILURE;
1757 } else {
1758 lck_mtx_unlock(&memory_manager_default_lock);
1759 return KERN_SUCCESS;
1760 }
1761}
1762
1763/* Allow manipulation of individual page state. This is actually part of */
1764/* the UPL regimen but takes place on the object rather than on a UPL */
1765
1766kern_return_t
1767memory_object_page_op(
1768 memory_object_control_t control,
1769 memory_object_offset_t offset,
1770 int ops,
1771 ppnum_t *phys_entry,
1772 int *flags)
1773{
1774 vm_object_t object;
1775
1776 object = memory_object_control_to_vm_object(control);
1777 if (object == VM_OBJECT_NULL) {
1778 return KERN_INVALID_ARGUMENT;
1779 }
1780
1781 return vm_object_page_op(object, offset, ops, phys_entry, flags);
1782}
1783
1784/*
1785 * memory_object_range_op offers performance enhancement over
1786 * memory_object_page_op for page_op functions which do not require page
1787 * level state to be returned from the call. Page_op was created to provide
1788 * a low-cost alternative to page manipulation via UPLs when only a single
1789 * page was involved. The range_op call establishes the ability in the _op
1790 * family of functions to work on multiple pages where the lack of page level
1791 * state handling allows the caller to avoid the overhead of the upl structures.
1792 */
1793
1794kern_return_t
1795memory_object_range_op(
1796 memory_object_control_t control,
1797 memory_object_offset_t offset_beg,
1798 memory_object_offset_t offset_end,
1799 int ops,
1800 int *range)
1801{
1802 vm_object_t object;
1803
1804 object = memory_object_control_to_vm_object(control);
1805 if (object == VM_OBJECT_NULL) {
1806 return KERN_INVALID_ARGUMENT;
1807 }
1808
1809 return vm_object_range_op(object,
1810 offset_beg,
1811 offset_end,
1812 ops,
1813 (uint32_t *) range);
1814}
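/*
 * Illustrative sketch (not part of the original source): using
 * memory_object_range_op() to dump any resident pages in a range without
 * building a UPL. The control handle and range are assumptions, and
 * UPL_ROP_DUMP is assumed to be the range-op flag for discarding pages.
 */
#if 0 /* example only */
static kern_return_t
example_dump_range(
	memory_object_control_t control,
	memory_object_offset_t  start,
	memory_object_offset_t  end)
{
	int range = 0;

	/* "range" reports how far the operation got before stopping */
	return memory_object_range_op(control, start, end,
	           UPL_ROP_DUMP, &range);
}
#endif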
1815
1816
1817void
1818memory_object_mark_used(
1819 memory_object_control_t control)
1820{
1821 vm_object_t object;
1822
1823 if (control == NULL) {
1824 return;
1825 }
1826
1827 object = memory_object_control_to_vm_object(control);
1828
1829 if (object != VM_OBJECT_NULL) {
1830 vm_object_cache_remove(object);
1831 }
1832}
1833
1834
1835void
1836memory_object_mark_unused(
1837 memory_object_control_t control,
1838 __unused boolean_t rage)
1839{
1840 vm_object_t object;
1841
1842 if (control == NULL) {
1843 return;
1844 }
1845
1846 object = memory_object_control_to_vm_object(control);
1847
1848 if (object != VM_OBJECT_NULL) {
1849 vm_object_cache_add(object);
1850 }
1851}
1852
1853void
1854memory_object_mark_io_tracking(
1855 memory_object_control_t control)
1856{
1857 vm_object_t object;
1858
1859 if (control == NULL) {
1860 return;
1861 }
1862 object = memory_object_control_to_vm_object(control);
1863
1864 if (object != VM_OBJECT_NULL) {
1865 vm_object_lock(object);
1866 object->io_tracking = TRUE;
1867 vm_object_unlock(object);
1868 }
1869}
1870
1871void
1872memory_object_mark_trusted(
1873 memory_object_control_t control)
1874{
1875 vm_object_t object;
1876
1877 if (control == NULL) {
1878 return;
1879 }
1880 object = memory_object_control_to_vm_object(control);
1881
1882 if (object != VM_OBJECT_NULL) {
1883 vm_object_lock(object);
1884 object->pager_trusted = TRUE;
1885 vm_object_unlock(object);
1886 }
1887}
1888
1889#if CONFIG_SECLUDED_MEMORY
1890void
1891memory_object_mark_eligible_for_secluded(
1892 memory_object_control_t control,
1893 boolean_t eligible_for_secluded)
1894{
1895 vm_object_t object;
1896
1897 if (control == NULL) {
1898 return;
1899 }
1900 object = memory_object_control_to_vm_object(control);
1901
1902 if (object == VM_OBJECT_NULL) {
1903 return;
1904 }
1905
1906 vm_object_lock(object);
1907 if (eligible_for_secluded &&
1908 secluded_for_filecache && /* global boot-arg */
1909 !object->eligible_for_secluded) {
1910 object->eligible_for_secluded = TRUE;
1911 vm_page_secluded.eligible_for_secluded += object->resident_page_count;
1912 } else if (!eligible_for_secluded &&
1913 object->eligible_for_secluded) {
1914 object->eligible_for_secluded = FALSE;
1915 vm_page_secluded.eligible_for_secluded -= object->resident_page_count;
1916 if (object->resident_page_count) {
1917 /* XXX FBDP TODO: flush pages from secluded queue? */
1918 // printf("FBDP TODO: flush %d pages from %p from secluded queue\n", object->resident_page_count, object);
1919 }
1920 }
1921 vm_object_unlock(object);
1922}
1923#endif /* CONFIG_SECLUDED_MEMORY */
1924
1925kern_return_t
1926memory_object_pages_resident(
1927 memory_object_control_t control,
1928 boolean_t * has_pages_resident)
1929{
1930 vm_object_t object;
1931
1932 *has_pages_resident = FALSE;
1933
1934 object = memory_object_control_to_vm_object(control);
1935 if (object == VM_OBJECT_NULL) {
1936 return KERN_INVALID_ARGUMENT;
1937 }
1938
1939 if (object->resident_page_count) {
1940 *has_pages_resident = TRUE;
1941 }
1942
1943 return KERN_SUCCESS;
1944}
1945
1946kern_return_t
1947memory_object_signed(
1948 memory_object_control_t control,
1949 boolean_t is_signed)
1950{
1951 vm_object_t object;
1952
1953 object = memory_object_control_to_vm_object(control);
1954 if (object == VM_OBJECT_NULL) {
1955 return KERN_INVALID_ARGUMENT;
1956 }
1957
1958 vm_object_lock(object);
1959 object->code_signed = is_signed;
1960 vm_object_unlock(object);
1961
1962 return KERN_SUCCESS;
1963}
1964
1965boolean_t
1966memory_object_is_signed(
1967 memory_object_control_t control)
1968{
1969 boolean_t is_signed;
1970 vm_object_t object;
1971
1972 object = memory_object_control_to_vm_object(control);
1973 if (object == VM_OBJECT_NULL) {
1974 return FALSE;
1975 }
1976
1977 vm_object_lock_shared(object);
1978 is_signed = object->code_signed;
1979 vm_object_unlock(object);
1980
1981 return is_signed;
1982}
1983
1984boolean_t
1985memory_object_is_shared_cache(
1986 memory_object_control_t control)
1987{
1988 vm_object_t object = VM_OBJECT_NULL;
1989
1990 object = memory_object_control_to_vm_object(control);
1991 if (object == VM_OBJECT_NULL) {
1992 return FALSE;
1993 }
1994
1995 return object->object_is_shared_cache;
1996}
1997
1998__private_extern__ memory_object_control_t
1999memory_object_control_allocate(
2000 vm_object_t object)
2001{
2002 return object;
2003}
2004
2005__private_extern__ void
2006memory_object_control_collapse(
2007 memory_object_control_t *control,
2008 vm_object_t object)
2009{
2010 *control = object;
2011}
2012
2013__private_extern__ vm_object_t
2014memory_object_control_to_vm_object(
2015 memory_object_control_t control)
2016{
2017 return control;
2018}
2019
2020__private_extern__ vm_object_t
2021memory_object_to_vm_object(
2022 memory_object_t mem_obj)
2023{
2024 memory_object_control_t mo_control;
2025
2026 if (mem_obj == MEMORY_OBJECT_NULL) {
2027 return VM_OBJECT_NULL;
2028 }
2029 mo_control = mem_obj->mo_control;
2030 if (mo_control == NULL) {
2031 return VM_OBJECT_NULL;
2032 }
2033 return memory_object_control_to_vm_object(mo_control);
2034}
2035
2036memory_object_control_t
2037convert_port_to_mo_control(
2038 __unused mach_port_t port)
2039{
2040 return MEMORY_OBJECT_CONTROL_NULL;
2041}
2042
2043
2044mach_port_t
2045convert_mo_control_to_port(
2046 __unused memory_object_control_t control)
2047{
2048 return MACH_PORT_NULL;
2049}
2050
2051void
2052memory_object_control_reference(
2053 __unused memory_object_control_t control)
2054{
2055 return;
2056}
2057
2058/*
2059 * We only ever issue one of these references, so kill it
2060 * when that gets released (we should switch to real reference
2061 * counting in true port-less EMMI).
2062 */
2063void
2064memory_object_control_deallocate(
2065 __unused memory_object_control_t control)
2066{
2067}
2068
2069void
2070memory_object_control_disable(
2071 memory_object_control_t *control)
2072{
2073 assert(*control != VM_OBJECT_NULL);
2074 *control = VM_OBJECT_NULL;
2075}
2076
2077void
2078memory_object_default_reference(
2079 memory_object_default_t dmm)
2080{
2081 ipc_port_make_send(dmm);
2082}
2083
2084void
2085memory_object_default_deallocate(
2086 memory_object_default_t dmm)
2087{
2088 ipc_port_release_send(dmm);
2089}
2090
2091memory_object_t
2092convert_port_to_memory_object(
2093 __unused mach_port_t port)
2094{
2095 return MEMORY_OBJECT_NULL;
2096}
2097
2098
2099mach_port_t
2100convert_memory_object_to_port(
2101 __unused memory_object_t object)
2102{
2103 return MACH_PORT_NULL;
2104}
2105
2106
2107/* Routine memory_object_reference */
2108void
2109memory_object_reference(
2110 memory_object_t memory_object)
2111{
2112 (memory_object->mo_pager_ops->memory_object_reference)(
2113 memory_object);
2114}
2115
2116/* Routine memory_object_deallocate */
2117void
2118memory_object_deallocate(
2119 memory_object_t memory_object)
2120{
2121 (memory_object->mo_pager_ops->memory_object_deallocate)(
2122 memory_object);
2123}
2124
2125
2126/* Routine memory_object_init */
2127kern_return_t
2128memory_object_init
2129(
2130 memory_object_t memory_object,
2131 memory_object_control_t memory_control,
2132 memory_object_cluster_size_t memory_object_page_size
2133)
2134{
2135 return (memory_object->mo_pager_ops->memory_object_init)(
2136 memory_object,
2137 memory_control,
2138 memory_object_page_size);
2139}
2140
2141/* Routine memory_object_terminate */
2142kern_return_t
2143memory_object_terminate
2144(
2145 memory_object_t memory_object
2146)
2147{
2148 return (memory_object->mo_pager_ops->memory_object_terminate)(
2149 memory_object);
2150}
2151
2152/* Routine memory_object_data_request */
2153kern_return_t
2154memory_object_data_request
2155(
2156 memory_object_t memory_object,
2157 memory_object_offset_t offset,
2158 memory_object_cluster_size_t length,
2159 vm_prot_t desired_access,
2160 memory_object_fault_info_t fault_info
2161)
2162{
2163 return (memory_object->mo_pager_ops->memory_object_data_request)(
2164 memory_object,
2165 offset,
2166 length,
2167 desired_access,
2168 fault_info);
2169}
2170
2171/* Routine memory_object_data_return */
2172kern_return_t
2173memory_object_data_return
2174(
2175 memory_object_t memory_object,
2176 memory_object_offset_t offset,
2177 memory_object_cluster_size_t size,
2178 memory_object_offset_t *resid_offset,
2179 int *io_error,
2180 boolean_t dirty,
2181 boolean_t kernel_copy,
2182 int upl_flags
2183)
2184{
2185 return (memory_object->mo_pager_ops->memory_object_data_return)(
2186 memory_object,
2187 offset,
2188 size,
2189 resid_offset,
2190 io_error,
2191 dirty,
2192 kernel_copy,
2193 upl_flags);
2194}
2195
2196/* Routine memory_object_data_initialize */
2197kern_return_t
2198memory_object_data_initialize
2199(
2200 memory_object_t memory_object,
2201 memory_object_offset_t offset,
2202 memory_object_cluster_size_t size
2203)
2204{
2205 return (memory_object->mo_pager_ops->memory_object_data_initialize)(
2206 memory_object,
2207 offset,
2208 size);
2209}
2210
2211/* Routine memory_object_data_unlock */
2212kern_return_t
2213memory_object_data_unlock
2214(
2215 memory_object_t memory_object,
2216 memory_object_offset_t offset,
2217 memory_object_size_t size,
2218 vm_prot_t desired_access
2219)
2220{
2221 return (memory_object->mo_pager_ops->memory_object_data_unlock)(
2222 memory_object,
2223 offset,
2224 size,
2225 desired_access);
2226}
2227
2228/* Routine memory_object_synchronize */
2229kern_return_t
2230memory_object_synchronize
2231(
2232 memory_object_t memory_object,
2233 memory_object_offset_t offset,
2234 memory_object_size_t size,
2235 vm_sync_t sync_flags
2236)
2237{
2238 panic("memory_object_synchronize no longer supported\n");
2239
2240 return (memory_object->mo_pager_ops->memory_object_synchronize)(
2241 memory_object,
2242 offset,
2243 size,
2244 sync_flags);
2245}
2246
2247
2248/*
2249 * memory_object_map() is called by VM (in vm_map_enter() and its variants)
2250 * each time a "named" VM object gets mapped directly or indirectly
2251 * (copy-on-write mapping). A "named" VM object has an extra reference held
2252 * by the pager to keep it alive until the pager decides that the
2253 * memory object (and its VM object) can be reclaimed.
2254 * VM calls memory_object_last_unmap() (in vm_object_deallocate()) when all
2255 * the mappings of that memory object have been removed.
2256 *
2257 * For a given VM object, calls to memory_object_map() and memory_object_last_unmap()
2258 * are serialized (through object->mapping_in_progress), to ensure that the
2259 * pager gets a consistent view of the mapping status of the memory object.
2260 *
2261 * This allows the pager to keep track of how many times a memory object
2262 * has been mapped and with which protections, to decide when it can be
2263 * reclaimed.
2264 */
2265
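/*
 * Editor's illustration (not part of the original file): a minimal sketch of
 * how a hypothetical pager could use the map / last_unmap protocol described
 * above to track whether its memory object is still mapped, and with which
 * protections.  The "example_pager" structure and its fields are invented for
 * this sketch; real pagers (vnode pager, compressor pager, ...) keep their own
 * equivalent state, and embedding a struct memory_object header as the first
 * member is assumed here.  As noted above, the VM serializes these calls
 * through object->mapping_in_progress, so no extra locking is shown for this
 * bookkeeping.
 */
#if 0 /* sketch only, not compiled */
typedef struct example_pager {
	struct memory_object    ep_header;      /* mo_pager_ops, mo_control, ... */
	boolean_t               ep_is_mapped;   /* any mappings outstanding? */
	vm_prot_t               ep_max_prot;    /* union of mapping protections seen */
} *example_pager_t;

static kern_return_t
example_pager_map(memory_object_t mem_obj, vm_prot_t prot)
{
	example_pager_t pager = (example_pager_t) mem_obj;

	/* remember that at least one mapping exists and how permissive it is */
	pager->ep_is_mapped = TRUE;
	pager->ep_max_prot |= prot;
	return KERN_SUCCESS;
}

static kern_return_t
example_pager_last_unmap(memory_object_t mem_obj)
{
	example_pager_t pager = (example_pager_t) mem_obj;

	/* no mappings left: the pager may now decide to reclaim its resources */
	pager->ep_is_mapped = FALSE;
	return KERN_SUCCESS;
}
#endif
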
2266/* Routine memory_object_map */
2267kern_return_t
2268memory_object_map
2269(
2270 memory_object_t memory_object,
2271 vm_prot_t prot
2272)
2273{
2274 return (memory_object->mo_pager_ops->memory_object_map)(
2275 memory_object,
2276 prot);
2277}
2278
2279/* Routine memory_object_last_unmap */
2280kern_return_t
2281memory_object_last_unmap
2282(
2283 memory_object_t memory_object
2284)
2285{
2286 return (memory_object->mo_pager_ops->memory_object_last_unmap)(
2287 memory_object);
2288}
2289
2290/* Routine memory_object_data_reclaim */
2291kern_return_t
2292memory_object_data_reclaim
2293(
2294 memory_object_t memory_object,
2295 boolean_t reclaim_backing_store
2296)
2297{
2298 if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL) {
2299 return KERN_NOT_SUPPORTED;
2300 }
2301 return (memory_object->mo_pager_ops->memory_object_data_reclaim)(
2302 memory_object,
2303 reclaim_backing_store);
2304}
2305
2306boolean_t
2307memory_object_backing_object
2308(
2309 memory_object_t memory_object,
2310 memory_object_offset_t offset,
2311 vm_object_t *backing_object,
2312 vm_object_offset_t *backing_offset)
2313{
2314 if (memory_object->mo_pager_ops->memory_object_backing_object == NULL) {
2315 return FALSE;
2316 }
2317 return (memory_object->mo_pager_ops->memory_object_backing_object)(
2318 memory_object,
2319 offset,
2320 backing_object,
2321 backing_offset);
2322}
2323
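/*
 * Editor's illustration (not part of the original file): the wrapper routines
 * above simply indirect through memory_object->mo_pager_ops.  A hypothetical
 * pager might publish its ops table roughly as below.  Handler names are
 * invented and their bodies are omitted; only members that are dispatched in
 * this file are shown, and designated initializers are used so the sketch does
 * not depend on the real declaration order of the structure.
 */
#if 0 /* sketch only, not compiled */
static void example_pager_reference(memory_object_t mem_obj);
static void example_pager_deallocate(memory_object_t mem_obj);
static kern_return_t example_pager_init(memory_object_t mem_obj,
    memory_object_control_t control, memory_object_cluster_size_t pg_size);
static kern_return_t example_pager_terminate(memory_object_t mem_obj);
static kern_return_t example_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset, memory_object_cluster_size_t length,
    vm_prot_t desired_access, memory_object_fault_info_t fault_info);

static const struct memory_object_pager_ops example_pager_ops = {
	.memory_object_reference = example_pager_reference,
	.memory_object_deallocate = example_pager_deallocate,
	.memory_object_init = example_pager_init,
	.memory_object_terminate = example_pager_terminate,
	.memory_object_data_request = example_pager_data_request,
	/*
	 * The remaining members dispatched above (data_return, data_initialize,
	 * data_unlock, synchronize, map, last_unmap, data_reclaim,
	 * backing_object, ...) would be filled in the same way.
	 */
};
#endif
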
2324upl_t
2325convert_port_to_upl(
2326 ipc_port_t port)
2327{
2328 upl_t upl;
2329
2330 ip_lock(port);
2331 if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
2332 ip_unlock(port);
2333 return (upl_t)NULL;
2334 }
2335 upl = (upl_t) ip_get_kobject(port);
2336 ip_unlock(port);
2337 upl_lock(upl);
2338 upl->ref_count += 1;
2339 upl_unlock(upl);
2340 return upl;
2341}
2342
2343mach_port_t
2344convert_upl_to_port(
2345 __unused upl_t upl)
2346{
2347 return MACH_PORT_NULL;
2348}
2349
2350__private_extern__ void
2351upl_no_senders(
2352 __unused ipc_port_t port,
2353 __unused mach_port_mscount_t mscount)
2354{
2355 return;
2356}