1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: vm/memory_object.c
57 * Author: Michael Wayne Young
58 *
59 * External memory management interface control functions.
60 */
61
62 #include <advisory_pageout.h>
63
64 /*
65 * Interface dependencies:
66 */
67
68 #include <mach/std_types.h> /* For pointer_t */
69 #include <mach/mach_types.h>
70
71 #include <mach/mig.h>
72 #include <mach/kern_return.h>
73 #include <mach/memory_object.h>
74 #include <mach/memory_object_default.h>
75 #include <mach/memory_object_control_server.h>
76 #include <mach/host_priv_server.h>
77 #include <mach/boolean.h>
78 #include <mach/vm_prot.h>
79 #include <mach/message.h>
80
81 /*
82 * Implementation dependencies:
83 */
84 #include <string.h> /* For memcpy() */
85
86 #include <kern/xpr.h>
87 #include <kern/host.h>
88 #include <kern/thread.h> /* For current_thread() */
89 #include <kern/ipc_mig.h>
90 #include <kern/misc_protos.h>
91
92 #include <vm/vm_object.h>
93 #include <vm/vm_fault.h>
94 #include <vm/memory_object.h>
95 #include <vm/vm_page.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/pmap.h> /* For pmap_clear_modify */
98 #include <vm/vm_kern.h> /* For kernel_map, vm_move */
99 #include <vm/vm_map.h> /* For vm_map_pageable */
100
101 #if MACH_PAGEMAP
102 #include <vm/vm_external.h>
103 #endif /* MACH_PAGEMAP */
104
105 memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
106 vm_size_t memory_manager_default_cluster = 0;
107 decl_mutex_data(, memory_manager_default_lock)
108
109 /*
110 * Forward ref to file-local function:
111 */
112 boolean_t
113 vm_object_update(vm_object_t, vm_object_offset_t,
114 vm_size_t, memory_object_return_t, int, vm_prot_t);
115
116
117 /*
118 * Routine: memory_object_should_return_page
119 *
120 * Description:
121 * Determine whether the given page should be returned,
122 * based on the page's state and on the given return policy.
123 *
124 * We should return the page if one of the following is true:
125 *
126 * 1. Page is dirty and should_return is not RETURN_NONE.
127 * 2. Page is precious and should_return is RETURN_ALL.
128 * 3. Should_return is RETURN_ANYTHING.
129 *
130 * As a side effect, m->dirty will be made consistent
131 * with pmap_is_modified(m), if should_return is not
132 * MEMORY_OBJECT_RETURN_NONE.
133 */
134
135 #define memory_object_should_return_page(m, should_return) \
136 (should_return != MEMORY_OBJECT_RETURN_NONE && \
137 (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
138 ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
139 (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
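
/*
 * Illustrative sketch only (kept out of the build with "#if 0"): an
 * unrolled, function-style rendering of the policy the macro above
 * implements.  Note the side effect -- m->dirty is refreshed from
 * pmap_is_modified() whenever the policy is anything other than
 * MEMORY_OBJECT_RETURN_NONE.
 */
#if 0
static boolean_t
memory_object_should_return_page_sketch(
	vm_page_t		m,
	memory_object_return_t	should_return)
{
	if (should_return == MEMORY_OBJECT_RETURN_NONE)
		return FALSE;

	/* refresh the software dirty bit from the physical map layer */
	if (!m->dirty)
		m->dirty = pmap_is_modified(m->phys_page);

	if (m->dirty)
		return TRUE;		/* case 1: dirty */
	if (m->precious && should_return == MEMORY_OBJECT_RETURN_ALL)
		return TRUE;		/* case 2: precious, RETURN_ALL */
	return should_return == MEMORY_OBJECT_RETURN_ANYTHING;	/* case 3 */
}
#endif /* 0 */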
140
141 typedef int memory_object_lock_result_t;
142
143 #define MEMORY_OBJECT_LOCK_RESULT_DONE 0
144 #define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
145 #define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
146 #define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
147
148 memory_object_lock_result_t memory_object_lock_page(
149 vm_page_t m,
150 memory_object_return_t should_return,
151 boolean_t should_flush,
152 vm_prot_t prot);
153
154 /*
155 * Routine: memory_object_lock_page
156 *
157 * Description:
158 * Perform the appropriate lock operations on the
159 * given page. See the description of
160 * "memory_object_lock_request" for the meanings
161 * of the arguments.
162 *
163 * Returns an indication that the operation
164 * completed, blocked, or that the page must
165 * be cleaned.
166 */
167 memory_object_lock_result_t
168 memory_object_lock_page(
169 vm_page_t m,
170 memory_object_return_t should_return,
171 boolean_t should_flush,
172 vm_prot_t prot)
173 {
174 XPR(XPR_MEMORY_OBJECT,
175 "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
176 (integer_t)m, should_return, should_flush, prot, 0);
177
178 /*
179 * If we cannot change access to the page,
180 * either because a mapping is in progress
181 * (busy page) or because a mapping has been
182 * wired, then give up.
183 */
184
185 if (m->busy || m->cleaning)
186 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
187
188 /*
189 * Don't worry about pages for which the kernel
190 * does not have any data.
191 */
192
193 if (m->absent || m->error || m->restart) {
194 if(m->error && should_flush) {
195 /* dump the page, pager wants us to */
196 /* clean it up and there is no */
197 /* relevant data to return */
198 if(m->wire_count == 0) {
199 VM_PAGE_FREE(m);
200 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
201 }
202 } else {
203 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
204 }
205 }
206
207 assert(!m->fictitious);
208
209 if (m->wire_count != 0) {
210 /*
211 * If no change would take place
212 * anyway, return successfully.
213 *
214 * No change means:
215 * Not flushing AND
216 * No change to page lock [2 checks] AND
217 * Should not return page
218 *
219 * XXX This doesn't handle sending a copy of a wired
220 * XXX page to the pager, but that will require some
221 * XXX significant surgery.
222 */
223 if (!should_flush &&
224 (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
225 ! memory_object_should_return_page(m, should_return)) {
226
227 /*
228 * Restart page unlock requests,
229 * even though no change took place.
230 * [Memory managers may be expecting
231 * to see new requests.]
232 */
233 m->unlock_request = VM_PROT_NONE;
234 PAGE_WAKEUP(m);
235
236 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
237 }
238
239 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
240 }
241
242 /*
243 * If the page is to be flushed, allow
244 * that to be done as part of the protection.
245 */
246
247 if (should_flush)
248 prot = VM_PROT_ALL;
249
250 /*
251 * Set the page lock.
252 *
253 * If we are decreasing permission, do it now;
254 * let the fault handler take care of increases
255 * (pmap_page_protect may not increase protection).
256 */
257
258 if (prot != VM_PROT_NO_CHANGE) {
259 if ((m->page_lock ^ prot) & prot) {
260 pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
261 }
262 #if 0
263 /* code associated with the vestigial
264 * memory_object_data_unlock
265 */
266 m->page_lock = prot;
267 m->lock_supplied = TRUE;
268 if (prot != VM_PROT_NONE)
269 m->unusual = TRUE;
270 else
271 m->unusual = FALSE;
272
273 /*
274 * Restart any past unlock requests, even if no
275 * change resulted. If the manager explicitly
276 * requested no protection change, then it is assumed
277 * to be remembering past requests.
278 */
279
280 m->unlock_request = VM_PROT_NONE;
281 #endif /* 0 */
282 PAGE_WAKEUP(m);
283 }
284
285 /*
286 * Handle page returning.
287 */
288
289 if (memory_object_should_return_page(m, should_return)) {
290
291 /*
292 * If we weren't planning
293 * to flush the page anyway,
294 * we may need to remove the
295 * page from the pageout
296 * system and from physical
297 * maps now.
298 */
299
300 vm_page_lock_queues();
301 VM_PAGE_QUEUES_REMOVE(m);
302 vm_page_unlock_queues();
303
304 if (!should_flush)
305 pmap_page_protect(m->phys_page, VM_PROT_NONE);
306
307 if (m->dirty)
308 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
309 else
310 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
311 }
312
313 /*
314 * Handle flushing
315 */
316
317 if (should_flush) {
318 VM_PAGE_FREE(m);
319 } else {
320 extern boolean_t vm_page_deactivate_hint;
321
322 /*
323 * XXX Make clean but not flush a paging hint,
324 * and deactivate the pages. This is a hack
325 * because it overloads flush/clean with
326 * implementation-dependent meaning. This only
327 * happens to pages that are already clean.
328 */
329
330 if (vm_page_deactivate_hint &&
331 (should_return != MEMORY_OBJECT_RETURN_NONE)) {
332 vm_page_lock_queues();
333 vm_page_deactivate(m);
334 vm_page_unlock_queues();
335 }
336 }
337
338 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
339 }
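
/*
 * Illustrative sketch only (not compiled): how a caller such as
 * vm_object_update() below dispatches on the four result codes.
 * The helper name and its arguments are hypothetical.
 */
#if 0
static void
example_handle_lock_result(
	vm_object_t			object,
	vm_page_t			m,
	memory_object_lock_result_t	result)
{
	switch (result) {

	case MEMORY_OBJECT_LOCK_RESULT_DONE:
		/* nothing further is needed for this page */
		break;

	case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
		/* page is busy or being cleaned; wait, then rescan it */
		PAGE_SLEEP(object, m, THREAD_UNINT);
		break;

	case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
	case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
		/*
		 * dirty (MUST_CLEAN) or precious (MUST_RETURN) data has
		 * to go back to the pager, e.g. via
		 * memory_object_data_return().
		 */
		break;
	}
}
#endif /* 0 */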
340
341 #define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po) \
342 MACRO_BEGIN \
343 \
344 register int i; \
345 register vm_page_t hp; \
346 \
347 vm_object_unlock(object); \
348 \
349 (void) memory_object_data_return(object->pager, \
350 po, \
351 data_cnt, \
352 (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
353 !should_flush); \
354 \
355 vm_object_lock(object); \
356 MACRO_END
357
358 /*
359 * Routine: memory_object_lock_request [user interface]
360 *
361 * Description:
362 * Control use of the data associated with the given
363 * memory object. For each page in the given range,
364 * perform the following operations, in order:
365 * 1) restrict access to the page (disallow
366 * forms specified by "prot");
367 * 2) return data to the manager (if "should_return"
368 * is RETURN_DIRTY and the page is dirty, or
369 * "should_return" is RETURN_ALL and the page
370 * is either dirty or precious); and,
371 * 3) flush the cached copy (if "should_flush"
372 * is asserted).
373 * The set of pages is defined by a starting offset
374 * ("offset") and size ("size"). Only pages with the
375 * same page alignment as the starting offset are
376 * considered.
377 *
 378  * (Historical note: earlier versions of this interface sent a
 379  * single acknowledgement to a "reply_to" port and consumed that
 380  * naked send right; this version takes no reply port.)
381 */
382
383 kern_return_t
384 memory_object_lock_request(
385 memory_object_control_t control,
386 memory_object_offset_t offset,
387 memory_object_size_t size,
388 memory_object_return_t should_return,
389 int flags,
390 vm_prot_t prot)
391 {
392 vm_object_t object;
393 vm_object_offset_t original_offset = offset;
394 boolean_t should_flush=flags & MEMORY_OBJECT_DATA_FLUSH;
395
396 XPR(XPR_MEMORY_OBJECT,
397 "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
398 (integer_t)control, offset, size,
399 (((should_return&1)<<1)|should_flush), prot);
400
401 /*
402 * Check for bogus arguments.
403 */
404 object = memory_object_control_to_vm_object(control);
405 if (object == VM_OBJECT_NULL)
406 return (KERN_INVALID_ARGUMENT);
407
408 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
409 return (KERN_INVALID_ARGUMENT);
410
411 size = round_page_64(size);
412
413 /*
414 * Lock the object, and acquire a paging reference to
415 * prevent the memory_object reference from being released.
416 */
417 vm_object_lock(object);
418 vm_object_paging_begin(object);
419 offset -= object->paging_offset;
420
421 (void)vm_object_update(object,
422 offset, size, should_return, flags, prot);
423
424 vm_object_paging_end(object);
425 vm_object_unlock(object);
426
427 return (KERN_SUCCESS);
428 }
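
/*
 * Usage sketch only (not compiled): return dirty/precious data for a
 * range to the pager, flush the cached copies, and leave protections
 * alone.  The helper name is hypothetical; a real pager obtains the
 * control handle via memory_object_init().
 */
#if 0
static kern_return_t
example_flush_range(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	memory_object_size_t	size)
{
	return memory_object_lock_request(control,
					  offset,
					  size,
					  MEMORY_OBJECT_RETURN_ALL,
					  MEMORY_OBJECT_DATA_FLUSH,
					  VM_PROT_NO_CHANGE);
}
#endif /* 0 */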
429
430 /*
431 * memory_object_release_name: [interface]
432 *
 433  *	Enforces the name semantic on a memory_object reference count
 434  *	decrement.  This routine must not be called unless the caller holds
 435  *	a name reference gained through memory_object_named_create or
 436  *	memory_object_rename.
 437  *	If the TERMINATE_IDLE flag is set, the call returns without acting
 438  *	unless the reference count is 1, i.e. the object is idle with the
 439  *	name as its only remaining reference.
 440  *	If the decision is made to proceed, the name flag is cleared and
 441  *	the reference count is decremented.  When the count reaches zero
 442  *	the object is terminated; if the RESPECT_CACHE flag is set, the
 443  *	object is first checked for cacheability and may be cached rather
 444  *	than terminated.
445 */
446
447 kern_return_t
448 memory_object_release_name(
449 memory_object_control_t control,
450 int flags)
451 {
452 vm_object_t object;
453
454 object = memory_object_control_to_vm_object(control);
455 if (object == VM_OBJECT_NULL)
456 return (KERN_INVALID_ARGUMENT);
457
458 return vm_object_release_name(object, flags);
459 }
460
461
462
463 /*
464 * Routine: memory_object_destroy [user interface]
465 * Purpose:
466 * Shut down a memory object, despite the
467 * presence of address map (or other) references
468 * to the vm_object.
469 */
470 kern_return_t
471 memory_object_destroy(
472 memory_object_control_t control,
473 kern_return_t reason)
474 {
475 vm_object_t object;
476
477 object = memory_object_control_to_vm_object(control);
478 if (object == VM_OBJECT_NULL)
479 return (KERN_INVALID_ARGUMENT);
480
481 return (vm_object_destroy(object, reason));
482 }
483
484 /*
485 * Routine: vm_object_sync
486 *
487 * Kernel internal function to synch out pages in a given
488 * range within an object to its memory manager. Much the
489 * same as memory_object_lock_request but page protection
490 * is not changed.
491 *
 492  *		If the should_flush and should_return flags are true, pages
 493  *		are flushed; that is, dirty & precious pages are written to
 494  *		the memory manager and then discarded.  If should_return
 495  *		is false, only precious pages are returned to the memory
 496  *		manager.
 497  *
 498  *		If should_flush is false and should_return true, the memory
499 * manager's copy of the pages is updated. If should_return
500 * is also false, only the precious pages are updated. This
501 * last option is of limited utility.
502 *
503 * Returns:
504 * FALSE if no pages were returned to the pager
505 * TRUE otherwise.
506 */
507
508 boolean_t
509 vm_object_sync(
510 vm_object_t object,
511 vm_object_offset_t offset,
512 vm_size_t size,
513 boolean_t should_flush,
514 boolean_t should_return)
515 {
516 boolean_t rv;
517
518 XPR(XPR_VM_OBJECT,
519 "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
520 (integer_t)object, offset, size, should_flush, should_return);
521
522 /*
523 * Lock the object, and acquire a paging reference to
524 * prevent the memory_object and control ports from
525 * being destroyed.
526 */
527 vm_object_lock(object);
528 vm_object_paging_begin(object);
529
530 rv = vm_object_update(object, offset, size,
531 (should_return) ?
532 MEMORY_OBJECT_RETURN_ALL :
533 MEMORY_OBJECT_RETURN_NONE,
534 (should_flush) ?
535 MEMORY_OBJECT_DATA_FLUSH : 0,
536 VM_PROT_NO_CHANGE);
537
538
539 vm_object_paging_end(object);
540 vm_object_unlock(object);
541 return rv;
542 }
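
/*
 * Usage sketch only (not compiled): an msync()-style clean of a range,
 * pushing dirty/precious data to the pager while keeping the pages
 * cached.  The helper name and its arguments are hypothetical.
 */
#if 0
static void
example_clean_range(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_size_t		size)
{
	boolean_t	returned;

	returned = vm_object_sync(object, offset, size,
				  FALSE,	/* should_flush */
				  TRUE);	/* should_return */

	/* returned == TRUE iff at least one page went back to the pager */
	(void) returned;
}
#endif /* 0 */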
543
544 /*
545 * Routine: vm_object_update
546 * Description:
547 * Work function for m_o_lock_request(), vm_o_sync().
548 *
549 * Called with object locked and paging ref taken.
550 */
551 kern_return_t
552 vm_object_update(
553 register vm_object_t object,
554 register vm_object_offset_t offset,
555 register vm_size_t size,
556 memory_object_return_t should_return,
557 int flags,
558 vm_prot_t prot)
559 {
560 register vm_page_t m;
561 vm_page_t holding_page;
562 vm_size_t original_size = size;
563 vm_object_offset_t paging_offset = 0;
564 vm_object_t copy_object;
565 vm_size_t data_cnt = 0;
566 vm_object_offset_t last_offset = offset;
567 memory_object_lock_result_t page_lock_result;
568 memory_object_lock_result_t pageout_action;
569 boolean_t data_returned = FALSE;
570 boolean_t update_cow;
571 boolean_t should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
572 boolean_t pending_pageout = FALSE;
573
574 /*
575 * To avoid blocking while scanning for pages, save
576 * dirty pages to be cleaned all at once.
577 *
578 * XXXO A similar strategy could be used to limit the
579 * number of times that a scan must be restarted for
580 * other reasons. Those pages that would require blocking
581 * could be temporarily collected in another list, or
582 * their offsets could be recorded in a small array.
583 */
584
585 /*
586 * XXX NOTE: May want to consider converting this to a page list
587 * XXX vm_map_copy interface. Need to understand object
588 * XXX coalescing implications before doing so.
589 */
590
591 update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
592 && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
593 !(flags & MEMORY_OBJECT_DATA_PURGE)))
594 || (flags & MEMORY_OBJECT_COPY_SYNC);
595
596
597 if((((copy_object = object->copy) != NULL) && update_cow) ||
598 (flags & MEMORY_OBJECT_DATA_SYNC)) {
599 vm_size_t i;
600 vm_size_t copy_size;
601 vm_object_offset_t copy_offset;
602 vm_prot_t prot;
603 vm_page_t page;
604 vm_page_t top_page;
605 kern_return_t error = 0;
606
607 if(copy_object != NULL) {
608 /* translate offset with respect to shadow's offset */
609 copy_offset = (offset >= copy_object->shadow_offset)?
610 offset - copy_object->shadow_offset :
611 (vm_object_offset_t) 0;
612 if(copy_offset > copy_object->size)
613 copy_offset = copy_object->size;
614
615 /* clip size with respect to shadow offset */
616 copy_size = (offset >= copy_object->shadow_offset) ?
617 size : size - (copy_object->shadow_offset - offset);
618
619 if(copy_size <= 0) {
620 copy_size = 0;
621 } else {
622 copy_size = ((copy_offset + copy_size)
623 <= copy_object->size) ?
624 copy_size : copy_object->size - copy_offset;
625 }
626 /* check for a copy_offset which is beyond the end of */
627 /* the copy_object */
628 if(copy_size < 0)
629 copy_size = 0;
630
631 copy_size+=copy_offset;
632
633 vm_object_unlock(object);
634 vm_object_lock(copy_object);
635 } else {
636 copy_object = object;
637
638 copy_size = offset + size;
639 copy_offset = offset;
640 }
641
642 vm_object_paging_begin(copy_object);
643 for (i=copy_offset; i<copy_size; i+=PAGE_SIZE) {
644 RETRY_COW_OF_LOCK_REQUEST:
645 prot = VM_PROT_WRITE|VM_PROT_READ;
646 switch (vm_fault_page(copy_object, i,
647 VM_PROT_WRITE|VM_PROT_READ,
648 FALSE,
649 THREAD_UNINT,
650 copy_offset,
651 copy_offset+copy_size,
652 VM_BEHAVIOR_SEQUENTIAL,
653 &prot,
654 &page,
655 &top_page,
656 (int *)0,
657 &error,
658 FALSE,
659 FALSE, NULL, 0)) {
660
661 case VM_FAULT_SUCCESS:
662 if(top_page) {
663 vm_fault_cleanup(
664 page->object, top_page);
665 PAGE_WAKEUP_DONE(page);
666 vm_page_lock_queues();
667 if (!page->active && !page->inactive)
668 vm_page_activate(page);
669 vm_page_unlock_queues();
670 vm_object_lock(copy_object);
671 vm_object_paging_begin(copy_object);
672 } else {
673 PAGE_WAKEUP_DONE(page);
674 vm_page_lock_queues();
675 if (!page->active && !page->inactive)
676 vm_page_activate(page);
677 vm_page_unlock_queues();
678 }
679 break;
680 case VM_FAULT_RETRY:
681 prot = VM_PROT_WRITE|VM_PROT_READ;
682 vm_object_lock(copy_object);
683 vm_object_paging_begin(copy_object);
684 goto RETRY_COW_OF_LOCK_REQUEST;
685 case VM_FAULT_INTERRUPTED:
686 prot = VM_PROT_WRITE|VM_PROT_READ;
687 vm_object_lock(copy_object);
688 vm_object_paging_begin(copy_object);
689 goto RETRY_COW_OF_LOCK_REQUEST;
690 case VM_FAULT_MEMORY_SHORTAGE:
691 VM_PAGE_WAIT();
692 prot = VM_PROT_WRITE|VM_PROT_READ;
693 vm_object_lock(copy_object);
694 vm_object_paging_begin(copy_object);
695 goto RETRY_COW_OF_LOCK_REQUEST;
696 case VM_FAULT_FICTITIOUS_SHORTAGE:
697 vm_page_more_fictitious();
698 prot = VM_PROT_WRITE|VM_PROT_READ;
699 vm_object_lock(copy_object);
700 vm_object_paging_begin(copy_object);
701 goto RETRY_COW_OF_LOCK_REQUEST;
702 case VM_FAULT_MEMORY_ERROR:
703 vm_object_lock(object);
704 goto BYPASS_COW_COPYIN;
705 }
706
707 }
708 vm_object_paging_end(copy_object);
709 if(copy_object != object) {
710 vm_object_unlock(copy_object);
711 vm_object_lock(object);
712 }
713 }
714 if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
715 return KERN_SUCCESS;
716 }
717 if(((copy_object = object->copy) != NULL) &&
718 (flags & MEMORY_OBJECT_DATA_PURGE)) {
719 copy_object->shadow_severed = TRUE;
720 copy_object->shadowed = FALSE;
721 copy_object->shadow = NULL;
722 /* delete the ref the COW was holding on the target object */
723 vm_object_deallocate(object);
724 }
725 BYPASS_COW_COPYIN:
726
727 for (;
728 size != 0;
729 size -= PAGE_SIZE, offset += PAGE_SIZE_64)
730 {
731 /*
732 * Limit the number of pages to be cleaned at once.
733 */
734 if (pending_pageout &&
735 data_cnt >= PAGE_SIZE * DATA_WRITE_MAX)
736 {
737 LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
738 pageout_action, paging_offset);
739 data_cnt = 0;
740 pending_pageout = FALSE;
741 }
742
743 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
744 page_lock_result = memory_object_lock_page(m, should_return,
745 should_flush, prot);
746
747 XPR(XPR_MEMORY_OBJECT,
748 "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
749 (integer_t)object, offset, page_lock_result, 0, 0);
750
751 switch (page_lock_result)
752 {
753 case MEMORY_OBJECT_LOCK_RESULT_DONE:
754 /*
755 * End of a cluster of dirty pages.
756 */
757 if(pending_pageout) {
758 LIST_REQ_PAGEOUT_PAGES(object,
759 data_cnt, pageout_action,
760 paging_offset);
761 data_cnt = 0;
762 pending_pageout = FALSE;
763 continue;
764 }
765 break;
766
767 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
768 /*
769 * Since it is necessary to block,
770 * clean any dirty pages now.
771 */
772 if(pending_pageout) {
773 LIST_REQ_PAGEOUT_PAGES(object,
774 data_cnt, pageout_action,
775 paging_offset);
776 pending_pageout = FALSE;
777 data_cnt = 0;
778 continue;
779 }
780
781 PAGE_SLEEP(object, m, THREAD_UNINT);
782 continue;
783
784 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
785 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
786 /*
787 * The clean and return cases are similar.
788 *
789 */
790
791 /*
792 * if this would form a discontiguous block,
793 * clean the old pages and start anew.
794 *
795 */
796
797 /*
798 * Mark the page busy since we unlock the
799 * object below.
800 */
801 m->busy = TRUE;
802 if (pending_pageout &&
803 (last_offset != offset ||
804 pageout_action != page_lock_result)) {
805 LIST_REQ_PAGEOUT_PAGES(object,
806 data_cnt, pageout_action,
807 paging_offset);
808 pending_pageout = FALSE;
809 data_cnt = 0;
810 }
811 m->busy = FALSE;
812 holding_page = VM_PAGE_NULL;
813 if(m->cleaning) {
814 PAGE_SLEEP(object, m, THREAD_UNINT);
815 continue;
816 }
817 if(!pending_pageout) {
818 pending_pageout = TRUE;
819 pageout_action = page_lock_result;
820 paging_offset = offset;
821 }
822 if (should_flush) {
823 vm_page_lock_queues();
824 m->list_req_pending = TRUE;
825 m->cleaning = TRUE;
826 m->busy = TRUE;
827 m->pageout = TRUE;
828 vm_page_wire(m);
829 vm_page_unlock_queues();
830 } else {
831 /*
832 * Clean but do not flush
833 */
834 vm_page_lock_queues();
835 m->list_req_pending = TRUE;
836 m->cleaning = TRUE;
837 vm_page_unlock_queues();
838
839 }
840 vm_object_unlock(object);
841
842
843 data_cnt += PAGE_SIZE;
844 last_offset = offset + PAGE_SIZE_64;
845 data_returned = TRUE;
846
847 vm_object_lock(object);
848 break;
849 }
850 break;
851 }
852 }
853
854 /*
855 * We have completed the scan for applicable pages.
856 * Clean any pages that have been saved.
857 */
858 if (pending_pageout) {
859 LIST_REQ_PAGEOUT_PAGES(object,
860 data_cnt, pageout_action, paging_offset);
861 }
862 return (data_returned);
863 }
864
865 /*
866 * Routine: memory_object_synchronize_completed [user interface]
867 *
 868  *	Tell the kernel that previously synchronized data
 869  *	(memory_object_synchronize) has been queued or placed on the
870 * backing storage.
871 *
872 * Note: there may be multiple synchronize requests for a given
873 * memory object outstanding but they will not overlap.
874 */
875
876 kern_return_t
877 memory_object_synchronize_completed(
878 memory_object_control_t control,
879 memory_object_offset_t offset,
880 vm_offset_t length)
881 {
882 vm_object_t object;
883 msync_req_t msr;
884
 885 	/*
 886 	 *	Look for bogus arguments
 887 	 */
 888 
 889 	object = memory_object_control_to_vm_object(control);
 890 	if (object == VM_OBJECT_NULL)
 891 		return (KERN_INVALID_ARGUMENT);
 892 
 893 	XPR(XPR_MEMORY_OBJECT,
 894 	    "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
 895 	    (integer_t)object, offset, length, 0, 0);
896
897 vm_object_lock(object);
898
899 /*
900 * search for sync request structure
901 */
902 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
903 if (msr->offset == offset && msr->length == length) {
904 queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
905 break;
906 }
907 }/* queue_iterate */
908
909 if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
910 vm_object_unlock(object);
911 return KERN_INVALID_ARGUMENT;
912 }
913
914 msr_lock(msr);
915 vm_object_unlock(object);
916 msr->flag = VM_MSYNC_DONE;
917 msr_unlock(msr);
918 thread_wakeup((event_t) msr);
919
920 return KERN_SUCCESS;
921 }/* memory_object_synchronize_completed */
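
/*
 * Usage sketch only (not compiled): the pager-side acknowledgement.
 * The helper name is hypothetical; the important point is that the
 * acknowledgement must use the identical offset and length, since the
 * msync_req_t above is matched on exactly those values.
 */
#if 0
static void
example_ack_synchronize(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	vm_offset_t		length)
{
	/*
	 * ... after the range [offset, offset + length) has been queued
	 * or written to backing storage ...
	 */
	(void) memory_object_synchronize_completed(control, offset, length);
}
#endif /* 0 */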
922
923 static kern_return_t
924 vm_object_set_attributes_common(
925 vm_object_t object,
926 boolean_t may_cache,
927 memory_object_copy_strategy_t copy_strategy,
928 boolean_t temporary,
929 vm_size_t cluster_size,
930 boolean_t silent_overwrite,
931 boolean_t advisory_pageout)
932 {
933 boolean_t object_became_ready;
934
935 XPR(XPR_MEMORY_OBJECT,
936 "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
 937 		(integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
938
939 if (object == VM_OBJECT_NULL)
940 return(KERN_INVALID_ARGUMENT);
941
942 /*
943 * Verify the attributes of importance
944 */
945
946 switch(copy_strategy) {
947 case MEMORY_OBJECT_COPY_NONE:
948 case MEMORY_OBJECT_COPY_DELAY:
949 break;
950 default:
951 return(KERN_INVALID_ARGUMENT);
952 }
953
954 #if !ADVISORY_PAGEOUT
955 if (silent_overwrite || advisory_pageout)
956 return(KERN_INVALID_ARGUMENT);
957
958 #endif /* !ADVISORY_PAGEOUT */
959 if (may_cache)
960 may_cache = TRUE;
961 if (temporary)
962 temporary = TRUE;
963 if (cluster_size != 0) {
964 int pages_per_cluster;
965 pages_per_cluster = atop_32(cluster_size);
966 /*
967 * Cluster size must be integral multiple of page size,
968 * and be a power of 2 number of pages.
969 */
970 if ((cluster_size & (PAGE_SIZE-1)) ||
971 ((pages_per_cluster-1) & pages_per_cluster))
972 return KERN_INVALID_ARGUMENT;
973 }
974
975 vm_object_lock(object);
976
977 /*
978 * Copy the attributes
979 */
980 assert(!object->internal);
981 object_became_ready = !object->pager_ready;
982 object->copy_strategy = copy_strategy;
983 object->can_persist = may_cache;
984 object->temporary = temporary;
985 object->silent_overwrite = silent_overwrite;
986 object->advisory_pageout = advisory_pageout;
987 if (cluster_size == 0)
988 cluster_size = PAGE_SIZE;
989 object->cluster_size = cluster_size;
990
991 assert(cluster_size >= PAGE_SIZE &&
992 cluster_size % PAGE_SIZE == 0);
993
994 /*
995 * Wake up anyone waiting for the ready attribute
996 * to become asserted.
997 */
998
999 if (object_became_ready) {
1000 object->pager_ready = TRUE;
1001 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
1002 }
1003
1004 vm_object_unlock(object);
1005
1006 return(KERN_SUCCESS);
1007 }
1008
1009 /*
1010 * Set the memory object attribute as provided.
1011 *
1012 * XXX This routine cannot be completed until the vm_msync, clean
1013 * in place, and cluster work is completed. See ifdef notyet
1014 * below and note that vm_object_set_attributes_common()
1015 * may have to be expanded.
1016 */
1017 kern_return_t
1018 memory_object_change_attributes(
1019 memory_object_control_t control,
1020 memory_object_flavor_t flavor,
1021 memory_object_info_t attributes,
1022 mach_msg_type_number_t count)
1023 {
1024 vm_object_t object;
1025 kern_return_t result = KERN_SUCCESS;
1026 boolean_t temporary;
1027 boolean_t may_cache;
1028 boolean_t invalidate;
1029 vm_size_t cluster_size;
1030 memory_object_copy_strategy_t copy_strategy;
1031 boolean_t silent_overwrite;
1032 boolean_t advisory_pageout;
1033
1034 object = memory_object_control_to_vm_object(control);
1035 if (object == VM_OBJECT_NULL)
1036 return (KERN_INVALID_ARGUMENT);
1037
1038 vm_object_lock(object);
1039
1040 temporary = object->temporary;
1041 may_cache = object->can_persist;
1042 copy_strategy = object->copy_strategy;
1043 silent_overwrite = object->silent_overwrite;
1044 advisory_pageout = object->advisory_pageout;
1045 #if notyet
1046 invalidate = object->invalidate;
1047 #endif
1048 cluster_size = object->cluster_size;
1049 vm_object_unlock(object);
1050
1051 switch (flavor) {
1052 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1053 {
1054 old_memory_object_behave_info_t behave;
1055
1056 if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1057 result = KERN_INVALID_ARGUMENT;
1058 break;
1059 }
1060
1061 behave = (old_memory_object_behave_info_t) attributes;
1062
1063 temporary = behave->temporary;
1064 invalidate = behave->invalidate;
1065 copy_strategy = behave->copy_strategy;
1066
1067 break;
1068 }
1069
1070 case MEMORY_OBJECT_BEHAVIOR_INFO:
1071 {
1072 memory_object_behave_info_t behave;
1073
1074 if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1075 result = KERN_INVALID_ARGUMENT;
1076 break;
1077 }
1078
1079 behave = (memory_object_behave_info_t) attributes;
1080
1081 temporary = behave->temporary;
1082 invalidate = behave->invalidate;
1083 copy_strategy = behave->copy_strategy;
1084 silent_overwrite = behave->silent_overwrite;
1085 advisory_pageout = behave->advisory_pageout;
1086 break;
1087 }
1088
1089 case MEMORY_OBJECT_PERFORMANCE_INFO:
1090 {
1091 memory_object_perf_info_t perf;
1092
1093 if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
1094 result = KERN_INVALID_ARGUMENT;
1095 break;
1096 }
1097
1098 perf = (memory_object_perf_info_t) attributes;
1099
1100 may_cache = perf->may_cache;
1101 cluster_size = round_page_32(perf->cluster_size);
1102
1103 break;
1104 }
1105
1106 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1107 {
1108 old_memory_object_attr_info_t attr;
1109
1110 if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1111 result = KERN_INVALID_ARGUMENT;
1112 break;
1113 }
1114
1115 attr = (old_memory_object_attr_info_t) attributes;
1116
1117 may_cache = attr->may_cache;
1118 copy_strategy = attr->copy_strategy;
1119 cluster_size = page_size;
1120
1121 break;
1122 }
1123
1124 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1125 {
1126 memory_object_attr_info_t attr;
1127
1128 if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
1129 result = KERN_INVALID_ARGUMENT;
1130 break;
1131 }
1132
1133 attr = (memory_object_attr_info_t) attributes;
1134
1135 copy_strategy = attr->copy_strategy;
1136 may_cache = attr->may_cache_object;
1137 cluster_size = attr->cluster_size;
1138 temporary = attr->temporary;
1139
1140 break;
1141 }
1142
1143 default:
1144 result = KERN_INVALID_ARGUMENT;
1145 break;
1146 }
1147
1148 if (result != KERN_SUCCESS)
1149 return(result);
1150
1151 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
1152 copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1153 temporary = TRUE;
1154 } else {
1155 temporary = FALSE;
1156 }
1157
1158 /*
1159 * XXX may_cache may become a tri-valued variable to handle
1160 * XXX uncache if not in use.
1161 */
1162 return (vm_object_set_attributes_common(object,
1163 may_cache,
1164 copy_strategy,
1165 temporary,
1166 cluster_size,
1167 silent_overwrite,
1168 advisory_pageout));
1169 }
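
/*
 * Usage sketch only (not compiled): a pager enabling caching and an
 * 8-page cluster through the PERFORMANCE_INFO flavor.  The helper name
 * is hypothetical, and the memory_object_perf_info_data_t spelling of
 * the structure type is assumed from <mach/memory_object_types.h>.
 */
#if 0
static kern_return_t
example_set_perf_info(
	memory_object_control_t	control)
{
	memory_object_perf_info_data_t	perf;

	perf.may_cache    = TRUE;
	perf.cluster_size = 8 * PAGE_SIZE;	/* power-of-2 number of pages */

	return memory_object_change_attributes(control,
				MEMORY_OBJECT_PERFORMANCE_INFO,
				(memory_object_info_t) &perf,
				MEMORY_OBJECT_PERF_INFO_COUNT);
}
#endif /* 0 */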
1170
1171 kern_return_t
1172 memory_object_get_attributes(
1173 memory_object_control_t control,
1174 memory_object_flavor_t flavor,
1175 memory_object_info_t attributes, /* pointer to OUT array */
1176 mach_msg_type_number_t *count) /* IN/OUT */
1177 {
1178 kern_return_t ret = KERN_SUCCESS;
1179 vm_object_t object;
1180
1181 object = memory_object_control_to_vm_object(control);
1182 if (object == VM_OBJECT_NULL)
1183 return (KERN_INVALID_ARGUMENT);
1184
1185 vm_object_lock(object);
1186
1187 switch (flavor) {
1188 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1189 {
1190 old_memory_object_behave_info_t behave;
1191
1192 if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1193 ret = KERN_INVALID_ARGUMENT;
1194 break;
1195 }
1196
1197 behave = (old_memory_object_behave_info_t) attributes;
1198 behave->copy_strategy = object->copy_strategy;
1199 behave->temporary = object->temporary;
1200 #if notyet /* remove when vm_msync complies and clean in place fini */
1201 behave->invalidate = object->invalidate;
1202 #else
1203 behave->invalidate = FALSE;
1204 #endif
1205
1206 *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1207 break;
1208 }
1209
1210 case MEMORY_OBJECT_BEHAVIOR_INFO:
1211 {
1212 memory_object_behave_info_t behave;
1213
1214 if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1215 ret = KERN_INVALID_ARGUMENT;
1216 break;
1217 }
1218
1219 behave = (memory_object_behave_info_t) attributes;
1220 behave->copy_strategy = object->copy_strategy;
1221 behave->temporary = object->temporary;
1222 #if notyet /* remove when vm_msync complies and clean in place fini */
1223 behave->invalidate = object->invalidate;
1224 #else
1225 behave->invalidate = FALSE;
1226 #endif
1227 behave->advisory_pageout = object->advisory_pageout;
1228 behave->silent_overwrite = object->silent_overwrite;
1229 *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1230 break;
1231 }
1232
1233 case MEMORY_OBJECT_PERFORMANCE_INFO:
1234 {
1235 memory_object_perf_info_t perf;
1236
1237 if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
1238 ret = KERN_INVALID_ARGUMENT;
1239 break;
1240 }
1241
1242 perf = (memory_object_perf_info_t) attributes;
1243 perf->cluster_size = object->cluster_size;
1244 perf->may_cache = object->can_persist;
1245
1246 *count = MEMORY_OBJECT_PERF_INFO_COUNT;
1247 break;
1248 }
1249
1250 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1251 {
1252 old_memory_object_attr_info_t attr;
1253
1254 if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1255 ret = KERN_INVALID_ARGUMENT;
1256 break;
1257 }
1258
1259 attr = (old_memory_object_attr_info_t) attributes;
1260 attr->may_cache = object->can_persist;
1261 attr->copy_strategy = object->copy_strategy;
1262
1263 *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
1264 break;
1265 }
1266
1267 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1268 {
1269 memory_object_attr_info_t attr;
1270
1271 if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
1272 ret = KERN_INVALID_ARGUMENT;
1273 break;
1274 }
1275
1276 attr = (memory_object_attr_info_t) attributes;
1277 attr->copy_strategy = object->copy_strategy;
1278 attr->cluster_size = object->cluster_size;
1279 attr->may_cache_object = object->can_persist;
1280 attr->temporary = object->temporary;
1281
1282 *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
1283 break;
1284 }
1285
1286 default:
1287 ret = KERN_INVALID_ARGUMENT;
1288 break;
1289 }
1290
1291 vm_object_unlock(object);
1292
1293 return(ret);
1294 }
1295
1296
1297 kern_return_t
1298 memory_object_iopl_request(
1299 ipc_port_t port,
1300 memory_object_offset_t offset,
1301 vm_size_t *upl_size,
1302 upl_t *upl_ptr,
1303 upl_page_info_array_t user_page_list,
1304 unsigned int *page_list_count,
1305 int *flags)
1306 {
1307 vm_object_t object;
1308 kern_return_t ret;
1309 int caller_flags;
1310
1311 caller_flags = *flags;
1312
1313 if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
1314 vm_named_entry_t named_entry;
1315
1316 named_entry = (vm_named_entry_t)port->ip_kobject;
1317 /* a few checks to make sure user is obeying rules */
1318 if(*upl_size == 0) {
1319 if(offset >= named_entry->size)
1320 return(KERN_INVALID_RIGHT);
1321 *upl_size = named_entry->size - offset;
1322 }
1323 if(caller_flags & UPL_COPYOUT_FROM) {
1324 if((named_entry->protection & VM_PROT_READ)
1325 != VM_PROT_READ) {
1326 return(KERN_INVALID_RIGHT);
1327 }
1328 } else {
1329 if((named_entry->protection &
1330 (VM_PROT_READ | VM_PROT_WRITE))
1331 != (VM_PROT_READ | VM_PROT_WRITE)) {
1332 return(KERN_INVALID_RIGHT);
1333 }
1334 }
1335 if(named_entry->size < (offset + *upl_size))
1336 return(KERN_INVALID_ARGUMENT);
1337
1338 		/* the caller's offset parameter is relative to the start of */
1339 		/* the named entry; convert it to an offset within the object */
1340 offset = offset + named_entry->offset;
1341
1342 if(named_entry->is_sub_map)
1343 return (KERN_INVALID_ARGUMENT);
1344
1345 named_entry_lock(named_entry);
1346
1347 if(named_entry->object) {
1348 /* This is the case where we are going to map */
1349 /* an already mapped object. If the object is */
1350 /* not ready it is internal. An external */
1351 /* object cannot be mapped until it is ready */
1352 /* we can therefore avoid the ready check */
1353 /* in this case. */
1354 vm_object_reference(named_entry->object);
1355 object = named_entry->object;
1356 named_entry_unlock(named_entry);
1357 } else {
1358 object = vm_object_enter(named_entry->backing.pager,
1359 named_entry->offset + named_entry->size,
1360 named_entry->internal,
1361 FALSE,
1362 FALSE);
1363 if (object == VM_OBJECT_NULL) {
1364 named_entry_unlock(named_entry);
1365 return(KERN_INVALID_OBJECT);
1366 }
1367 vm_object_lock(object);
1368
1369 /* create an extra reference for the named entry */
1370 vm_object_reference_locked(object);
1371 named_entry->object = object;
1372 named_entry_unlock(named_entry);
1373
1374 /* wait for object to be ready */
1375 while (!object->pager_ready) {
1376 vm_object_wait(object,
1377 VM_OBJECT_EVENT_PAGER_READY,
1378 THREAD_UNINT);
1379 vm_object_lock(object);
1380 }
1381 vm_object_unlock(object);
1382 }
1383 } else {
1384 memory_object_control_t control;
1385 control = (memory_object_control_t)port->ip_kobject;
1386 if (control == NULL)
1387 return (KERN_INVALID_ARGUMENT);
1388 object = memory_object_control_to_vm_object(control);
1389 if (object == VM_OBJECT_NULL)
1390 return (KERN_INVALID_ARGUMENT);
1391 vm_object_reference(object);
1392 }
1393 if (object == VM_OBJECT_NULL)
1394 return (KERN_INVALID_ARGUMENT);
1395
1396 if (!object->private) {
1397 if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
1398 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
1399 if (object->phys_contiguous) {
1400 *flags = UPL_PHYS_CONTIG;
1401 } else {
1402 *flags = 0;
1403 }
1404 } else {
1405 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
1406 }
1407
1408 ret = vm_object_iopl_request(object,
1409 offset,
1410 *upl_size,
1411 upl_ptr,
1412 user_page_list,
1413 page_list_count,
1414 caller_flags);
1415 vm_object_deallocate(object);
1416 return ret;
1417 }
1418
1419 /*
1420 * Routine: memory_object_upl_request [interface]
1421 * Purpose:
1422 * Cause the population of a portion of a vm_object.
1423 * Depending on the nature of the request, the pages
1424  *		returned may contain valid data or be uninitialized.
1425 *
1426 */
1427
1428 kern_return_t
1429 memory_object_upl_request(
1430 memory_object_control_t control,
1431 memory_object_offset_t offset,
1432 vm_size_t size,
1433 upl_t *upl_ptr,
1434 upl_page_info_array_t user_page_list,
1435 unsigned int *page_list_count,
1436 int cntrl_flags)
1437 {
1438 vm_object_t object;
1439
1440 object = memory_object_control_to_vm_object(control);
1441 if (object == VM_OBJECT_NULL)
1442 return (KERN_INVALID_ARGUMENT);
1443
1444 return vm_object_upl_request(object,
1445 offset,
1446 size,
1447 upl_ptr,
1448 user_page_list,
1449 page_list_count,
1450 cntrl_flags);
1451 }
1452
1453 /*
1454 * Routine: memory_object_super_upl_request [interface]
1455 * Purpose:
1456 * Cause the population of a portion of a vm_object
1457 * in much the same way as memory_object_upl_request.
1458 * Depending on the nature of the request, the pages
1459  *		returned may contain valid data or be uninitialized.
1460 * However, the region may be expanded up to the super
1461 * cluster size provided.
1462 */
1463
1464 kern_return_t
1465 memory_object_super_upl_request(
1466 memory_object_control_t control,
1467 memory_object_offset_t offset,
1468 vm_size_t size,
1469 vm_size_t super_cluster,
1470 upl_t *upl,
1471 upl_page_info_t *user_page_list,
1472 unsigned int *page_list_count,
1473 int cntrl_flags)
1474 {
1475 vm_object_t object;
1476
1477 object = memory_object_control_to_vm_object(control);
1478 if (object == VM_OBJECT_NULL)
1479 return (KERN_INVALID_ARGUMENT);
1480
1481 return vm_object_super_upl_request(object,
1482 offset,
1483 size,
1484 super_cluster,
1485 upl,
1486 user_page_list,
1487 page_list_count,
1488 cntrl_flags);
1489 }
1490
1491 int vm_stat_discard_cleared_reply = 0;
1492 int vm_stat_discard_cleared_unset = 0;
1493 int vm_stat_discard_cleared_too_late = 0;
1494
1495
1496
1497 /*
1498 * Routine: host_default_memory_manager [interface]
1499 * Purpose:
1500 * set/get the default memory manager port and default cluster
1501 * size.
1502 *
1503 * If successful, consumes the supplied naked send right.
1504 */
1505 kern_return_t
1506 host_default_memory_manager(
1507 host_priv_t host_priv,
1508 memory_object_default_t *default_manager,
1509 vm_size_t cluster_size)
1510 {
1511 memory_object_default_t current_manager;
1512 memory_object_default_t new_manager;
1513 memory_object_default_t returned_manager;
1514
1515 if (host_priv == HOST_PRIV_NULL)
1516 return(KERN_INVALID_HOST);
1517
1518 assert(host_priv == &realhost);
1519
1520 new_manager = *default_manager;
1521 mutex_lock(&memory_manager_default_lock);
1522 current_manager = memory_manager_default;
1523
1524 if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1525 /*
1526 * Retrieve the current value.
1527 */
1528 memory_object_default_reference(current_manager);
1529 returned_manager = current_manager;
1530 } else {
1531 /*
1532 * Retrieve the current value,
1533 * and replace it with the supplied value.
1534 * We return the old reference to the caller
1535 * but we have to take a reference on the new
1536 * one.
1537 */
1538
1539 returned_manager = current_manager;
1540 memory_manager_default = new_manager;
1541 memory_object_default_reference(new_manager);
1542
1543 if (cluster_size % PAGE_SIZE != 0) {
1544 #if 0
1545 mutex_unlock(&memory_manager_default_lock);
1546 return KERN_INVALID_ARGUMENT;
1547 #else
1548 cluster_size = round_page_32(cluster_size);
1549 #endif
1550 }
1551 memory_manager_default_cluster = cluster_size;
1552
1553 /*
1554 * In case anyone's been waiting for a memory
1555 * manager to be established, wake them up.
1556 */
1557
1558 thread_wakeup((event_t) &memory_manager_default);
1559 }
1560
1561 mutex_unlock(&memory_manager_default_lock);
1562
1563 *default_manager = returned_manager;
1564 return(KERN_SUCCESS);
1565 }
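
/*
 * Usage sketch only (not compiled): passing MEMORY_OBJECT_DEFAULT_NULL
 * queries the current default pager without replacing it; the reference
 * handed back must eventually be released.  The helper name is
 * hypothetical, and host_priv_self() is assumed to supply the kernel's
 * privileged host handle.
 */
#if 0
static void
example_lookup_default_pager(void)
{
	memory_object_default_t	dmm = MEMORY_OBJECT_DEFAULT_NULL;

	if (host_default_memory_manager(host_priv_self(), &dmm, 0)
							== KERN_SUCCESS) {
		/* ... use dmm ... */
		memory_object_default_deallocate(dmm);
	}
}
#endif /* 0 */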
1566
1567 /*
1568 * Routine: memory_manager_default_reference
1569 * Purpose:
1570 * Returns a naked send right for the default
1571 * memory manager. The returned right is always
1572 * valid (not IP_NULL or IP_DEAD).
1573 */
1574
1575 __private_extern__ memory_object_default_t
1576 memory_manager_default_reference(
1577 vm_size_t *cluster_size)
1578 {
1579 memory_object_default_t current_manager;
1580
1581 mutex_lock(&memory_manager_default_lock);
1582 current_manager = memory_manager_default;
1583 while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1584 wait_result_t res;
1585
1586 res = thread_sleep_mutex((event_t) &memory_manager_default,
1587 &memory_manager_default_lock,
1588 THREAD_UNINT);
1589 assert(res == THREAD_AWAKENED);
1590 current_manager = memory_manager_default;
1591 }
1592 memory_object_default_reference(current_manager);
1593 *cluster_size = memory_manager_default_cluster;
1594 mutex_unlock(&memory_manager_default_lock);
1595
1596 return current_manager;
1597 }
1598
1599 /*
1600 * Routine: memory_manager_default_check
1601 *
1602 * Purpose:
1603 * Check whether a default memory manager has been set
1604 * up yet, or not. Returns KERN_SUCCESS if dmm exists,
1605 * and KERN_FAILURE if dmm does not exist.
1606 *
1607 * If there is no default memory manager, log an error,
1608 * but only the first time.
1609 *
1610 */
1611 __private_extern__ kern_return_t
1612 memory_manager_default_check(void)
1613 {
1614 memory_object_default_t current;
1615
1616 mutex_lock(&memory_manager_default_lock);
1617 current = memory_manager_default;
1618 if (current == MEMORY_OBJECT_DEFAULT_NULL) {
1619 static boolean_t logged; /* initialized to 0 */
1620 boolean_t complain = !logged;
1621 logged = TRUE;
1622 mutex_unlock(&memory_manager_default_lock);
1623 if (complain)
1624 printf("Warning: No default memory manager\n");
1625 return(KERN_FAILURE);
1626 } else {
1627 mutex_unlock(&memory_manager_default_lock);
1628 return(KERN_SUCCESS);
1629 }
1630 }
1631
1632 __private_extern__ void
1633 memory_manager_default_init(void)
1634 {
1635 memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
1636 mutex_init(&memory_manager_default_lock, ETAP_VM_MEMMAN);
1637 }
1638
1639
1640 void
1641 memory_object_deactivate_pages(
1642 vm_object_t object,
1643 vm_object_offset_t offset,
1644 vm_object_size_t size,
1645 boolean_t kill_page)
1646 {
1647 vm_object_t orig_object;
1648 int pages_moved = 0;
1649 int pages_found = 0;
1650
1651 /*
1652 * entered with object lock held, acquire a paging reference to
1653 * prevent the memory_object and control ports from
1654 * being destroyed.
1655 */
1656 orig_object = object;
1657
1658 for (;;) {
1659 register vm_page_t m;
1660 vm_object_offset_t toffset;
1661 vm_object_size_t tsize;
1662
1663 vm_object_paging_begin(object);
1664 vm_page_lock_queues();
1665
1666 for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {
1667
1668 if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {
1669
1670 pages_found++;
1671
1672 if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {
1673
1674 m->reference = FALSE;
1675 pmap_clear_reference(m->phys_page);
1676
1677 if ((kill_page) && (object->internal)) {
1678 m->precious = FALSE;
1679 m->dirty = FALSE;
1680 pmap_clear_modify(m->phys_page);
1681 vm_external_state_clr(object->existence_map, offset);
1682 }
1683 VM_PAGE_QUEUES_REMOVE(m);
1684
1685 if(m->zero_fill) {
1686 queue_enter_first(
1687 &vm_page_queue_zf,
1688 m, vm_page_t, pageq);
1689 } else {
1690 queue_enter_first(
1691 &vm_page_queue_inactive,
1692 m, vm_page_t, pageq);
1693 }
1694
1695 m->inactive = TRUE;
1696 if (!m->fictitious)
1697 vm_page_inactive_count++;
1698
1699 pages_moved++;
1700 }
1701 }
1702 }
1703 vm_page_unlock_queues();
1704 vm_object_paging_end(object);
1705
1706 if (object->shadow) {
1707 vm_object_t tmp_object;
1708
1709 kill_page = 0;
1710
1711 offset += object->shadow_offset;
1712
1713 tmp_object = object->shadow;
1714 vm_object_lock(tmp_object);
1715
1716 if (object != orig_object)
1717 vm_object_unlock(object);
1718 object = tmp_object;
1719 } else
1720 break;
1721 }
1722 if (object != orig_object)
1723 vm_object_unlock(object);
1724 }
1725
1726 /* Allow manipulation of individual page state. This is actually part of */
1727 /* the UPL regimen but takes place on the object rather than on a UPL */
1728
1729 kern_return_t
1730 memory_object_page_op(
1731 memory_object_control_t control,
1732 memory_object_offset_t offset,
1733 int ops,
1734 ppnum_t *phys_entry,
1735 int *flags)
1736 {
1737 vm_object_t object;
1738 vm_page_t dst_page;
1739
1740
1741 object = memory_object_control_to_vm_object(control);
1742 if (object == VM_OBJECT_NULL)
1743 return (KERN_INVALID_ARGUMENT);
1744
1745 vm_object_lock(object);
1746
1747 if(ops & UPL_POP_PHYSICAL) {
1748 if(object->phys_contiguous) {
1749 if (phys_entry) {
1750 *phys_entry = (ppnum_t)
1751 (object->shadow_offset >> 12);
1752 }
1753 vm_object_unlock(object);
1754 return KERN_SUCCESS;
1755 } else {
1756 vm_object_unlock(object);
1757 return KERN_INVALID_OBJECT;
1758 }
1759 }
1760 if(object->phys_contiguous) {
1761 vm_object_unlock(object);
1762 return KERN_INVALID_OBJECT;
1763 }
1764
1765 while(TRUE) {
1766 if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
1767 vm_object_unlock(object);
1768 return KERN_FAILURE;
1769 }
1770
1771 /* Sync up on getting the busy bit */
1772 if((dst_page->busy || dst_page->cleaning) &&
1773 (((ops & UPL_POP_SET) &&
1774 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
1775 /* someone else is playing with the page, we will */
1776 /* have to wait */
1777 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
1778 continue;
1779 }
1780
1781 if (ops & UPL_POP_DUMP) {
1782 vm_page_lock_queues();
1783
1784 if (dst_page->no_isync == FALSE)
1785 pmap_page_protect(dst_page->phys_page, VM_PROT_NONE);
1786 vm_page_free(dst_page);
1787
1788 vm_page_unlock_queues();
1789 break;
1790 }
1791
1792 if (flags) {
1793 *flags = 0;
1794
1795 /* Get the condition of flags before requested ops */
1796 /* are undertaken */
1797
1798 if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
1799 if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
1800 if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
1801 if(dst_page->absent) *flags |= UPL_POP_ABSENT;
1802 if(dst_page->busy) *flags |= UPL_POP_BUSY;
1803 }
1804 if (phys_entry)
1805 *phys_entry = dst_page->phys_page;
1806
1807 /* The caller should have made a call either contingent with */
1808 /* or prior to this call to set UPL_POP_BUSY */
1809 if(ops & UPL_POP_SET) {
1810 /* The protection granted with this assert will */
1811 /* not be complete. If the caller violates the */
1812 /* convention and attempts to change page state */
1813 /* without first setting busy we may not see it */
1814 /* because the page may already be busy. However */
1815 /* if such violations occur we will assert sooner */
1816 /* or later. */
1817 assert(dst_page->busy || (ops & UPL_POP_BUSY));
1818 if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
1819 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
1820 if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
1821 if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
1822 if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
1823 }
1824
1825 if(ops & UPL_POP_CLR) {
1826 assert(dst_page->busy);
1827 if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
1828 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
1829 if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
1830 if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
1831 if (ops & UPL_POP_BUSY) {
1832 dst_page->busy = FALSE;
1833 PAGE_WAKEUP(dst_page);
1834 }
1835 }
1836 break;
1837 }
1838
1839 vm_object_unlock(object);
1840 return KERN_SUCCESS;
1841
1842 }
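
/*
 * Usage sketch only (not compiled): the set/clear convention described
 * above -- take the busy bit first, manipulate page state, then clear
 * busy (which also wakes any waiters).  The helper name is hypothetical.
 */
#if 0
static kern_return_t
example_dirty_page(
	memory_object_control_t	control,
	memory_object_offset_t	offset)
{
	ppnum_t		phys;
	int		page_flags;
	kern_return_t	kr;

	/* take the busy bit and mark the page dirty in one call */
	kr = memory_object_page_op(control, offset,
				   UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DIRTY,
				   &phys, &page_flags);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... operate on physical page "phys" ... */

	/* drop the busy bit; anyone sleeping on the page is woken up */
	return memory_object_page_op(control, offset,
				     UPL_POP_CLR | UPL_POP_BUSY,
				     &phys, &page_flags);
}
#endif /* 0 */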
1843
1844 /*
1845  * memory_object_range_op is a performance enhancement over
1846  * memory_object_page_op for operations that do not need per-page
1847  * state returned from the call.  Page_op was created to provide a
1848  * low-cost alternative to page manipulation via UPLs when only a
1849  * single page is involved.  Range_op extends the _op family to work
1850  * on multiple pages at once; because no per-page state is handed
1851  * back, the caller avoids the overhead of the UPL structures.
1852 */
1853
1854 kern_return_t
1855 memory_object_range_op(
1856 memory_object_control_t control,
1857 memory_object_offset_t offset_beg,
1858 memory_object_offset_t offset_end,
1859 int ops,
1860 int *range)
1861 {
1862 memory_object_offset_t offset;
1863 vm_object_t object;
1864 vm_page_t dst_page;
1865
1866 object = memory_object_control_to_vm_object(control);
1867 if (object == VM_OBJECT_NULL)
1868 return (KERN_INVALID_ARGUMENT);
1869
1870 if (object->resident_page_count == 0) {
1871 if (range) {
1872 if (ops & UPL_ROP_PRESENT)
1873 *range = 0;
1874 else
1875 *range = offset_end - offset_beg;
1876 }
1877 return KERN_SUCCESS;
1878 }
1879 vm_object_lock(object);
1880
1881 	if (object->phys_contiguous) {
1882 		vm_object_unlock(object);
1883 	        return KERN_INVALID_OBJECT;
1884 	}
1883
1884 offset = offset_beg;
1885
1886 while (offset < offset_end) {
1887 	        if ((dst_page = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
1888 if (ops & UPL_ROP_DUMP) {
1889 if (dst_page->busy || dst_page->cleaning) {
1890 /*
1891 * someone else is playing with the
1892 * page, we will have to wait
1893 */
1894 PAGE_SLEEP(object,
1895 dst_page, THREAD_UNINT);
1896 /*
1897 * need to relook the page up since it's
1898 * state may have changed while we slept
1899 * it might even belong to a different object
1900 * at this point
1901 */
1902 continue;
1903 }
1904 vm_page_lock_queues();
1905
1906 if (dst_page->no_isync == FALSE)
1907 pmap_page_protect(dst_page->phys_page, VM_PROT_NONE);
1908 vm_page_free(dst_page);
1909
1910 vm_page_unlock_queues();
1911 } else if (ops & UPL_ROP_ABSENT)
1912 break;
1913 } else if (ops & UPL_ROP_PRESENT)
1914 break;
1915
1916 offset += PAGE_SIZE;
1917 }
1918 vm_object_unlock(object);
1919
1920 if (range)
1921 *range = offset - offset_beg;
1922
1923 return KERN_SUCCESS;
1924 }
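
/*
 * Usage sketch only (not compiled): UPL_ROP_PRESENT reports the length
 * in bytes of the run of resident pages at the start of the range,
 * UPL_ROP_ABSENT the length of the leading non-resident run, and
 * UPL_ROP_DUMP discards every resident page in the range.  The helper
 * name is hypothetical.
 */
#if 0
static void
example_range_ops(
	memory_object_control_t	control,
	memory_object_offset_t	start,
	memory_object_offset_t	end)
{
	int	range;

	/* how many contiguous bytes are resident at "start"? */
	(void) memory_object_range_op(control, start, end,
				      UPL_ROP_PRESENT, &range);

	/* throw away every resident page in [start, end) */
	(void) memory_object_range_op(control, start, end,
				      UPL_ROP_DUMP, &range);
}
#endif /* 0 */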
1925
1926 static zone_t mem_obj_control_zone;
1927
1928 __private_extern__ void
1929 memory_object_control_bootstrap(void)
1930 {
1931 int i;
1932
1933 i = (vm_size_t) sizeof (struct memory_object_control);
1934 mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
1935 return;
1936 }
1937
1938 __private_extern__ memory_object_control_t
1939 memory_object_control_allocate(
1940 vm_object_t object)
1941 {
1942 memory_object_control_t control;
1943
1944 control = (memory_object_control_t)zalloc(mem_obj_control_zone);
1945 if (control != MEMORY_OBJECT_CONTROL_NULL)
1946 control->object = object;
1947 return (control);
1948 }
1949
1950 __private_extern__ void
1951 memory_object_control_collapse(
1952 memory_object_control_t control,
1953 vm_object_t object)
1954 {
1955 assert((control->object != VM_OBJECT_NULL) &&
1956 (control->object != object));
1957 control->object = object;
1958 }
1959
1960 __private_extern__ vm_object_t
1961 memory_object_control_to_vm_object(
1962 memory_object_control_t control)
1963 {
1964 if (control == MEMORY_OBJECT_CONTROL_NULL)
1965 return VM_OBJECT_NULL;
1966
1967 return (control->object);
1968 }
1969
1970 memory_object_control_t
1971 convert_port_to_mo_control(
1972 mach_port_t port)
1973 {
1974 return MEMORY_OBJECT_CONTROL_NULL;
1975 }
1976
1977
1978 mach_port_t
1979 convert_mo_control_to_port(
1980 memory_object_control_t control)
1981 {
1982 return MACH_PORT_NULL;
1983 }
1984
1985 void
1986 memory_object_control_reference(
1987 memory_object_control_t control)
1988 {
1989 return;
1990 }
1991
1992 /*
1993  * We only ever issue one of these references, so kill it
1994  * when that gets released (should switch to real reference
1995  * counting in true port-less EMMI).
1996 */
1997 void
1998 memory_object_control_deallocate(
1999 memory_object_control_t control)
2000 {
2001 zfree(mem_obj_control_zone, (vm_offset_t)control);
2002 }
2003
2004 void
2005 memory_object_control_disable(
2006 memory_object_control_t control)
2007 {
2008 assert(control->object != VM_OBJECT_NULL);
2009 control->object = VM_OBJECT_NULL;
2010 }
2011
2012 void
2013 memory_object_default_reference(
2014 memory_object_default_t dmm)
2015 {
2016 ipc_port_make_send(dmm);
2017 }
2018
2019 void
2020 memory_object_default_deallocate(
2021 memory_object_default_t dmm)
2022 {
2023 ipc_port_release_send(dmm);
2024 }
2025
2026 memory_object_t
2027 convert_port_to_memory_object(
2028 mach_port_t port)
2029 {
2030 return (MEMORY_OBJECT_NULL);
2031 }
2032
2033
2034 mach_port_t
2035 convert_memory_object_to_port(
2036 memory_object_t object)
2037 {
2038 return (MACH_PORT_NULL);
2039 }
2040
2041 #ifdef MACH_BSD
2042 /* remove after component interface available */
2043 extern int vnode_pager_workaround;
2044 extern int device_pager_workaround;
2045 #endif
2046
2047
2048 /* Routine memory_object_reference */
2049 void memory_object_reference(
2050 memory_object_t memory_object)
2051 {
2052 extern void dp_memory_object_reference(memory_object_t);
2053
2054 #ifdef MACH_BSD
2055 extern void vnode_pager_reference(memory_object_t);
2056 extern void device_pager_reference(memory_object_t);
2057
2058 if(memory_object->pager == &vnode_pager_workaround) {
2059 vnode_pager_reference(memory_object);
2060 } else if(memory_object->pager == &device_pager_workaround) {
2061 device_pager_reference(memory_object);
2062 } else
2063 #endif
2064 dp_memory_object_reference(memory_object);
2065 }
2066
2067 /* Routine memory_object_deallocate */
2068 void memory_object_deallocate(
2069 memory_object_t memory_object)
2070 {
2071 extern void dp_memory_object_deallocate(memory_object_t);
2072
2073 #ifdef MACH_BSD
2074 extern void vnode_pager_deallocate(memory_object_t);
2075 extern void device_pager_deallocate(memory_object_t);
2076
2077 if (memory_object->pager == &vnode_pager_workaround) {
2078 vnode_pager_deallocate(memory_object);
2079 } else if (memory_object->pager == &device_pager_workaround) {
2080 device_pager_deallocate(memory_object);
2081 } else
2082 #endif
2083 dp_memory_object_deallocate(memory_object);
2084 }
2085
2086
2087 /* Routine memory_object_init */
2088 kern_return_t memory_object_init
2089 (
2090 memory_object_t memory_object,
2091 memory_object_control_t memory_control,
2092 vm_size_t memory_object_page_size
2093 )
2094 {
2095 extern kern_return_t dp_memory_object_init(memory_object_t,
2096 memory_object_control_t,
2097 vm_size_t);
2098 #ifdef MACH_BSD
2099 extern kern_return_t vnode_pager_init(memory_object_t,
2100 memory_object_control_t,
2101 vm_size_t);
2102 extern kern_return_t device_pager_init(memory_object_t,
2103 memory_object_control_t,
2104 vm_size_t);
2105
2106 if (memory_object->pager == &vnode_pager_workaround) {
2107 return vnode_pager_init(memory_object,
2108 memory_control,
2109 memory_object_page_size);
2110 } else if (memory_object->pager == &device_pager_workaround) {
2111 return device_pager_init(memory_object,
2112 memory_control,
2113 memory_object_page_size);
2114 } else
2115 #endif
2116 return dp_memory_object_init(memory_object,
2117 memory_control,
2118 memory_object_page_size);
2119 }
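
/*
 * Pager-side sketch (illustrative; the function name is hypothetical):
 * an init handler normally just sanity-checks the kernel page size and
 * stashes the control handle for later memory_object_control_* upcalls.
 *
 *	kern_return_t
 *	example_pager_init(
 *		memory_object_t		mem_obj,
 *		memory_object_control_t	control,
 *		vm_size_t		pg_size)
 *	{
 *		if (pg_size != PAGE_SIZE)
 *			return KERN_INVALID_ARGUMENT;
 *		... stash control in the pager's private state ...
 *		return KERN_SUCCESS;
 *	}
 */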
2120
2121 /* Routine memory_object_terminate */
2122 kern_return_t memory_object_terminate
2123 (
2124 memory_object_t memory_object
2125 )
2126 {
2127 extern kern_return_t dp_memory_object_terminate(memory_object_t);
2128
2129 #ifdef MACH_BSD
2130 extern kern_return_t vnode_pager_terminate(memory_object_t);
2131 extern kern_return_t device_pager_terminate(memory_object_t);
2132
2133 if (memory_object->pager == &vnode_pager_workaround) {
2134 return vnode_pager_terminate(memory_object);
2135 } else if (memory_object->pager == &device_pager_workaround) {
2136 return device_pager_terminate(memory_object);
2137 } else
2138 #endif
2139 return dp_memory_object_terminate(memory_object);
2140 }
2141
2142 /* Routine memory_object_data_request */
2143 kern_return_t memory_object_data_request
2144 (
2145 memory_object_t memory_object,
2146 memory_object_offset_t offset,
2147 vm_size_t length,
2148 vm_prot_t desired_access
2149 )
2150 {
2151 extern kern_return_t dp_memory_object_data_request(memory_object_t,
2152 memory_object_offset_t, vm_size_t, vm_prot_t);
2153
2154 #ifdef MACH_BSD
2155 extern kern_return_t vnode_pager_data_request(memory_object_t,
2156 memory_object_offset_t, vm_size_t, vm_prot_t);
2157 extern kern_return_t device_pager_data_request(memory_object_t,
2158 memory_object_offset_t, vm_size_t, vm_prot_t);
2159
2160 if (memory_object->pager == &vnode_pager_workaround) {
2161 return vnode_pager_data_request(memory_object,
2162 offset,
2163 length,
2164 desired_access);
2165 } else if (memory_object->pager == &device_pager_workaround) {
2166 return device_pager_data_request(memory_object,
2167 offset,
2168 length,
2169 desired_access);
2170 } else
2171 #endif
2172 return dp_memory_object_data_request(memory_object,
2173 offset,
2174 length,
2175 desired_access);
2176 }
2177
2178 /* Routine memory_object_data_return */
2179 kern_return_t memory_object_data_return
2180 (
2181 memory_object_t memory_object,
2182 memory_object_offset_t offset,
2183 vm_size_t size,
2184 boolean_t dirty,
2185 boolean_t kernel_copy
2186 )
2187 {
2188 extern kern_return_t dp_memory_object_data_return(memory_object_t,
2189 memory_object_offset_t,
2190 vm_size_t,
2191 boolean_t,
2192 boolean_t);
2193 #ifdef MACH_BSD
2194 extern kern_return_t vnode_pager_data_return(memory_object_t,
2195 memory_object_offset_t,
2196 vm_size_t,
2197 boolean_t,
2198 boolean_t);
2199 extern kern_return_t device_pager_data_return(memory_object_t,
2200 memory_object_offset_t,
2201 vm_size_t,
2202 boolean_t,
2203 boolean_t);
2204
2205 if (memory_object->pager == &vnode_pager_workaround) {
2206 return vnode_pager_data_return(memory_object,
2207 offset,
2208 size,
2209 dirty,
2210 kernel_copy);
2211 } else if (memory_object->pager == &device_pager_workaround) {
2212 return device_pager_data_return(memory_object,
2213 offset,
2214 size,
2215 dirty,
2216 kernel_copy);
2217 } else
2218 #endif
2219 return dp_memory_object_data_return(memory_object,
2220 offset,
2221 size,
2222 dirty,
2223 kernel_copy);
2224 }
2225
2226 /* Routine memory_object_data_initialize */
2227 kern_return_t memory_object_data_initialize
2228 (
2229 memory_object_t memory_object,
2230 memory_object_offset_t offset,
2231 vm_size_t size
2232 )
2233 {
2234
2235 extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
2236 memory_object_offset_t,
2237 vm_size_t);
2238 #ifdef MACH_BSD
2239 extern kern_return_t vnode_pager_data_initialize(memory_object_t,
2240 memory_object_offset_t,
2241 vm_size_t);
2242 extern kern_return_t device_pager_data_initialize(memory_object_t,
2243 memory_object_offset_t,
2244 vm_size_t);
2245
2246 if (memory_object->pager == &vnode_pager_workaround) {
2247 return vnode_pager_data_initialize(memory_object,
2248 offset,
2249 size);
2250 } else if (memory_object->pager == &device_pager_workaround) {
2251 return device_pager_data_initialize(memory_object,
2252 offset,
2253 size);
2254 } else
2255 #endif
2256 return dp_memory_object_data_initialize(memory_object,
2257 offset,
2258 size);
2259 }
2260
2261 /* Routine memory_object_data_unlock */
2262 kern_return_t memory_object_data_unlock
2263 (
2264 memory_object_t memory_object,
2265 memory_object_offset_t offset,
2266 vm_size_t size,
2267 vm_prot_t desired_access
2268 )
2269 {
2270 extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
2271 memory_object_offset_t,
2272 vm_size_t,
2273 vm_prot_t);
2274 #ifdef MACH_BSD
2275 extern kern_return_t vnode_pager_data_unlock(memory_object_t,
2276 memory_object_offset_t,
2277 vm_size_t,
2278 vm_prot_t);
2279 extern kern_return_t device_pager_data_unlock(memory_object_t,
2280 memory_object_offset_t,
2281 vm_size_t,
2282 vm_prot_t);
2283
2284 if (memory_object->pager == &vnode_pager_workaround) {
2285 return vnode_pager_data_unlock(memory_object,
2286 offset,
2287 size,
2288 desired_access);
2289 } else if (memory_object->pager == &device_pager_workaround) {
2290 return device_pager_data_unlock(memory_object,
2291 offset,
2292 size,
2293 desired_access);
2294 } else
2295 #endif
2296 return dp_memory_object_data_unlock(memory_object,
2297 offset,
2298 size,
2299 desired_access);
2300
2301 }
2302
2303 /* Routine memory_object_synchronize */
2304 kern_return_t memory_object_synchronize
2305 (
2306 memory_object_t memory_object,
2307 memory_object_offset_t offset,
2308 vm_size_t size,
2309 vm_sync_t sync_flags
2310 )
2311 {
2312 extern kern_return_t dp_memory_object_synchronize(memory_object_t,
2313 memory_object_offset_t,
2314 vm_size_t,
2315 vm_sync_t);
2316 #ifdef MACH_BSD
2317 extern kern_return_t vnode_pager_synchronize(memory_object_t,
2318 memory_object_offset_t,
2319 vm_size_t,
2320 vm_sync_t);
2321 extern kern_return_t device_pager_synchronize(memory_object_t,
2322 memory_object_offset_t,
2323 vm_size_t,
2324 vm_sync_t);
2325
2326 if (memory_object->pager == &vnode_pager_workaround) {
2327 return vnode_pager_synchronize(
2328 memory_object,
2329 offset,
2330 size,
2331 sync_flags);
2332 } else if (memory_object->pager == &device_pager_workaround) {
2333 return device_pager_synchronize(
2334 memory_object,
2335 offset,
2336 size,
2337 sync_flags);
2338 } else
2339 #endif
2340 return dp_memory_object_synchronize(
2341 memory_object,
2342 offset,
2343 size,
2344 sync_flags);
2345 }
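
/*
 * Caller's-eye sketch (illustrative; no such call is made in this
 * file): a synchronous flush of one page of a pager-backed region
 * would look roughly like
 *
 *	kr = memory_object_synchronize(object->pager,
 *				       offset,
 *				       PAGE_SIZE,
 *				       VM_SYNC_SYNCHRONOUS);
 *
 * with VM_SYNC_ASYNCHRONOUS and VM_SYNC_INVALIDATE being the other
 * commonly used sync_flags values.
 */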
2346
2347 /* Routine memory_object_unmap */
2348 kern_return_t memory_object_unmap
2349 (
2350 memory_object_t memory_object
2351 )
2352 {
2353 extern kern_return_t dp_memory_object_unmap(memory_object_t);
2354 #ifdef MACH_BSD
2355 extern kern_return_t vnode_pager_unmap(memory_object_t);
2356 extern kern_return_t device_pager_unmap(memory_object_t);
2357
2358 if (memory_object->pager == &vnode_pager_workaround) {
2359 return vnode_pager_unmap(memory_object);
2360 } else if (memory_object->pager == &device_pager_workaround) {
2361 return device_pager_unmap(memory_object);
2362 } else
2363 #endif
2364 return dp_memory_object_unmap(memory_object);
2365 }
2366
2367 /* Routine memory_object_create */
2368 kern_return_t memory_object_create
2369 (
2370 memory_object_default_t default_memory_manager,
2371 vm_size_t new_memory_object_size,
2372 memory_object_t *new_memory_object
2373 )
2374 {
2375 extern kern_return_t default_pager_memory_object_create(memory_object_default_t,
2376 vm_size_t,
2377 memory_object_t *);
2378
2379 return default_pager_memory_object_create(default_memory_manager,
2380 new_memory_object_size,
2381 new_memory_object);
2382 }
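
/*
 * Usage sketch (editorial): this is the kernel-to-default-pager request
 * issued when an anonymous VM object first needs backing store.  The
 * default_memory_manager argument is a reference to the manager
 * registered through host_default_memory_manager(), and the caller
 * attaches the resulting memory object to the VM object that needed
 * paging space, roughly:
 *
 *	memory_object_default_t	dmm;	(reference to the registered
 *					 default manager)
 *	memory_object_t		pager;
 *
 *	kr = memory_object_create(dmm, object->size, &pager);
 */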
2383