1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: vm/memory_object.c
54 * Author: Michael Wayne Young
55 *
56 * External memory management interface control functions.
57 */
58
59#include <advisory_pageout.h>
60
61/*
62 * Interface dependencies:
63 */
64
65#include <mach/std_types.h> /* For pointer_t */
66#include <mach/mach_types.h>
67
68#include <mach/mig.h>
69#include <mach/kern_return.h>
70#include <mach/memory_object.h>
71#include <mach/memory_object_default.h>
72#include <mach/memory_object_control_server.h>
73#include <mach/host_priv_server.h>
74#include <mach/boolean.h>
75#include <mach/vm_prot.h>
76#include <mach/message.h>
77
78/*
79 * Implementation dependencies:
80 */
81#include <string.h> /* For memcpy() */
82
83#include <kern/xpr.h>
84#include <kern/host.h>
85#include <kern/thread.h> /* For current_thread() */
86#include <kern/ipc_mig.h>
87#include <kern/misc_protos.h>
88
89#include <vm/vm_object.h>
90#include <vm/vm_fault.h>
91#include <vm/memory_object.h>
92#include <vm/vm_page.h>
93#include <vm/vm_pageout.h>
94#include <vm/pmap.h> /* For pmap_clear_modify */
95#include <vm/vm_kern.h> /* For kernel_map, vm_move */
96#include <vm/vm_map.h> /* For vm_map_pageable */
97
98#if MACH_PAGEMAP
99#include <vm/vm_external.h>
100#endif /* MACH_PAGEMAP */
101
102memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
103vm_size_t memory_manager_default_cluster = 0;
104decl_mutex_data(, memory_manager_default_lock)
105
106/*
107 * Forward ref to file-local function:
108 */
109boolean_t
110vm_object_update(vm_object_t, vm_object_offset_t,
111 vm_size_t, memory_object_return_t, int, vm_prot_t);
112
113
114/*
115 * Routine: memory_object_should_return_page
116 *
117 * Description:
118 * Determine whether the given page should be returned,
119 * based on the page's state and on the given return policy.
120 *
121 * We should return the page if one of the following is true:
122 *
123 * 1. Page is dirty and should_return is not RETURN_NONE.
124 * 2. Page is precious and should_return is RETURN_ALL.
125 * 3. Should_return is RETURN_ANYTHING.
126 *
127 * As a side effect, m->dirty will be made consistent
128 * with pmap_is_modified(m), if should_return is not
129 * MEMORY_OBJECT_RETURN_NONE.
130 */
131
132#define memory_object_should_return_page(m, should_return) \
133 (should_return != MEMORY_OBJECT_RETURN_NONE && \
134 (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
135 ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
136 (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
137
138typedef int memory_object_lock_result_t;
139
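/*
 * Possible outcomes of memory_object_lock_page(): the operation is
 * complete, the caller must block (page busy/cleaning or wired), or the
 * page must be cleaned (dirty) or returned (precious) to the pager.
 */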
140#define MEMORY_OBJECT_LOCK_RESULT_DONE 0
141#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
142#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
143#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
144
145memory_object_lock_result_t memory_object_lock_page(
146 vm_page_t m,
147 memory_object_return_t should_return,
148 boolean_t should_flush,
149 vm_prot_t prot);
150
151/*
152 * Routine: memory_object_lock_page
153 *
154 * Description:
155 * Perform the appropriate lock operations on the
156 * given page. See the description of
157 * "memory_object_lock_request" for the meanings
158 * of the arguments.
159 *
160 * Returns an indication that the operation
161 * completed, blocked, or that the page must
162 * be cleaned or returned.
163 */
164memory_object_lock_result_t
165memory_object_lock_page(
166 vm_page_t m,
167 memory_object_return_t should_return,
168 boolean_t should_flush,
169 vm_prot_t prot)
170{
171 XPR(XPR_MEMORY_OBJECT,
172 "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
173 (integer_t)m, should_return, should_flush, prot, 0);
174
175 /*
176 * If we cannot change access to the page,
177 * either because a mapping is in progress
178 * (busy page) or because a mapping has been
179 * wired, then give up.
180 */
181
182 if (m->busy || m->cleaning)
183 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
184
185 /*
186 * Don't worry about pages for which the kernel
187 * does not have any data.
188 */
189
190 if (m->absent || m->error || m->restart) {
191 if(m->error && should_flush) {
192 /* dump the page, pager wants us to */
193 /* clean it up and there is no */
194 /* relevant data to return */
195 if(m->wire_count == 0) {
196 VM_PAGE_FREE(m);
197 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
198 }
199 } else {
200 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
201 }
202 }
203
204 assert(!m->fictitious);
205
206 if (m->wire_count != 0) {
207 /*
208 * If no change would take place
209 * anyway, return successfully.
210 *
211 * No change means:
212 * Not flushing AND
213 * No change to page lock [2 checks] AND
214 * Should not return page
215 *
216 * XXX This doesn't handle sending a copy of a wired
217 * XXX page to the pager, but that will require some
218 * XXX significant surgery.
219 */
220 if (!should_flush &&
221 (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
222 ! memory_object_should_return_page(m, should_return)) {
223
224 /*
225 * Restart page unlock requests,
226 * even though no change took place.
227 * [Memory managers may be expecting
228 * to see new requests.]
229 */
230 m->unlock_request = VM_PROT_NONE;
231 PAGE_WAKEUP(m);
232
233 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
234 }
235
236 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
237 }
238
239 /*
240 * If the page is to be flushed, allow
241 * that to be done as part of the protection.
242 */
243
244 if (should_flush)
245 prot = VM_PROT_ALL;
246
247 /*
248 * Set the page lock.
249 *
250 * If we are decreasing permission, do it now;
251 * let the fault handler take care of increases
252 * (pmap_page_protect may not increase protection).
253 */
254
255 if (prot != VM_PROT_NO_CHANGE) {
256 if ((m->page_lock ^ prot) & prot) {
257 pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
258 }
259#if 0
260 /* code associated with the vestigial
261 * memory_object_data_unlock
262 */
263 m->page_lock = prot;
264 m->lock_supplied = TRUE;
265 if (prot != VM_PROT_NONE)
266 m->unusual = TRUE;
267 else
268 m->unusual = FALSE;
269
270 /*
271 * Restart any past unlock requests, even if no
272 * change resulted. If the manager explicitly
273 * requested no protection change, then it is assumed
274 * to be remembering past requests.
275 */
276
277 m->unlock_request = VM_PROT_NONE;
278#endif /* 0 */
279 PAGE_WAKEUP(m);
280 }
281
282 /*
283 * Handle page returning.
284 */
285
286 if (memory_object_should_return_page(m, should_return)) {
287
288 /*
289 * If we weren't planning
290 * to flush the page anyway,
291 * we may need to remove the
292 * page from the pageout
293 * system and from physical
294 * maps now.
295 */
296
297 vm_page_lock_queues();
298 VM_PAGE_QUEUES_REMOVE(m);
299 vm_page_unlock_queues();
300
301 if (!should_flush)
302 pmap_page_protect(m->phys_page, VM_PROT_NONE);
303
304 if (m->dirty)
305 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
306 else
307 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
308 }
309
310 /*
311 * Handle flushing
312 */
313
314 if (should_flush) {
315 VM_PAGE_FREE(m);
316 } else {
317 extern boolean_t vm_page_deactivate_hint;
318
319 /*
320 * XXX Make clean but not flush a paging hint,
321 * and deactivate the pages. This is a hack
322 * because it overloads flush/clean with
323 * implementation-dependent meaning. This only
324 * happens to pages that are already clean.
325 */
326
327 if (vm_page_deactivate_hint &&
328 (should_return != MEMORY_OBJECT_RETURN_NONE)) {
329 vm_page_lock_queues();
330 vm_page_deactivate(m);
331 vm_page_unlock_queues();
332 }
333 }
334
335 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
336}
337
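/*
 * Flush the batch of pages accumulated by vm_object_update() to the
 * pager: drop the object lock, call memory_object_data_return() for
 * "data_cnt" bytes starting at offset "po" (dirty if the action was
 * MUST_CLEAN, kept resident unless should_flush), then retake the lock.
 */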
338#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po) \
339MACRO_BEGIN \
340 \
341 register int i; \
342 register vm_page_t hp; \
343 \
344 vm_object_unlock(object); \
345 \
346 (void) memory_object_data_return(object->pager, \
347 po, \
348 data_cnt, \
349 (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
350 !should_flush); \
351 \
352 vm_object_lock(object); \
353MACRO_END
354
355/*
356 * Routine: memory_object_lock_request [user interface]
357 *
358 * Description:
359 * Control use of the data associated with the given
360 * memory object. For each page in the given range,
361 * perform the following operations, in order:
362 * 1) restrict access to the page (disallow
363 * forms specified by "prot");
364 * 2) return data to the manager (if "should_return"
365 * is RETURN_DIRTY and the page is dirty, or
366 * "should_return" is RETURN_ALL and the page
367 * is either dirty or precious); and,
368 * 3) flush the cached copy (if "should_flush"
369 * is asserted).
370 * The set of pages is defined by a starting offset
371 * ("offset") and size ("size"). Only pages with the
372 * same page alignment as the starting offset are
373 * considered.
374 *
375 * A single acknowledgement is sent (to the "reply_to"
376 * port) when these actions are complete. If successful,
377 * the naked send right for reply_to is consumed.
378 */
379
380kern_return_t
381memory_object_lock_request(
382 memory_object_control_t control,
383 memory_object_offset_t offset,
384 memory_object_size_t size,
385 memory_object_return_t should_return,
386 int flags,
387 vm_prot_t prot)
388{
389 vm_object_t object;
390 vm_object_offset_t original_offset = offset;
391 boolean_t should_flush=flags & MEMORY_OBJECT_DATA_FLUSH;
392
393 XPR(XPR_MEMORY_OBJECT,
394 "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
395 (integer_t)control, offset, size,
396 (((should_return&1)<<1)|should_flush), prot);
397
398 /*
399 * Check for bogus arguments.
400 */
401 object = memory_object_control_to_vm_object(control);
402 if (object == VM_OBJECT_NULL)
403 return (KERN_INVALID_ARGUMENT);
404
405 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
406 return (KERN_INVALID_ARGUMENT);
407
408 size = round_page_64(size);
409
410 /*
411 * Lock the object, and acquire a paging reference to
412 * prevent the memory_object reference from being released.
413 */
414 vm_object_lock(object);
415 vm_object_paging_begin(object);
416 offset -= object->paging_offset;
417
418 (void)vm_object_update(object,
419 offset, size, should_return, flags, prot);
420
421 vm_object_paging_end(object);
422 vm_object_unlock(object);
423
424 return (KERN_SUCCESS);
425}
426
427/*
428 * memory_object_release_name: [interface]
429 *
430 * Enforces name semantic on memory_object reference count decrement
431 * This routine should not be called unless the caller holds a name
432 * reference gained through the memory_object_named_create or the
433 * memory_object_rename call.
434 * If the TERMINATE_IDLE flag is set, the call will return if the
435 * reference count is not 1, i.e. if the object is not idle with the
436 * name being the only remaining reference.
437 * If the decision is made to proceed, the name field flag is set to
438 * false and the reference count is decremented. If the RESPECT_CACHE
439 * flag is set and the reference count has gone to zero, the
440 * memory_object is checked to see if it is cacheable; otherwise, when
441 * the reference count is zero, it is simply terminated.
442 */
443
444kern_return_t
445memory_object_release_name(
446 memory_object_control_t control,
447 int flags)
448{
449 vm_object_t object;
450
451 object = memory_object_control_to_vm_object(control);
452 if (object == VM_OBJECT_NULL)
453 return (KERN_INVALID_ARGUMENT);
454
455 return vm_object_release_name(object, flags);
456}
457
458
459
460/*
461 * Routine: memory_object_destroy [user interface]
462 * Purpose:
463 * Shut down a memory object, despite the
464 * presence of address map (or other) references
465 * to the vm_object.
466 */
467kern_return_t
468memory_object_destroy(
469 memory_object_control_t control,
470 kern_return_t reason)
471{
472 vm_object_t object;
473
474 object = memory_object_control_to_vm_object(control);
475 if (object == VM_OBJECT_NULL)
476 return (KERN_INVALID_ARGUMENT);
477
478 return (vm_object_destroy(object, reason));
479}
480
481/*
482 * Routine: vm_object_sync
483 *
484 * Kernel internal function to synch out pages in a given
485 * range within an object to its memory manager. Much the
486 * same as memory_object_lock_request but page protection
487 * is not changed.
488 *
489 * If the should_flush and should_return flags are true pages
490 * are flushed, that is dirty & precious pages are written to
491 * the memory manager and then discarded. If should_return
492 * is false, only precious pages are returned to the memory
493 * manager.
494 *
495 * If should_flush is false and should_return true, the memory
496 * manager's copy of the pages is updated. If should_return
497 * is also false, only the precious pages are updated. This
498 * last option is of limited utility.
499 *
500 * Returns:
501 * FALSE if no pages were returned to the pager
502 * TRUE otherwise.
503 */
504
505boolean_t
506vm_object_sync(
507 vm_object_t object,
508 vm_object_offset_t offset,
509 vm_size_t size,
510 boolean_t should_flush,
511 boolean_t should_return)
512{
513 boolean_t rv;
514
515 XPR(XPR_VM_OBJECT,
516 "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
517 (integer_t)object, offset, size, should_flush, should_return);
518
519 /*
520 * Lock the object, and acquire a paging reference to
521 * prevent the memory_object and control ports from
522 * being destroyed.
523 */
524 vm_object_lock(object);
525 vm_object_paging_begin(object);
526
527 rv = vm_object_update(object, offset, size,
528 (should_return) ?
529 MEMORY_OBJECT_RETURN_ALL :
530 MEMORY_OBJECT_RETURN_NONE,
531 (should_flush) ?
532 MEMORY_OBJECT_DATA_FLUSH : 0,
533 VM_PROT_NO_CHANGE);
534
535
536 vm_object_paging_end(object);
537 vm_object_unlock(object);
538 return rv;
539}
540
541/*
542 * Routine: vm_object_update
543 * Description:
544 * Work function for m_o_lock_request(), vm_o_sync().
545 *
546 * Called with object locked and paging ref taken.
547 */
548kern_return_t
549vm_object_update(
550 register vm_object_t object,
551 register vm_object_offset_t offset,
552 register vm_size_t size,
553 memory_object_return_t should_return,
554 int flags,
555 vm_prot_t prot)
556{
557 register vm_page_t m;
558 vm_page_t holding_page;
559 vm_size_t original_size = size;
560 vm_object_offset_t paging_offset = 0;
561 vm_object_t copy_object;
562 vm_size_t data_cnt = 0;
563 vm_object_offset_t last_offset = offset;
564 memory_object_lock_result_t page_lock_result;
565 memory_object_lock_result_t pageout_action;
566 boolean_t data_returned = FALSE;
567 boolean_t update_cow;
568 boolean_t should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
569 boolean_t pending_pageout = FALSE;
570
571 /*
572 * To avoid blocking while scanning for pages, save
573 * dirty pages to be cleaned all at once.
574 *
575 * XXXO A similar strategy could be used to limit the
576 * number of times that a scan must be restarted for
577 * other reasons. Those pages that would require blocking
578 * could be temporarily collected in another list, or
579 * their offsets could be recorded in a small array.
580 */
581
582 /*
583 * XXX NOTE: May want to consider converting this to a page list
584 * XXX vm_map_copy interface. Need to understand object
585 * XXX coalescing implications before doing so.
586 */
587
588 update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
589 && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
590 !(flags & MEMORY_OBJECT_DATA_PURGE)))
591 || (flags & MEMORY_OBJECT_COPY_SYNC);
592
593
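/*
 * Before touching the pages, handle copy-on-write: if the object has a
 * copy object (and this flush may modify it) or MEMORY_OBJECT_DATA_SYNC
 * was requested, fault the affected range through vm_fault_page() so
 * the copy object holds its own consistent copy of the data.
 */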
594 if((((copy_object = object->copy) != NULL) && update_cow) ||
595 (flags & MEMORY_OBJECT_DATA_SYNC)) {
596 vm_size_t i;
597 vm_size_t copy_size;
598 vm_object_offset_t copy_offset;
599 vm_prot_t prot;
600 vm_page_t page;
601 vm_page_t top_page;
602 kern_return_t error = 0;
603
604 if(copy_object != NULL) {
605 /* translate offset with respect to shadow's offset */
606 copy_offset = (offset >= copy_object->shadow_offset)?
607 offset - copy_object->shadow_offset :
608 (vm_object_offset_t) 0;
609 if(copy_offset > copy_object->size)
610 copy_offset = copy_object->size;
611
612 /* clip size with respect to shadow offset */
613 copy_size = (offset >= copy_object->shadow_offset) ?
614 size : size - (copy_object->shadow_offset - offset);
615
616 if(copy_size <= 0) {
617 copy_size = 0;
618 } else {
619 copy_size = ((copy_offset + copy_size)
620 <= copy_object->size) ?
621 copy_size : copy_object->size - copy_offset;
622 }
623 /* check for a copy_offset which is beyond the end of */
624 /* the copy_object */
625 if(copy_size < 0)
626 copy_size = 0;
627
628 copy_size+=copy_offset;
629
630 vm_object_unlock(object);
631 vm_object_lock(copy_object);
632 } else {
633 copy_object = object;
634
635 copy_size = offset + size;
636 copy_offset = offset;
637 }
638
639 vm_object_paging_begin(copy_object);
640 for (i=copy_offset; i<copy_size; i+=PAGE_SIZE) {
641 RETRY_COW_OF_LOCK_REQUEST:
642 prot = VM_PROT_WRITE|VM_PROT_READ;
643 switch (vm_fault_page(copy_object, i,
644 VM_PROT_WRITE|VM_PROT_READ,
645 FALSE,
646 THREAD_UNINT,
647 copy_offset,
648 copy_offset+copy_size,
649 VM_BEHAVIOR_SEQUENTIAL,
650 &prot,
651 &page,
652 &top_page,
653 (int *)0,
654 &error,
655 FALSE,
656 FALSE, NULL, 0)) {
657
658 case VM_FAULT_SUCCESS:
659 if(top_page) {
660 vm_fault_cleanup(
661 page->object, top_page);
662 PAGE_WAKEUP_DONE(page);
663 vm_page_lock_queues();
664 if (!page->active && !page->inactive)
665 vm_page_activate(page);
666 vm_page_unlock_queues();
667 vm_object_lock(copy_object);
668 vm_object_paging_begin(copy_object);
669 } else {
670 PAGE_WAKEUP_DONE(page);
671 vm_page_lock_queues();
672 if (!page->active && !page->inactive)
673 vm_page_activate(page);
674 vm_page_unlock_queues();
675 }
676 break;
677 case VM_FAULT_RETRY:
678 prot = VM_PROT_WRITE|VM_PROT_READ;
679 vm_object_lock(copy_object);
680 vm_object_paging_begin(copy_object);
681 goto RETRY_COW_OF_LOCK_REQUEST;
682 case VM_FAULT_INTERRUPTED:
683 prot = VM_PROT_WRITE|VM_PROT_READ;
684 vm_object_lock(copy_object);
685 vm_object_paging_begin(copy_object);
686 goto RETRY_COW_OF_LOCK_REQUEST;
687 case VM_FAULT_MEMORY_SHORTAGE:
688 VM_PAGE_WAIT();
689 prot = VM_PROT_WRITE|VM_PROT_READ;
690 vm_object_lock(copy_object);
691 vm_object_paging_begin(copy_object);
692 goto RETRY_COW_OF_LOCK_REQUEST;
693 case VM_FAULT_FICTITIOUS_SHORTAGE:
694 vm_page_more_fictitious();
695 prot = VM_PROT_WRITE|VM_PROT_READ;
696 vm_object_lock(copy_object);
697 vm_object_paging_begin(copy_object);
698 goto RETRY_COW_OF_LOCK_REQUEST;
699 case VM_FAULT_MEMORY_ERROR:
700 vm_object_lock(object);
701 goto BYPASS_COW_COPYIN;
702 }
703
704 }
705 vm_object_paging_end(copy_object);
706 if(copy_object != object) {
707 vm_object_unlock(copy_object);
708 vm_object_lock(object);
709 }
710 }
711 if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
712 return KERN_SUCCESS;
713 }
714 if(((copy_object = object->copy) != NULL) &&
715 (flags & MEMORY_OBJECT_DATA_PURGE)) {
716 copy_object->shadow_severed = TRUE;
717 copy_object->shadowed = FALSE;
718 copy_object->shadow = NULL;
719 /* delete the ref the COW was holding on the target object */
720 vm_object_deallocate(object);
721 }
722BYPASS_COW_COPYIN:
723
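/*
 * Walk the range a page at a time. Pages that must be cleaned or
 * returned are batched into a contiguous run; the run is pushed to the
 * pager (LIST_REQ_PAGEOUT_PAGES) whenever it would exceed DATA_WRITE_MAX
 * pages, becomes discontiguous, or the scan completes.
 */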
724 for (;
725 size != 0;
726 size -= PAGE_SIZE, offset += PAGE_SIZE_64)
727 {
728 /*
729 * Limit the number of pages to be cleaned at once.
730 */
731 if (pending_pageout &&
732 data_cnt >= PAGE_SIZE * DATA_WRITE_MAX)
733 {
734 LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
735 pageout_action, paging_offset);
736 data_cnt = 0;
737 pending_pageout = FALSE;
738 }
739
740 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
741 page_lock_result = memory_object_lock_page(m, should_return,
742 should_flush, prot);
743
744 XPR(XPR_MEMORY_OBJECT,
745 "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
746 (integer_t)object, offset, page_lock_result, 0, 0);
747
748 switch (page_lock_result)
749 {
750 case MEMORY_OBJECT_LOCK_RESULT_DONE:
751 /*
752 * End of a cluster of dirty pages.
753 */
754 if(pending_pageout) {
755 LIST_REQ_PAGEOUT_PAGES(object,
756 data_cnt, pageout_action,
757 paging_offset);
758 data_cnt = 0;
759 pending_pageout = FALSE;
760 continue;
761 }
762 break;
763
764 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
765 /*
766 * Since it is necessary to block,
767 * clean any dirty pages now.
768 */
769 if(pending_pageout) {
770 LIST_REQ_PAGEOUT_PAGES(object,
771 data_cnt, pageout_action,
772 paging_offset);
773 pending_pageout = FALSE;
774 data_cnt = 0;
775 continue;
776 }
777
778 PAGE_SLEEP(object, m, THREAD_UNINT);
779 continue;
780
781 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
782 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
783 /*
784 * The clean and return cases are similar.
785 *
786 */
787
788 /*
789 * if this would form a discontiguous block,
790 * clean the old pages and start anew.
791 *
792 */
793
794 /*
795 * Mark the page busy since we unlock the
796 * object below.
797 */
798 m->busy = TRUE;
799 if (pending_pageout &&
800 (last_offset != offset ||
801 pageout_action != page_lock_result)) {
802 LIST_REQ_PAGEOUT_PAGES(object,
803 data_cnt, pageout_action,
804 paging_offset);
805 pending_pageout = FALSE;
806 data_cnt = 0;
807 }
808 m->busy = FALSE;
809 holding_page = VM_PAGE_NULL;
810 if(m->cleaning) {
811 PAGE_SLEEP(object, m, THREAD_UNINT);
812 continue;
813 }
814 if(!pending_pageout) {
815 pending_pageout = TRUE;
816 pageout_action = page_lock_result;
817 paging_offset = offset;
818 }
819 if (should_flush) {
820 vm_page_lock_queues();
821 m->list_req_pending = TRUE;
822 m->cleaning = TRUE;
823 m->busy = TRUE;
824 m->pageout = TRUE;
825 vm_page_wire(m);
826 vm_page_unlock_queues();
827 } else {
828 /*
829 * Clean but do not flush
830 */
831 vm_page_lock_queues();
832 m->list_req_pending = TRUE;
833 m->cleaning = TRUE;
834 vm_page_unlock_queues();
835
836 }
837 vm_object_unlock(object);
838
839
840 data_cnt += PAGE_SIZE;
841 last_offset = offset + PAGE_SIZE_64;
842 data_returned = TRUE;
843
844 vm_object_lock(object);
845 break;
846 }
847 break;
848 }
849 }
850
851 /*
852 * We have completed the scan for applicable pages.
853 * Clean any pages that have been saved.
854 */
855 if (pending_pageout) {
856 LIST_REQ_PAGEOUT_PAGES(object,
857 data_cnt, pageout_action, paging_offset);
858 }
859 return (data_returned);
860}
861
862/*
863 * Routine: memory_object_synchronize_completed [user interface]
864 *
865 * Tell kernel that previously synchronized data
866 * (memory_object_synchronize) has been queued or placed on the
867 * backing storage.
868 *
869 * Note: there may be multiple synchronize requests for a given
870 * memory object outstanding but they will not overlap.
871 */
872
873kern_return_t
874memory_object_synchronize_completed(
875 memory_object_control_t control,
876 memory_object_offset_t offset,
877 vm_offset_t length)
878{
879 vm_object_t object;
880 msync_req_t msr;
881
882 XPR(XPR_MEMORY_OBJECT,
883 "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
884 (integer_t)object, offset, length, 0, 0);
885
886 /*
887 * Look for bogus arguments
888 */
889
890 object = memory_object_control_to_vm_object(control);
891 if (object == VM_OBJECT_NULL)
892 return (KERN_INVALID_ARGUMENT);
893
894 vm_object_lock(object);
895
896/*
897 * search for sync request structure
898 */
899 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
900 if (msr->offset == offset && msr->length == length) {
901 queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
902 break;
903 }
904 }/* queue_iterate */
905
906 if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
907 vm_object_unlock(object);
908 return KERN_INVALID_ARGUMENT;
909 }
910
911 msr_lock(msr);
912 vm_object_unlock(object);
913 msr->flag = VM_MSYNC_DONE;
914 msr_unlock(msr);
915 thread_wakeup((event_t) msr);
916
917 return KERN_SUCCESS;
918}/* memory_object_synchronize_completed */
919
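/*
 * Common worker for the attribute-setting paths: validate the copy
 * strategy and cluster size, copy the attributes into the (external)
 * object, and wake anyone waiting for the pager to become ready.
 */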
920static kern_return_t
921vm_object_set_attributes_common(
922 vm_object_t object,
923 boolean_t may_cache,
924 memory_object_copy_strategy_t copy_strategy,
925 boolean_t temporary,
926 vm_size_t cluster_size,
927 boolean_t silent_overwrite,
928 boolean_t advisory_pageout)
929{
930 boolean_t object_became_ready;
931
932 XPR(XPR_MEMORY_OBJECT,
933 "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
934 (integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
935
936 if (object == VM_OBJECT_NULL)
937 return(KERN_INVALID_ARGUMENT);
938
939 /*
940 * Verify the attributes of importance
941 */
942
943 switch(copy_strategy) {
944 case MEMORY_OBJECT_COPY_NONE:
945 case MEMORY_OBJECT_COPY_DELAY:
946 break;
947 default:
948 return(KERN_INVALID_ARGUMENT);
949 }
950
951#if !ADVISORY_PAGEOUT
952 if (silent_overwrite || advisory_pageout)
953 return(KERN_INVALID_ARGUMENT);
954
955#endif /* !ADVISORY_PAGEOUT */
956 if (may_cache)
957 may_cache = TRUE;
958 if (temporary)
959 temporary = TRUE;
960 if (cluster_size != 0) {
961 int pages_per_cluster;
962 pages_per_cluster = atop_32(cluster_size);
963 /*
964 * Cluster size must be integral multiple of page size,
965 * and be a power of 2 number of pages.
966 */
967 if ((cluster_size & (PAGE_SIZE-1)) ||
968 ((pages_per_cluster-1) & pages_per_cluster))
969 return KERN_INVALID_ARGUMENT;
970 }
971
972 vm_object_lock(object);
973
974 /*
975 * Copy the attributes
976 */
977 assert(!object->internal);
978 object_became_ready = !object->pager_ready;
979 object->copy_strategy = copy_strategy;
980 object->can_persist = may_cache;
981 object->temporary = temporary;
982 object->silent_overwrite = silent_overwrite;
983 object->advisory_pageout = advisory_pageout;
984 if (cluster_size == 0)
985 cluster_size = PAGE_SIZE;
986 object->cluster_size = cluster_size;
987
988 assert(cluster_size >= PAGE_SIZE &&
989 cluster_size % PAGE_SIZE == 0);
990
991 /*
992 * Wake up anyone waiting for the ready attribute
993 * to become asserted.
994 */
995
996 if (object_became_ready) {
997 object->pager_ready = TRUE;
998 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
999 }
1000
1001 vm_object_unlock(object);
1002
1003 return(KERN_SUCCESS);
1004}
1005
1006/*
1007 * Set the memory object attribute as provided.
1008 *
1009 * XXX This routine cannot be completed until the vm_msync, clean
1010 * in place, and cluster work is completed. See ifdef notyet
1011 * below and note that vm_object_set_attributes_common()
1012 * may have to be expanded.
1013 */
1014kern_return_t
1015memory_object_change_attributes(
1016 memory_object_control_t control,
1017 memory_object_flavor_t flavor,
1018 memory_object_info_t attributes,
1019 mach_msg_type_number_t count)
1020{
1021 vm_object_t object;
1022 kern_return_t result = KERN_SUCCESS;
1023 boolean_t temporary;
1024 boolean_t may_cache;
1025 boolean_t invalidate;
1026 vm_size_t cluster_size;
1027 memory_object_copy_strategy_t copy_strategy;
1028 boolean_t silent_overwrite;
1029 boolean_t advisory_pageout;
1030
1031 object = memory_object_control_to_vm_object(control);
1032 if (object == VM_OBJECT_NULL)
1033 return (KERN_INVALID_ARGUMENT);
1034
1035 vm_object_lock(object);
1036
1037 temporary = object->temporary;
1038 may_cache = object->can_persist;
1039 copy_strategy = object->copy_strategy;
1040 silent_overwrite = object->silent_overwrite;
1041 advisory_pageout = object->advisory_pageout;
1042#if notyet
1043 invalidate = object->invalidate;
1044#endif
1045 cluster_size = object->cluster_size;
1046 vm_object_unlock(object);
1047
1048 switch (flavor) {
1049 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1050 {
1051 old_memory_object_behave_info_t behave;
1052
1053 if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1054 result = KERN_INVALID_ARGUMENT;
1055 break;
1056 }
1057
1058 behave = (old_memory_object_behave_info_t) attributes;
1059
1060 temporary = behave->temporary;
1061 invalidate = behave->invalidate;
1062 copy_strategy = behave->copy_strategy;
1063
1064 break;
1065 }
1066
1067 case MEMORY_OBJECT_BEHAVIOR_INFO:
1068 {
1069 memory_object_behave_info_t behave;
1070
1071 if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1072 result = KERN_INVALID_ARGUMENT;
1073 break;
1074 }
1075
1076 behave = (memory_object_behave_info_t) attributes;
1077
1078 temporary = behave->temporary;
1079 invalidate = behave->invalidate;
1080 copy_strategy = behave->copy_strategy;
1081 silent_overwrite = behave->silent_overwrite;
1082 advisory_pageout = behave->advisory_pageout;
1083 break;
1084 }
1085
1086 case MEMORY_OBJECT_PERFORMANCE_INFO:
1087 {
1088 memory_object_perf_info_t perf;
1089
1090 if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
1091 result = KERN_INVALID_ARGUMENT;
1092 break;
1093 }
1094
1095 perf = (memory_object_perf_info_t) attributes;
1096
1097 may_cache = perf->may_cache;
1098 cluster_size = round_page_32(perf->cluster_size);
1099
1100 break;
1101 }
1102
1103 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1104 {
1105 old_memory_object_attr_info_t attr;
1106
1107 if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1108 result = KERN_INVALID_ARGUMENT;
1109 break;
1110 }
1111
1112 attr = (old_memory_object_attr_info_t) attributes;
1113
1114 may_cache = attr->may_cache;
1115 copy_strategy = attr->copy_strategy;
1116 cluster_size = page_size;
1117
1118 break;
1119 }
1120
1121 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1122 {
1123 memory_object_attr_info_t attr;
1124
1125 if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
1126 result = KERN_INVALID_ARGUMENT;
1127 break;
1128 }
1129
1130 attr = (memory_object_attr_info_t) attributes;
1131
1132 copy_strategy = attr->copy_strategy;
1133 may_cache = attr->may_cache_object;
1134 cluster_size = attr->cluster_size;
1135 temporary = attr->temporary;
1136
1137 break;
1138 }
1139
1140 default:
1141 result = KERN_INVALID_ARGUMENT;
1142 break;
1143 }
1144
1145 if (result != KERN_SUCCESS)
1146 return(result);
1147
1148 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
1149 copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1150 temporary = TRUE;
1151 } else {
1152 temporary = FALSE;
1153 }
1154
1155 /*
1156 * XXX may_cache may become a tri-valued variable to handle
1157 * XXX uncache if not in use.
1158 */
1159 return (vm_object_set_attributes_common(object,
1160 may_cache,
1161 copy_strategy,
1162 temporary,
1163 cluster_size,
1164 silent_overwrite,
1165 advisory_pageout));
1166}
1167
1168kern_return_t
1169memory_object_get_attributes(
1170 memory_object_control_t control,
1171 memory_object_flavor_t flavor,
1172 memory_object_info_t attributes, /* pointer to OUT array */
1173 mach_msg_type_number_t *count) /* IN/OUT */
1174{
1175 kern_return_t ret = KERN_SUCCESS;
1176 vm_object_t object;
1177
1178 object = memory_object_control_to_vm_object(control);
1179 if (object == VM_OBJECT_NULL)
1180 return (KERN_INVALID_ARGUMENT);
1181
1182 vm_object_lock(object);
1183
1184 switch (flavor) {
1185 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1186 {
1187 old_memory_object_behave_info_t behave;
1188
1189 if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1190 ret = KERN_INVALID_ARGUMENT;
1191 break;
1192 }
1193
1194 behave = (old_memory_object_behave_info_t) attributes;
1195 behave->copy_strategy = object->copy_strategy;
1196 behave->temporary = object->temporary;
1197#if notyet /* remove when vm_msync complies and clean in place fini */
1198 behave->invalidate = object->invalidate;
1199#else
1200 behave->invalidate = FALSE;
1201#endif
1202
1203 *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1204 break;
1205 }
1206
1207 case MEMORY_OBJECT_BEHAVIOR_INFO:
1208 {
1209 memory_object_behave_info_t behave;
1210
1211 if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1212 ret = KERN_INVALID_ARGUMENT;
1213 break;
1214 }
1215
1216 behave = (memory_object_behave_info_t) attributes;
1217 behave->copy_strategy = object->copy_strategy;
1218 behave->temporary = object->temporary;
1219#if notyet /* remove when vm_msync complies and clean in place fini */
1220 behave->invalidate = object->invalidate;
1221#else
1222 behave->invalidate = FALSE;
1223#endif
1224 behave->advisory_pageout = object->advisory_pageout;
1225 behave->silent_overwrite = object->silent_overwrite;
1226 *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1227 break;
1228 }
1229
1230 case MEMORY_OBJECT_PERFORMANCE_INFO:
1231 {
1232 memory_object_perf_info_t perf;
1233
1234 if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
1235 ret = KERN_INVALID_ARGUMENT;
1236 break;
1237 }
1238
1239 perf = (memory_object_perf_info_t) attributes;
1240 perf->cluster_size = object->cluster_size;
1241 perf->may_cache = object->can_persist;
1242
1243 *count = MEMORY_OBJECT_PERF_INFO_COUNT;
1244 break;
1245 }
1246
1247 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1248 {
1249 old_memory_object_attr_info_t attr;
1250
1251 if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1252 ret = KERN_INVALID_ARGUMENT;
1253 break;
1254 }
1255
1256 attr = (old_memory_object_attr_info_t) attributes;
1257 attr->may_cache = object->can_persist;
1258 attr->copy_strategy = object->copy_strategy;
1259
1260 *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
1261 break;
1262 }
1263
1264 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1265 {
1266 memory_object_attr_info_t attr;
1267
1268 if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
1269 ret = KERN_INVALID_ARGUMENT;
1270 break;
1271 }
1272
1273 attr = (memory_object_attr_info_t) attributes;
1274 attr->copy_strategy = object->copy_strategy;
1275 attr->cluster_size = object->cluster_size;
1276 attr->may_cache_object = object->can_persist;
1277 attr->temporary = object->temporary;
1278
1279 *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
1280 break;
1281 }
1282
1283 default:
1284 ret = KERN_INVALID_ARGUMENT;
1285 break;
1286 }
1287
1288 vm_object_unlock(object);
1289
1290 return(ret);
1291}
1292
1293
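/*
 * Routine: memory_object_iopl_request [interface]
 * Purpose:
 * Create an I/O UPL against either a named entry port or a memory
 * object control port. For a named entry, the request is validated
 * against the entry's size and protection before the underlying
 * object is resolved; the work is then done by vm_object_iopl_request.
 */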
1294kern_return_t
1295memory_object_iopl_request(
1296 ipc_port_t port,
1297 memory_object_offset_t offset,
1298 vm_size_t *upl_size,
1299 upl_t *upl_ptr,
1300 upl_page_info_array_t user_page_list,
1301 unsigned int *page_list_count,
1302 int *flags)
1303{
1304 vm_object_t object;
1305 kern_return_t ret;
1306 int caller_flags;
1307
1308 caller_flags = *flags;
1309
1310 if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
1311 vm_named_entry_t named_entry;
1312
1313 named_entry = (vm_named_entry_t)port->ip_kobject;
1314 /* a few checks to make sure user is obeying rules */
1315 if(*upl_size == 0) {
1316 if(offset >= named_entry->size)
1317 return(KERN_INVALID_RIGHT);
1318 *upl_size = named_entry->size - offset;
1319 }
1320 if(caller_flags & UPL_COPYOUT_FROM) {
1321 if((named_entry->protection & VM_PROT_READ)
1322 != VM_PROT_READ) {
1323 return(KERN_INVALID_RIGHT);
1324 }
1325 } else {
1326 if((named_entry->protection &
1327 (VM_PROT_READ | VM_PROT_WRITE))
1328 != (VM_PROT_READ | VM_PROT_WRITE)) {
1329 return(KERN_INVALID_RIGHT);
1330 }
1331 }
1332 if(named_entry->size < (offset + *upl_size))
1333 return(KERN_INVALID_ARGUMENT);
1334
1335 /* the caller's offset parameter is relative to the start of the */
1336 /* named entry; convert it to an offset within the object */
1337 offset = offset + named_entry->offset;
1338
1339 if(named_entry->is_sub_map)
1340 return (KERN_INVALID_ARGUMENT);
1341
1342 named_entry_lock(named_entry);
1343
1344 if(named_entry->object) {
1345 /* This is the case where we are going to map */
1346 /* an already mapped object. If the object is */
1347 /* not ready it is internal. An external */
1348 /* object cannot be mapped until it is ready */
1349 /* we can therefore avoid the ready check */
1350 /* in this case. */
1351 vm_object_reference(named_entry->object);
1352 object = named_entry->object;
1353 named_entry_unlock(named_entry);
1354 } else {
1355 object = vm_object_enter(named_entry->backing.pager,
1356 named_entry->offset + named_entry->size,
1357 named_entry->internal,
1358 FALSE,
1359 FALSE);
1360 if (object == VM_OBJECT_NULL) {
1361 named_entry_unlock(named_entry);
1362 return(KERN_INVALID_OBJECT);
1363 }
1364 vm_object_lock(object);
1365
1366 /* create an extra reference for the named entry */
1367 vm_object_reference_locked(object);
1368 named_entry->object = object;
1369 named_entry_unlock(named_entry);
1370
1371 /* wait for object to be ready */
1372 while (!object->pager_ready) {
1373 vm_object_wait(object,
1374 VM_OBJECT_EVENT_PAGER_READY,
1375 THREAD_UNINT);
1376 vm_object_lock(object);
1377 }
1378 vm_object_unlock(object);
1379 }
1380 } else {
1381 memory_object_control_t control;
1382 control = (memory_object_control_t)port->ip_kobject;
1383 if (control == NULL)
1384 return (KERN_INVALID_ARGUMENT);
1385 object = memory_object_control_to_vm_object(control);
1386 if (object == VM_OBJECT_NULL)
1387 return (KERN_INVALID_ARGUMENT);
1388 vm_object_reference(object);
1389 }
1390 if (object == VM_OBJECT_NULL)
1391 return (KERN_INVALID_ARGUMENT);
1392
1393 if (!object->private) {
1394 if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
1395 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
1396 if (object->phys_contiguous) {
1397 *flags = UPL_PHYS_CONTIG;
1398 } else {
1399 *flags = 0;
1400 }
1401 } else {
1402 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
1403 }
1404
1405 ret = vm_object_iopl_request(object,
1406 offset,
1407 *upl_size,
1408 upl_ptr,
1409 user_page_list,
1410 page_list_count,
1411 caller_flags);
1412 vm_object_deallocate(object);
1413 return ret;
1414}
1415
1416/*
1417 * Routine: memory_object_upl_request [interface]
1418 * Purpose:
1419 * Cause the population of a portion of a vm_object.
1420 * Depending on the nature of the request, the pages
1421 * returned may contain valid data or be uninitialized.
1422 *
1423 */
1424
1425kern_return_t
1426memory_object_upl_request(
1427 memory_object_control_t control,
1428 memory_object_offset_t offset,
1429 vm_size_t size,
1430 upl_t *upl_ptr,
1431 upl_page_info_array_t user_page_list,
1432 unsigned int *page_list_count,
1433 int cntrl_flags)
1434{
1435 vm_object_t object;
1436
1437 object = memory_object_control_to_vm_object(control);
1438 if (object == VM_OBJECT_NULL)
1439 return (KERN_INVALID_ARGUMENT);
1440
1441 return vm_object_upl_request(object,
1442 offset,
1443 size,
1444 upl_ptr,
1445 user_page_list,
1446 page_list_count,
1447 cntrl_flags);
1448}
1449
1450/*
1451 * Routine: memory_object_super_upl_request [interface]
1452 * Purpose:
1453 * Cause the population of a portion of a vm_object
1454 * in much the same way as memory_object_upl_request.
1455 * Depending on the nature of the request, the pages
1456 * returned may contain valid data or be uninitialized.
1457 * However, the region may be expanded up to the super
1458 * cluster size provided.
1459 */
1460
1461kern_return_t
1462memory_object_super_upl_request(
1463 memory_object_control_t control,
1464 memory_object_offset_t offset,
1465 vm_size_t size,
1466 vm_size_t super_cluster,
1467 upl_t *upl,
1468 upl_page_info_t *user_page_list,
1469 unsigned int *page_list_count,
1470 int cntrl_flags)
1471{
1472 vm_object_t object;
1473
1474 object = memory_object_control_to_vm_object(control);
1475 if (object == VM_OBJECT_NULL)
1476 return (KERN_INVALID_ARGUMENT);
1477
1478 return vm_object_super_upl_request(object,
1479 offset,
1480 size,
1481 super_cluster,
1482 upl,
1483 user_page_list,
1484 page_list_count,
1485 cntrl_flags);
1486}
1487
1488int vm_stat_discard_cleared_reply = 0;
1489int vm_stat_discard_cleared_unset = 0;
1490int vm_stat_discard_cleared_too_late = 0;
1491
1492
1493
1494/*
1495 * Routine: host_default_memory_manager [interface]
1496 * Purpose:
1497 * set/get the default memory manager port and default cluster
1498 * size.
1499 *
1500 * If successful, consumes the supplied naked send right.
1501 */
1502kern_return_t
1503host_default_memory_manager(
1504 host_priv_t host_priv,
1505 memory_object_default_t *default_manager,
1506 vm_size_t cluster_size)
1507{
1508 memory_object_default_t current_manager;
1509 memory_object_default_t new_manager;
1510 memory_object_default_t returned_manager;
1511
1512 if (host_priv == HOST_PRIV_NULL)
1513 return(KERN_INVALID_HOST);
1514
1515 assert(host_priv == &realhost);
1516
1517 new_manager = *default_manager;
1518 mutex_lock(&memory_manager_default_lock);
1519 current_manager = memory_manager_default;
1520
1521 if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1522 /*
1523 * Retrieve the current value.
1524 */
1525 memory_object_default_reference(current_manager);
1526 returned_manager = current_manager;
1527 } else {
1528 /*
1529 * Retrieve the current value,
1530 * and replace it with the supplied value.
1531 * We return the old reference to the caller
1532 * but we have to take a reference on the new
1533 * one.
1534 */
1535
1536 returned_manager = current_manager;
1537 memory_manager_default = new_manager;
1538 memory_object_default_reference(new_manager);
1539
1540 if (cluster_size % PAGE_SIZE != 0) {
1541#if 0
1542 mutex_unlock(&memory_manager_default_lock);
1543 return KERN_INVALID_ARGUMENT;
1544#else
1545 cluster_size = round_page_32(cluster_size);
1546#endif
1547 }
1548 memory_manager_default_cluster = cluster_size;
1549
1550 /*
1551 * In case anyone's been waiting for a memory
1552 * manager to be established, wake them up.
1553 */
1554
1555 thread_wakeup((event_t) &memory_manager_default);
1556 }
1557
1558 mutex_unlock(&memory_manager_default_lock);
1559
1560 *default_manager = returned_manager;
1561 return(KERN_SUCCESS);
1562}
1563
1564/*
1565 * Routine: memory_manager_default_reference
1566 * Purpose:
1567 * Returns a naked send right for the default
1568 * memory manager. The returned right is always
1569 * valid (not IP_NULL or IP_DEAD).
1570 */
1571
1572__private_extern__ memory_object_default_t
1573memory_manager_default_reference(
1574 vm_size_t *cluster_size)
1575{
1576 memory_object_default_t current_manager;
1577
1578 mutex_lock(&memory_manager_default_lock);
1579 current_manager = memory_manager_default;
1580 while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1581 wait_result_t res;
1582
1583 res = thread_sleep_mutex((event_t) &memory_manager_default,
1584 &memory_manager_default_lock,
1585 THREAD_UNINT);
1586 assert(res == THREAD_AWAKENED);
1587 current_manager = memory_manager_default;
1588 }
1589 memory_object_default_reference(current_manager);
1590 *cluster_size = memory_manager_default_cluster;
1591 mutex_unlock(&memory_manager_default_lock);
1592
1593 return current_manager;
1594}
1595
1596/*
1597 * Routine: memory_manager_default_check
1598 *
1599 * Purpose:
1600 * Check whether a default memory manager has been set
1601 * up yet, or not. Returns KERN_SUCCESS if dmm exists,
1602 * and KERN_FAILURE if dmm does not exist.
1603 *
1604 * If there is no default memory manager, log an error,
1605 * but only the first time.
1606 *
1607 */
1608__private_extern__ kern_return_t
1609memory_manager_default_check(void)
1610{
1611 memory_object_default_t current;
1612
1613 mutex_lock(&memory_manager_default_lock);
1614 current = memory_manager_default;
1615 if (current == MEMORY_OBJECT_DEFAULT_NULL) {
1616 static boolean_t logged; /* initialized to 0 */
1617 boolean_t complain = !logged;
1618 logged = TRUE;
1619 mutex_unlock(&memory_manager_default_lock);
1620 if (complain)
1621 printf("Warning: No default memory manager\n");
1622 return(KERN_FAILURE);
1623 } else {
1624 mutex_unlock(&memory_manager_default_lock);
1625 return(KERN_SUCCESS);
1626 }
1627}
1628
1629__private_extern__ void
1630memory_manager_default_init(void)
1631{
1632 memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
1633 mutex_init(&memory_manager_default_lock, ETAP_VM_MEMMAN);
1634}
1635
1636
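/*
 * Walk the object (and its shadow chain) over the given range,
 * deactivating any resident, unwired, non-busy pages: clear their
 * reference state and move them to the head of the inactive (or
 * zero-fill) queue. If kill_page is set and the object is internal,
 * the pages are also marked clean so their contents can be discarded.
 */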
1637void
1638memory_object_deactivate_pages(
1639 vm_object_t object,
1640 vm_object_offset_t offset,
1641 vm_object_size_t size,
1642 boolean_t kill_page)
1643{
1644 vm_object_t orig_object;
1645 int pages_moved = 0;
1646 int pages_found = 0;
1647
1648 /*
1649 * entered with object lock held, acquire a paging reference to
1650 * prevent the memory_object and control ports from
1651 * being destroyed.
1652 */
1653 orig_object = object;
1654
1655 for (;;) {
1656 register vm_page_t m;
1657 vm_object_offset_t toffset;
1658 vm_object_size_t tsize;
1659
1660 vm_object_paging_begin(object);
1661 vm_page_lock_queues();
1662
1663 for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {
1664
1665 if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {
1666
1667 pages_found++;
1668
1669 if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {
1670
1671 m->reference = FALSE;
1672 pmap_clear_reference(m->phys_page);
1673
1674 if ((kill_page) && (object->internal)) {
1675 m->precious = FALSE;
1676 m->dirty = FALSE;
1677 pmap_clear_modify(m->phys_page);
1678 vm_external_state_clr(object->existence_map, offset);
1679 }
1680 VM_PAGE_QUEUES_REMOVE(m);
1681
1682 if(m->zero_fill) {
1683 queue_enter_first(
1684 &vm_page_queue_zf,
1685 m, vm_page_t, pageq);
1686 } else {
1687 queue_enter_first(
1688 &vm_page_queue_inactive,
1689 m, vm_page_t, pageq);
1690 }
1691
1692 m->inactive = TRUE;
1693 if (!m->fictitious)
1694 vm_page_inactive_count++;
1695
1696 pages_moved++;
1697 }
1698 }
1699 }
1700 vm_page_unlock_queues();
1701 vm_object_paging_end(object);
1702
1703 if (object->shadow) {
1704 vm_object_t tmp_object;
1705
1706 kill_page = 0;
1707
1708 offset += object->shadow_offset;
1709
1710 tmp_object = object->shadow;
1711 vm_object_lock(tmp_object);
1712
1713 if (object != orig_object)
1714 vm_object_unlock(object);
1715 object = tmp_object;
1716 } else
1717 break;
1718 }
1719 if (object != orig_object)
1720 vm_object_unlock(object);
1721}
1722
1723/* Allow manipulation of individual page state. This is actually part of */
1724/* the UPL regimen but takes place on the object rather than on a UPL */
1725
1726kern_return_t
1727memory_object_page_op(
1728 memory_object_control_t control,
1729 memory_object_offset_t offset,
1730 int ops,
1731 ppnum_t *phys_entry,
1732 int *flags)
1733{
1734 vm_object_t object;
1735 vm_page_t dst_page;
1736
1737
1738 object = memory_object_control_to_vm_object(control);
1739 if (object == VM_OBJECT_NULL)
1740 return (KERN_INVALID_ARGUMENT);
1741
1742 vm_object_lock(object);
1743
1744 if(ops & UPL_POP_PHYSICAL) {
1745 if(object->phys_contiguous) {
1746 if (phys_entry) {
1747 *phys_entry = (ppnum_t)
1748 (object->shadow_offset >> 12);
1749 }
1750 vm_object_unlock(object);
1751 return KERN_SUCCESS;
1752 } else {
1753 vm_object_unlock(object);
1754 return KERN_INVALID_OBJECT;
1755 }
1756 }
1757 if(object->phys_contiguous) {
1758 vm_object_unlock(object);
1759 return KERN_INVALID_OBJECT;
1760 }
1761
1762 while(TRUE) {
1763 if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
1764 vm_object_unlock(object);
1765 return KERN_FAILURE;
1766 }
1767
1768 /* Sync up on getting the busy bit */
1769 if((dst_page->busy || dst_page->cleaning) &&
1770 (((ops & UPL_POP_SET) &&
1771 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
1772 /* someone else is playing with the page, we will */
1773 /* have to wait */
1774 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
1775 continue;
1776 }
1777
1778 if (ops & UPL_POP_DUMP) {
1779 vm_page_lock_queues();
1780
1781 if (dst_page->no_isync == FALSE)
1782 pmap_page_protect(dst_page->phys_page, VM_PROT_NONE);
1783 vm_page_free(dst_page);
1784
1785 vm_page_unlock_queues();
1786 break;
1787 }
1788
1789 if (flags) {
1790 *flags = 0;
1791
1792 /* Get the condition of flags before requested ops */
1793 /* are undertaken */
1794
1795 if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
1796 if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
1797 if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
1798 if(dst_page->absent) *flags |= UPL_POP_ABSENT;
1799 if(dst_page->busy) *flags |= UPL_POP_BUSY;
1800 }
1801 if (phys_entry)
1802 *phys_entry = dst_page->phys_page;
1803
1804 /* The caller should have made a call either contingent with */
1805 /* or prior to this call to set UPL_POP_BUSY */
1806 if(ops & UPL_POP_SET) {
1807 /* The protection granted with this assert will */
1808 /* not be complete. If the caller violates the */
1809 /* convention and attempts to change page state */
1810 /* without first setting busy we may not see it */
1811 /* because the page may already be busy. However */
1812 /* if such violations occur we will assert sooner */
1813 /* or later. */
1814 assert(dst_page->busy || (ops & UPL_POP_BUSY));
1815 if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
1816 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
1817 if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
1818 if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
1819 if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
1820 }
1821
1822 if(ops & UPL_POP_CLR) {
1823 assert(dst_page->busy);
1824 if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
1825 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
1826 if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
1827 if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
1828 if (ops & UPL_POP_BUSY) {
1829 dst_page->busy = FALSE;
1830 PAGE_WAKEUP(dst_page);
1831 }
1832 }
1833 break;
1834 }
1835
1836 vm_object_unlock(object);
1837 return KERN_SUCCESS;
1838
1839}
1840
1841/*
1842 * memory_object_range_op offers performance enhancement over
1843 * memory_object_page_op for page_op functions which do not require page
1844 * level state to be returned from the call. Page_op was created to provide
1845 * a low-cost alternative to page manipulation via UPLs when only a single
1846 * page was involved. The range_op call establishes the ability in the _op
1847 * family of functions to work on multiple pages where the lack of page level
1848 * state handling allows the caller to avoid the overhead of the upl structures.
1849 */
1850
1851kern_return_t
1852memory_object_range_op(
1853 memory_object_control_t control,
1854 memory_object_offset_t offset_beg,
1855 memory_object_offset_t offset_end,
1856 int ops,
1857 int *range)
1858{
1859 memory_object_offset_t offset;
1860 vm_object_t object;
1861 vm_page_t dst_page;
1862
1863 object = memory_object_control_to_vm_object(control);
1864 if (object == VM_OBJECT_NULL)
1865 return (KERN_INVALID_ARGUMENT);
1866
1867 if (object->resident_page_count == 0) {
1868 if (range) {
1869 if (ops & UPL_ROP_PRESENT)
1870 *range = 0;
1871 else
1872 *range = offset_end - offset_beg;
1873 }
1874 return KERN_SUCCESS;
1875 }
1876 vm_object_lock(object);
1877
1878 if (object->phys_contiguous) {
1879 vm_object_unlock(object);
 return KERN_INVALID_OBJECT;
 }
1880
1881 offset = offset_beg;
1882
1883 while (offset < offset_end) {
1884 if (dst_page = vm_page_lookup(object, offset)) {
1885 if (ops & UPL_ROP_DUMP) {
1886 if (dst_page->busy || dst_page->cleaning) {
1887 /*
1888 * someone else is playing with the
1889 * page, we will have to wait
1890 */
1891 PAGE_SLEEP(object,
1892 dst_page, THREAD_UNINT);
1893 /*
1894 * need to look the page up again since its
1895 * state may have changed while we slept;
1896 * it might even belong to a different object
1897 * at this point
1898 */
1899 continue;
1900 }
1901 vm_page_lock_queues();
1902
1903 if (dst_page->no_isync == FALSE)
1904 pmap_page_protect(dst_page->phys_page, VM_PROT_NONE);
1905 vm_page_free(dst_page);
1906
1907 vm_page_unlock_queues();
1908 } else if (ops & UPL_ROP_ABSENT)
1909 break;
1910 } else if (ops & UPL_ROP_PRESENT)
1911 break;
1912
1913 offset += PAGE_SIZE;
1914 }
1915 vm_object_unlock(object);
1916
1917 if (range)
1918 *range = offset - offset_beg;
1919
1920 return KERN_SUCCESS;
1921}
1922
1923static zone_t mem_obj_control_zone;
1924
1925__private_extern__ void
1926memory_object_control_bootstrap(void)
1927{
1928 int i;
1929
1930 i = (vm_size_t) sizeof (struct memory_object_control);
1931 mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
1932 return;
1933}
1934
1935__private_extern__ memory_object_control_t
1936memory_object_control_allocate(
1937 vm_object_t object)
1938{
1939 memory_object_control_t control;
1940
1941 control = (memory_object_control_t)zalloc(mem_obj_control_zone);
1942 if (control != MEMORY_OBJECT_CONTROL_NULL)
1943 control->object = object;
1944 return (control);
1945}
1946
1947__private_extern__ void
1948memory_object_control_collapse(
1949 memory_object_control_t control,
1950 vm_object_t object)
1951{
1952 assert((control->object != VM_OBJECT_NULL) &&
1953 (control->object != object));
1954 control->object = object;
1955}
1956
1957__private_extern__ vm_object_t
1958memory_object_control_to_vm_object(
1959 memory_object_control_t control)
1960{
1961 if (control == MEMORY_OBJECT_CONTROL_NULL)
1962 return VM_OBJECT_NULL;
1963
1964 return (control->object);
1965}
1966
1967memory_object_control_t
1968convert_port_to_mo_control(
1969 mach_port_t port)
1970{
1971 return MEMORY_OBJECT_CONTROL_NULL;
1972}
1973
1974
1975mach_port_t
1976convert_mo_control_to_port(
1977 memory_object_control_t control)
1978{
1979 return MACH_PORT_NULL;
1980}
1981
1982void
1983memory_object_control_reference(
1984 memory_object_control_t control)
1985{
1986 return;
1987}
1988
1989/*
1990 * We only ever issue one of these references, so kill it
1991 * when that gets released (should switch to real reference
1992 * counting in true port-less EMMI).
1993 */
1994void
1995memory_object_control_deallocate(
1996 memory_object_control_t control)
1997{
1998 zfree(mem_obj_control_zone, (vm_offset_t)control);
1999}
2000
2001void
2002memory_object_control_disable(
2003 memory_object_control_t control)
2004{
2005 assert(control->object != VM_OBJECT_NULL);
2006 control->object = VM_OBJECT_NULL;
2007}
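
/*
 * Illustrative sketch only -- not part of the original source.  It shows
 * the expected life cycle of a memory_object_control as implemented by
 * the routines above; "backing_object" is a hypothetical stand-in for
 * whatever vm_object the pager is caching.
 */
#if 0	/* example, not compiled */
static void
example_control_lifecycle(vm_object_t backing_object)
{
	memory_object_control_t	control;

	/* one control per object; the single reference is implicit */
	control = memory_object_control_allocate(backing_object);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return;

	/* requests arriving on the control map back to the object */
	assert(memory_object_control_to_vm_object(control) == backing_object);

	/* at termination: detach from the object, then drop the reference */
	memory_object_control_disable(control);
	memory_object_control_deallocate(control);
}
#endif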
2008
2009void
2010memory_object_default_reference(
2011 memory_object_default_t dmm)
2012{
2013 ipc_port_make_send(dmm);
2014}
2015
2016void
2017memory_object_default_deallocate(
2018 memory_object_default_t dmm)
2019{
2020 ipc_port_release_send(dmm);
2021}
2022
2023memory_object_t
2024convert_port_to_memory_object(
2025 mach_port_t port)
2026{
2027 return (MEMORY_OBJECT_NULL);
2028}
2029
2030
2031mach_port_t
2032convert_memory_object_to_port(
2033 memory_object_t object)
2034{
2035 return (MACH_PORT_NULL);
2036}
2037
2038#ifdef MACH_BSD
2039/* remove once the component interface is available */
2040extern int vnode_pager_workaround;
2041extern int device_pager_workaround;
2042#endif
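
/*
 * The EMMI entry points below dispatch on the memory object's pager
 * field: vnode-backed and device-backed objects are handed to their
 * respective pagers through the workaround hooks above, and everything
 * else falls through to the default pager (dp_*) routines.
 */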
2043
2044
2045/* Routine memory_object_reference */
2046void memory_object_reference(
2047 memory_object_t memory_object)
2048{
2049extern void dp_memory_object_reference(memory_object_t);
2050
2051#ifdef MACH_BSD
2052 extern void vnode_pager_reference(memory_object_t);
2053 extern void device_pager_reference(memory_object_t);
2054
2055 if(memory_object->pager == &vnode_pager_workaround) {
2056 vnode_pager_reference(memory_object);
2057 } else if(memory_object->pager == &device_pager_workaround) {
2058 device_pager_reference(memory_object);
2059 } else
2060#endif
2061 dp_memory_object_reference(memory_object);
2062}
2063
2064/* Routine memory_object_deallocate */
2065void memory_object_deallocate(
2066 memory_object_t memory_object)
2067{
2068extern void dp_memory_object_deallocate(memory_object_t);
2069
2070#ifdef MACH_BSD
2071 extern void vnode_pager_deallocate(memory_object_t);
2072 extern void device_pager_deallocate(memory_object_t);
2073
2074 if(memory_object->pager == &vnode_pager_workaround) {
2075 vnode_pager_deallocate(memory_object);
2076 } else if(memory_object->pager == &device_pager_workaround) {
2077 device_pager_deallocate(memory_object);
2078 } else
2079#endif
2080 dp_memory_object_deallocate(memory_object);
2081}
2082
2083
2084/* Routine memory_object_init */
2085kern_return_t memory_object_init
2086(
2087 memory_object_t memory_object,
2088 memory_object_control_t memory_control,
2089 vm_size_t memory_object_page_size
2090)
2091{
2092extern kern_return_t dp_memory_object_init(memory_object_t,
2093 memory_object_control_t,
2094 vm_size_t);
2095#ifdef MACH_BSD
2096extern kern_return_t vnode_pager_init(memory_object_t,
2097 memory_object_control_t,
2098 vm_size_t);
2099extern kern_return_t device_pager_init(memory_object_t,
2100 memory_object_control_t,
2101 vm_size_t);
2102
2103 if(memory_object->pager == &vnode_pager_workaround) {
2104 return vnode_pager_init(memory_object,
2105 memory_control,
2106 memory_object_page_size);
2107 } else if(memory_object->pager == &device_pager_workaround) {
2108 return device_pager_init(memory_object,
2109 memory_control,
2110 memory_object_page_size);
2111 } else
2112#endif
2113 return dp_memory_object_init(memory_object,
2114 memory_control,
2115 memory_object_page_size);
2116}
2117
2118/* Routine memory_object_terminate */
2119kern_return_t memory_object_terminate
2120(
2121 memory_object_t memory_object
2122)
2123{
2124extern kern_return_t dp_memory_object_terminate(memory_object_t);
2125
2126#ifdef MACH_BSD
2127extern kern_return_t vnode_pager_terminate(memory_object_t);
2128extern kern_return_t device_pager_terminate(memory_object_t);
2129
2130 if(memory_object->pager == &vnode_pager_workaround) {
2131 return vnode_pager_terminate(memory_object);
2132 } else if(memory_object->pager == &device_pager_workaround) {
2133 return device_pager_terminate(memory_object);
2134 } else
2135#endif
2136 return dp_memory_object_terminate(memory_object);
2137}
2138
2139/* Routine memory_object_data_request */
2140kern_return_t memory_object_data_request
2141(
2142 memory_object_t memory_object,
2143 memory_object_offset_t offset,
2144 vm_size_t length,
2145 vm_prot_t desired_access
2146)
2147{
2148extern kern_return_t dp_memory_object_data_request(memory_object_t,
2149 memory_object_offset_t, vm_size_t, vm_prot_t);
2150
2151#ifdef MACH_BSD
2152extern kern_return_t vnode_pager_data_request(memory_object_t,
2153 memory_object_offset_t, vm_size_t, vm_prot_t);
2154extern kern_return_t device_pager_data_request(memory_object_t,
2155 memory_object_offset_t, vm_size_t, vm_prot_t);
2156
2157 if (memory_object->pager == &vnode_pager_workaround) {
2158 return vnode_pager_data_request(memory_object,
2159 offset,
2160 length,
2161 desired_access);
2162 } else if (memory_object->pager == &device_pager_workaround) {
2163 return device_pager_data_request(memory_object,
2164 offset,
2165 length,
2166 desired_access);
2167 } else
2168#endif
2169 return dp_memory_object_data_request(memory_object,
2170 offset,
2171 length,
2172 desired_access);
2173}
2174
2175/* Routine memory_object_data_return */
2176kern_return_t memory_object_data_return
2177(
2178 memory_object_t memory_object,
2179 memory_object_offset_t offset,
2180 vm_size_t size,
2181 boolean_t dirty,
2182 boolean_t kernel_copy
2183)
2184{
2185 extern kern_return_t dp_memory_object_data_return(memory_object_t,
2186 memory_object_offset_t,
2187 vm_size_t,
2188 boolean_t,
2189 boolean_t);
2190#ifdef MACH_BSD
2191 extern kern_return_t vnode_pager_data_return(memory_object_t,
2192 memory_object_offset_t,
2193 vm_size_t,
2194 boolean_t,
2195 boolean_t);
2196 extern kern_return_t device_pager_data_return(memory_object_t,
2197 memory_object_offset_t,
2198 vm_size_t,
2199 boolean_t,
2200 boolean_t);
2201
2202 if (memory_object->pager == &vnode_pager_workaround) {
2203 return vnode_pager_data_return(memory_object,
2204 offset,
2205 size,
2206 dirty,
2207 kernel_copy);
2208 } else if (memory_object->pager == &device_pager_workaround) {
2209 return device_pager_data_return(memory_object,
2210 offset,
2211 size,
2212 dirty,
2213 kernel_copy);
2214 } else
2215#endif
2216 return dp_memory_object_data_return(memory_object,
2217 offset,
2218 size,
2219 dirty,
2220 kernel_copy);
2221}
2222
2223/* Routine memory_object_data_initialize */
2224kern_return_t memory_object_data_initialize
2225(
2226 memory_object_t memory_object,
2227 memory_object_offset_t offset,
2228 vm_size_t size
2229)
2230{
2231
2232 extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
2233 memory_object_offset_t,
2234 vm_size_t);
2235#ifdef MACH_BSD
2236 extern kern_return_t vnode_pager_data_initialize(memory_object_t,
2237 memory_object_offset_t,
2238 vm_size_t);
2239 extern kern_return_t device_pager_data_initialize(memory_object_t,
2240 memory_object_offset_t,
2241 vm_size_t);
2242
2243 if (memory_object->pager == &vnode_pager_workaround) {
2244 return vnode_pager_data_initialize(memory_object,
2245 offset,
2246 size);
2247 } else if (memory_object->pager == &device_pager_workaround) {
2248 return device_pager_data_initialize(memory_object,
2249 offset,
2250 size);
2251 } else
2252#endif
2253 return dp_memory_object_data_initialize(memory_object,
2254 offset,
2255 size);
2256}
2257
2258/* Routine memory_object_data_unlock */
2259kern_return_t memory_object_data_unlock
2260(
2261 memory_object_t memory_object,
2262 memory_object_offset_t offset,
2263 vm_size_t size,
2264 vm_prot_t desired_access
2265)
2266{
2267 extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
2268 memory_object_offset_t,
2269 vm_size_t,
2270 vm_prot_t);
2271#ifdef MACH_BSD
2272 extern kern_return_t vnode_pager_data_unlock(memory_object_t,
2273 memory_object_offset_t,
2274 vm_size_t,
2275 vm_prot_t);
2276 extern kern_return_t device_pager_data_unlock(memory_object_t,
2277 memory_object_offset_t,
2278 vm_size_t,
2279 vm_prot_t);
2280
2281 if (memory_object->pager == &vnode_pager_workaround) {
2282 return vnode_pager_data_unlock(memory_object,
2283 offset,
2284 size,
2285 desired_access);
2286 } else if (memory_object->pager == &device_pager_workaround) {
2287 return device_pager_data_unlock(memory_object,
2288 offset,
2289 size,
2290 desired_access);
2291 } else
2292#endif
2293 return dp_memory_object_data_unlock(memory_object,
2294 offset,
2295 size,
2296 desired_access);
2297
2298}
2299
2300/* Routine memory_object_synchronize */
2301kern_return_t memory_object_synchronize
2302(
2303 memory_object_t memory_object,
2304 memory_object_offset_t offset,
2305 vm_size_t size,
2306 vm_sync_t sync_flags
2307)
2308{
2309	extern kern_return_t dp_memory_object_synchronize(memory_object_t,
2310 memory_object_offset_t,
2311 vm_size_t,
2312 vm_sync_t);
2313#ifdef MACH_BSD
2314	extern kern_return_t vnode_pager_synchronize(memory_object_t,
2315 memory_object_offset_t,
2316 vm_size_t,
2317 vm_sync_t);
2318	extern kern_return_t device_pager_synchronize(memory_object_t,
2319 memory_object_offset_t,
2320 vm_size_t,
2321 vm_sync_t);
2322
2323 if (memory_object->pager == &vnode_pager_workaround) {
2324 return vnode_pager_synchronize(
2325 memory_object,
2326 offset,
2327 size,
2328 sync_flags);
2329 } else if (memory_object->pager == &device_pager_workaround) {
2330 return device_pager_synchronize(
2331 memory_object,
2332 offset,
2333 size,
2334 sync_flags);
2335 } else
2336#endif
2337 return dp_memory_object_synchronize(
2338 memory_object,
2339 offset,
2340 size,
2341 sync_flags);
2342}
2343
2344/* Routine memory_object_unmap */
2345kern_return_t memory_object_unmap
2346(
2347 memory_object_t memory_object
2348)
2349{
2350 extern kern_return_t dp_memory_object_unmap(memory_object_t);
2351#ifdef MACH_BSD
2352 extern kern_return_t vnode_pager_unmap(memory_object_t);
2353 extern kern_return_t device_pager_unmap(memory_object_t);
2354
2355 if (memory_object->pager == &vnode_pager_workaround) {
2356 return vnode_pager_unmap(memory_object);
2357 } else if (memory_object->pager == &device_pager_workaround) {
2358 return device_pager_unmap(memory_object);
2359 } else
2360#endif
2361 return dp_memory_object_unmap(memory_object);
2362}
2363
2364/* Routine memory_object_create */
2365kern_return_t memory_object_create
2366(
2367 memory_object_default_t default_memory_manager,
2368 vm_size_t new_memory_object_size,
2369 memory_object_t *new_memory_object
2370)
2371{
2372extern kern_return_t default_pager_memory_object_create(memory_object_default_t,
2373 vm_size_t,
2374 memory_object_t *);
2375
2376 return default_pager_memory_object_create(default_memory_manager,
2377 new_memory_object_size,
2378 new_memory_object);
2379}
2380