1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: vm/memory_object.c
54 * Author: Michael Wayne Young
55 *
56 * External memory management interface control functions.
57 */
58
59 #include <advisory_pageout.h>
60
61 /*
62 * Interface dependencies:
63 */
64
65 #include <mach/std_types.h> /* For pointer_t */
66 #include <mach/mach_types.h>
67
68 #include <mach/mig.h>
69 #include <mach/kern_return.h>
70 #include <mach/memory_object.h>
71 #include <mach/memory_object_default.h>
72 #include <mach/memory_object_control_server.h>
73 #include <mach/host_priv_server.h>
74 #include <mach/boolean.h>
75 #include <mach/vm_prot.h>
76 #include <mach/message.h>
77
78 /*
79 * Implementation dependencies:
80 */
81 #include <string.h> /* For memcpy() */
82
83 #include <kern/xpr.h>
84 #include <kern/host.h>
85 #include <kern/thread.h> /* For current_thread() */
86 #include <kern/ipc_mig.h>
87 #include <kern/misc_protos.h>
88
89 #include <vm/vm_object.h>
90 #include <vm/vm_fault.h>
91 #include <vm/memory_object.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_pageout.h>
94 #include <vm/pmap.h> /* For pmap_clear_modify */
95 #include <vm/vm_kern.h> /* For kernel_map, vm_move */
96 #include <vm/vm_map.h> /* For vm_map_pageable */
97
98 #if MACH_PAGEMAP
99 #include <vm/vm_external.h>
100 #endif /* MACH_PAGEMAP */
101
102 #include <vm/vm_protos.h>
103
104
105 memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
106 vm_size_t memory_manager_default_cluster = 0;
107 decl_mutex_data(, memory_manager_default_lock)
108
109
110 /*
111 * Routine: memory_object_should_return_page
112 *
113 * Description:
114 * Determine whether the given page should be returned,
115 * based on the page's state and on the given return policy.
116 *
117 * We should return the page if one of the following is true:
118 *
119 * 1. Page is dirty and should_return is not RETURN_NONE.
120 * 2. Page is precious and should_return is RETURN_ALL.
121 * 3. Should_return is RETURN_ANYTHING.
122 *
123 * As a side effect, m->dirty will be made consistent
124 * with pmap_is_modified(m), if should_return is not
125 * MEMORY_OBJECT_RETURN_NONE.
126 */
127
128 #define memory_object_should_return_page(m, should_return) \
129 (should_return != MEMORY_OBJECT_RETURN_NONE && \
130 (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
131 ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
132 (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
133
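/*
 * Illustrative sketch (editorial, not part of the build): how a caller
 * might use the macro above under a RETURN_DIRTY policy.  The page "m"
 * and the helper queue_page_for_cleaning() are hypothetical.
 *
 *	if (memory_object_should_return_page(m, MEMORY_OBJECT_RETURN_DIRTY)) {
 *		queue_page_for_cleaning(m);	m->dirty is now up to date
 *	}
 */
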
134 typedef int memory_object_lock_result_t;
135
136 #define MEMORY_OBJECT_LOCK_RESULT_DONE 0
137 #define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
138 #define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
139 #define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
140
141 memory_object_lock_result_t memory_object_lock_page(
142 vm_page_t m,
143 memory_object_return_t should_return,
144 boolean_t should_flush,
145 vm_prot_t prot);
146
147 /*
148 * Routine: memory_object_lock_page
149 *
150 * Description:
151 * Perform the appropriate lock operations on the
152 * given page. See the description of
153 * "memory_object_lock_request" for the meanings
154 * of the arguments.
155 *
156 * Returns an indication that the operation
157 * completed, blocked, or that the page must
158 * be cleaned or returned.
159 */
160 memory_object_lock_result_t
161 memory_object_lock_page(
162 vm_page_t m,
163 memory_object_return_t should_return,
164 boolean_t should_flush,
165 vm_prot_t prot)
166 {
167 XPR(XPR_MEMORY_OBJECT,
168 "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
169 (integer_t)m, should_return, should_flush, prot, 0);
170
171 /*
172 * If we cannot change access to the page,
173 * either because a mapping is in progress
174 * (busy page) or because a mapping has been
175 * wired, then give up.
176 */
177
178 if (m->busy || m->cleaning)
179 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
180
181 /*
182 * Don't worry about pages for which the kernel
183 * does not have any data.
184 */
185
186 if (m->absent || m->error || m->restart) {
187 if(m->error && should_flush) {
188 /* dump the page, pager wants us to */
189 /* clean it up and there is no */
190 /* relevant data to return */
191 if(m->wire_count == 0) {
192 VM_PAGE_FREE(m);
193 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
194 }
195 } else {
196 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
197 }
198 }
199
200 assert(!m->fictitious);
201
202 if (m->wire_count != 0) {
203 /*
204 * If no change would take place
205 * anyway, return successfully.
206 *
207 * No change means:
208 * Not flushing AND
209 * No change to page lock [2 checks] AND
210 * Should not return page
211 *
212 * XXX This doesn't handle sending a copy of a wired
213 * XXX page to the pager, but that will require some
214 * XXX significant surgery.
215 */
216 if (!should_flush &&
217 (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
218 ! memory_object_should_return_page(m, should_return)) {
219
220 /*
221 * Restart page unlock requests,
222 * even though no change took place.
223 * [Memory managers may be expecting
224 * to see new requests.]
225 */
226 m->unlock_request = VM_PROT_NONE;
227 PAGE_WAKEUP(m);
228
229 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
230 }
231
232 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
233 }
234
235 /*
236 * If the page is to be flushed, allow
237 * that to be done as part of the protection.
238 */
239
240 if (should_flush)
241 prot = VM_PROT_ALL;
242
243 /*
244 * Set the page lock.
245 *
246 * If we are decreasing permission, do it now;
247 * let the fault handler take care of increases
248 * (pmap_page_protect may not increase protection).
249 */
250
251 if (prot != VM_PROT_NO_CHANGE) {
252 if ((m->page_lock ^ prot) & prot) {
253 pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
254 }
255 #if 0
256 /* code associated with the vestigial
257 * memory_object_data_unlock
258 */
259 m->page_lock = prot;
260 m->lock_supplied = TRUE;
261 if (prot != VM_PROT_NONE)
262 m->unusual = TRUE;
263 else
264 m->unusual = FALSE;
265
266 /*
267 * Restart any past unlock requests, even if no
268 * change resulted. If the manager explicitly
269 * requested no protection change, then it is assumed
270 * to be remembering past requests.
271 */
272
273 m->unlock_request = VM_PROT_NONE;
274 #endif /* 0 */
275 PAGE_WAKEUP(m);
276 }
277
278 /*
279 * Handle page returning.
280 */
281
282 if (memory_object_should_return_page(m, should_return)) {
283
284 /*
285 * If we weren't planning
286 * to flush the page anyway,
287 * we may need to remove the
288 * page from the pageout
289 * system and from physical
290 * maps now.
291 */
292
293 vm_page_lock_queues();
294 VM_PAGE_QUEUES_REMOVE(m);
295 vm_page_unlock_queues();
296
297 if (!should_flush)
298 pmap_disconnect(m->phys_page);
299
300 if (m->dirty)
301 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
302 else
303 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
304 }
305
306 /*
307 * Handle flushing
308 */
309
310 if (should_flush) {
311 VM_PAGE_FREE(m);
312 } else {
313 /*
314 * XXX Make clean but not flush a paging hint,
315 * and deactivate the pages. This is a hack
316 * because it overloads flush/clean with
317 * implementation-dependent meaning. This only
318 * happens to pages that are already clean.
319 */
320
321 if (vm_page_deactivate_hint &&
322 (should_return != MEMORY_OBJECT_RETURN_NONE)) {
323 vm_page_lock_queues();
324 vm_page_deactivate(m);
325 vm_page_unlock_queues();
326 }
327 }
328
329 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
330 }
331
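/*
 * LIST_REQ_PAGEOUT_PAGES hands a run of contiguous dirty or precious
 * pages back to the pager via memory_object_data_return().  The object
 * lock is dropped around the call (the pager may need to take it), so
 * callers must re-validate any cached page pointers afterwards.
 * "should_flush" is picked up from the caller's scope.
 */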
332 #define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po, ro, ioerr, iosync) \
333 MACRO_BEGIN \
334 \
335 register int upl_flags; \
336 \
337 vm_object_unlock(object); \
338 \
339 if (iosync) \
340 upl_flags = UPL_MSYNC | UPL_IOSYNC; \
341 else \
342 upl_flags = UPL_MSYNC; \
343 \
344 (void) memory_object_data_return(object->pager, \
345 po, \
346 data_cnt, \
347 ro, \
348 ioerr, \
349 (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
350 !should_flush, \
351 upl_flags); \
352 \
353 vm_object_lock(object); \
354 MACRO_END
355
356 /*
357 * Routine: memory_object_lock_request [user interface]
358 *
359 * Description:
360 * Control use of the data associated with the given
361 * memory object. For each page in the given range,
362 * perform the following operations, in order:
363 * 1) restrict access to the page (disallow
364 * forms specified by "prot");
365 * 2) return data to the manager (if "should_return"
366 * is RETURN_DIRTY and the page is dirty, or
367 * "should_return" is RETURN_ALL and the page
368 * is either dirty or precious); and,
369 * 3) flush the cached copy (if "should_flush"
370 * is asserted).
371 * The set of pages is defined by a starting offset
372 * ("offset") and size ("size"). Only pages with the
373 * same page alignment as the starting offset are
374 * considered.
375 *
376 * A single acknowledgement is sent (to the "reply_to"
377 * port) when these actions are complete. If successful,
378 * the naked send right for reply_to is consumed.
379 */
380
381 kern_return_t
382 memory_object_lock_request(
383 memory_object_control_t control,
384 memory_object_offset_t offset,
385 memory_object_size_t size,
386 memory_object_offset_t * resid_offset,
387 int * io_errno,
388 memory_object_return_t should_return,
389 int flags,
390 vm_prot_t prot)
391 {
392 vm_object_t object;
393 __unused boolean_t should_flush;
394
395 should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
396
397 XPR(XPR_MEMORY_OBJECT,
398 "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
399 (integer_t)control, offset, size,
400 (((should_return&1)<<1)|should_flush), prot);
401
402 /*
403 * Check for bogus arguments.
404 */
405 object = memory_object_control_to_vm_object(control);
406 if (object == VM_OBJECT_NULL)
407 return (KERN_INVALID_ARGUMENT);
408
409 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
410 return (KERN_INVALID_ARGUMENT);
411
412 size = round_page_64(size);
413
414 /*
415 * Lock the object, and acquire a paging reference to
416 * prevent the memory_object reference from being released.
417 */
418 vm_object_lock(object);
419 vm_object_paging_begin(object);
420 offset -= object->paging_offset;
421
422 (void)vm_object_update(object,
423 offset, size, resid_offset, io_errno, should_return, flags, prot);
424
425 vm_object_paging_end(object);
426 vm_object_unlock(object);
427
428 return (KERN_SUCCESS);
429 }
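
/*
 * Illustrative sketch (editorial, not part of the build): flushing and
 * returning all dirty pages in the first megabyte of an object whose
 * control port is held in "control" (hypothetical variable).
 *
 *	kern_return_t kr;
 *
 *	kr = memory_object_lock_request(control,
 *					(memory_object_offset_t)0,
 *					(memory_object_size_t)(1024 * 1024),
 *					(memory_object_offset_t *)NULL,
 *					(int *)NULL,
 *					MEMORY_OBJECT_RETURN_DIRTY,
 *					MEMORY_OBJECT_DATA_FLUSH,
 *					VM_PROT_NO_CHANGE);
 */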
430
431 /*
432 * memory_object_release_name: [interface]
433 *
434 * Enforces the name semantic on a memory_object reference count decrement.
435 * This routine should not be called unless the caller holds a name
436 * reference gained through memory_object_named_create or
437 * memory_object_rename.
438 * If the TERMINATE_IDLE flag is set, the call returns without acting
439 * unless the reference count is 1, i.e. the object is idle with the
440 * name being the only remaining reference.
441 * If the decision is made to proceed, the named flag is cleared and the
442 * reference count is decremented. If the RESPECT_CACHE flag is set and
443 * the reference count has gone to zero, the memory_object is checked to
444 * see if it is cacheable; otherwise, when the reference count reaches
445 * zero it is simply terminated.
446 */
447
448 kern_return_t
449 memory_object_release_name(
450 memory_object_control_t control,
451 int flags)
452 {
453 vm_object_t object;
454
455 object = memory_object_control_to_vm_object(control);
456 if (object == VM_OBJECT_NULL)
457 return (KERN_INVALID_ARGUMENT);
458
459 return vm_object_release_name(object, flags);
460 }
461
462
463
464 /*
465 * Routine: memory_object_destroy [user interface]
466 * Purpose:
467 * Shut down a memory object, despite the
468 * presence of address map (or other) references
469 * to the vm_object.
470 */
471 kern_return_t
472 memory_object_destroy(
473 memory_object_control_t control,
474 kern_return_t reason)
475 {
476 vm_object_t object;
477
478 object = memory_object_control_to_vm_object(control);
479 if (object == VM_OBJECT_NULL)
480 return (KERN_INVALID_ARGUMENT);
481
482 return (vm_object_destroy(object, reason));
483 }
484
485 /*
486 * Routine: vm_object_sync
487 *
488 * Kernel internal function to synch out pages in a given
489 * range within an object to its memory manager. Much the
490 * same as memory_object_lock_request but page protection
491 * is not changed.
492 *
493 * If the should_flush and should_return flags are both true, pages
494 * are flushed; that is, dirty and precious pages are written to
495 * the memory manager and then discarded. If should_return is
496 * false, only precious pages are returned to the memory
497 * manager.
498 *
499 * If should_flush is false and should_return is true, the memory
500 * manager's copy of the pages is updated. If should_return is
501 * also false, only the precious pages are updated. This
502 * last option is of limited utility.
503 *
504 * Returns:
505 * FALSE if no pages were returned to the pager
506 * TRUE otherwise.
507 */
508
509 boolean_t
510 vm_object_sync(
511 vm_object_t object,
512 vm_object_offset_t offset,
513 vm_object_size_t size,
514 boolean_t should_flush,
515 boolean_t should_return,
516 boolean_t should_iosync)
517 {
518 boolean_t rv;
519 int flags;
520
521 XPR(XPR_VM_OBJECT,
522 "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
523 (integer_t)object, offset, size, should_flush, should_return);
524
525 /*
526 * Lock the object, and acquire a paging reference to
527 * prevent the memory_object and control ports from
528 * being destroyed.
529 */
530 vm_object_lock(object);
531 vm_object_paging_begin(object);
532
533 if (should_flush)
534 flags = MEMORY_OBJECT_DATA_FLUSH;
535 else
536 flags = 0;
537
538 if (should_iosync)
539 flags |= MEMORY_OBJECT_IO_SYNC;
540
541 rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL,
542 (should_return) ?
543 MEMORY_OBJECT_RETURN_ALL :
544 MEMORY_OBJECT_RETURN_NONE,
545 flags,
546 VM_PROT_NO_CHANGE);
547
548
549 vm_object_paging_end(object);
550 vm_object_unlock(object);
551 return rv;
552 }
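
/*
 * Illustrative sketch (editorial, not part of the build): the msync()
 * style of use, pushing dirty pages synchronously without discarding
 * them.  "object", "start" and "len" are hypothetical.
 *
 *	(void) vm_object_sync(object, start, len,
 *			      FALSE,	should_flush - keep the pages resident
 *			      TRUE,	should_return - write dirty pages
 *			      TRUE);	should_iosync - wait for the I/O
 */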
553
554
555
556
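/*
 * vm_object_update_extent:
 *
 * Helper for vm_object_update().  Walks one extent of the object,
 * applying memory_object_lock_page() to each resident page in
 * [offset, offset_end) and batching contiguous clean/return work
 * through LIST_REQ_PAGEOUT_PAGES.  Returns non-zero if any data was
 * queued for return to the pager.  Called with the object locked.
 */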
557 static int
558 vm_object_update_extent(
559 vm_object_t object,
560 vm_object_offset_t offset,
561 vm_object_offset_t offset_end,
562 vm_object_offset_t *offset_resid,
563 int *io_errno,
564 boolean_t should_flush,
565 memory_object_return_t should_return,
566 boolean_t should_iosync,
567 vm_prot_t prot)
568 {
569 vm_page_t m;
570 int retval = 0;
571 vm_size_t data_cnt = 0;
572 vm_object_offset_t paging_offset = 0;
573 vm_object_offset_t last_offset = offset;
574 memory_object_lock_result_t page_lock_result;
575 memory_object_lock_result_t pageout_action;
576
577 pageout_action = MEMORY_OBJECT_LOCK_RESULT_DONE;
578
579 for (;
580 offset < offset_end && object->resident_page_count;
581 offset += PAGE_SIZE_64) {
582
583 /*
584 * Limit the number of pages to be cleaned at once.
585 */
586 if (data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) {
587 LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
588 pageout_action, paging_offset, offset_resid, io_errno, should_iosync);
589 data_cnt = 0;
590 }
591
592 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
593 page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);
594
595 XPR(XPR_MEMORY_OBJECT,
596 "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
597 (integer_t)object, offset, page_lock_result, 0, 0);
598
599 switch (page_lock_result)
600 {
601 case MEMORY_OBJECT_LOCK_RESULT_DONE:
602 /*
603 * End of a cluster of dirty pages.
604 */
605 if (data_cnt) {
606 LIST_REQ_PAGEOUT_PAGES(object,
607 data_cnt, pageout_action,
608 paging_offset, offset_resid, io_errno, should_iosync);
609 data_cnt = 0;
610 continue;
611 }
612 break;
613
614 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
615 /*
616 * Since it is necessary to block,
617 * clean any dirty pages now.
618 */
619 if (data_cnt) {
620 LIST_REQ_PAGEOUT_PAGES(object,
621 data_cnt, pageout_action,
622 paging_offset, offset_resid, io_errno, should_iosync);
623 data_cnt = 0;
624 continue;
625 }
626 PAGE_SLEEP(object, m, THREAD_UNINT);
627 continue;
628
629 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
630 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
631 /*
632 * The clean and return cases are similar.
633 *
634 * if this would form a discontiguous block,
635 * clean the old pages and start anew.
636 *
637 * Mark the page busy since we will unlock the
638 * object if we issue the LIST_REQ_PAGEOUT
639 */
640 m->busy = TRUE;
641 if (data_cnt &&
642 ((last_offset != offset) || (pageout_action != page_lock_result))) {
643 LIST_REQ_PAGEOUT_PAGES(object,
644 data_cnt, pageout_action,
645 paging_offset, offset_resid, io_errno, should_iosync);
646 data_cnt = 0;
647 }
648 m->busy = FALSE;
649
650 if (m->cleaning) {
651 PAGE_SLEEP(object, m, THREAD_UNINT);
652 continue;
653 }
654 if (data_cnt == 0) {
655 pageout_action = page_lock_result;
656 paging_offset = offset;
657 }
658 data_cnt += PAGE_SIZE;
659 last_offset = offset + PAGE_SIZE_64;
660
661 vm_page_lock_queues();
662 /*
663 * Clean
664 */
665 m->list_req_pending = TRUE;
666 m->cleaning = TRUE;
667
668 if (should_flush) {
669 /*
670 * and add additional state
671 * for the flush
672 */
673 m->busy = TRUE;
674 m->pageout = TRUE;
675 vm_page_wire(m);
676 }
677 vm_page_unlock_queues();
678
679 retval = 1;
680 break;
681 }
682 break;
683 }
684 }
685 /*
686 * We have completed the scan for applicable pages.
687 * Clean any pages that have been saved.
688 */
689 if (data_cnt) {
690 LIST_REQ_PAGEOUT_PAGES(object,
691 data_cnt, pageout_action, paging_offset, offset_resid, io_errno, should_iosync);
692 }
693 return (retval);
694 }
695
696
697
698 /*
699 * Routine: vm_object_update
700 * Description:
701 * Work function for m_o_lock_request(), vm_o_sync().
702 *
703 * Called with object locked and paging ref taken.
704 */
705 kern_return_t
706 vm_object_update(
707 register vm_object_t object,
708 register vm_object_offset_t offset,
709 register vm_object_size_t size,
710 register vm_object_offset_t *resid_offset,
711 int *io_errno,
712 memory_object_return_t should_return,
713 int flags,
714 vm_prot_t protection)
715 {
716 vm_object_t copy_object;
717 boolean_t data_returned = FALSE;
718 boolean_t update_cow;
719 boolean_t should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE;
720 boolean_t should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE;
721 int num_of_extents;
722 int n;
723 #define MAX_EXTENTS 8
724 #define EXTENT_SIZE (1024 * 1024 * 256)
725 #define RESIDENT_LIMIT (1024 * 32)
726 struct extent {
727 vm_object_offset_t e_base;
728 vm_object_offset_t e_min;
729 vm_object_offset_t e_max;
730 } extents[MAX_EXTENTS];
731
732 /*
733 * To avoid blocking while scanning for pages, save
734 * dirty pages to be cleaned all at once.
735 *
736 * XXXO A similar strategy could be used to limit the
737 * number of times that a scan must be restarted for
738 * other reasons. Those pages that would require blocking
739 * could be temporarily collected in another list, or
740 * their offsets could be recorded in a small array.
741 */
742
743 /*
744 * XXX NOTE: May want to consider converting this to a page list
745 * XXX vm_map_copy interface. Need to understand object
746 * XXX coalescing implications before doing so.
747 */
748
749 update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
750 && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
751 !(flags & MEMORY_OBJECT_DATA_PURGE)))
752 || (flags & MEMORY_OBJECT_COPY_SYNC);
753
754
755 if((((copy_object = object->copy) != NULL) && update_cow) ||
756 (flags & MEMORY_OBJECT_DATA_SYNC)) {
757 vm_map_size_t i;
758 vm_map_size_t copy_size;
759 vm_map_offset_t copy_offset;
760 vm_prot_t prot;
761 vm_page_t page;
762 vm_page_t top_page;
763 kern_return_t error = 0;
764
765 if(copy_object != NULL) {
766 /* translate offset with respect to shadow's offset */
767 copy_offset = (offset >= copy_object->shadow_offset)?
768 (vm_map_offset_t)(offset - copy_object->shadow_offset) :
769 (vm_map_offset_t) 0;
770 if(copy_offset > copy_object->size)
771 copy_offset = copy_object->size;
772
773 /* clip size with respect to shadow offset */
774 if (offset >= copy_object->shadow_offset) {
775 copy_size = size;
776 } else if (size >= copy_object->shadow_offset - offset) {
777 copy_size = size -
778 (copy_object->shadow_offset - offset);
779 } else {
780 copy_size = 0;
781 }
782
783 if (copy_offset + copy_size > copy_object->size) {
784 if (copy_object->size >= copy_offset) {
785 copy_size = copy_object->size - copy_offset;
786 } else {
787 copy_size = 0;
788 }
789 }
790
791 copy_size+=copy_offset;
792
793 vm_object_unlock(object);
794 vm_object_lock(copy_object);
795 } else {
796 copy_object = object;
797
798 copy_size = offset + size;
799 copy_offset = offset;
800 }
801
802 vm_object_paging_begin(copy_object);
803 for (i=copy_offset; i<copy_size; i+=PAGE_SIZE) {
804 RETRY_COW_OF_LOCK_REQUEST:
805 prot = VM_PROT_WRITE|VM_PROT_READ;
806 switch (vm_fault_page(copy_object, i,
807 VM_PROT_WRITE|VM_PROT_READ,
808 FALSE,
809 THREAD_UNINT,
810 copy_offset,
811 copy_offset+copy_size,
812 VM_BEHAVIOR_SEQUENTIAL,
813 &prot,
814 &page,
815 &top_page,
816 (int *)0,
817 &error,
818 FALSE,
819 FALSE, NULL, 0)) {
820
821 case VM_FAULT_SUCCESS:
822 if(top_page) {
823 vm_fault_cleanup(
824 page->object, top_page);
825 PAGE_WAKEUP_DONE(page);
826 vm_page_lock_queues();
827 if (!page->active && !page->inactive)
828 vm_page_activate(page);
829 vm_page_unlock_queues();
830 vm_object_lock(copy_object);
831 vm_object_paging_begin(copy_object);
832 } else {
833 PAGE_WAKEUP_DONE(page);
834 vm_page_lock_queues();
835 if (!page->active && !page->inactive)
836 vm_page_activate(page);
837 vm_page_unlock_queues();
838 }
839 break;
840 case VM_FAULT_RETRY:
841 prot = VM_PROT_WRITE|VM_PROT_READ;
842 vm_object_lock(copy_object);
843 vm_object_paging_begin(copy_object);
844 goto RETRY_COW_OF_LOCK_REQUEST;
845 case VM_FAULT_INTERRUPTED:
846 prot = VM_PROT_WRITE|VM_PROT_READ;
847 vm_object_lock(copy_object);
848 vm_object_paging_begin(copy_object);
849 goto RETRY_COW_OF_LOCK_REQUEST;
850 case VM_FAULT_MEMORY_SHORTAGE:
851 VM_PAGE_WAIT();
852 prot = VM_PROT_WRITE|VM_PROT_READ;
853 vm_object_lock(copy_object);
854 vm_object_paging_begin(copy_object);
855 goto RETRY_COW_OF_LOCK_REQUEST;
856 case VM_FAULT_FICTITIOUS_SHORTAGE:
857 vm_page_more_fictitious();
858 prot = VM_PROT_WRITE|VM_PROT_READ;
859 vm_object_lock(copy_object);
860 vm_object_paging_begin(copy_object);
861 goto RETRY_COW_OF_LOCK_REQUEST;
862 case VM_FAULT_MEMORY_ERROR:
863 vm_object_lock(object);
864 goto BYPASS_COW_COPYIN;
865 }
866
867 }
868 vm_object_paging_end(copy_object);
869 if(copy_object != object) {
870 vm_object_unlock(copy_object);
871 vm_object_lock(object);
872 }
873 }
874 if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
875 return KERN_SUCCESS;
876 }
877 if(((copy_object = object->copy) != NULL) &&
878 (flags & MEMORY_OBJECT_DATA_PURGE)) {
879 copy_object->shadow_severed = TRUE;
880 copy_object->shadowed = FALSE;
881 copy_object->shadow = NULL;
882 /* delete the ref the COW was holding on the target object */
883 vm_object_deallocate(object);
884 }
885 BYPASS_COW_COPYIN:
886
887 /*
888 * when we have a really large range to check relative
889 * to the number of actual resident pages, we'd like
890 * to use the resident page list to drive our checks
891 * however, the object lock will get dropped while processing
892 * the page which means the resident queue can change which
893 * means we can't walk the queue as we process the pages
894 * we also want to do the processing in offset order to allow
895 * 'runs' of pages to be collected if we're being told to
896 * flush to disk... the resident page queue is NOT ordered.
897 *
898 * a temporary solution (until we figure out how to deal with
899 * large address spaces more generically) is to pre-flight
900 * the resident page queue (if it's small enough) and develop
901 * a collection of extents (that encompass actual resident pages)
902 * to visit. This will at least allow us to deal with some of the
903 * more pathological cases in a more efficient manner. The current
904 * worst case (a single resident page at the end of an extremely large
905 * range) can take minutes to complete for ranges in the terabyte
906 * category... since this routine is called when truncating a file,
907 * and we currently support files up to 16 Tbytes in size, this
908 * is not a theoretical problem.
909 */
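
/*
 * Worked example (editorial): with EXTENT_SIZE = 256MB, e_mask clears
 * the low 28 bits of an offset, so a resident page at offset
 * 0x10002000 falls in the extent whose e_base is 0x10000000; that
 * extent's e_min/e_max are then grown page by page as further resident
 * pages in the same 256MB window are found.  At most MAX_EXTENTS (8)
 * such windows are tracked before the code falls back to a single
 * min/max extent.
 */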
910
911 if ((object->resident_page_count < RESIDENT_LIMIT) &&
912 (atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) {
913 vm_page_t next;
914 vm_object_offset_t start;
915 vm_object_offset_t end;
916 vm_object_size_t e_mask;
917 vm_page_t m;
918
919 start = offset;
920 end = offset + size;
921 num_of_extents = 0;
922 e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));
923
924 m = (vm_page_t) queue_first(&object->memq);
925
926 while (!queue_end(&object->memq, (queue_entry_t) m)) {
927 next = (vm_page_t) queue_next(&m->listq);
928
929 if ((m->offset >= start) && (m->offset < end)) {
930 /*
931 * this is a page we're interested in
932 * try to fit it into a current extent
933 */
934 for (n = 0; n < num_of_extents; n++) {
935 if ((m->offset & e_mask) == extents[n].e_base) {
936 /*
937 * use (PAGE_SIZE - 1) to determine the
938 * max offset so that we don't wrap if
939 * we're at the last page of the space
940 */
941 if (m->offset < extents[n].e_min)
942 extents[n].e_min = m->offset;
943 else if ((m->offset + (PAGE_SIZE - 1)) > extents[n].e_max)
944 extents[n].e_max = m->offset + (PAGE_SIZE - 1);
945 break;
946 }
947 }
948 if (n == num_of_extents) {
949 /*
950 * didn't find a current extent that can encompass
951 * this page
952 */
953 if (n < MAX_EXTENTS) {
954 /*
955 * if we still have room,
956 * create a new extent
957 */
958 extents[n].e_base = m->offset & e_mask;
959 extents[n].e_min = m->offset;
960 extents[n].e_max = m->offset + (PAGE_SIZE - 1);
961
962 num_of_extents++;
963 } else {
964 /*
965 * no room to create a new extent...
966 * fall back to a single extent based
967 * on the min and max page offsets
968 * we find in the range we're interested in...
969 * first, look through the extent list and
970 * develop the overall min and max for the
971 * pages we've looked at up to this point
972 */
973 for (n = 1; n < num_of_extents; n++) {
974 if (extents[n].e_min < extents[0].e_min)
975 extents[0].e_min = extents[n].e_min;
976 if (extents[n].e_max > extents[0].e_max)
977 extents[0].e_max = extents[n].e_max;
978 }
979 /*
980 * now setup to run through the remaining pages
981 * to determine the overall min and max
982 * offset for the specified range
983 */
984 extents[0].e_base = 0;
985 e_mask = 0;
986 num_of_extents = 1;
987
988 /*
989 * by continuing, we'll reprocess the
990 * page that forced us to abandon trying
991 * to develop multiple extents
992 */
993 continue;
994 }
995 }
996 }
997 m = next;
998 }
999 } else {
1000 extents[0].e_min = offset;
1001 extents[0].e_max = offset + (size - 1);
1002
1003 num_of_extents = 1;
1004 }
1005 for (n = 0; n < num_of_extents; n++) {
1006 if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
1007 should_flush, should_return, should_iosync, protection))
1008 data_returned = TRUE;
1009 }
1010 return (data_returned);
1011 }
1012
1013
1014 /*
1015 * Routine: memory_object_synchronize_completed [user interface]
1016 *
1017 * Tell the kernel that previously synchronized data
1018 * (memory_object_synchronize) has been queued or placed on the
1019 * backing storage.
1020 *
1021 * Note: there may be multiple synchronize requests for a given
1022 * memory object outstanding but they will not overlap.
1023 */
1024
1025 kern_return_t
1026 memory_object_synchronize_completed(
1027 memory_object_control_t control,
1028 memory_object_offset_t offset,
1029 vm_offset_t length)
1030 {
1031 vm_object_t object;
1032 msync_req_t msr;
1033
1034 object = memory_object_control_to_vm_object(control);
1035
1036 XPR(XPR_MEMORY_OBJECT,
1037 "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
1038 (integer_t)object, offset, length, 0, 0);
1039
1040 /*
1041 * Look for bogus arguments
1042 */
1043
1044 if (object == VM_OBJECT_NULL)
1045 return (KERN_INVALID_ARGUMENT);
1046
1047 vm_object_lock(object);
1048
1049 /*
1050 * search for sync request structure
1051 */
1052 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
1053 if (msr->offset == offset && msr->length == length) {
1054 queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
1055 break;
1056 }
1057 }/* queue_iterate */
1058
1059 if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
1060 vm_object_unlock(object);
1061 return KERN_INVALID_ARGUMENT;
1062 }
1063
1064 msr_lock(msr);
1065 vm_object_unlock(object);
1066 msr->flag = VM_MSYNC_DONE;
1067 msr_unlock(msr);
1068 thread_wakeup((event_t) msr);
1069
1070 return KERN_SUCCESS;
1071 }/* memory_object_synchronize_completed */
1072
1073 static kern_return_t
1074 vm_object_set_attributes_common(
1075 vm_object_t object,
1076 boolean_t may_cache,
1077 memory_object_copy_strategy_t copy_strategy,
1078 boolean_t temporary,
1079 memory_object_cluster_size_t cluster_size,
1080 boolean_t silent_overwrite,
1081 boolean_t advisory_pageout)
1082 {
1083 boolean_t object_became_ready;
1084
1085 XPR(XPR_MEMORY_OBJECT,
1086 "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
1087 (integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
1088
1089 if (object == VM_OBJECT_NULL)
1090 return(KERN_INVALID_ARGUMENT);
1091
1092 /*
1093 * Verify the attributes of importance
1094 */
1095
1096 switch(copy_strategy) {
1097 case MEMORY_OBJECT_COPY_NONE:
1098 case MEMORY_OBJECT_COPY_DELAY:
1099 break;
1100 default:
1101 return(KERN_INVALID_ARGUMENT);
1102 }
1103
1104 #if !ADVISORY_PAGEOUT
1105 if (silent_overwrite || advisory_pageout)
1106 return(KERN_INVALID_ARGUMENT);
1107
1108 #endif /* !ADVISORY_PAGEOUT */
1109 if (may_cache)
1110 may_cache = TRUE;
1111 if (temporary)
1112 temporary = TRUE;
1113 if (cluster_size != 0) {
1114 int pages_per_cluster;
1115 pages_per_cluster = atop_32(cluster_size);
1116 /*
1117 * Cluster size must be integral multiple of page size,
1118 * and be a power of 2 number of pages.
1119 */
1120 if ((cluster_size & (PAGE_SIZE-1)) ||
1121 ((pages_per_cluster-1) & pages_per_cluster))
1122 return KERN_INVALID_ARGUMENT;
1123 }
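
/*
 * Editorial note: assuming 4K pages, cluster_size = 0x8000 (8 pages)
 * passes the check above since 8 is a power of two, while 0xC000
 * (12 pages) would be rejected by the
 * (pages_per_cluster-1) & pages_per_cluster test.
 */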
1124
1125 vm_object_lock(object);
1126
1127 /*
1128 * Copy the attributes
1129 */
1130 assert(!object->internal);
1131 object_became_ready = !object->pager_ready;
1132 object->copy_strategy = copy_strategy;
1133 object->can_persist = may_cache;
1134 object->temporary = temporary;
1135 object->silent_overwrite = silent_overwrite;
1136 object->advisory_pageout = advisory_pageout;
1137 if (cluster_size == 0)
1138 cluster_size = PAGE_SIZE;
1139 object->cluster_size = cluster_size;
1140
1141 assert(cluster_size >= PAGE_SIZE &&
1142 cluster_size % PAGE_SIZE == 0);
1143
1144 /*
1145 * Wake up anyone waiting for the ready attribute
1146 * to become asserted.
1147 */
1148
1149 if (object_became_ready) {
1150 object->pager_ready = TRUE;
1151 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
1152 }
1153
1154 vm_object_unlock(object);
1155
1156 return(KERN_SUCCESS);
1157 }
1158
1159 /*
1160 * Set the memory object attribute as provided.
1161 *
1162 * XXX This routine cannot be completed until the vm_msync, clean
1163 * in place, and cluster work is completed. See ifdef notyet
1164 * below and note that vm_object_set_attributes_common()
1165 * may have to be expanded.
1166 */
1167 kern_return_t
1168 memory_object_change_attributes(
1169 memory_object_control_t control,
1170 memory_object_flavor_t flavor,
1171 memory_object_info_t attributes,
1172 mach_msg_type_number_t count)
1173 {
1174 vm_object_t object;
1175 kern_return_t result = KERN_SUCCESS;
1176 boolean_t temporary;
1177 boolean_t may_cache;
1178 boolean_t invalidate;
1179 memory_object_cluster_size_t cluster_size;
1180 memory_object_copy_strategy_t copy_strategy;
1181 boolean_t silent_overwrite;
1182 boolean_t advisory_pageout;
1183
1184 object = memory_object_control_to_vm_object(control);
1185 if (object == VM_OBJECT_NULL)
1186 return (KERN_INVALID_ARGUMENT);
1187
1188 vm_object_lock(object);
1189
1190 temporary = object->temporary;
1191 may_cache = object->can_persist;
1192 copy_strategy = object->copy_strategy;
1193 silent_overwrite = object->silent_overwrite;
1194 advisory_pageout = object->advisory_pageout;
1195 #if notyet
1196 invalidate = object->invalidate;
1197 #endif
1198 cluster_size = object->cluster_size;
1199 vm_object_unlock(object);
1200
1201 switch (flavor) {
1202 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1203 {
1204 old_memory_object_behave_info_t behave;
1205
1206 if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1207 result = KERN_INVALID_ARGUMENT;
1208 break;
1209 }
1210
1211 behave = (old_memory_object_behave_info_t) attributes;
1212
1213 temporary = behave->temporary;
1214 invalidate = behave->invalidate;
1215 copy_strategy = behave->copy_strategy;
1216
1217 break;
1218 }
1219
1220 case MEMORY_OBJECT_BEHAVIOR_INFO:
1221 {
1222 memory_object_behave_info_t behave;
1223
1224 if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1225 result = KERN_INVALID_ARGUMENT;
1226 break;
1227 }
1228
1229 behave = (memory_object_behave_info_t) attributes;
1230
1231 temporary = behave->temporary;
1232 invalidate = behave->invalidate;
1233 copy_strategy = behave->copy_strategy;
1234 silent_overwrite = behave->silent_overwrite;
1235 advisory_pageout = behave->advisory_pageout;
1236 break;
1237 }
1238
1239 case MEMORY_OBJECT_PERFORMANCE_INFO:
1240 {
1241 memory_object_perf_info_t perf;
1242
1243 if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
1244 result = KERN_INVALID_ARGUMENT;
1245 break;
1246 }
1247
1248 perf = (memory_object_perf_info_t) attributes;
1249
1250 may_cache = perf->may_cache;
1251 cluster_size = round_page_32(perf->cluster_size);
1252
1253 break;
1254 }
1255
1256 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1257 {
1258 old_memory_object_attr_info_t attr;
1259
1260 if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1261 result = KERN_INVALID_ARGUMENT;
1262 break;
1263 }
1264
1265 attr = (old_memory_object_attr_info_t) attributes;
1266
1267 may_cache = attr->may_cache;
1268 copy_strategy = attr->copy_strategy;
1269 cluster_size = page_size;
1270
1271 break;
1272 }
1273
1274 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1275 {
1276 memory_object_attr_info_t attr;
1277
1278 if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
1279 result = KERN_INVALID_ARGUMENT;
1280 break;
1281 }
1282
1283 attr = (memory_object_attr_info_t) attributes;
1284
1285 copy_strategy = attr->copy_strategy;
1286 may_cache = attr->may_cache_object;
1287 cluster_size = attr->cluster_size;
1288 temporary = attr->temporary;
1289
1290 break;
1291 }
1292
1293 default:
1294 result = KERN_INVALID_ARGUMENT;
1295 break;
1296 }
1297
1298 if (result != KERN_SUCCESS)
1299 return(result);
1300
1301 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
1302 copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1303 temporary = TRUE;
1304 } else {
1305 temporary = FALSE;
1306 }
1307
1308 /*
1309 * XXX may_cache may become a tri-valued variable to handle
1310 * XXX uncache if not in use.
1311 */
1312 return (vm_object_set_attributes_common(object,
1313 may_cache,
1314 copy_strategy,
1315 temporary,
1316 cluster_size,
1317 silent_overwrite,
1318 advisory_pageout));
1319 }
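
/*
 * Illustrative sketch (editorial, not part of the build): asking the
 * kernel to cache an object and use 64K clusters via the performance
 * flavor.  "control" is a hypothetical memory_object_control_t.
 *
 *	struct memory_object_perf_info perf;
 *
 *	perf.cluster_size = 64 * 1024;
 *	perf.may_cache = TRUE;
 *	(void) memory_object_change_attributes(control,
 *				MEMORY_OBJECT_PERFORMANCE_INFO,
 *				(memory_object_info_t) &perf,
 *				MEMORY_OBJECT_PERF_INFO_COUNT);
 */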
1320
1321 kern_return_t
1322 memory_object_get_attributes(
1323 memory_object_control_t control,
1324 memory_object_flavor_t flavor,
1325 memory_object_info_t attributes, /* pointer to OUT array */
1326 mach_msg_type_number_t *count) /* IN/OUT */
1327 {
1328 kern_return_t ret = KERN_SUCCESS;
1329 vm_object_t object;
1330
1331 object = memory_object_control_to_vm_object(control);
1332 if (object == VM_OBJECT_NULL)
1333 return (KERN_INVALID_ARGUMENT);
1334
1335 vm_object_lock(object);
1336
1337 switch (flavor) {
1338 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1339 {
1340 old_memory_object_behave_info_t behave;
1341
1342 if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1343 ret = KERN_INVALID_ARGUMENT;
1344 break;
1345 }
1346
1347 behave = (old_memory_object_behave_info_t) attributes;
1348 behave->copy_strategy = object->copy_strategy;
1349 behave->temporary = object->temporary;
1350 #if notyet /* remove when vm_msync complies and clean in place fini */
1351 behave->invalidate = object->invalidate;
1352 #else
1353 behave->invalidate = FALSE;
1354 #endif
1355
1356 *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1357 break;
1358 }
1359
1360 case MEMORY_OBJECT_BEHAVIOR_INFO:
1361 {
1362 memory_object_behave_info_t behave;
1363
1364 if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1365 ret = KERN_INVALID_ARGUMENT;
1366 break;
1367 }
1368
1369 behave = (memory_object_behave_info_t) attributes;
1370 behave->copy_strategy = object->copy_strategy;
1371 behave->temporary = object->temporary;
1372 #if notyet /* remove when vm_msync complies and clean in place fini */
1373 behave->invalidate = object->invalidate;
1374 #else
1375 behave->invalidate = FALSE;
1376 #endif
1377 behave->advisory_pageout = object->advisory_pageout;
1378 behave->silent_overwrite = object->silent_overwrite;
1379 *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1380 break;
1381 }
1382
1383 case MEMORY_OBJECT_PERFORMANCE_INFO:
1384 {
1385 memory_object_perf_info_t perf;
1386
1387 if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
1388 ret = KERN_INVALID_ARGUMENT;
1389 break;
1390 }
1391
1392 perf = (memory_object_perf_info_t) attributes;
1393 perf->cluster_size = object->cluster_size;
1394 perf->may_cache = object->can_persist;
1395
1396 *count = MEMORY_OBJECT_PERF_INFO_COUNT;
1397 break;
1398 }
1399
1400 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1401 {
1402 old_memory_object_attr_info_t attr;
1403
1404 if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1405 ret = KERN_INVALID_ARGUMENT;
1406 break;
1407 }
1408
1409 attr = (old_memory_object_attr_info_t) attributes;
1410 attr->may_cache = object->can_persist;
1411 attr->copy_strategy = object->copy_strategy;
1412
1413 *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
1414 break;
1415 }
1416
1417 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1418 {
1419 memory_object_attr_info_t attr;
1420
1421 if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
1422 ret = KERN_INVALID_ARGUMENT;
1423 break;
1424 }
1425
1426 attr = (memory_object_attr_info_t) attributes;
1427 attr->copy_strategy = object->copy_strategy;
1428 attr->cluster_size = object->cluster_size;
1429 attr->may_cache_object = object->can_persist;
1430 attr->temporary = object->temporary;
1431
1432 *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
1433 break;
1434 }
1435
1436 default:
1437 ret = KERN_INVALID_ARGUMENT;
1438 break;
1439 }
1440
1441 vm_object_unlock(object);
1442
1443 return(ret);
1444 }
1445
1446
1447 kern_return_t
1448 memory_object_iopl_request(
1449 ipc_port_t port,
1450 memory_object_offset_t offset,
1451 upl_size_t *upl_size,
1452 upl_t *upl_ptr,
1453 upl_page_info_array_t user_page_list,
1454 unsigned int *page_list_count,
1455 int *flags)
1456 {
1457 vm_object_t object;
1458 kern_return_t ret;
1459 int caller_flags;
1460
1461 caller_flags = *flags;
1462
1463 if (caller_flags & ~UPL_VALID_FLAGS) {
1464 /*
1465 * For forward compatibility's sake,
1466 * reject any unknown flag.
1467 */
1468 return KERN_INVALID_VALUE;
1469 }
1470
1471 if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
1472 vm_named_entry_t named_entry;
1473
1474 named_entry = (vm_named_entry_t)port->ip_kobject;
1475 /* a few checks to make sure user is obeying rules */
1476 if(*upl_size == 0) {
1477 if(offset >= named_entry->size)
1478 return(KERN_INVALID_RIGHT);
1479 *upl_size = named_entry->size - offset;
1480 }
1481 if(caller_flags & UPL_COPYOUT_FROM) {
1482 if((named_entry->protection & VM_PROT_READ)
1483 != VM_PROT_READ) {
1484 return(KERN_INVALID_RIGHT);
1485 }
1486 } else {
1487 if((named_entry->protection &
1488 (VM_PROT_READ | VM_PROT_WRITE))
1489 != (VM_PROT_READ | VM_PROT_WRITE)) {
1490 return(KERN_INVALID_RIGHT);
1491 }
1492 }
1493 if(named_entry->size < (offset + *upl_size))
1494 return(KERN_INVALID_ARGUMENT);
1495
1496 /* the caller's offset parameter is defined to be the */
1497 /* offset from the beginning of the named entry in the object */
1498 offset = offset + named_entry->offset;
1499
1500 if(named_entry->is_sub_map)
1501 return (KERN_INVALID_ARGUMENT);
1502
1503 named_entry_lock(named_entry);
1504
1505 if (named_entry->is_pager) {
1506 object = vm_object_enter(named_entry->backing.pager,
1507 named_entry->offset + named_entry->size,
1508 named_entry->internal,
1509 FALSE,
1510 FALSE);
1511 if (object == VM_OBJECT_NULL) {
1512 named_entry_unlock(named_entry);
1513 return(KERN_INVALID_OBJECT);
1514 }
1515
1516 /* JMM - drop reference on pager here? */
1517
1518 /* create an extra reference for the named entry */
1519 vm_object_lock(object);
1520 vm_object_reference_locked(object);
1521 named_entry->backing.object = object;
1522 named_entry->is_pager = FALSE;
1523 named_entry_unlock(named_entry);
1524
1525 /* wait for object to be ready */
1526 while (!object->pager_ready) {
1527 vm_object_wait(object,
1528 VM_OBJECT_EVENT_PAGER_READY,
1529 THREAD_UNINT);
1530 vm_object_lock(object);
1531 }
1532 vm_object_unlock(object);
1533 } else {
1534 /* This is the case where we are going to map */
1535 /* an already mapped object. If the object is */
1536 /* not ready, it is internal. An external */
1537 /* object cannot be mapped until it is ready, */
1538 /* so we can avoid the ready check */
1539 /* in this case. */
1540 object = named_entry->backing.object;
1541 vm_object_reference(object);
1542 named_entry_unlock(named_entry);
1543 }
1544 } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
1545 memory_object_control_t control;
1546 control = (memory_object_control_t) port;
1547 if (control == NULL)
1548 return (KERN_INVALID_ARGUMENT);
1549 object = memory_object_control_to_vm_object(control);
1550 if (object == VM_OBJECT_NULL)
1551 return (KERN_INVALID_ARGUMENT);
1552 vm_object_reference(object);
1553 } else {
1554 return KERN_INVALID_ARGUMENT;
1555 }
1556 if (object == VM_OBJECT_NULL)
1557 return (KERN_INVALID_ARGUMENT);
1558
1559 if (!object->private) {
1560 if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
1561 *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
1562 if (object->phys_contiguous) {
1563 *flags = UPL_PHYS_CONTIG;
1564 } else {
1565 *flags = 0;
1566 }
1567 } else {
1568 *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
1569 }
1570
1571 ret = vm_object_iopl_request(object,
1572 offset,
1573 *upl_size,
1574 upl_ptr,
1575 user_page_list,
1576 page_list_count,
1577 caller_flags);
1578 vm_object_deallocate(object);
1579 return ret;
1580 }
1581
1582 /*
1583 * Routine: memory_object_upl_request [interface]
1584 * Purpose:
1585 * Cause the population of a portion of a vm_object.
1586 * Depending on the nature of the request, the pages
1587 * returned may contain valid data or be uninitialized.
1588 *
1589 */
1590
1591 kern_return_t
1592 memory_object_upl_request(
1593 memory_object_control_t control,
1594 memory_object_offset_t offset,
1595 upl_size_t size,
1596 upl_t *upl_ptr,
1597 upl_page_info_array_t user_page_list,
1598 unsigned int *page_list_count,
1599 int cntrl_flags)
1600 {
1601 vm_object_t object;
1602
1603 object = memory_object_control_to_vm_object(control);
1604 if (object == VM_OBJECT_NULL)
1605 return (KERN_INVALID_ARGUMENT);
1606
1607 return vm_object_upl_request(object,
1608 offset,
1609 size,
1610 upl_ptr,
1611 user_page_list,
1612 page_list_count,
1613 cntrl_flags);
1614 }
1615
1616 /*
1617 * Routine: memory_object_super_upl_request [interface]
1618 * Purpose:
1619 * Cause the population of a portion of a vm_object
1620 * in much the same way as memory_object_upl_request.
1621 * Depending on the nature of the request, the pages
1622 * returned may contain valid data or be uninitialized.
1623 * However, the region may be expanded up to the super
1624 * cluster size provided.
1625 */
1626
1627 kern_return_t
1628 memory_object_super_upl_request(
1629 memory_object_control_t control,
1630 memory_object_offset_t offset,
1631 upl_size_t size,
1632 upl_size_t super_cluster,
1633 upl_t *upl,
1634 upl_page_info_t *user_page_list,
1635 unsigned int *page_list_count,
1636 int cntrl_flags)
1637 {
1638 vm_object_t object;
1639
1640 object = memory_object_control_to_vm_object(control);
1641 if (object == VM_OBJECT_NULL)
1642 return (KERN_INVALID_ARGUMENT);
1643
1644 return vm_object_super_upl_request(object,
1645 offset,
1646 size,
1647 super_cluster,
1648 upl,
1649 user_page_list,
1650 page_list_count,
1651 cntrl_flags);
1652 }
1653
1654 int vm_stat_discard_cleared_reply = 0;
1655 int vm_stat_discard_cleared_unset = 0;
1656 int vm_stat_discard_cleared_too_late = 0;
1657
1658
1659
1660 /*
1661 * Routine: host_default_memory_manager [interface]
1662 * Purpose:
1663 * set/get the default memory manager port and default cluster
1664 * size.
1665 *
1666 * If successful, consumes the supplied naked send right.
1667 */
1668 kern_return_t
1669 host_default_memory_manager(
1670 host_priv_t host_priv,
1671 memory_object_default_t *default_manager,
1672 memory_object_cluster_size_t cluster_size)
1673 {
1674 memory_object_default_t current_manager;
1675 memory_object_default_t new_manager;
1676 memory_object_default_t returned_manager;
1677
1678 if (host_priv == HOST_PRIV_NULL)
1679 return(KERN_INVALID_HOST);
1680
1681 assert(host_priv == &realhost);
1682
1683 new_manager = *default_manager;
1684 mutex_lock(&memory_manager_default_lock);
1685 current_manager = memory_manager_default;
1686
1687 if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1688 /*
1689 * Retrieve the current value.
1690 */
1691 memory_object_default_reference(current_manager);
1692 returned_manager = current_manager;
1693 } else {
1694 /*
1695 * Retrieve the current value,
1696 * and replace it with the supplied value.
1697 * We return the old reference to the caller
1698 * but we have to take a reference on the new
1699 * one.
1700 */
1701
1702 returned_manager = current_manager;
1703 memory_manager_default = new_manager;
1704 memory_object_default_reference(new_manager);
1705
1706 if (cluster_size % PAGE_SIZE != 0) {
1707 #if 0
1708 mutex_unlock(&memory_manager_default_lock);
1709 return KERN_INVALID_ARGUMENT;
1710 #else
1711 cluster_size = round_page_32(cluster_size);
1712 #endif
1713 }
1714 memory_manager_default_cluster = cluster_size;
1715
1716 /*
1717 * In case anyone's been waiting for a memory
1718 * manager to be established, wake them up.
1719 */
1720
1721 thread_wakeup((event_t) &memory_manager_default);
1722 }
1723
1724 mutex_unlock(&memory_manager_default_lock);
1725
1726 *default_manager = returned_manager;
1727 return(KERN_SUCCESS);
1728 }
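
/*
 * Illustrative sketch (editorial, not part of the build): querying the
 * current default pager without replacing it, by passing
 * MEMORY_OBJECT_DEFAULT_NULL.  "host" is a hypothetical host_priv_t.
 *
 *	memory_object_default_t dmm = MEMORY_OBJECT_DEFAULT_NULL;
 *
 *	if (host_default_memory_manager(host, &dmm, 0) == KERN_SUCCESS) {
 *		dmm now holds a send right which the caller should
 *		release with memory_object_default_deallocate()
 *	}
 */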
1729
1730 /*
1731 * Routine: memory_manager_default_reference
1732 * Purpose:
1733 * Returns a naked send right for the default
1734 * memory manager. The returned right is always
1735 * valid (not IP_NULL or IP_DEAD).
1736 */
1737
1738 __private_extern__ memory_object_default_t
1739 memory_manager_default_reference(
1740 memory_object_cluster_size_t *cluster_size)
1741 {
1742 memory_object_default_t current_manager;
1743
1744 mutex_lock(&memory_manager_default_lock);
1745 current_manager = memory_manager_default;
1746 while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1747 wait_result_t res;
1748
1749 res = thread_sleep_mutex((event_t) &memory_manager_default,
1750 &memory_manager_default_lock,
1751 THREAD_UNINT);
1752 assert(res == THREAD_AWAKENED);
1753 current_manager = memory_manager_default;
1754 }
1755 memory_object_default_reference(current_manager);
1756 *cluster_size = memory_manager_default_cluster;
1757 mutex_unlock(&memory_manager_default_lock);
1758
1759 return current_manager;
1760 }
1761
1762 /*
1763 * Routine: memory_manager_default_check
1764 *
1765 * Purpose:
1766 * Check whether a default memory manager has been set
1767 * up yet, or not. Returns KERN_SUCCESS if dmm exists,
1768 * and KERN_FAILURE if dmm does not exist.
1769 *
1770 * If there is no default memory manager, log an error,
1771 * but only the first time.
1772 *
1773 */
1774 __private_extern__ kern_return_t
1775 memory_manager_default_check(void)
1776 {
1777 memory_object_default_t current;
1778
1779 mutex_lock(&memory_manager_default_lock);
1780 current = memory_manager_default;
1781 if (current == MEMORY_OBJECT_DEFAULT_NULL) {
1782 static boolean_t logged; /* initialized to 0 */
1783 boolean_t complain = !logged;
1784 logged = TRUE;
1785 mutex_unlock(&memory_manager_default_lock);
1786 if (complain)
1787 printf("Warning: No default memory manager\n");
1788 return(KERN_FAILURE);
1789 } else {
1790 mutex_unlock(&memory_manager_default_lock);
1791 return(KERN_SUCCESS);
1792 }
1793 }
1794
1795 __private_extern__ void
1796 memory_manager_default_init(void)
1797 {
1798 memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
1799 mutex_init(&memory_manager_default_lock, 0);
1800 }
1801
1802
1803
1804 /* Allow manipulation of individual page state. This is actually part of */
1805 /* the UPL regimen but takes place on the object rather than on a UPL */
1806
1807 kern_return_t
1808 memory_object_page_op(
1809 memory_object_control_t control,
1810 memory_object_offset_t offset,
1811 int ops,
1812 ppnum_t *phys_entry,
1813 int *flags)
1814 {
1815 vm_object_t object;
1816
1817 object = memory_object_control_to_vm_object(control);
1818 if (object == VM_OBJECT_NULL)
1819 return (KERN_INVALID_ARGUMENT);
1820
1821 return vm_object_page_op(object, offset, ops, phys_entry, flags);
1822 }
1823
1824 /*
1825 * memory_object_range_op offers performance enhancement over
1826 * memory_object_page_op for page_op functions which do not require page
1827 * level state to be returned from the call. Page_op was created to provide
1828 * a low-cost alternative to page manipulation via UPLs when only a single
1829 * page was involved. The range_op call establishes the ability in the _op
1830 * family of functions to work on multiple pages where the lack of page level
1831 * state handling allows the caller to avoid the overhead of the upl structures.
1832 */
1833
1834 kern_return_t
1835 memory_object_range_op(
1836 memory_object_control_t control,
1837 memory_object_offset_t offset_beg,
1838 memory_object_offset_t offset_end,
1839 int ops,
1840 int *range)
1841 {
1842 vm_object_t object;
1843
1844 object = memory_object_control_to_vm_object(control);
1845 if (object == VM_OBJECT_NULL)
1846 return (KERN_INVALID_ARGUMENT);
1847
1848 return vm_object_range_op(object,
1849 offset_beg,
1850 offset_end,
1851 ops,
1852 range);
1853 }
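
/*
 * Illustrative sketch (editorial, not part of the build): probing a
 * range without building a UPL.  UPL_ROP_ABSENT is assumed to be the
 * intended ops flag here; "control", "start" and "end" are
 * hypothetical.
 *
 *	int range = 0;
 *
 *	(void) memory_object_range_op(control, start, end,
 *				      UPL_ROP_ABSENT, &range);
 *	on return, "range" describes the qualifying run found by
 *	vm_object_range_op().
 */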
1854
1855
1856 kern_return_t
1857 memory_object_pages_resident(
1858 memory_object_control_t control,
1859 boolean_t * has_pages_resident)
1860 {
1861 vm_object_t object;
1862
1863 *has_pages_resident = FALSE;
1864
1865 object = memory_object_control_to_vm_object(control);
1866 if (object == VM_OBJECT_NULL)
1867 return (KERN_INVALID_ARGUMENT);
1868
1869 if (object->resident_page_count)
1870 *has_pages_resident = TRUE;
1871
1872 return (KERN_SUCCESS);
1873 }
1874
1875
1876 static zone_t mem_obj_control_zone;
1877
1878 __private_extern__ void
1879 memory_object_control_bootstrap(void)
1880 {
1881 int i;
1882
1883 i = (vm_size_t) sizeof (struct memory_object_control);
1884 mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
1885 return;
1886 }
1887
1888 __private_extern__ memory_object_control_t
1889 memory_object_control_allocate(
1890 vm_object_t object)
1891 {
1892 memory_object_control_t control;
1893
1894 control = (memory_object_control_t)zalloc(mem_obj_control_zone);
1895 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1896 control->moc_object = object;
1897 control->moc_ikot = IKOT_MEM_OBJ_CONTROL; /* fake ip_kotype */
1898 }
1899 return (control);
1900 }
1901
1902 __private_extern__ void
1903 memory_object_control_collapse(
1904 memory_object_control_t control,
1905 vm_object_t object)
1906 {
1907 assert((control->moc_object != VM_OBJECT_NULL) &&
1908 (control->moc_object != object));
1909 control->moc_object = object;
1910 }
1911
1912 __private_extern__ vm_object_t
1913 memory_object_control_to_vm_object(
1914 memory_object_control_t control)
1915 {
1916 if (control == MEMORY_OBJECT_CONTROL_NULL ||
1917 control->moc_ikot != IKOT_MEM_OBJ_CONTROL)
1918 return VM_OBJECT_NULL;
1919
1920 return (control->moc_object);
1921 }
1922
1923 memory_object_control_t
1924 convert_port_to_mo_control(
1925 __unused mach_port_t port)
1926 {
1927 return MEMORY_OBJECT_CONTROL_NULL;
1928 }
1929
1930
1931 mach_port_t
1932 convert_mo_control_to_port(
1933 __unused memory_object_control_t control)
1934 {
1935 return MACH_PORT_NULL;
1936 }
1937
1938 void
1939 memory_object_control_reference(
1940 __unused memory_object_control_t control)
1941 {
1942 return;
1943 }
1944
1945 /*
1946 * We only ever issue one of these references, so kill it
1947 * when that gets released (should switch to real reference
1948 * counting in true port-less EMMI).
1949 */
1950 void
1951 memory_object_control_deallocate(
1952 memory_object_control_t control)
1953 {
1954 zfree(mem_obj_control_zone, control);
1955 }
1956
1957 void
1958 memory_object_control_disable(
1959 memory_object_control_t control)
1960 {
1961 assert(control->moc_object != VM_OBJECT_NULL);
1962 control->moc_object = VM_OBJECT_NULL;
1963 }
1964
1965 void
1966 memory_object_default_reference(
1967 memory_object_default_t dmm)
1968 {
1969 ipc_port_make_send(dmm);
1970 }
1971
1972 void
1973 memory_object_default_deallocate(
1974 memory_object_default_t dmm)
1975 {
1976 ipc_port_release_send(dmm);
1977 }
1978
1979 memory_object_t
1980 convert_port_to_memory_object(
1981 __unused mach_port_t port)
1982 {
1983 return (MEMORY_OBJECT_NULL);
1984 }
1985
1986
1987 mach_port_t
1988 convert_memory_object_to_port(
1989 __unused memory_object_t object)
1990 {
1991 return (MACH_PORT_NULL);
1992 }
1993
1994
1995 /* Routine memory_object_reference */
1996 void memory_object_reference(
1997 memory_object_t memory_object)
1998 {
1999 (memory_object->mo_pager_ops->memory_object_reference)(
2000 memory_object);
2001 }
2002
2003 /* Routine memory_object_deallocate */
2004 void memory_object_deallocate(
2005 memory_object_t memory_object)
2006 {
2007 (memory_object->mo_pager_ops->memory_object_deallocate)(
2008 memory_object);
2009 }
2010
2011
2012 /* Routine memory_object_init */
2013 kern_return_t memory_object_init
2014 (
2015 memory_object_t memory_object,
2016 memory_object_control_t memory_control,
2017 memory_object_cluster_size_t memory_object_page_size
2018 )
2019 {
2020 return (memory_object->mo_pager_ops->memory_object_init)(
2021 memory_object,
2022 memory_control,
2023 memory_object_page_size);
2024 }
2025
2026 /* Routine memory_object_terminate */
2027 kern_return_t memory_object_terminate
2028 (
2029 memory_object_t memory_object
2030 )
2031 {
2032 return (memory_object->mo_pager_ops->memory_object_terminate)(
2033 memory_object);
2034 }
2035
2036 /* Routine memory_object_data_request */
2037 kern_return_t memory_object_data_request
2038 (
2039 memory_object_t memory_object,
2040 memory_object_offset_t offset,
2041 memory_object_cluster_size_t length,
2042 vm_prot_t desired_access
2043 )
2044 {
2045 return (memory_object->mo_pager_ops->memory_object_data_request)(
2046 memory_object,
2047 offset,
2048 length,
2049 desired_access);
2050 }
2051
2052 /* Routine memory_object_data_return */
2053 kern_return_t memory_object_data_return
2054 (
2055 memory_object_t memory_object,
2056 memory_object_offset_t offset,
2057 vm_size_t size,
2058 memory_object_offset_t *resid_offset,
2059 int *io_error,
2060 boolean_t dirty,
2061 boolean_t kernel_copy,
2062 int upl_flags
2063 )
2064 {
2065 return (memory_object->mo_pager_ops->memory_object_data_return)(
2066 memory_object,
2067 offset,
2068 size,
2069 resid_offset,
2070 io_error,
2071 dirty,
2072 kernel_copy,
2073 upl_flags);
2074 }
2075
2076 /* Routine memory_object_data_initialize */
2077 kern_return_t memory_object_data_initialize
2078 (
2079 memory_object_t memory_object,
2080 memory_object_offset_t offset,
2081 vm_size_t size
2082 )
2083 {
2084 return (memory_object->mo_pager_ops->memory_object_data_initialize)(
2085 memory_object,
2086 offset,
2087 size);
2088 }
2089
2090 /* Routine memory_object_data_unlock */
2091 kern_return_t memory_object_data_unlock
2092 (
2093 memory_object_t memory_object,
2094 memory_object_offset_t offset,
2095 vm_size_t size,
2096 vm_prot_t desired_access
2097 )
2098 {
2099 return (memory_object->mo_pager_ops->memory_object_data_unlock)(
2100 memory_object,
2101 offset,
2102 size,
2103 desired_access);
2104 }
2105
2106 /* Routine memory_object_synchronize */
2107 kern_return_t memory_object_synchronize
2108 (
2109 memory_object_t memory_object,
2110 memory_object_offset_t offset,
2111 vm_size_t size,
2112 vm_sync_t sync_flags
2113 )
2114 {
2115 return (memory_object->mo_pager_ops->memory_object_synchronize)(
2116 memory_object,
2117 offset,
2118 size,
2119 sync_flags);
2120 }
2121
2122 /* Routine memory_object_unmap */
2123 kern_return_t memory_object_unmap
2124 (
2125 memory_object_t memory_object
2126 )
2127 {
2128 return (memory_object->mo_pager_ops->memory_object_unmap)(
2129 memory_object);
2130 }
2131
2132 /* Routine memory_object_create */
2133 kern_return_t memory_object_create
2134 (
2135 memory_object_default_t default_memory_manager,
2136 vm_size_t new_memory_object_size,
2137 memory_object_t *new_memory_object
2138 )
2139 {
2140 return default_pager_memory_object_create(default_memory_manager,
2141 new_memory_object_size,
2142 new_memory_object);
2143 }
2144
2145 upl_t
2146 convert_port_to_upl(
2147 ipc_port_t port)
2148 {
2149 upl_t upl;
2150
2151 ip_lock(port);
2152 if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
2153 ip_unlock(port);
2154 return (upl_t)NULL;
2155 }
2156 upl = (upl_t) port->ip_kobject;
2157 ip_unlock(port);
2158 upl_lock(upl);
2159 upl->ref_count+=1;
2160 upl_unlock(upl);
2161 return upl;
2162 }
2163
2164 mach_port_t
2165 convert_upl_to_port(
2166 __unused upl_t upl)
2167 {
2168 return MACH_PORT_NULL;
2169 }
2170
2171 __private_extern__ void
2172 upl_no_senders(
2173 __unused ipc_port_t port,
2174 __unused mach_port_mscount_t mscount)
2175 {
2176 return;
2177 }