apple/xnu (release xnu-344.21.73), file osfmk/vm/vm_object.c
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: vm/vm_object.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young
58 *
59 * Virtual memory object module.
60 */
61
62 #ifdef MACH_BSD
63 /* remove as part of component support merge */
64 extern int vnode_pager_workaround;
65 #endif
66
67 #include <mach_pagemap.h>
68 #include <task_swapper.h>
69
70 #include <mach/mach_types.h>
71 #include <mach/memory_object.h>
72 #include <mach/memory_object_default.h>
73 #include <mach/memory_object_control_server.h>
74 #include <mach/vm_param.h>
75 #include <ipc/ipc_port.h>
76 #include <kern/assert.h>
77 #include <kern/lock.h>
78 #include <kern/queue.h>
79 #include <kern/xpr.h>
80 #include <kern/zalloc.h>
81 #include <kern/host.h>
82 #include <kern/host_statistics.h>
83 #include <kern/processor.h>
84 #include <vm/memory_object.h>
85 #include <vm/vm_fault.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pageout.h>
90 #include <kern/misc_protos.h>
91
92
93
94 /*
95 * Virtual memory objects maintain the actual data
96 * associated with allocated virtual memory. A given
97 * page of memory exists within exactly one object.
98 *
99 * An object is only deallocated when all "references"
100 * are given up.
101 *
102 * Associated with each object is a list of all resident
103 * memory pages belonging to that object; this list is
104 * maintained by the "vm_page" module, but locked by the object's
105 * lock.
106 *
107 * Each object also records the memory object reference
108 * that is used by the kernel to request and write
109 * back data (the memory object, field "pager"), etc...
110 *
111 * Virtual memory objects are allocated to provide
112 * zero-filled memory (vm_allocate) or map a user-defined
113 * memory object into a virtual address space (vm_map).
114 *
115 * Virtual memory objects that refer to a user-defined
116 * memory object are called "permanent", because all changes
117 * made in virtual memory are reflected back to the
118 * memory manager, which may then store them permanently.
119 * Other virtual memory objects are called "temporary",
120 * meaning that changes need be written back only when
121 * necessary to reclaim pages, and that storage associated
122 * with the object can be discarded once it is no longer
123 * mapped.
124 *
125 * A permanent memory object may be mapped into more
126 * than one virtual address space. Moreover, two threads
127 * may attempt to make the first mapping of a memory
128 * object concurrently. Only one thread is allowed to
129 * complete this mapping; all others wait until the
130 * "pager_initialized" field is asserted, indicating
131 * that the first thread has initialized all of the
132 * necessary fields in the virtual memory object structure.
133 *
134 * The kernel relies on a *default memory manager* to
135 * provide backing storage for the zero-filled virtual
136 * memory objects. The pager memory objects associated
137 * with these temporary virtual memory objects are only
138 * requested from the default memory manager when it
139 * becomes necessary. Virtual memory objects
140 * that depend on the default memory manager are called
141 * "internal". The "pager_created" field is provided to
142 * indicate whether these ports have ever been allocated.
143 *
144 * The kernel may also create virtual memory objects to
145 * hold changed pages after a copy-on-write operation.
146 * In this case, the virtual memory object (and its
147 * backing storage -- its memory object) only contains
148 * those pages that have been changed. The "shadow"
149 * field refers to the virtual memory object that contains
150 * the remainder of the contents. The "shadow_offset"
151 * field indicates where in the "shadow" these contents begin.
152 * The "copy" field refers to a virtual memory object
153 * to which changed pages must be copied before changing
154 * this object, in order to implement another form
155 * of copy-on-write optimization.
156 *
157 * The virtual memory object structure also records
158 * the attributes associated with its memory object.
159 * The "pager_ready", "can_persist" and "copy_strategy"
160 * fields represent those attributes. The "cached_list"
161 * field is used in the implementation of the persistence
162 * attribute.
163 *
164 * ZZZ Continue this comment.
165 */
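/*
 * Illustrative sketch (not part of the original source): how the
 * "shadow" and "shadow_offset" fields described above are followed,
 * conceptually, to find the resident page backing a given offset.
 * The real lookup is done with proper locking and paging support in
 * vm_fault_page(); only field and function names already used in
 * this file are assumed here.
 */
#if 0
static vm_page_t
shadow_chain_lookup_sketch(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_t	m;

	while (object != VM_OBJECT_NULL) {
		/* Look for a resident page at this level of the chain. */
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL)
			return m;
		/* Not here: translate the offset and drop to the shadow. */
		offset += object->shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;
}
#endif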
166
167 /* Forward declarations for internal functions. */
168 static void _vm_object_allocate(
169 vm_object_size_t size,
170 vm_object_t object);
171
172 static kern_return_t vm_object_terminate(
173 vm_object_t object);
174
175 extern void vm_object_remove(
176 vm_object_t object);
177
178 static vm_object_t vm_object_cache_trim(
179 boolean_t called_from_vm_object_deallocate);
180
181 static void vm_object_deactivate_all_pages(
182 vm_object_t object);
183
184 static void vm_object_abort_activity(
185 vm_object_t object);
186
187 static kern_return_t vm_object_copy_call(
188 vm_object_t src_object,
189 vm_object_offset_t src_offset,
190 vm_object_size_t size,
191 vm_object_t *_result_object);
192
193 static void vm_object_do_collapse(
194 vm_object_t object,
195 vm_object_t backing_object);
196
197 static void vm_object_do_bypass(
198 vm_object_t object,
199 vm_object_t backing_object);
200
201 static void vm_object_release_pager(
202 memory_object_t pager);
203
204 static zone_t vm_object_zone; /* vm backing store zone */
205
206 /*
207 * All wired-down kernel memory belongs to a single virtual
208 * memory object (kernel_object) to avoid wasting data structures.
209 */
210 static struct vm_object kernel_object_store;
211 __private_extern__ vm_object_t kernel_object = &kernel_object_store;
212
213 /*
214 * The submap object is used as a placeholder for vm_map_submap
215 * operations. The object is declared in vm_map.c because it
216 * is exported by the vm_map module. The storage is declared
217 * here because it must be initialized here.
218 */
219 static struct vm_object vm_submap_object_store;
220
221 /*
222 * Virtual memory objects are initialized from
223 * a template (see vm_object_allocate).
224 *
225 * When adding a new field to the virtual memory
226 * object structure, be sure to add initialization
227 * (see _vm_object_allocate()).
228 */
229 static struct vm_object vm_object_template;
230
231 /*
232 * Virtual memory objects that are not referenced by
233 * any address maps, but that are allowed to persist
234 * (an attribute specified by the associated memory manager),
235 * are kept in a queue (vm_object_cached_list).
236 *
237 * When an object from this queue is referenced again,
238 * for example to make another address space mapping,
239 * it must be removed from the queue. That is, the
240 * queue contains *only* objects with zero references.
241 *
242 * The kernel may choose to terminate objects from this
243 * queue in order to reclaim storage. The current policy
244 * is to permit a fixed maximum number of unreferenced
245 * objects (vm_object_cached_max).
246 *
247 * A spin lock (accessed by routines
248 * vm_object_cache_{lock,lock_try,unlock}) governs the
249 * object cache. It must be held when objects are
250 * added to or removed from the cache (in vm_object_terminate).
251 * The routines that acquire a reference to a virtual
252 * memory object based on one of the memory object ports
253 * must also lock the cache.
254 *
255 * Ideally, the object cache should be more isolated
256 * from the reference mechanism, so that the lock need
257 * not be held to make simple references.
258 */
259 static queue_head_t vm_object_cached_list;
260 static int vm_object_cached_count=0;
261 static int vm_object_cached_high; /* highest # cached objects */
262 static int vm_object_cached_max = 512; /* may be patched*/
263
264 static decl_mutex_data(,vm_object_cached_lock_data)
265
266 #define vm_object_cache_lock() \
267 mutex_lock(&vm_object_cached_lock_data)
268 #define vm_object_cache_lock_try() \
269 mutex_try(&vm_object_cached_lock_data)
270 #define vm_object_cache_unlock() \
271 mutex_unlock(&vm_object_cached_lock_data)
272
273 #define VM_OBJECT_HASH_COUNT 1024
274 static queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT];
275 static struct zone *vm_object_hash_zone;
276
277 struct vm_object_hash_entry {
278 queue_chain_t hash_link; /* hash chain link */
279 memory_object_t pager; /* pager we represent */
280 vm_object_t object; /* corresponding object */
281 boolean_t waiting; /* someone waiting for
282 * termination */
283 };
284
285 typedef struct vm_object_hash_entry *vm_object_hash_entry_t;
286 #define VM_OBJECT_HASH_ENTRY_NULL ((vm_object_hash_entry_t) 0)
287
288 #define VM_OBJECT_HASH_SHIFT 8
289 #define vm_object_hash(pager) \
290 ((((unsigned)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT)
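/*
 * Worked example (illustrative only; the pointer value is hypothetical):
 * a pager at address 0x12345600 hashes to ((0x12345600 >> 8) % 1024)
 * = 0x123456 % 1024 = 86.  Since VM_OBJECT_HASH_COUNT is a power of
 * two, the bucket index is simply the low 10 bits of the shifted
 * pointer value.
 */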
291
292 /*
293 * vm_object_hash_lookup looks up a pager in the hashtable
294 * and returns the corresponding entry, with optional removal.
295 */
296
297 static vm_object_hash_entry_t
298 vm_object_hash_lookup(
299 memory_object_t pager,
300 boolean_t remove_entry)
301 {
302 register queue_t bucket;
303 register vm_object_hash_entry_t entry;
304
305 bucket = &vm_object_hashtable[vm_object_hash(pager)];
306
307 entry = (vm_object_hash_entry_t)queue_first(bucket);
308 while (!queue_end(bucket, (queue_entry_t)entry)) {
309 if (entry->pager == pager && !remove_entry)
310 return(entry);
311 else if (entry->pager == pager) {
312 queue_remove(bucket, entry,
313 vm_object_hash_entry_t, hash_link);
314 return(entry);
315 }
316
317 entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
318 }
319
320 return(VM_OBJECT_HASH_ENTRY_NULL);
321 }
322
323 /*
324 * vm_object_hash_insert enters the specified
325 * pager / cache object association in the hashtable.
326 */
327
328 static void
329 vm_object_hash_insert(
330 vm_object_hash_entry_t entry)
331 {
332 register queue_t bucket;
333
334 bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];
335
336 queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
337 }
338
339 static vm_object_hash_entry_t
340 vm_object_hash_entry_alloc(
341 memory_object_t pager)
342 {
343 vm_object_hash_entry_t entry;
344
345 entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
346 entry->pager = pager;
347 entry->object = VM_OBJECT_NULL;
348 entry->waiting = FALSE;
349
350 return(entry);
351 }
352
353 void
354 vm_object_hash_entry_free(
355 vm_object_hash_entry_t entry)
356 {
357 zfree(vm_object_hash_zone, (vm_offset_t)entry);
358 }
359
360 /*
361 * vm_object_allocate:
362 *
363 * Returns a new object with the given size.
364 */
365
366 static void
367 _vm_object_allocate(
368 vm_object_size_t size,
369 vm_object_t object)
370 {
371 XPR(XPR_VM_OBJECT,
372 "vm_object_allocate, object 0x%X size 0x%X\n",
373 (integer_t)object, size, 0,0,0);
374
375 *object = vm_object_template;
376 queue_init(&object->memq);
377 queue_init(&object->msr_q);
378 #ifdef UBC_DEBUG
379 queue_init(&object->uplq);
380 #endif /* UBC_DEBUG */
381 vm_object_lock_init(object);
382 object->size = size;
383 }
384
385 __private_extern__ vm_object_t
386 vm_object_allocate(
387 vm_object_size_t size)
388 {
389 register vm_object_t object;
390
391 object = (vm_object_t) zalloc(vm_object_zone);
392
393 // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
394
395 if (object != VM_OBJECT_NULL)
396 _vm_object_allocate(size, object);
397
398 return object;
399 }
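/*
 * Illustrative usage sketch (not part of the original source; the
 * size and error handling are hypothetical).  vm_object_allocate
 * hands back a temporary, internal object with one reference, or
 * VM_OBJECT_NULL if the zone allocation fails.
 */
#if 0
static kern_return_t
vm_object_allocate_usage_sketch(void)
{
	vm_object_t	obj;

	obj = vm_object_allocate((vm_object_size_t)(4 * PAGE_SIZE));
	if (obj == VM_OBJECT_NULL)
		return KERN_RESOURCE_SHORTAGE;	/* zone exhausted */

	/* ... use the object (e.g. enter it into a map) ... */

	vm_object_deallocate(obj);	/* drop the allocation reference */
	return KERN_SUCCESS;
}
#endif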
400
401 /*
402 * vm_object_bootstrap:
403 *
404 * Initialize the VM objects module.
405 */
406 __private_extern__ void
407 vm_object_bootstrap(void)
408 {
409 register int i;
410
411 vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
412 round_page_32(512*1024),
413 round_page_32(12*1024),
414 "vm objects");
415
416 queue_init(&vm_object_cached_list);
417 mutex_init(&vm_object_cached_lock_data, ETAP_VM_OBJ_CACHE);
418
419 vm_object_hash_zone =
420 zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
421 round_page_32(512*1024),
422 round_page_32(12*1024),
423 "vm object hash entries");
424
425 for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
426 queue_init(&vm_object_hashtable[i]);
427
428 /*
429 * Fill in a template object, for quick initialization
430 */
431
432 /* memq; Lock; init after allocation */
433 vm_object_template.size = 0;
434 vm_object_template.frozen_size = 0;
435 vm_object_template.ref_count = 1;
436 #if TASK_SWAPPER
437 vm_object_template.res_count = 1;
438 #endif /* TASK_SWAPPER */
439 vm_object_template.resident_page_count = 0;
440 vm_object_template.copy = VM_OBJECT_NULL;
441 vm_object_template.shadow = VM_OBJECT_NULL;
442 vm_object_template.shadow_offset = (vm_object_offset_t) 0;
443 vm_object_template.cow_hint = 0;
444 vm_object_template.true_share = FALSE;
445
446 vm_object_template.pager = MEMORY_OBJECT_NULL;
447 vm_object_template.paging_offset = 0;
448 vm_object_template.pager_request = PAGER_REQUEST_NULL;
449 /* msr_q; init after allocation */
450
451 vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
452 vm_object_template.absent_count = 0;
453 vm_object_template.paging_in_progress = 0;
454
455 /* Begin bitfields */
456 vm_object_template.all_wanted = 0; /* all bits FALSE */
457 vm_object_template.pager_created = FALSE;
458 vm_object_template.pager_initialized = FALSE;
459 vm_object_template.pager_ready = FALSE;
460 vm_object_template.pager_trusted = FALSE;
461 vm_object_template.can_persist = FALSE;
462 vm_object_template.internal = TRUE;
463 vm_object_template.temporary = TRUE;
464 vm_object_template.private = FALSE;
465 vm_object_template.pageout = FALSE;
466 vm_object_template.alive = TRUE;
467 vm_object_template.lock_in_progress = FALSE;
468 vm_object_template.lock_restart = FALSE;
469 vm_object_template.silent_overwrite = FALSE;
470 vm_object_template.advisory_pageout = FALSE;
471 vm_object_template.shadowed = FALSE;
472 vm_object_template.terminating = FALSE;
473 vm_object_template.shadow_severed = FALSE;
474 vm_object_template.phys_contiguous = FALSE;
475 vm_object_template.nophyscache = FALSE;
476 /* End bitfields */
477
478 /* cache bitfields */
479 vm_object_template.wimg_bits = VM_WIMG_DEFAULT;
480
481 /* cached_list; init after allocation */
482 vm_object_template.last_alloc = (vm_object_offset_t) 0;
483 vm_object_template.cluster_size = 0;
484 #if MACH_PAGEMAP
485 vm_object_template.existence_map = VM_EXTERNAL_NULL;
486 #endif /* MACH_PAGEMAP */
487 #if MACH_ASSERT
488 vm_object_template.paging_object = VM_OBJECT_NULL;
489 #endif /* MACH_ASSERT */
490
491 /*
492 * Initialize the "kernel object"
493 */
494
495 kernel_object = &kernel_object_store;
496
497 /*
498 * Note that in the following size specifications, we need to add 1 because
499 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
500 */
501
502 #ifdef ppc
503 _vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
504 kernel_object);
505 #else
506 _vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
507 kernel_object);
508 #endif
509 kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
510
511 /*
512 * Initialize the "submap object". Make it as large as the
513 * kernel object so that no limit is imposed on submap sizes.
514 */
515
516 vm_submap_object = &vm_submap_object_store;
517 #ifdef ppc
518 _vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
519 vm_submap_object);
520 #else
521 _vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
522 vm_submap_object);
523 #endif
524 vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
525
526 /*
527 * Create an "extra" reference to this object so that we never
528 * try to deallocate it; zfree doesn't like to be called with
529 * non-zone memory.
530 */
531 vm_object_reference(vm_submap_object);
532
533 #if MACH_PAGEMAP
534 vm_external_module_initialize();
535 #endif /* MACH_PAGEMAP */
536 }
537
538 __private_extern__ void
539 vm_object_init(void)
540 {
541 /*
542 * Finish initializing the kernel object.
543 */
544 }
545
546 /* remove the typedef below when emergency work-around is taken out */
547 typedef struct vnode_pager {
548 memory_object_t pager;
549 memory_object_t pager_handle; /* pager */
550 memory_object_control_t control_handle; /* memory object's control handle */
551 void *vnode_handle; /* vnode handle */
552 } *vnode_pager_t;
553
554 #define MIGHT_NOT_CACHE_SHADOWS 1
555 #if MIGHT_NOT_CACHE_SHADOWS
556 static int cache_shadows = TRUE;
557 #endif /* MIGHT_NOT_CACHE_SHADOWS */
558
559 /*
560 * vm_object_deallocate:
561 *
562 * Release a reference to the specified object,
563 * gained either through a vm_object_allocate
564 * or a vm_object_reference call. When all references
565 * are gone, storage associated with this object
566 * may be relinquished.
567 *
568 * No object may be locked.
569 */
570 __private_extern__ void
571 vm_object_deallocate(
572 register vm_object_t object)
573 {
574 boolean_t retry_cache_trim = FALSE;
575 vm_object_t shadow;
576
577 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
578 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
579
580
581 while (object != VM_OBJECT_NULL) {
582
583 /*
584 * The cache holds a reference (uncounted) to
585 * the object; we must lock it before removing
586 * the object.
587 */
588
589 vm_object_cache_lock();
590 vm_object_lock(object);
591
592 assert(object->ref_count > 0);
593
594 /*
595 * If the object has a named reference, and only
596 * that reference would remain, inform the pager
597 * about the last "mapping" reference going away.
598 */
599 if ((object->ref_count == 2) && (object->named)) {
600 memory_object_t pager = object->pager;
601
602 /* Notify the Pager that there are no */
603 /* more mappers for this object */
604
605 if (pager != MEMORY_OBJECT_NULL) {
606 vm_object_unlock(object);
607 vm_object_cache_unlock();
608
609 memory_object_unmap(pager);
610
611 vm_object_cache_lock();
612 vm_object_lock(object);
613 assert(object->ref_count > 0);
614 }
615 }
616
617 /*
618 * Lose the reference. If other references
619 * remain, then we are done, unless we need
620 * to retry a cache trim.
621 * If it is the last reference, then keep it
622 * until any pending initialization is completed.
623 */
624
625 /* if the object is terminating, it cannot go into */
626 /* the cache and we obviously should not call */
627 /* terminate again. */
628
629 if ((object->ref_count > 1) || object->terminating) {
630 object->ref_count--;
631 vm_object_res_deallocate(object);
632 vm_object_unlock(object);
633 vm_object_cache_unlock();
634 if (retry_cache_trim &&
635 ((object = vm_object_cache_trim(TRUE)) !=
636 VM_OBJECT_NULL)) {
637 continue;
638 }
639 return;
640 }
641
642 /*
643 * We have to wait for initialization
644 * before destroying or caching the object.
645 */
646
647 if (object->pager_created && ! object->pager_initialized) {
648 assert(! object->can_persist);
649 vm_object_assert_wait(object,
650 VM_OBJECT_EVENT_INITIALIZED,
651 THREAD_UNINT);
652 vm_object_unlock(object);
653 vm_object_cache_unlock();
654 thread_block(THREAD_CONTINUE_NULL);
655 continue;
656 }
657
658 /*
659 * If this object can persist, then enter it in
660 * the cache. Otherwise, terminate it.
661 *
662 * NOTE: Only permanent objects are cached, and
663 * permanent objects cannot have shadows. This
664 * affects the residence counting logic in a minor
665 * way (can do it in-line, mostly).
666 */
667
668 if ((object->can_persist) && (object->alive)) {
669 /*
670 * Now it is safe to decrement reference count,
671 * and to return if reference count is > 0.
672 */
673 if (--object->ref_count > 0) {
674 vm_object_res_deallocate(object);
675 vm_object_unlock(object);
676 vm_object_cache_unlock();
677 if (retry_cache_trim &&
678 ((object = vm_object_cache_trim(TRUE)) !=
679 VM_OBJECT_NULL)) {
680 continue;
681 }
682 return;
683 }
684
685 #if MIGHT_NOT_CACHE_SHADOWS
686 /*
687 * Remove shadow now if we don't
688 * want to cache shadows.
689 */
690 if (! cache_shadows) {
691 shadow = object->shadow;
692 object->shadow = VM_OBJECT_NULL;
693 }
694 #endif /* MIGHT_NOT_CACHE_SHADOWS */
695
696 /*
697 * Enter the object onto the queue of
698 * cached objects, and deactivate
699 * all of its pages.
700 */
701 assert(object->shadow == VM_OBJECT_NULL);
702 VM_OBJ_RES_DECR(object);
703 XPR(XPR_VM_OBJECT,
704 "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
705 (integer_t)object,
706 (integer_t)vm_object_cached_list.next,
707 (integer_t)vm_object_cached_list.prev,0,0);
708
709 vm_object_cached_count++;
710 if (vm_object_cached_count > vm_object_cached_high)
711 vm_object_cached_high = vm_object_cached_count;
712 queue_enter(&vm_object_cached_list, object,
713 vm_object_t, cached_list);
714 vm_object_cache_unlock();
715 vm_object_deactivate_all_pages(object);
716 vm_object_unlock(object);
717
718 #if MIGHT_NOT_CACHE_SHADOWS
719 /*
720 * If we have a shadow that we need
721 * to deallocate, do so now, remembering
722 * to trim the cache later.
723 */
724 if (! cache_shadows && shadow != VM_OBJECT_NULL) {
725 object = shadow;
726 retry_cache_trim = TRUE;
727 continue;
728 }
729 #endif /* MIGHT_NOT_CACHE_SHADOWS */
730
731 /*
732 * Trim the cache. If the cache trim
733 * returns with a shadow for us to deallocate,
734 * then remember to retry the cache trim
735 * when we are done deallocating the shadow.
736 * Otherwise, we are done.
737 */
738
739 object = vm_object_cache_trim(TRUE);
740 if (object == VM_OBJECT_NULL) {
741 return;
742 }
743 retry_cache_trim = TRUE;
744
745 } else {
746 /*
747 * This object is not cachable; terminate it.
748 */
749 XPR(XPR_VM_OBJECT,
750 "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%lX ref %d\n",
751 (integer_t)object, object->resident_page_count,
752 object->paging_in_progress,
753 (natural_t)current_thread(),object->ref_count);
754
755 VM_OBJ_RES_DECR(object); /* XXX ? */
756 /*
757 * Terminate this object. If it had a shadow,
758 * then deallocate it; otherwise, if we need
759 * to retry a cache trim, do so now; otherwise,
760 * we are done. "pageout" objects have a shadow,
761 * but maintain a "paging reference" rather than
762 * a normal reference.
763 */
764 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
765 if(vm_object_terminate(object) != KERN_SUCCESS) {
766 return;
767 }
768 if (shadow != VM_OBJECT_NULL) {
769 object = shadow;
770 continue;
771 }
772 if (retry_cache_trim &&
773 ((object = vm_object_cache_trim(TRUE)) !=
774 VM_OBJECT_NULL)) {
775 continue;
776 }
777 return;
778 }
779 }
780 assert(! retry_cache_trim);
781 }
782
783 /*
784 * Check to see whether we really need to trim
785 * down the cache. If so, remove an object from
786 * the cache, terminate it, and repeat.
787 *
788 * Called with, and returns with, cache lock unlocked.
789 */
790 vm_object_t
791 vm_object_cache_trim(
792 boolean_t called_from_vm_object_deallocate)
793 {
794 register vm_object_t object = VM_OBJECT_NULL;
795 vm_object_t shadow;
796
797 for (;;) {
798
799 /*
800 * If we no longer need to trim the cache,
801 * then we are done.
802 */
803
804 vm_object_cache_lock();
805 if (vm_object_cached_count <= vm_object_cached_max) {
806 vm_object_cache_unlock();
807 return VM_OBJECT_NULL;
808 }
809
810 /*
811 * We must trim down the cache, so remove
812 * the first object in the cache.
813 */
814 XPR(XPR_VM_OBJECT,
815 "vm_object_cache_trim: removing from front of cache (%x, %x)\n",
816 (integer_t)vm_object_cached_list.next,
817 (integer_t)vm_object_cached_list.prev, 0, 0, 0);
818
819 object = (vm_object_t) queue_first(&vm_object_cached_list);
820 if(object == (vm_object_t) &vm_object_cached_list) {
821 /* something's wrong with the calling parameter or */
822 /* the value of vm_object_cached_count, just fix */
823 /* and return */
824 if(vm_object_cached_max < 0)
825 vm_object_cached_max = 0;
826 vm_object_cached_count = 0;
827 vm_object_cache_unlock();
828 return VM_OBJECT_NULL;
829 }
830 vm_object_lock(object);
831 queue_remove(&vm_object_cached_list, object, vm_object_t,
832 cached_list);
833 vm_object_cached_count--;
834
835 /*
836 * Since this object is in the cache, we know
837 * that it is initialized and has no references.
838 * Take a reference to avoid recursive deallocations.
839 */
840
841 assert(object->pager_initialized);
842 assert(object->ref_count == 0);
843 object->ref_count++;
844
845 /*
846 * Terminate the object.
847 * If the object had a shadow, we let vm_object_deallocate
848 * deallocate it. "pageout" objects have a shadow, but
849 * maintain a "paging reference" rather than a normal
850 * reference.
851 * (We are careful here to limit recursion.)
852 */
853 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
854 if(vm_object_terminate(object) != KERN_SUCCESS)
855 continue;
856 if (shadow != VM_OBJECT_NULL) {
857 if (called_from_vm_object_deallocate) {
858 return shadow;
859 } else {
860 vm_object_deallocate(shadow);
861 }
862 }
863 }
864 }
865
866 boolean_t vm_object_terminate_remove_all = FALSE;
867
868 /*
869 * Routine: vm_object_terminate
870 * Purpose:
871 * Free all resources associated with a vm_object.
872 * In/out conditions:
873 * Upon entry, the object must be locked,
874 * and the object must have exactly one reference.
875 *
876 * The shadow object reference is left alone.
877 *
878 * The object must be unlocked if it is found that pages
879 * must be flushed to a backing object. If someone
880 * manages to map the object while it is being flushed,
881 * the object is returned unlocked and unchanged. Otherwise,
882 * upon exit, the cache will be unlocked, and the
883 * object will cease to exist.
884 */
885 static kern_return_t
886 vm_object_terminate(
887 register vm_object_t object)
888 {
889 memory_object_t pager;
890 register vm_page_t p;
891 vm_object_t shadow_object;
892
893 XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
894 (integer_t)object, object->ref_count, 0, 0, 0);
895
896 if (!object->pageout && (!object->temporary || object->can_persist)
897 && (object->pager != NULL || object->shadow_severed)) {
898 vm_object_cache_unlock();
899 while (!queue_empty(&object->memq)) {
900 /*
901 * Clear pager_trusted bit so that the pages get yanked
902 * out of the object instead of cleaned in place. This
903 * prevents a deadlock in XMM and makes more sense anyway.
904 */
905 object->pager_trusted = FALSE;
906
907 p = (vm_page_t) queue_first(&object->memq);
908
909 VM_PAGE_CHECK(p);
910
911 if (p->busy || p->cleaning) {
912 if(p->cleaning || p->absent) {
913 vm_object_paging_wait(object, THREAD_UNINT);
914 continue;
915 } else {
916 panic("vm_object_terminate.3 0x%x 0x%x", object, p);
917 }
918 }
919
920 vm_page_lock_queues();
921 VM_PAGE_QUEUES_REMOVE(p);
922 vm_page_unlock_queues();
923
924 if (p->absent || p->private) {
925
926 /*
927 * For private pages, VM_PAGE_FREE just
928 * leaves the page structure around for
929 * its owner to clean up. For absent
930 * pages, the structure is returned to
931 * the appropriate pool.
932 */
933
934 goto free_page;
935 }
936
937 if (p->fictitious)
938 panic("vm_object_terminate.4 0x%x 0x%x", object, p);
939
940 if (!p->dirty)
941 p->dirty = pmap_is_modified(p->phys_page);
942
943 if ((p->dirty || p->precious) && !p->error && object->alive) {
944 p->busy = TRUE;
945 vm_object_paging_begin(object);
946 /* protect the object from re-use/caching while it */
947 /* is unlocked */
948 vm_object_unlock(object);
949 vm_pageout_cluster(p); /* flush page */
950 vm_object_lock(object);
951 vm_object_paging_wait(object, THREAD_UNINT);
952 XPR(XPR_VM_OBJECT,
953 "vm_object_terminate restart, object 0x%X ref %d\n",
954 (integer_t)object, object->ref_count, 0, 0, 0);
955 } else {
956 free_page:
957 VM_PAGE_FREE(p);
958 }
959 }
960 vm_object_unlock(object);
961 vm_object_cache_lock();
962 vm_object_lock(object);
963 }
964
965 /*
966 * Make sure the object isn't already being terminated
967 */
968 if(object->terminating) {
969 object->ref_count -= 1;
970 assert(object->ref_count > 0);
971 vm_object_cache_unlock();
972 vm_object_unlock(object);
973 return KERN_FAILURE;
974 }
975
976 /*
977 * Did somebody get a reference to the object while we were
978 * cleaning it?
979 */
980 if(object->ref_count != 1) {
981 object->ref_count -= 1;
982 assert(object->ref_count > 0);
983 vm_object_res_deallocate(object);
984 vm_object_cache_unlock();
985 vm_object_unlock(object);
986 return KERN_FAILURE;
987 }
988
989 /*
990 * Make sure no one can look us up now.
991 */
992
993 object->terminating = TRUE;
994 object->alive = FALSE;
995 vm_object_remove(object);
996
997 /*
998 * Detach the object from its shadow if we are the shadow's
999 * copy.
1000 */
1001 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1002 !(object->pageout)) {
1003 vm_object_lock(shadow_object);
1004 assert((shadow_object->copy == object) ||
1005 (shadow_object->copy == VM_OBJECT_NULL));
1006 shadow_object->copy = VM_OBJECT_NULL;
1007 vm_object_unlock(shadow_object);
1008 }
1009
1010 /*
1011 * The pageout daemon might be playing with our pages.
1012 * Now that the object is dead, it won't touch any more
1013 * pages, but some pages might already be on their way out.
1014 * Hence, we wait until the active paging activities have ceased
1015 * before we break the association with the pager itself.
1016 */
1017 while (object->paging_in_progress != 0) {
1018 vm_object_cache_unlock();
1019 vm_object_wait(object,
1020 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1021 THREAD_UNINT);
1022 vm_object_cache_lock();
1023 vm_object_lock(object);
1024 }
1025
1026 pager = object->pager;
1027 object->pager = MEMORY_OBJECT_NULL;
1028
1029 if (pager != MEMORY_OBJECT_NULL)
1030 memory_object_control_disable(object->pager_request);
1031 vm_object_cache_unlock();
1032
1033 object->ref_count--;
1034 #if TASK_SWAPPER
1035 assert(object->res_count == 0);
1036 #endif /* TASK_SWAPPER */
1037
1038 assert (object->ref_count == 0);
1039
1040 /*
1041 * Clean or free the pages, as appropriate.
1042 * It is possible for us to find busy/absent pages,
1043 * if some faults on this object were aborted.
1044 */
1045 if (object->pageout) {
1046 assert(shadow_object != VM_OBJECT_NULL);
1047 assert(shadow_object == object->shadow);
1048
1049 vm_pageout_object_terminate(object);
1050
1051 } else if ((object->temporary && !object->can_persist) ||
1052 (pager == MEMORY_OBJECT_NULL)) {
1053 while (!queue_empty(&object->memq)) {
1054 p = (vm_page_t) queue_first(&object->memq);
1055
1056 VM_PAGE_CHECK(p);
1057 VM_PAGE_FREE(p);
1058 }
1059 } else if (!queue_empty(&object->memq)) {
1060 panic("vm_object_terminate: queue just emptied isn't");
1061 }
1062
1063 assert(object->paging_in_progress == 0);
1064 assert(object->ref_count == 0);
1065
1066 /*
1067 * If the pager has not already been released by
1068 * vm_object_destroy, we need to terminate it and
1069 * release our reference to it here.
1070 */
1071 if (pager != MEMORY_OBJECT_NULL) {
1072 vm_object_unlock(object);
1073 vm_object_release_pager(pager);
1074 vm_object_lock(object);
1075 }
1076
1077 /* kick off anyone waiting on terminating */
1078 object->terminating = FALSE;
1079 vm_object_paging_begin(object);
1080 vm_object_paging_end(object);
1081 vm_object_unlock(object);
1082
1083 #if MACH_PAGEMAP
1084 vm_external_destroy(object->existence_map, object->size);
1085 #endif /* MACH_PAGEMAP */
1086
1087 /*
1088 * Free the space for the object.
1089 */
1090 zfree(vm_object_zone, (vm_offset_t) object);
1091 return KERN_SUCCESS;
1092 }
1093
1094 /*
1095 * Routine: vm_object_pager_wakeup
1096 * Purpose: Wake up anyone waiting for termination of a pager.
1097 */
1098
1099 static void
1100 vm_object_pager_wakeup(
1101 memory_object_t pager)
1102 {
1103 vm_object_hash_entry_t entry;
1104 boolean_t waiting = FALSE;
1105
1106 /*
1107 * If anyone was waiting for the memory_object_terminate
1108 * to be queued, wake them up now.
1109 */
1110 vm_object_cache_lock();
1111 entry = vm_object_hash_lookup(pager, TRUE);
1112 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
1113 waiting = entry->waiting;
1114 vm_object_cache_unlock();
1115 if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
1116 if (waiting)
1117 thread_wakeup((event_t) pager);
1118 vm_object_hash_entry_free(entry);
1119 }
1120 }
1121
1122 /*
1123 * Routine: vm_object_release_pager
1124 * Purpose: Terminate the pager and, upon completion,
1125 * release our last reference to it.
1126 * This is just like memory_object_terminate, except
1127 * that we wake up anyone blocked in vm_object_enter
1128 * waiting for the termination message to be queued
1129 * before calling memory_object_init.
1130 */
1131 static void
1132 vm_object_release_pager(
1133 memory_object_t pager)
1134 {
1135
1136 /*
1137 * Terminate the pager.
1138 */
1139
1140 (void) memory_object_terminate(pager);
1141
1142 /*
1143 * Wakeup anyone waiting for this terminate
1144 */
1145 vm_object_pager_wakeup(pager);
1146
1147 /*
1148 * Release reference to pager.
1149 */
1150 memory_object_deallocate(pager);
1151 }
1152
1153 /*
1154 * Routine: vm_object_abort_activity [internal use only]
1155 * Purpose:
1156 * Abort paging requests pending on this object.
1157 * In/out conditions:
1158 * The object is locked on entry and exit.
1159 */
1160 static void
1161 vm_object_abort_activity(
1162 vm_object_t object)
1163 {
1164 register
1165 vm_page_t p;
1166 vm_page_t next;
1167
1168 XPR(XPR_VM_OBJECT, "vm_object_abort_activity, object 0x%X\n",
1169 (integer_t)object, 0, 0, 0, 0);
1170
1171 /*
1172 * Abort all activity that would be waiting
1173 * for a result on this memory object.
1174 *
1175 * We could also choose to destroy all pages
1176 * that we have in memory for this object, but
1177 * we don't.
1178 */
1179
1180 p = (vm_page_t) queue_first(&object->memq);
1181 while (!queue_end(&object->memq, (queue_entry_t) p)) {
1182 next = (vm_page_t) queue_next(&p->listq);
1183
1184 /*
1185 * If it's being paged in, destroy it.
1186 * If an unlock has been requested, start it again.
1187 */
1188
1189 if (p->busy && p->absent) {
1190 VM_PAGE_FREE(p);
1191 }
1192 else {
1193 if (p->unlock_request != VM_PROT_NONE)
1194 p->unlock_request = VM_PROT_NONE;
1195 PAGE_WAKEUP(p);
1196 }
1197
1198 p = next;
1199 }
1200
1201 /*
1202 * Wake up threads waiting for the memory object to
1203 * become ready.
1204 */
1205
1206 object->pager_ready = TRUE;
1207 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
1208 }
1209
1210 /*
1211 * Routine: vm_object_destroy
1212 * Purpose:
1213 * Shut down a VM object, despite the
1214 * presence of address map (or other) references
1215 * to the vm_object.
1216 */
1217 kern_return_t
1218 vm_object_destroy(
1219 vm_object_t object,
1220 kern_return_t reason)
1221 {
1222 memory_object_t old_pager;
1223
1224 if (object == VM_OBJECT_NULL)
1225 return(KERN_SUCCESS);
1226
1227 /*
1228 * Remove the pager association immediately.
1229 *
1230 * This will prevent the memory manager from further
1231 * meddling. [If it wanted to flush data or make
1232 * other changes, it should have done so before performing
1233 * the destroy call.]
1234 */
1235
1236 vm_object_cache_lock();
1237 vm_object_lock(object);
1238 object->can_persist = FALSE;
1239 object->named = FALSE;
1240 object->alive = FALSE;
1241
1242 /*
1243 * Rip out the pager from the vm_object now...
1244 */
1245
1246 vm_object_remove(object);
1247 old_pager = object->pager;
1248 object->pager = MEMORY_OBJECT_NULL;
1249 if (old_pager != MEMORY_OBJECT_NULL)
1250 memory_object_control_disable(object->pager_request);
1251 vm_object_cache_unlock();
1252
1253 /*
1254 * Wait for the existing paging activity (that got
1255 * through before we nulled out the pager) to subside.
1256 */
1257
1258 vm_object_paging_wait(object, THREAD_UNINT);
1259 vm_object_unlock(object);
1260
1261 /*
1262 * Terminate the object now.
1263 */
1264 if (old_pager != MEMORY_OBJECT_NULL) {
1265 vm_object_release_pager(old_pager);
1266
1267 /*
1268 * JMM - Release the caller's reference. This assumes the
1269 * caller had a reference to release, which is a big (but
1270 * currently valid) assumption if this is driven from the
1271 * vnode pager (it is holding a named reference when making
1272 * this call)..
1273 */
1274 vm_object_deallocate(object);
1275
1276 }
1277 return(KERN_SUCCESS);
1278 }
1279
1280 /*
1281 * vm_object_deactivate_all_pages
1282 *
1283 * Deactivate all pages in the specified object. (Keep its pages
1284 * in memory even though it is no longer referenced.)
1285 *
1286 * The object must be locked.
1287 */
1288 static void
1289 vm_object_deactivate_all_pages(
1290 register vm_object_t object)
1291 {
1292 register vm_page_t p;
1293
1294 queue_iterate(&object->memq, p, vm_page_t, listq) {
1295 vm_page_lock_queues();
1296 if (!p->busy)
1297 vm_page_deactivate(p);
1298 vm_page_unlock_queues();
1299 }
1300 }
1301
1302 __private_extern__ void
1303 vm_object_deactivate_pages(
1304 vm_object_t object,
1305 vm_object_offset_t offset,
1306 vm_object_size_t size,
1307 boolean_t kill_page)
1308 {
1309 vm_object_t orig_object;
1310 int pages_moved = 0;
1311 int pages_found = 0;
1312
1313 /*
1314 * Entered with the object lock held; acquire a paging reference to
1315 * prevent the memory_object and control ports from
1316 * being destroyed.
1317 */
1318 orig_object = object;
1319
1320 for (;;) {
1321 register vm_page_t m;
1322 vm_object_offset_t toffset;
1323 vm_object_size_t tsize;
1324
1325 vm_object_paging_begin(object);
1326 vm_page_lock_queues();
1327
1328 for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {
1329
1330 if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {
1331
1332 pages_found++;
1333
1334 if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {
1335
1336 m->reference = FALSE;
1337 pmap_clear_reference(m->phys_page);
1338
1339 if ((kill_page) && (object->internal)) {
1340 m->precious = FALSE;
1341 m->dirty = FALSE;
1342 pmap_clear_modify(m->phys_page);
1343 vm_external_state_clr(object->existence_map, offset);
1344 }
1345 VM_PAGE_QUEUES_REMOVE(m);
1346
1347 if(m->zero_fill) {
1348 queue_enter_first(
1349 &vm_page_queue_zf,
1350 m, vm_page_t, pageq);
1351 } else {
1352 queue_enter_first(
1353 &vm_page_queue_inactive,
1354 m, vm_page_t, pageq);
1355 }
1356
1357 m->inactive = TRUE;
1358 if (!m->fictitious)
1359 vm_page_inactive_count++;
1360
1361 pages_moved++;
1362 }
1363 }
1364 }
1365 vm_page_unlock_queues();
1366 vm_object_paging_end(object);
1367
1368 if (object->shadow) {
1369 vm_object_t tmp_object;
1370
1371 kill_page = 0;
1372
1373 offset += object->shadow_offset;
1374
1375 tmp_object = object->shadow;
1376 vm_object_lock(tmp_object);
1377
1378 if (object != orig_object)
1379 vm_object_unlock(object);
1380 object = tmp_object;
1381 } else
1382 break;
1383 }
1384 if (object != orig_object)
1385 vm_object_unlock(object);
1386 }
1387
1388 /*
1389 * Routine: vm_object_pmap_protect
1390 *
1391 * Purpose:
1392 * Reduces the permission for all physical
1393 * pages in the specified object range.
1394 *
1395 * If removing write permission only, it is
1396 * sufficient to protect only the pages in
1397 * the top-level object; only those pages may
1398 * have write permission.
1399 *
1400 * If removing all access, we must follow the
1401 * shadow chain from the top-level object to
1402 * remove access to all pages in shadowed objects.
1403 *
1404 * The object must *not* be locked. The object must
1405 * be temporary/internal.
1406 *
1407 * If pmap is not NULL, this routine assumes that
1408 * the only mappings for the pages are in that
1409 * pmap.
1410 */
1411
1412 __private_extern__ void
1413 vm_object_pmap_protect(
1414 register vm_object_t object,
1415 register vm_object_offset_t offset,
1416 vm_size_t size,
1417 pmap_t pmap,
1418 vm_offset_t pmap_start,
1419 vm_prot_t prot)
1420 {
1421 if (object == VM_OBJECT_NULL)
1422 return;
1423 size = round_page_64(size);
1424 offset = trunc_page_64(offset);
1425
1426 vm_object_lock(object);
1427
1428 while (TRUE) {
1429 if (object->resident_page_count > atop_32(size) / 2 &&
1430 pmap != PMAP_NULL) {
1431 vm_object_unlock(object);
1432 pmap_protect(pmap, pmap_start, pmap_start + size, prot);
1433 return;
1434 }
1435
1436 /* if we are doing large ranges with respect to resident */
1437 /* page count then we should iterate over pages; otherwise */
1438 /* inverse page look-up will be faster */
1439 if ((object->resident_page_count / 4) < atop_32(size)) {
1440 vm_page_t p;
1441 vm_object_offset_t end;
1442
1443 end = offset + size;
1444
1445 if (pmap != PMAP_NULL) {
1446 queue_iterate(&object->memq, p, vm_page_t, listq) {
1447 if (!p->fictitious &&
1448 (offset <= p->offset) && (p->offset < end)) {
1449
1450 vm_offset_t start = pmap_start +
1451 (vm_offset_t)(p->offset - offset);
1452
1453 pmap_protect(pmap, start, start + PAGE_SIZE, prot);
1454 }
1455 }
1456 } else {
1457 queue_iterate(&object->memq, p, vm_page_t, listq) {
1458 if (!p->fictitious &&
1459 (offset <= p->offset) && (p->offset < end)) {
1460
1461 pmap_page_protect(p->phys_page,
1462 prot & ~p->page_lock);
1463 }
1464 }
1465 }
1466 } else {
1467 vm_page_t p;
1468 vm_object_offset_t end;
1469 vm_object_offset_t target_off;
1470
1471 end = offset + size;
1472
1473 if (pmap != PMAP_NULL) {
1474 for(target_off = offset;
1475 target_off < end; target_off += PAGE_SIZE) {
1476 if ((p = vm_page_lookup(object, target_off)) != VM_PAGE_NULL) {
1477 vm_offset_t start = pmap_start +
1478 (vm_offset_t)(p->offset - offset);
1479 pmap_protect(pmap, start,
1480 start + PAGE_SIZE, prot);
1481 }
1482 }
1483 } else {
1484 for(target_off = offset;
1485 target_off < end; target_off += PAGE_SIZE) {
1486 if ((p = vm_page_lookup(object, target_off)) != VM_PAGE_NULL) {
1487 pmap_page_protect(p->phys_page,
1488 prot & ~p->page_lock);
1489 }
1490 }
1491 }
1492 }
1493
1494 if (prot == VM_PROT_NONE) {
1495 /*
1496 * Must follow shadow chain to remove access
1497 * to pages in shadowed objects.
1498 */
1499 register vm_object_t next_object;
1500
1501 next_object = object->shadow;
1502 if (next_object != VM_OBJECT_NULL) {
1503 offset += object->shadow_offset;
1504 vm_object_lock(next_object);
1505 vm_object_unlock(object);
1506 object = next_object;
1507 }
1508 else {
1509 /*
1510 * End of chain - we are done.
1511 */
1512 break;
1513 }
1514 }
1515 else {
1516 /*
1517 * Pages in shadowed objects may never have
1518 * write permission - we may stop here.
1519 */
1520 break;
1521 }
1522 }
1523
1524 vm_object_unlock(object);
1525 }
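/*
 * Illustrative usage sketch (not part of the original source; the
 * object, pmap, start address, and page count are hypothetical):
 * removing write permission from the first 16 pages of a temporary
 * object that is mapped only through the given pmap.
 */
#if 0
static void
pmap_protect_usage_sketch(
	vm_object_t	object,		/* temporary/internal object */
	pmap_t		pmap,		/* the only pmap mapping it */
	vm_offset_t	start)		/* where it is mapped in pmap */
{
	/* Removing write only: protecting the top-level object suffices. */
	vm_object_pmap_protect(object,
			       (vm_object_offset_t)0,
			       (vm_size_t)(16 * PAGE_SIZE),
			       pmap,
			       start,
			       VM_PROT_READ);
}
#endif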
1526
1527 /*
1528 * Routine: vm_object_copy_slowly
1529 *
1530 * Description:
1531 * Copy the specified range of the source
1532 * virtual memory object without using
1533 * protection-based optimizations (such
1534 * as copy-on-write). The pages in the
1535 * region are actually copied.
1536 *
1537 * In/out conditions:
1538 * The caller must hold a reference and a lock
1539 * for the source virtual memory object. The source
1540 * object will be returned *unlocked*.
1541 *
1542 * Results:
1543 * If the copy is completed successfully, KERN_SUCCESS is
1544 * returned. If the caller asserted the interruptible
1545 * argument, and an interruption occurred while waiting
1546 * for a user-generated event, MACH_SEND_INTERRUPTED is
1547 * returned. Other values may be returned to indicate
1548 * hard errors during the copy operation.
1549 *
1550 * A new virtual memory object is returned in a
1551 * parameter (_result_object). The contents of this
1552 * new object, starting at a zero offset, are a copy
1553 * of the source memory region. In the event of
1554 * an error, this parameter will contain the value
1555 * VM_OBJECT_NULL.
1556 */
1557 __private_extern__ kern_return_t
1558 vm_object_copy_slowly(
1559 register vm_object_t src_object,
1560 vm_object_offset_t src_offset,
1561 vm_object_size_t size,
1562 boolean_t interruptible,
1563 vm_object_t *_result_object) /* OUT */
1564 {
1565 vm_object_t new_object;
1566 vm_object_offset_t new_offset;
1567
1568 vm_object_offset_t src_lo_offset = src_offset;
1569 vm_object_offset_t src_hi_offset = src_offset + size;
1570
1571 XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
1572 src_object, src_offset, size, 0, 0);
1573
1574 if (size == 0) {
1575 vm_object_unlock(src_object);
1576 *_result_object = VM_OBJECT_NULL;
1577 return(KERN_INVALID_ARGUMENT);
1578 }
1579
1580 /*
1581 * Prevent destruction of the source object while we copy.
1582 */
1583
1584 assert(src_object->ref_count > 0);
1585 src_object->ref_count++;
1586 VM_OBJ_RES_INCR(src_object);
1587 vm_object_unlock(src_object);
1588
1589 /*
1590 * Create a new object to hold the copied pages.
1591 * A few notes:
1592 * We fill the new object starting at offset 0,
1593 * regardless of the input offset.
1594 * We don't bother to lock the new object within
1595 * this routine, since we have the only reference.
1596 */
1597
1598 new_object = vm_object_allocate(size);
1599 new_offset = 0;
1600
1601 assert(size == trunc_page_64(size)); /* Will the loop terminate? */
1602
1603 for ( ;
1604 size != 0 ;
1605 src_offset += PAGE_SIZE_64,
1606 new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
1607 ) {
1608 vm_page_t new_page;
1609 vm_fault_return_t result;
1610
1611 while ((new_page = vm_page_alloc(new_object, new_offset))
1612 == VM_PAGE_NULL) {
1613 if (!vm_page_wait(interruptible)) {
1614 vm_object_deallocate(new_object);
1615 *_result_object = VM_OBJECT_NULL;
1616 return(MACH_SEND_INTERRUPTED);
1617 }
1618 }
1619
1620 do {
1621 vm_prot_t prot = VM_PROT_READ;
1622 vm_page_t _result_page;
1623 vm_page_t top_page;
1624 register
1625 vm_page_t result_page;
1626 kern_return_t error_code;
1627
1628 vm_object_lock(src_object);
1629 vm_object_paging_begin(src_object);
1630
1631 XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
1632 result = vm_fault_page(src_object, src_offset,
1633 VM_PROT_READ, FALSE, interruptible,
1634 src_lo_offset, src_hi_offset,
1635 VM_BEHAVIOR_SEQUENTIAL,
1636 &prot, &_result_page, &top_page,
1637 (int *)0,
1638 &error_code, FALSE, FALSE, NULL, 0);
1639
1640 switch(result) {
1641 case VM_FAULT_SUCCESS:
1642 result_page = _result_page;
1643
1644 /*
1645 * We don't need to hold the object
1646 * lock -- the busy page will be enough.
1647 * [We don't care about picking up any
1648 * new modifications.]
1649 *
1650 * Copy the page to the new object.
1651 *
1652 * POLICY DECISION:
1653 * If result_page is clean,
1654 * we could steal it instead
1655 * of copying.
1656 */
1657
1658 vm_object_unlock(result_page->object);
1659 vm_page_copy(result_page, new_page);
1660
1661 /*
1662 * Let go of both pages (make them
1663 * not busy, perform wakeup, activate).
1664 */
1665
1666 new_page->busy = FALSE;
1667 new_page->dirty = TRUE;
1668 vm_object_lock(result_page->object);
1669 PAGE_WAKEUP_DONE(result_page);
1670
1671 vm_page_lock_queues();
1672 if (!result_page->active &&
1673 !result_page->inactive)
1674 vm_page_activate(result_page);
1675 vm_page_activate(new_page);
1676 vm_page_unlock_queues();
1677
1678 /*
1679 * Release paging references and
1680 * top-level placeholder page, if any.
1681 */
1682
1683 vm_fault_cleanup(result_page->object,
1684 top_page);
1685
1686 break;
1687
1688 case VM_FAULT_RETRY:
1689 break;
1690
1691 case VM_FAULT_FICTITIOUS_SHORTAGE:
1692 vm_page_more_fictitious();
1693 break;
1694
1695 case VM_FAULT_MEMORY_SHORTAGE:
1696 if (vm_page_wait(interruptible))
1697 break;
1698 /* fall thru */
1699
1700 case VM_FAULT_INTERRUPTED:
1701 vm_page_free(new_page);
1702 vm_object_deallocate(new_object);
1703 vm_object_deallocate(src_object);
1704 *_result_object = VM_OBJECT_NULL;
1705 return(MACH_SEND_INTERRUPTED);
1706
1707 case VM_FAULT_MEMORY_ERROR:
1708 /*
1709 * A policy choice:
1710 * (a) ignore pages that we can't
1711 * copy
1712 * (b) return the null object if
1713 * any page fails [chosen]
1714 */
1715
1716 vm_page_lock_queues();
1717 vm_page_free(new_page);
1718 vm_page_unlock_queues();
1719 vm_object_deallocate(new_object);
1720 vm_object_deallocate(src_object);
1721 *_result_object = VM_OBJECT_NULL;
1722 return(error_code ? error_code:
1723 KERN_MEMORY_ERROR);
1724 }
1725 } while (result != VM_FAULT_SUCCESS);
1726 }
1727
1728 /*
1729 * Lose the extra reference, and return our object.
1730 */
1731
1732 vm_object_deallocate(src_object);
1733 *_result_object = new_object;
1734 return(KERN_SUCCESS);
1735 }
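/*
 * Illustrative call pattern (a sketch only; the wrapper and variable
 * names are hypothetical).  The caller holds a reference and the lock
 * on the source object; vm_object_copy_slowly returns with the source
 * unlocked and, on success, a new unlocked object whose contents
 * start at offset 0.
 */
#if 0
static kern_return_t
copy_slowly_usage_sketch(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*new_object)	/* OUT */
{
	kern_return_t	kr;

	vm_object_lock(src_object);	/* caller already holds a reference */
	kr = vm_object_copy_slowly(src_object, src_offset, size,
				   FALSE,	/* not interruptible */
				   new_object);
	return kr;		/* *new_object is VM_OBJECT_NULL on error */
}
#endif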
1736
1737 /*
1738 * Routine: vm_object_copy_quickly
1739 *
1740 * Purpose:
1741 * Copy the specified range of the source virtual
1742 * memory object, if it can be done without waiting
1743 * for user-generated events.
1744 *
1745 * Results:
1746 * If the copy is successful, the copy is returned in
1747 * the arguments; otherwise, the arguments are not
1748 * affected.
1749 *
1750 * In/out conditions:
1751 * The object should be unlocked on entry and exit.
1752 */
1753
1754 /*ARGSUSED*/
1755 __private_extern__ boolean_t
1756 vm_object_copy_quickly(
1757 vm_object_t *_object, /* INOUT */
1758 vm_object_offset_t offset, /* IN */
1759 vm_object_size_t size, /* IN */
1760 boolean_t *_src_needs_copy, /* OUT */
1761 boolean_t *_dst_needs_copy) /* OUT */
1762 {
1763 vm_object_t object = *_object;
1764 memory_object_copy_strategy_t copy_strategy;
1765
1766 XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
1767 *_object, offset, size, 0, 0);
1768 if (object == VM_OBJECT_NULL) {
1769 *_src_needs_copy = FALSE;
1770 *_dst_needs_copy = FALSE;
1771 return(TRUE);
1772 }
1773
1774 vm_object_lock(object);
1775
1776 copy_strategy = object->copy_strategy;
1777
1778 switch (copy_strategy) {
1779 case MEMORY_OBJECT_COPY_SYMMETRIC:
1780
1781 /*
1782 * Symmetric copy strategy.
1783 * Make another reference to the object.
1784 * Leave object/offset unchanged.
1785 */
1786
1787 assert(object->ref_count > 0);
1788 object->ref_count++;
1789 vm_object_res_reference(object);
1790 object->shadowed = TRUE;
1791 vm_object_unlock(object);
1792
1793 /*
1794 * Both source and destination must make
1795 * shadows, and the source must be made
1796 * read-only if not already.
1797 */
1798
1799 *_src_needs_copy = TRUE;
1800 *_dst_needs_copy = TRUE;
1801
1802 break;
1803
1804 case MEMORY_OBJECT_COPY_DELAY:
1805 vm_object_unlock(object);
1806 return(FALSE);
1807
1808 default:
1809 vm_object_unlock(object);
1810 return(FALSE);
1811 }
1812 return(TRUE);
1813 }
1814
1815 static int copy_call_count = 0;
1816 static int copy_call_sleep_count = 0;
1817 static int copy_call_restart_count = 0;
1818
1819 /*
1820 * Routine: vm_object_copy_call [internal]
1821 *
1822 * Description:
1823 * Copy the source object (src_object), using the
1824 * user-managed copy algorithm.
1825 *
1826 * In/out conditions:
1827 * The source object must be locked on entry. It
1828 * will be *unlocked* on exit.
1829 *
1830 * Results:
1831 * If the copy is successful, KERN_SUCCESS is returned.
1832 * A new object that represents the copied virtual
1833 * memory is returned in a parameter (*_result_object).
1834 * If the return value indicates an error, this parameter
1835 * is not valid.
1836 */
1837 static kern_return_t
1838 vm_object_copy_call(
1839 vm_object_t src_object,
1840 vm_object_offset_t src_offset,
1841 vm_object_size_t size,
1842 vm_object_t *_result_object) /* OUT */
1843 {
1844 kern_return_t kr;
1845 vm_object_t copy;
1846 boolean_t check_ready = FALSE;
1847
1848 /*
1849 * If a copy is already in progress, wait and retry.
1850 *
1851 * XXX
1852 * Consider making this call interruptible, as Mike
1853 * intended it to be.
1854 *
1855 * XXXO
1856 * Need a counter or version or something to allow
1857 * us to use the copy that the currently requesting
1858 * thread is obtaining -- is it worth adding to the
1859 * vm object structure? Depends on how common this case is.
1860 */
1861 copy_call_count++;
1862 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
1863 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
1864 THREAD_UNINT);
1865 copy_call_restart_count++;
1866 }
1867
1868 /*
1869 * Indicate (for the benefit of memory_object_create_copy)
1870 * that we want a copy for src_object. (Note that we cannot
1871 * do a real assert_wait before calling memory_object_copy,
1872 * so we simply set the flag.)
1873 */
1874
1875 vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
1876 vm_object_unlock(src_object);
1877
1878 /*
1879 * Ask the memory manager to give us a memory object
1880 * which represents a copy of the src object.
1881 * The memory manager may give us a memory object
1882 * which we already have, or it may give us a
1883 * new memory object. This memory object will arrive
1884 * via memory_object_create_copy.
1885 */
1886
1887 kr = KERN_FAILURE; /* XXX need to change memory_object.defs */
1888 if (kr != KERN_SUCCESS) {
1889 return kr;
1890 }
1891
1892 /*
1893 * Wait for the copy to arrive.
1894 */
1895 vm_object_lock(src_object);
1896 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
1897 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
1898 THREAD_UNINT);
1899 copy_call_sleep_count++;
1900 }
1901 Retry:
1902 assert(src_object->copy != VM_OBJECT_NULL);
1903 copy = src_object->copy;
1904 if (!vm_object_lock_try(copy)) {
1905 vm_object_unlock(src_object);
1906 mutex_pause(); /* wait a bit */
1907 vm_object_lock(src_object);
1908 goto Retry;
1909 }
1910 if (copy->size < src_offset+size)
1911 copy->size = src_offset+size;
1912
1913 if (!copy->pager_ready)
1914 check_ready = TRUE;
1915
1916 /*
1917 * Return the copy.
1918 */
1919 *_result_object = copy;
1920 vm_object_unlock(copy);
1921 vm_object_unlock(src_object);
1922
1923 /* Wait for the copy to be ready. */
1924 if (check_ready == TRUE) {
1925 vm_object_lock(copy);
1926 while (!copy->pager_ready) {
1927 vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
1928 }
1929 vm_object_unlock(copy);
1930 }
1931
1932 return KERN_SUCCESS;
1933 }
1934
1935 static int copy_delayed_lock_collisions = 0;
1936 static int copy_delayed_max_collisions = 0;
1937 static int copy_delayed_lock_contention = 0;
1938 static int copy_delayed_protect_iterate = 0;
1939 static int copy_delayed_protect_lookup = 0;
1940 static int copy_delayed_protect_lookup_wait = 0;
1941
1942 /*
1943 * Routine: vm_object_copy_delayed [internal]
1944 *
1945 * Description:
1946 * Copy the specified virtual memory object, using
1947 * the asymmetric copy-on-write algorithm.
1948 *
1949 * In/out conditions:
1950 * The src_object must be locked on entry. It will be unlocked
1951 * on exit - so the caller must also hold a reference to it.
1952 *
1953 * This routine will not block waiting for user-generated
1954 * events. It is not interruptible.
1955 */
1956 __private_extern__ vm_object_t
1957 vm_object_copy_delayed(
1958 vm_object_t src_object,
1959 vm_object_offset_t src_offset,
1960 vm_object_size_t size)
1961 {
1962 vm_object_t new_copy = VM_OBJECT_NULL;
1963 vm_object_t old_copy;
1964 vm_page_t p;
1965 vm_object_size_t copy_size = src_offset + size;
1966
1967 int collisions = 0;
1968 /*
1969 * The user-level memory manager wants to see all of the changes
1970 * to this object, but it has promised not to make any changes on
1971 * its own.
1972 *
1973 * Perform an asymmetric copy-on-write, as follows:
1974 * Create a new object, called a "copy object" to hold
1975 * pages modified by the new mapping (i.e., the copy,
1976 * not the original mapping).
1977 * Record the original object as the backing object for
1978 * the copy object. If the original mapping does not
1979 * change a page, it may be used read-only by the copy.
1980 * Record the copy object in the original object.
1981 * When the original mapping causes a page to be modified,
1982 * it must be copied to a new page that is "pushed" to
1983 * the copy object.
1984 * Mark the new mapping (the copy object) copy-on-write.
1985 * This makes the copy object itself read-only, allowing
1986 * it to be reused if the original mapping makes no
1987 * changes, and simplifying the synchronization required
1988 * in the "push" operation described above.
1989 *
1990 * The copy-on-write is said to be asymmetric because the original
1991 * object is *not* marked copy-on-write. A copied page is pushed
1992 * to the copy object, regardless of which party attempted to modify
1993 * the page.
1994 *
1995 * Repeated asymmetric copy operations may be done. If the
1996 * original object has not been changed since the last copy, its
1997 * copy object can be reused. Otherwise, a new copy object can be
1998 * inserted between the original object and its previous copy
1999 * object. Since any copy object is read-only, this cannot affect
2000 * the contents of the previous copy object.
2001 *
2002 * Note that a copy object is higher in the object tree than the
2003 * original object; therefore, use of the copy object recorded in
2004 * the original object must be done carefully, to avoid deadlock.
2005 */
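	/*
	 * For example, after one delayed copy the objects end up linked
	 * roughly as follows ("shadow" points toward the original,
	 * "copy" points back up):
	 *
	 *	new mapping --> copy_object --shadow--> src_object
	 *	                     ^                      |
	 *	                     +---------copy---------+
	 *
	 * A later delayed copy, taken after src_object has changed,
	 * inserts a new copy object between the original and the old
	 * copy:
	 *
	 *	old_copy --shadow--> new_copy --shadow--> src_object
	 */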
2006
2007 Retry:
2008
2009 /*
2010 * Wait for paging in progress.
2011 */
2012 if (!src_object->true_share)
2013 vm_object_paging_wait(src_object, THREAD_UNINT);
2014
2015 /*
2016 * See whether we can reuse the result of a previous
2017 * copy operation.
2018 */
2019
2020 old_copy = src_object->copy;
2021 if (old_copy != VM_OBJECT_NULL) {
2022 /*
2023 * Try to get the locks (out of order)
2024 */
2025 if (!vm_object_lock_try(old_copy)) {
2026 vm_object_unlock(src_object);
2027 mutex_pause();
2028
2029 /* Heisenberg Rules */
2030 copy_delayed_lock_collisions++;
2031 if (collisions++ == 0)
2032 copy_delayed_lock_contention++;
2033
2034 if (collisions > copy_delayed_max_collisions)
2035 copy_delayed_max_collisions = collisions;
2036
2037 vm_object_lock(src_object);
2038 goto Retry;
2039 }
2040
2041 /*
2042 * Determine whether the old copy object has
2043 * been modified.
2044 */
2045
2046 if (old_copy->resident_page_count == 0 &&
2047 !old_copy->pager_created) {
2048 /*
2049 * It has not been modified.
2050 *
2051 * Return another reference to
2052 * the existing copy-object if
2053 * we can safely grow it (if
2054 * needed).
2055 */
2056
2057 if (new_copy != VM_OBJECT_NULL) {
2058 vm_object_unlock(new_copy);
2059 vm_object_deallocate(new_copy);
2060 }
2061
2062 if (old_copy->size < copy_size) {
2063 /*
2064 * We can't perform a delayed copy if any of the
2065 * pages in the extended range are wired (because
2066 * we can't safely take write permission away from
2067 * wired pages). If the pages aren't wired, then
2068 * go ahead and protect them.
2069 */
2070 copy_delayed_protect_iterate++;
2071 queue_iterate(&src_object->memq, p, vm_page_t, listq) {
2072 if (!p->fictitious &&
2073 p->offset >= old_copy->size &&
2074 p->offset < copy_size) {
2075 if (p->wire_count > 0) {
2076 vm_object_unlock(old_copy);
2077 vm_object_unlock(src_object);
2078 return VM_OBJECT_NULL;
2079 } else {
2080 pmap_page_protect(p->phys_page,
2081 (VM_PROT_ALL & ~VM_PROT_WRITE &
2082 ~p->page_lock));
2083 }
2084 }
2085 }
2086 old_copy->size = copy_size;
2087 }
2088
2089 vm_object_reference_locked(old_copy);
2090 vm_object_unlock(old_copy);
2091 vm_object_unlock(src_object);
2092 return(old_copy);
2093 }
2094
2095 /*
2096 * Adjust the size argument so that the newly-created
2097 * copy object will be large enough to back either the
2098 * old copy object or the new mapping.
2099 */
2100 if (old_copy->size > copy_size)
2101 copy_size = old_copy->size;
2102
2103 if (new_copy == VM_OBJECT_NULL) {
2104 vm_object_unlock(old_copy);
2105 vm_object_unlock(src_object);
2106 new_copy = vm_object_allocate(copy_size);
2107 vm_object_lock(src_object);
2108 vm_object_lock(new_copy);
2109 goto Retry;
2110 }
2111 new_copy->size = copy_size;
2112
2113 /*
2114 * The copy-object is always made large enough to
2115 * completely shadow the original object, since
2116 * it may have several users who want to shadow
2117 * the original object at different points.
2118 */
2119
2120 assert((old_copy->shadow == src_object) &&
2121 (old_copy->shadow_offset == (vm_object_offset_t) 0));
2122
2123 } else if (new_copy == VM_OBJECT_NULL) {
2124 vm_object_unlock(src_object);
2125 new_copy = vm_object_allocate(copy_size);
2126 vm_object_lock(src_object);
2127 vm_object_lock(new_copy);
2128 goto Retry;
2129 }
2130
2131 /*
2132 * We now have the src object locked, and the new copy object
2133 * allocated and locked (and potentially the old copy locked).
2134 * Before we go any further, make sure we can still perform
2135 * a delayed copy, as the situation may have changed.
2136 *
2137 * Specifically, we can't perform a delayed copy if any of the
2138 * pages in the range are wired (because we can't safely take
2139 * write permission away from wired pages). If the pages aren't
2140 * wired, then go ahead and protect them.
2141 */
2142 copy_delayed_protect_iterate++;
2143 queue_iterate(&src_object->memq, p, vm_page_t, listq) {
2144 if (!p->fictitious && p->offset < copy_size) {
2145 if (p->wire_count > 0) {
2146 if (old_copy)
2147 vm_object_unlock(old_copy);
2148 vm_object_unlock(src_object);
2149 vm_object_unlock(new_copy);
2150 vm_object_deallocate(new_copy);
2151 return VM_OBJECT_NULL;
2152 } else {
2153 pmap_page_protect(p->phys_page,
2154 (VM_PROT_ALL & ~VM_PROT_WRITE &
2155 ~p->page_lock));
2156 }
2157 }
2158 }
2159
2160 if (old_copy != VM_OBJECT_NULL) {
2161 /*
2162 * Make the old copy-object shadow the new one.
2163 * It will receive no more pages from the original
2164 * object.
2165 */
2166
2167 src_object->ref_count--; /* remove ref. from old_copy */
2168 assert(src_object->ref_count > 0);
2169 old_copy->shadow = new_copy;
2170 assert(new_copy->ref_count > 0);
2171 new_copy->ref_count++; /* for old_copy->shadow ref. */
2172
2173 #if TASK_SWAPPER
2174 if (old_copy->res_count) {
2175 VM_OBJ_RES_INCR(new_copy);
2176 VM_OBJ_RES_DECR(src_object);
2177 }
2178 #endif
2179
2180 vm_object_unlock(old_copy); /* done with old_copy */
2181 }
2182
2183 /*
2184 * Point the new copy at the existing object.
2185 */
2186 new_copy->shadow = src_object;
2187 new_copy->shadow_offset = 0;
2188 new_copy->shadowed = TRUE; /* caller must set needs_copy */
2189 assert(src_object->ref_count > 0);
2190 src_object->ref_count++;
2191 VM_OBJ_RES_INCR(src_object);
2192 src_object->copy = new_copy;
2193 vm_object_unlock(src_object);
2194 vm_object_unlock(new_copy);
2195
2196 XPR(XPR_VM_OBJECT,
2197 "vm_object_copy_delayed: used copy object %X for source %X\n",
2198 (integer_t)new_copy, (integer_t)src_object, 0, 0, 0);
2199
2200 return(new_copy);
2201 }
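/*
 * A minimal caller sketch for vm_object_copy_delayed (hypothetical
 * helper, not part of the original interface).  The caller must hold
 * a reference to src_object and have it locked, per the in/out
 * conditions above; the object is unlocked on return.  A result of
 * VM_OBJECT_NULL means the delayed copy was refused because wired
 * pages were found in the range, and the caller must fall back to
 * another copy strategy (as vm_object_copy_strategically does).
 */
#if 0	/* illustrative sketch only */
static vm_object_t
copy_delayed_example(
	vm_object_t		src_object,	/* locked, referenced */
	vm_object_offset_t	offset,
	vm_object_size_t	size)
{
	vm_object_t	copy;

	copy = vm_object_copy_delayed(src_object, offset, size);
	if (copy == VM_OBJECT_NULL) {
		/* wired pages in the range; use a slower strategy */
	}
	return copy;
}
#endif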
2202
2203 /*
2204 * Routine: vm_object_copy_strategically
2205 *
2206 * Purpose:
2207 * Perform a copy according to the source object's
2208 * declared strategy. This operation may block,
2209 * and may be interrupted.
2210 */
2211 __private_extern__ kern_return_t
2212 vm_object_copy_strategically(
2213 register vm_object_t src_object,
2214 vm_object_offset_t src_offset,
2215 vm_object_size_t size,
2216 vm_object_t *dst_object, /* OUT */
2217 vm_object_offset_t *dst_offset, /* OUT */
2218 boolean_t *dst_needs_copy) /* OUT */
2219 {
2220 boolean_t result;
2221 boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */
2222 memory_object_copy_strategy_t copy_strategy;
2223
2224 assert(src_object != VM_OBJECT_NULL);
2225
2226 vm_object_lock(src_object);
2227
2228 /*
2229 * The copy strategy is only valid if the memory manager
2230 * is "ready". Internal objects are always ready.
2231 */
2232
2233 while (!src_object->internal && !src_object->pager_ready) {
2234 wait_result_t wait_result;
2235
2236 wait_result = vm_object_sleep( src_object,
2237 VM_OBJECT_EVENT_PAGER_READY,
2238 interruptible);
2239 if (wait_result != THREAD_AWAKENED) {
2240 vm_object_unlock(src_object);
2241 *dst_object = VM_OBJECT_NULL;
2242 *dst_offset = 0;
2243 *dst_needs_copy = FALSE;
2244 return(MACH_SEND_INTERRUPTED);
2245 }
2246 }
2247
2248 copy_strategy = src_object->copy_strategy;
2249
2250 /*
2251 * Use the appropriate copy strategy.
2252 */
2253
2254 switch (copy_strategy) {
2255 case MEMORY_OBJECT_COPY_DELAY:
2256 *dst_object = vm_object_copy_delayed(src_object,
2257 src_offset, size);
2258 if (*dst_object != VM_OBJECT_NULL) {
2259 *dst_offset = src_offset;
2260 *dst_needs_copy = TRUE;
2261 result = KERN_SUCCESS;
2262 break;
2263 }
2264 vm_object_lock(src_object);
2265 /* fall thru when delayed copy not allowed */
2266
2267 case MEMORY_OBJECT_COPY_NONE:
2268 result = vm_object_copy_slowly(src_object, src_offset, size,
2269 interruptible, dst_object);
2270 if (result == KERN_SUCCESS) {
2271 *dst_offset = 0;
2272 *dst_needs_copy = FALSE;
2273 }
2274 break;
2275
2276 case MEMORY_OBJECT_COPY_CALL:
2277 result = vm_object_copy_call(src_object, src_offset, size,
2278 dst_object);
2279 if (result == KERN_SUCCESS) {
2280 *dst_offset = src_offset;
2281 *dst_needs_copy = TRUE;
2282 }
2283 break;
2284
2285 case MEMORY_OBJECT_COPY_SYMMETRIC:
2286 XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n",(natural_t)src_object, src_offset, size, 0, 0);
2287 vm_object_unlock(src_object);
2288 result = KERN_MEMORY_RESTART_COPY;
2289 break;
2290
2291 default:
2292 panic("copy_strategically: bad strategy");
2293 result = KERN_INVALID_ARGUMENT;
2294 }
2295 return(result);
2296 }
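/*
 * Usage sketch for vm_object_copy_strategically (hypothetical caller).
 * The OUT parameters tell the caller which object to map, at what
 * offset, and whether that new mapping must itself be marked
 * copy-on-write; KERN_MEMORY_RESTART_COPY means the source uses the
 * symmetric strategy and the caller must perform that copy itself.
 */
#if 0	/* illustrative sketch only */
static kern_return_t
copy_strategically_example(
	vm_object_t		src_object,
	vm_object_offset_t	offset,
	vm_object_size_t	size)
{
	vm_object_t		new_object;
	vm_object_offset_t	new_offset;
	boolean_t		needs_copy;
	kern_return_t		kr;

	kr = vm_object_copy_strategically(src_object, offset, size,
					  &new_object, &new_offset,
					  &needs_copy);
	if (kr == KERN_SUCCESS && needs_copy) {
		/* mark the resulting mapping copy-on-write */
	}
	return kr;
}
#endif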
2297
2298 /*
2299 * vm_object_shadow:
2300 *
2301 * Create a new object which is backed by the
2302 * specified existing object range. The source
2303 * object reference is deallocated.
2304 *
2305 * The new object and offset into that object
2306 * are returned in the source parameters.
2307 */
2308 boolean_t vm_object_shadow_check = FALSE;
2309
2310 __private_extern__ boolean_t
2311 vm_object_shadow(
2312 vm_object_t *object, /* IN/OUT */
2313 vm_object_offset_t *offset, /* IN/OUT */
2314 vm_object_size_t length)
2315 {
2316 register vm_object_t source;
2317 register vm_object_t result;
2318
2319 source = *object;
2320 assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
2321
2322 /*
2323 * Determine if we really need a shadow.
2324 */
2325
2326 if (vm_object_shadow_check && source->ref_count == 1 &&
2327 (source->shadow == VM_OBJECT_NULL ||
2328 source->shadow->copy == VM_OBJECT_NULL))
2329 {
2330 source->shadowed = FALSE;
2331 return FALSE;
2332 }
2333
2334 /*
2335 * Allocate a new object with the given length
2336 */
2337
2338 if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
2339 panic("vm_object_shadow: no object for shadowing");
2340
2341 /*
2342 * The new object shadows the source object, adding
2343 * a reference to it. Our caller changes his reference
2344 * to point to the new object, removing a reference to
2345 * the source object. Net result: no change of reference
2346 * count.
2347 */
2348 result->shadow = source;
2349
2350 /*
2351 * Store the offset into the source object,
2352 * and fix up the offset into the new object.
2353 */
2354
2355 result->shadow_offset = *offset;
2356
2357 /*
2358 * Return the new object and offset
2359 */
2360
2361 *offset = 0;
2362 *object = result;
2363 return TRUE;
2364 }
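/*
 * Sketch of the calling pattern for vm_object_shadow (hypothetical
 * wrapper).  The object and offset are passed by reference and are
 * updated in place when a shadow is created; the caller's reference
 * to the old object becomes the new object's shadow reference, so no
 * reference counts need adjusting either way.
 */
#if 0	/* illustrative sketch only */
static void
shadow_example(
	vm_object_t		*entry_object,	/* IN/OUT */
	vm_object_offset_t	*entry_offset,	/* IN/OUT */
	vm_object_size_t	length)
{
	if (vm_object_shadow(entry_object, entry_offset, length)) {
		/* a shadow was created; *entry_object and *entry_offset
		 * now refer to the new object */
	}
	/* if FALSE, no shadow was needed and nothing changed */
}
#endif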
2365
2366 /*
2367 * The relationship between vm_object structures and
2368 * the memory_object requires careful synchronization.
2369 *
2370 * All associations are created by memory_object_create_named
2371 * for external pagers and vm_object_pager_create for internal
2372 * objects as follows:
2373 *
2374 * pager: the memory_object itself, supplied by
2375 * the user requesting a mapping (or the kernel,
2376 * when initializing internal objects); the
2377 * kernel simulates holding send rights by keeping
2378 * a port reference;
2379 *
2380 * pager_request:
2381 * the memory object control port,
2382 * created by the kernel; the kernel holds
2383 * receive (and ownership) rights to this
2384 * port, but no other references.
2385 *
2386 * When initialization is complete, the "initialized" field
2387 * is asserted. Other mappings using a particular memory object,
2388 * and any references to the vm_object gained through the
2389 * port association must wait for this initialization to occur.
2390 *
2391 * In order to allow the memory manager to set attributes before
2392 * requests (notably virtual copy operations, but also data or
2393 * unlock requests) are made, a "ready" attribute is made available.
2394 * Only the memory manager may affect the value of this attribute.
2395 * Its value does not affect critical kernel functions, such as
2396 * internal object initialization or destruction. [Furthermore,
2397 * memory objects created by the kernel are assumed to be ready
2398 * immediately; the default memory manager need not explicitly
2399 * set the "ready" attribute.]
2400 *
2401 * [Both the "initialized" and "ready" attribute wait conditions
2402 * use the "pager" field as the wait event.]
2403 *
2404 * The port associations can be broken down by any of the
2405 * following routines:
2406 * vm_object_terminate:
2407 * No references to the vm_object remain, and
2408 * the object cannot (or will not) be cached.
2409 * This is the normal case, and is done even
2410 * though one of the other cases has already been
2411 * done.
2412 * memory_object_destroy:
2413 * The memory manager has requested that the
2414 * kernel relinquish references to the memory
2415 * object. [The memory manager may not want to
2416 * destroy the memory object, but may wish to
2417 * refuse or tear down existing memory mappings.]
2418 *
2419 * Each routine that breaks an association must break all of
2420 * them at once. At some later time, that routine must clear
2421 * the pager field and release the memory object references.
2422 * [Furthermore, each routine must cope with the simultaneous
2423 * or previous operations of the others.]
2424 *
2425 * In addition to the lock on the object, the vm_object_cache_lock
2426 * governs the associations. References gained through the
2427 * association require use of the cache lock.
2428 *
2429 * Because the pager field may be cleared spontaneously, it
2430 * cannot be used to determine whether a memory object has
2431 * ever been associated with a particular vm_object. [This
2432 * knowledge is important to the shadow object mechanism.]
2433 * For this reason, an additional "created" attribute is
2434 * provided.
2435 *
2436 * During various paging operations, the pager reference found in the
2437 * vm_object must be valid. To prevent this from being released,
2438 * (other than being removed, i.e., made null), routines may use
2439 * the vm_object_paging_begin/end routines [actually, macros].
2440 * The implementation uses the "paging_in_progress" and "wanted" fields.
2441 * [Operations that alter the validity of the pager values include the
2442 * termination routines and vm_object_collapse.]
2443 */
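/*
 * Sketch of the paging-reference bracket described above (hypothetical
 * use; the object must be locked around the begin/end macros).  While
 * the bracket is held, the pager association cannot be torn down out
 * from under the caller.
 */
#if 0	/* illustrative sketch only */
	vm_object_lock(object);
	vm_object_paging_begin(object);		/* pin the pager association */
	vm_object_unlock(object);

	/* ... issue requests against object->pager ... */

	vm_object_lock(object);
	vm_object_paging_end(object);		/* may wake up waiters */
	vm_object_unlock(object);
#endif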
2444
2445 #if 0
2446 /*
2447 * Routine: vm_object_pager_dead
2448 *
2449 * Purpose:
2450 * A port is being destroyed, and the IPC kobject code
2451 * can't tell if it represents a pager port or not.
2452 * So this function is called each time it sees a port
2453 * die.
2454 * THIS IS HORRIBLY INEFFICIENT. We should only call
2455 * this routine if we had requested a notification on
2456 * the port.
2457 */
2458
2459 __private_extern__ void
2460 vm_object_pager_dead(
2461 ipc_port_t pager)
2462 {
2463 vm_object_t object;
2464 vm_object_hash_entry_t entry;
2465
2466 /*
2467 * Perform essentially the same operations as in vm_object_lookup,
2468 * except that this time we look up based on the memory_object
2469 * port, not the control port.
2470 */
2471 vm_object_cache_lock();
2472 entry = vm_object_hash_lookup(pager, FALSE);
2473 if (entry == VM_OBJECT_HASH_ENTRY_NULL ||
2474 entry->object == VM_OBJECT_NULL) {
2475 vm_object_cache_unlock();
2476 return;
2477 }
2478
2479 object = entry->object;
2480 entry->object = VM_OBJECT_NULL;
2481
2482 vm_object_lock(object);
2483 if (object->ref_count == 0) {
2484 XPR(XPR_VM_OBJECT_CACHE,
2485 "vm_object_destroy: removing %x from cache, head (%x, %x)\n",
2486 (integer_t)object,
2487 (integer_t)vm_object_cached_list.next,
2488 (integer_t)vm_object_cached_list.prev, 0,0);
2489
2490 queue_remove(&vm_object_cached_list, object,
2491 vm_object_t, cached_list);
2492 vm_object_cached_count--;
2493 }
2494 object->ref_count++;
2495 vm_object_res_reference(object);
2496
2497 object->can_persist = FALSE;
2498
2499 assert(object->pager == pager);
2500
2501 /*
2502 * Remove the pager association.
2503 *
2504 * Note that the memory_object itself is dead, so
2505 * we don't bother with it.
2506 */
2507
2508 object->pager = MEMORY_OBJECT_NULL;
2509
2510 vm_object_unlock(object);
2511 vm_object_cache_unlock();
2512
2513 vm_object_pager_wakeup(pager);
2514
2515 /*
2516 * Release the pager reference. Note that there's no
2517 * point in trying the memory_object_terminate call
2518 * because the memory_object itself is dead. Also
2519 * release the memory_object_control reference, since
2520 * the pager didn't do that either.
2521 */
2522
2523 memory_object_deallocate(pager);
2524 memory_object_control_deallocate(object->pager_request);
2525
2526
2527 /*
2528 * Restart pending page requests
2529 */
2530 vm_object_lock(object);
2531 vm_object_abort_activity(object);
2532 vm_object_unlock(object);
2533
2534 /*
2535 * Lose the object reference.
2536 */
2537
2538 vm_object_deallocate(object);
2539 }
2540 #endif
2541
2542 /*
2543 * Routine: vm_object_enter
2544 * Purpose:
2545 * Find a VM object corresponding to the given
2546 * pager; if no such object exists, create one,
2547 * and initialize the pager.
2548 */
2549 vm_object_t
2550 vm_object_enter(
2551 memory_object_t pager,
2552 vm_object_size_t size,
2553 boolean_t internal,
2554 boolean_t init,
2555 boolean_t named)
2556 {
2557 register vm_object_t object;
2558 vm_object_t new_object;
2559 boolean_t must_init;
2560 vm_object_hash_entry_t entry, new_entry;
2561
2562 if (pager == MEMORY_OBJECT_NULL)
2563 return(vm_object_allocate(size));
2564
2565 new_object = VM_OBJECT_NULL;
2566 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
2567 must_init = init;
2568
2569 /*
2570 * Look for an object associated with this port.
2571 */
2572
2573 restart:
2574 vm_object_cache_lock();
2575 for (;;) {
2576 entry = vm_object_hash_lookup(pager, FALSE);
2577
2578 /*
2579 * If a previous object is being terminated,
2580 * we must wait for the termination message
2581 * to be queued.
2582 *
2583 * We set the entry's "waiting" flag to let the
2584 * terminator know that someone is waiting.
2585 * Among the possibilities is that the port
2586 * could die while we're waiting. Must restart
2587 * instead of continuing the loop.
2588 */
2589
2590 if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
2591 if (entry->object != VM_OBJECT_NULL)
2592 break;
2593
2594 entry->waiting = TRUE;
2595 assert_wait((event_t) pager, THREAD_UNINT);
2596 vm_object_cache_unlock();
2597 thread_block((void (*)(void))0);
2598 goto restart;
2599 }
2600
2601 /*
2602 * We must unlock to create a new object;
2603 * if we do so, we must try the lookup again.
2604 */
2605
2606 if (new_object == VM_OBJECT_NULL) {
2607 vm_object_cache_unlock();
2608 assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
2609 new_entry = vm_object_hash_entry_alloc(pager);
2610 new_object = vm_object_allocate(size);
2611 vm_object_cache_lock();
2612 } else {
2613 /*
2614 * Lookup failed twice, and we have something
2615 * to insert; set the object.
2616 */
2617
2618 if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
2619 vm_object_hash_insert(new_entry);
2620 entry = new_entry;
2621 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
2622 }
2623
2624 entry->object = new_object;
2625 new_object = VM_OBJECT_NULL;
2626 must_init = TRUE;
2627 }
2628 }
2629
2630 object = entry->object;
2631 assert(object != VM_OBJECT_NULL);
2632
2633 if (!must_init) {
2634 vm_object_lock(object);
2635 assert(object->pager_created);
2636 assert(!internal || object->internal);
2637 if (named) {
2638 assert(!object->named);
2639 object->named = TRUE;
2640 }
2641 if (object->ref_count == 0) {
2642 XPR(XPR_VM_OBJECT_CACHE,
2643 "vm_object_enter: removing %x from cache, head (%x, %x)\n",
2644 (integer_t)object,
2645 (integer_t)vm_object_cached_list.next,
2646 (integer_t)vm_object_cached_list.prev, 0,0);
2647 queue_remove(&vm_object_cached_list, object,
2648 vm_object_t, cached_list);
2649 vm_object_cached_count--;
2650 }
2651 object->ref_count++;
2652 vm_object_res_reference(object);
2653 vm_object_unlock(object);
2654
2655 VM_STAT(hits++);
2656 }
2657 assert(object->ref_count > 0);
2658
2659 VM_STAT(lookups++);
2660
2661 vm_object_cache_unlock();
2662
2663 XPR(XPR_VM_OBJECT,
2664 "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
2665 (integer_t)pager, (integer_t)object, must_init, 0, 0);
2666
2667 /*
2668 * If we raced to create a vm_object but lost, let's
2669 * throw away ours.
2670 */
2671
2672 if (new_object != VM_OBJECT_NULL)
2673 vm_object_deallocate(new_object);
2674
2675 if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
2676 vm_object_hash_entry_free(new_entry);
2677
2678 if (must_init) {
2679 pager_request_t pager_request;
2680
2681 /*
2682 * Allocate request port.
2683 */
2684
2685 pager_request = memory_object_control_allocate(object);
2686 assert (pager_request != PAGER_REQUEST_NULL);
2687
2688 vm_object_lock(object);
2689
2690 /*
2691 * Copy the reference we were given.
2692 */
2693
2694 memory_object_reference(pager);
2695 object->pager_created = TRUE;
2696 object->pager = pager;
2697 object->internal = internal;
2698 object->pager_trusted = internal;
2699 if (!internal) {
2700 /* copy strategy invalid until set by memory manager */
2701 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
2702 }
2703 object->pager_request = pager_request;
2704 object->pager_ready = FALSE;
2705
2706 vm_object_unlock(object);
2707
2708 /*
2709 * Let the pager know we're using it.
2710 */
2711
2712 (void) memory_object_init(pager,
2713 object->pager_request,
2714 PAGE_SIZE);
2715
2716 vm_object_lock(object);
2717 if (named)
2718 object->named = TRUE;
2719 if (internal) {
2720 object->pager_ready = TRUE;
2721 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
2722 }
2723
2724 object->pager_initialized = TRUE;
2725 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
2726 } else {
2727 vm_object_lock(object);
2728 }
2729
2730 /*
2731 * [At this point, the object must be locked]
2732 */
2733
2734 /*
2735 * Wait for the work above to be done by the first
2736 * thread to map this object.
2737 */
2738
2739 while (!object->pager_initialized) {
2740 vm_object_sleep(object,
2741 VM_OBJECT_EVENT_INITIALIZED,
2742 THREAD_UNINT);
2743 }
2744 vm_object_unlock(object);
2745
2746 XPR(XPR_VM_OBJECT,
2747 "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
2748 (integer_t)object, (integer_t)object->pager, internal, 0,0);
2749 return(object);
2750 }
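/*
 * Sketch of a mapping path using vm_object_enter (hypothetical
 * arguments; the pager send right comes from the user's mapping
 * request).  The routine returns either the existing vm_object
 * already associated with the pager or a freshly created one, and in
 * both cases the caller receives its own reference; it does not
 * return until pager initialization has completed.
 */
#if 0	/* illustrative sketch only */
	vm_object_t	object;

	object = vm_object_enter(pager,		/* memory_object_t */
				 size,		/* vm_object_size_t */
				 FALSE,		/* internal */
				 FALSE,		/* init */
				 TRUE);		/* named */
#endif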
2751
2752 /*
2753 * Routine: vm_object_pager_create
2754 * Purpose:
2755 * Create a memory object for an internal object.
2756 * In/out conditions:
2757 * The object is locked on entry and exit;
2758 * it may be unlocked within this call.
2759 * Limitations:
2760 * Only one thread may be performing a
2761 * vm_object_pager_create on an object at
2762 * a time. Presumably, only the pageout
2763 * daemon will be using this routine.
2764 */
2765
2766 void
2767 vm_object_pager_create(
2768 register vm_object_t object)
2769 {
2770 memory_object_t pager;
2771 vm_object_hash_entry_t entry;
2772 #if MACH_PAGEMAP
2773 vm_object_size_t size;
2774 vm_external_map_t map;
2775 #endif /* MACH_PAGEMAP */
2776
2777 XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
2778 (integer_t)object, 0,0,0,0);
2779
2780 if (memory_manager_default_check() != KERN_SUCCESS)
2781 return;
2782
2783 /*
2784 * Prevent collapse or termination by holding a paging reference
2785 */
2786
2787 vm_object_paging_begin(object);
2788 if (object->pager_created) {
2789 /*
2790 * Someone else got to it first...
2791 * wait for them to finish initializing the ports
2792 */
2793 while (!object->pager_initialized) {
2794 vm_object_sleep(object,
2795 VM_OBJECT_EVENT_INITIALIZED,
2796 THREAD_UNINT);
2797 }
2798 vm_object_paging_end(object);
2799 return;
2800 }
2801
2802 /*
2803 * Indicate that a memory object has been assigned
2804 * before dropping the lock, to prevent a race.
2805 */
2806
2807 object->pager_created = TRUE;
2808 object->paging_offset = 0;
2809
2810 #if MACH_PAGEMAP
2811 size = object->size;
2812 #endif /* MACH_PAGEMAP */
2813 vm_object_unlock(object);
2814
2815 #if MACH_PAGEMAP
2816 map = vm_external_create(size);
2817 vm_object_lock(object);
2818 assert(object->size == size);
2819 object->existence_map = map;
2820 vm_object_unlock(object);
2821 #endif /* MACH_PAGEMAP */
2822
2823 /*
2824 * Create the [internal] pager, and associate it with this object.
2825 *
2826 * We make the association here so that vm_object_enter()
2827 * can look up the object to complete initializing it. No
2828 * user will ever map this object.
2829 */
2830 {
2831 memory_object_default_t dmm;
2832 vm_size_t cluster_size;
2833
2834 /* acquire a reference for the default memory manager */
2835 dmm = memory_manager_default_reference(&cluster_size);
2836 assert(cluster_size >= PAGE_SIZE);
2837
2838 object->cluster_size = cluster_size; /* XXX ??? */
2839 assert(object->temporary);
2840
2841 /* create our new memory object */
2842 (void) memory_object_create(dmm, object->size, &pager);
2843
2844 memory_object_default_deallocate(dmm);
2845 }
2846
2847 entry = vm_object_hash_entry_alloc(pager);
2848
2849 vm_object_cache_lock();
2850 vm_object_hash_insert(entry);
2851
2852 entry->object = object;
2853 vm_object_cache_unlock();
2854
2855 /*
2856 * A reference was returned by
2857 * memory_object_create(), and it is
2858 * copied by vm_object_enter().
2859 */
2860
2861 if (vm_object_enter(pager, object->size, TRUE, TRUE, FALSE) != object)
2862 panic("vm_object_pager_create: mismatch");
2863
2864 /*
2865 * Drop the reference we were passed.
2866 */
2867 memory_object_deallocate(pager);
2868
2869 vm_object_lock(object);
2870
2871 /*
2872 * Release the paging reference
2873 */
2874 vm_object_paging_end(object);
2875 }
2876
2877 /*
2878 * Routine: vm_object_remove
2879 * Purpose:
2880 * Eliminate the pager/object association
2881 * for this pager.
2882 * Conditions:
2883 * The object cache must be locked.
2884 */
2885 __private_extern__ void
2886 vm_object_remove(
2887 vm_object_t object)
2888 {
2889 memory_object_t pager;
2890 pager_request_t pager_request;
2891
2892 if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
2893 vm_object_hash_entry_t entry;
2894
2895 entry = vm_object_hash_lookup(pager, FALSE);
2896 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
2897 entry->object = VM_OBJECT_NULL;
2898 }
2899
2900 }
2901
2902 /*
2903 * Global variables for vm_object_collapse():
2904 *
2905 * Counts for normal collapses and bypasses.
2906 * Debugging variables, to watch or disable collapse.
2907 */
2908 static long object_collapses = 0;
2909 static long object_bypasses = 0;
2910
2911 static boolean_t vm_object_collapse_allowed = TRUE;
2912 static boolean_t vm_object_bypass_allowed = TRUE;
2913
2914 static int vm_external_discarded;
2915 static int vm_external_collapsed;
2916
2917 /*
2918 * Routine: vm_object_do_collapse
2919 * Purpose:
2920 * Collapse an object with the object backing it.
2921 * Pages in the backing object are moved into the
2922 * parent, and the backing object is deallocated.
2923 * Conditions:
2924 * Both objects and the cache are locked; the page
2925 * queues are unlocked.
2926 *
2927 */
2928 static void
2929 vm_object_do_collapse(
2930 vm_object_t object,
2931 vm_object_t backing_object)
2932 {
2933 vm_page_t p, pp;
2934 vm_object_offset_t new_offset, backing_offset;
2935 vm_object_size_t size;
2936
2937 backing_offset = object->shadow_offset;
2938 size = object->size;
2939
2940 /*
2941 * Move all in-memory pages from backing_object
2942 * to the parent. Pages that have been paged out
2943 * will be overwritten by any of the parent's
2944 * pages that shadow them.
2945 */
2946
2947 while (!queue_empty(&backing_object->memq)) {
2948
2949 p = (vm_page_t) queue_first(&backing_object->memq);
2950
2951 new_offset = (p->offset - backing_offset);
2952
2953 assert(!p->busy || p->absent);
2954
2955 /*
2956 * If the parent has a page here, or if
2957 * this page falls outside the parent,
2958 * dispose of it.
2959 *
2960 * Otherwise, move it as planned.
2961 */
2962
2963 if (p->offset < backing_offset || new_offset >= size) {
2964 VM_PAGE_FREE(p);
2965 } else {
2966 pp = vm_page_lookup(object, new_offset);
2967 if (pp == VM_PAGE_NULL) {
2968
2969 /*
2970 * Parent now has no page.
2971 * Move the backing object's page up.
2972 */
2973
2974 vm_page_rename(p, object, new_offset);
2975 #if MACH_PAGEMAP
2976 } else if (pp->absent) {
2977
2978 /*
2979 * Parent has an absent page...
2980 * it's not being paged in, so
2981 * it must really be missing from
2982 * the parent.
2983 *
2984 * Throw out the absent page...
2985 * any faults looking for that
2986 * page will restart with the new
2987 * one.
2988 */
2989
2990 VM_PAGE_FREE(pp);
2991 vm_page_rename(p, object, new_offset);
2992 #endif /* MACH_PAGEMAP */
2993 } else {
2994 assert(! pp->absent);
2995
2996 /*
2997 * Parent object has a real page.
2998 * Throw away the backing object's
2999 * page.
3000 */
3001 VM_PAGE_FREE(p);
3002 }
3003 }
3004 }
3005
3006 assert(object->pager == MEMORY_OBJECT_NULL ||
3007 backing_object->pager == MEMORY_OBJECT_NULL);
3008
3009 if (backing_object->pager != MEMORY_OBJECT_NULL) {
3010 vm_object_hash_entry_t entry;
3011
3012 /*
3013 * Move the pager from backing_object to object.
3014 *
3015 * XXX We're only using part of the paging space
3016 * for keeps now... we ought to discard the
3017 * unused portion.
3018 */
3019
3020 object->pager = backing_object->pager;
3021 entry = vm_object_hash_lookup(object->pager, FALSE);
3022 assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
3023 entry->object = object;
3024 object->pager_created = backing_object->pager_created;
3025 object->pager_request = backing_object->pager_request;
3026 object->pager_ready = backing_object->pager_ready;
3027 object->pager_initialized = backing_object->pager_initialized;
3028 object->cluster_size = backing_object->cluster_size;
3029 object->paging_offset =
3030 backing_object->paging_offset + backing_offset;
3031 if (object->pager_request != PAGER_REQUEST_NULL) {
3032 memory_object_control_collapse(object->pager_request,
3033 object);
3034 }
3035 }
3036
3037 vm_object_cache_unlock();
3038
3039 object->paging_offset = backing_object->paging_offset + backing_offset;
3040
3041 #if MACH_PAGEMAP
3042 /*
3043 * If the shadow offset is 0, then use the existence map from
3044 * the backing object if there is one. If the shadow offset is
3045 * not zero, toss it.
3046 *
3047 * XXX - If the shadow offset is not 0 then a bit copy is needed
3048 * if the map is to be salvaged. For now, we just toss the
3049 * old map, giving the collapsed object no map. This means that
3050 * the pager is invoked for zero fill pages. If analysis shows
3051 * that this happens frequently and is a performance hit, then
3052 * this code should be fixed to salvage the map.
3053 */
3054 assert(object->existence_map == VM_EXTERNAL_NULL);
3055 if (backing_offset || (size != backing_object->size)) {
3056 vm_external_discarded++;
3057 vm_external_destroy(backing_object->existence_map,
3058 backing_object->size);
3059 }
3060 else {
3061 vm_external_collapsed++;
3062 object->existence_map = backing_object->existence_map;
3063 }
3064 backing_object->existence_map = VM_EXTERNAL_NULL;
3065 #endif /* MACH_PAGEMAP */
3066
3067 /*
3068 * Object now shadows whatever backing_object did.
3069 * Note that the reference to backing_object->shadow
3070 * moves from within backing_object to within object.
3071 */
3072
3073 object->shadow = backing_object->shadow;
3074 object->shadow_offset += backing_object->shadow_offset;
3075 assert((object->shadow == VM_OBJECT_NULL) ||
3076 (object->shadow->copy == VM_OBJECT_NULL));
3077
3078 /*
3079 * Discard backing_object.
3080 *
3081 * Since the backing object has no pages, no
3082 * pager left, and no object references within it,
3083 * all that is necessary is to dispose of it.
3084 */
3085
3086 assert((backing_object->ref_count == 1) &&
3087 (backing_object->resident_page_count == 0) &&
3088 (backing_object->paging_in_progress == 0));
3089
3090 backing_object->alive = FALSE;
3091 vm_object_unlock(backing_object);
3092
3093 XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
3094 (integer_t)backing_object, 0,0,0,0);
3095
3096 zfree(vm_object_zone, (vm_offset_t) backing_object);
3097
3098 object_collapses++;
3099 }
3100
3101 static void
3102 vm_object_do_bypass(
3103 vm_object_t object,
3104 vm_object_t backing_object)
3105 {
3106 /*
3107 * Make the parent shadow the next object
3108 * in the chain.
3109 */
3110
3111 #if TASK_SWAPPER
3112 /*
3113 * Do object reference in-line to
3114 * conditionally increment shadow's
3115 * residence count. If object is not
3116 * resident, leave residence count
3117 * on shadow alone.
3118 */
3119 if (backing_object->shadow != VM_OBJECT_NULL) {
3120 vm_object_lock(backing_object->shadow);
3121 backing_object->shadow->ref_count++;
3122 if (object->res_count != 0)
3123 vm_object_res_reference(backing_object->shadow);
3124 vm_object_unlock(backing_object->shadow);
3125 }
3126 #else /* TASK_SWAPPER */
3127 vm_object_reference(backing_object->shadow);
3128 #endif /* TASK_SWAPPER */
3129
3130 object->shadow = backing_object->shadow;
3131 object->shadow_offset += backing_object->shadow_offset;
3132
3133 /*
3134 * Backing object might have had a copy pointer
3135 * to us. If it did, clear it.
3136 */
3137 if (backing_object->copy == object) {
3138 backing_object->copy = VM_OBJECT_NULL;
3139 }
3140
3141 /*
3142 * Drop the reference count on backing_object.
3143 #if TASK_SWAPPER
3144 * Since its ref_count was at least 2, it
3145 * will not vanish; so we don't need to call
3146 * vm_object_deallocate.
3147 * [FBDP: that doesn't seem to be true any more]
3148 *
3149 * The res_count on the backing object is
3150 * conditionally decremented. It's possible
3151 * (via vm_pageout_scan) to get here with
3152 * a "swapped" object, which has a 0 res_count,
3153 * in which case, the backing object res_count
3154 * is already down by one.
3155 #else
3156 * Don't call vm_object_deallocate unless
3157 * ref_count drops to zero.
3158 *
3159 * The ref_count can drop to zero here if the
3160 * backing object could be bypassed but not
3161 * collapsed, such as when the backing object
3162 * is temporary and cachable.
3163 #endif
3164 */
3165 if (backing_object->ref_count > 1) {
3166 backing_object->ref_count--;
3167 #if TASK_SWAPPER
3168 if (object->res_count != 0)
3169 vm_object_res_deallocate(backing_object);
3170 assert(backing_object->ref_count > 0);
3171 #endif /* TASK_SWAPPER */
3172 vm_object_unlock(backing_object);
3173 } else {
3174
3175 /*
3176 * Drop locks so that we can deallocate
3177 * the backing object.
3178 */
3179
3180 #if TASK_SWAPPER
3181 if (object->res_count == 0) {
3182 /* XXX get a reference for the deallocate below */
3183 vm_object_res_reference(backing_object);
3184 }
3185 #endif /* TASK_SWAPPER */
3186 vm_object_unlock(object);
3187 vm_object_unlock(backing_object);
3188 vm_object_deallocate(backing_object);
3189
3190 /*
3191 * Relock object. We don't have to reverify
3192 * its state since vm_object_collapse will
3193 * do that for us as it starts at the
3194 * top of its loop.
3195 */
3196
3197 vm_object_lock(object);
3198 }
3199
3200 object_bypasses++;
3201 }
3202
3203
3204 /*
3205 * vm_object_collapse:
3206 *
3207 * Perform an object collapse or an object bypass if appropriate.
3208 * The real work of collapsing and bypassing is performed in
3209 * the routines vm_object_do_collapse and vm_object_do_bypass.
3210 *
3211 * Requires that the object be locked and the page queues be unlocked.
3212 *
3213 */
3214 __private_extern__ void
3215 vm_object_collapse(
3216 register vm_object_t object)
3217 {
3218 register vm_object_t backing_object;
3219 register vm_object_offset_t backing_offset;
3220 register vm_object_size_t size;
3221 register vm_object_offset_t new_offset;
3222 register vm_page_t p;
3223
3224 vm_offset_t current_offset;
3225
3226 if (! vm_object_collapse_allowed && ! vm_object_bypass_allowed) {
3227 return;
3228 }
3229
3230 XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
3231 (integer_t)object, 0,0,0,0);
3232
3233 while (TRUE) {
3234 /*
3235 * Verify that the conditions are right for either
3236 * collapse or bypass:
3237 *
3238 * The object exists and no pages in it are currently
3239 * being paged out, and
3240 */
3241 if (object == VM_OBJECT_NULL ||
3242 object->paging_in_progress != 0 ||
3243 object->absent_count != 0)
3244 return;
3245
3246 /*
3247 * There is a backing object, and
3248 */
3249
3250 if ((backing_object = object->shadow) == VM_OBJECT_NULL)
3251 return;
3252
3253 vm_object_lock(backing_object);
3254
3255 /*
3256 * ...
3257 * The backing object is not read_only,
3258 * and no pages in the backing object are
3259 * currently being paged out.
3260 * The backing object is internal.
3261 *
3262 */
3263
3264 if (!backing_object->internal ||
3265 backing_object->paging_in_progress != 0) {
3266 vm_object_unlock(backing_object);
3267 return;
3268 }
3269
3270 /*
3271 * The backing object can't be a copy-object:
3272 * the shadow_offset for the copy-object must stay
3273 * as 0. Furthermore (for the 'we have all the
3274 * pages' case), if we bypass backing_object and
3275 * just shadow the next object in the chain, old
3276 * pages from that object would then have to be copied
3277 * BOTH into the (former) backing_object and into the
3278 * parent object.
3279 */
3280 if (backing_object->shadow != VM_OBJECT_NULL &&
3281 backing_object->shadow->copy != VM_OBJECT_NULL) {
3282 vm_object_unlock(backing_object);
3283 return;
3284 }
3285
3286 /*
3287 * We can now try to either collapse the backing
3288 * object (if the parent is the only reference to
3289 * it) or (perhaps) remove the parent's reference
3290 * to it.
3291 *
3292 * If there is exactly one reference to the backing
3293 * object, we may be able to collapse it into the
3294 * parent.
3295 *
3296 * The backing object must not have a pager
3297 * created for it, since collapsing an object
3298 * into a backing_object dumps new pages into
3299 * the backing_object that its pager doesn't
3300 * know about.
3301 */
3302
3303 if (backing_object->ref_count == 1 &&
3304 ! object->pager_created &&
3305 vm_object_collapse_allowed) {
3306
3307 XPR(XPR_VM_OBJECT,
3308 "vm_object_collapse: %x to %x, pager %x, pager_request %x\n",
3309 (integer_t)backing_object, (integer_t)object,
3310 (integer_t)backing_object->pager,
3311 (integer_t)backing_object->pager_request, 0);
3312
3313 /*
3314 * We need the cache lock for collapsing,
3315 * but we must not deadlock.
3316 */
3317
3318 if (! vm_object_cache_lock_try()) {
3319 vm_object_unlock(backing_object);
3320 return;
3321 }
3322
3323 /*
3324 * Collapse the object with its backing
3325 * object, and try again with the object's
3326 * new backing object.
3327 */
3328
3329 vm_object_do_collapse(object, backing_object);
3330 continue;
3331 }
3332
3333
3334 /*
3335 * Collapsing the backing object was not possible
3336 * or permitted, so let's try bypassing it.
3337 */
3338
3339 if (! vm_object_bypass_allowed) {
3340 vm_object_unlock(backing_object);
3341 return;
3342 }
3343
3344
3345 /*
3346 * If the backing object has a pager but no pagemap,
3347 * then we cannot bypass it, because we don't know
3348 * what pages it has.
3349 */
3350 if (backing_object->pager_created
3351 #if MACH_PAGEMAP
3352 && (backing_object->existence_map == VM_EXTERNAL_NULL)
3353 #endif /* MACH_PAGEMAP */
3354 ) {
3355 vm_object_unlock(backing_object);
3356 return;
3357 }
3358
3359 /*
3360 * If the object has a pager but no pagemap,
3361 * then we cannot bypass it, because we don't know
3362 * what pages it has.
3363 */
3364 if (object->pager_created
3365 #if MACH_PAGEMAP
3366 && (object->existence_map == VM_EXTERNAL_NULL)
3367 #endif /* MACH_PAGEMAP */
3368 ) {
3369 vm_object_unlock(backing_object);
3370 return;
3371 }
3372
3373 backing_offset = object->shadow_offset;
3374 size = object->size;
3375
3376 /*
3377 * If all of the pages in the backing object are
3378 * shadowed by the parent object, the parent
3379 * object no longer has to shadow the backing
3380 * object; it can shadow the next one in the
3381 * chain.
3382 *
3383 * If the backing object has existence info,
3384 * we must examine its existence info
3385 * as well.
3386 *
3387 */
3388
3389 if(object->cow_hint >= size)
3390 object->cow_hint = 0;
3391 current_offset = object->cow_hint;
3392 while(TRUE) {
3393 if (vm_page_lookup(object,
3394 (vm_object_offset_t)current_offset)
3395 != VM_PAGE_NULL) {
3396 current_offset+=PAGE_SIZE;
3397 } else if ((object->pager_created) &&
3398 (object->existence_map != NULL) &&
3399 (vm_external_state_get(object->existence_map,
3400 current_offset)
3401 != VM_EXTERNAL_STATE_ABSENT)) {
3402 current_offset+=PAGE_SIZE;
3403 } else if (vm_page_lookup(backing_object,
3404 (vm_object_offset_t)current_offset
3405 + backing_offset)!= VM_PAGE_NULL) {
3406 /* found a dependency */
3407 object->cow_hint = current_offset;
3408 vm_object_unlock(backing_object);
3409 return;
3410 } else if ((backing_object->pager_created) &&
3411 (backing_object->existence_map != NULL) &&
3412 (vm_external_state_get(
3413 backing_object->existence_map,
3414 current_offset + backing_offset)
3415 != VM_EXTERNAL_STATE_ABSENT)) {
3416 /* found a dependency */
3417 object->cow_hint = current_offset;
3418 vm_object_unlock(backing_object);
3419 return;
3420 } else {
3421 current_offset+=PAGE_SIZE;
3422 }
3423 if(current_offset >= size) {
3424 /* wrap at end of object */
3425 current_offset = 0;
3426 }
3427 if(current_offset == object->cow_hint) {
3428 /* we are free of shadow influence */
3429 break;
3430 }
3431 }
3432 /* reset the cow_hint for any objects deeper in the chain */
3433 object->cow_hint = 0;
3434
3435
3436
3437 /*
3438 * All interesting pages in the backing object
3439 * already live in the parent or its pager.
3440 * Thus we can bypass the backing object.
3441 */
3442
3443 vm_object_do_bypass(object, backing_object);
3444
3445 /*
3446 * Try again with this object's new backing object.
3447 */
3448
3449 continue;
3450 }
3451 }
3452
3453 /*
3454 * Routine: vm_object_page_remove: [internal]
3455 * Purpose:
3456 * Removes all physical pages in the specified
3457 * object range from the object's list of pages.
3458 *
3459 * In/out conditions:
3460 * The object must be locked.
3461 * The object must not have paging_in_progress, usually
3462 * guaranteed by not having a pager.
3463 */
3464 unsigned int vm_object_page_remove_lookup = 0;
3465 unsigned int vm_object_page_remove_iterate = 0;
3466
3467 __private_extern__ void
3468 vm_object_page_remove(
3469 register vm_object_t object,
3470 register vm_object_offset_t start,
3471 register vm_object_offset_t end)
3472 {
3473 register vm_page_t p, next;
3474
3475 /*
3476 * One and two page removals are most popular.
3477 * The factor of 16 here is somewhat arbitrary.
3478 * It balances vm_object_lookup vs iteration.
3479 */
3480
3481 if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
3482 vm_object_page_remove_lookup++;
3483
3484 for (; start < end; start += PAGE_SIZE_64) {
3485 p = vm_page_lookup(object, start);
3486 if (p != VM_PAGE_NULL) {
3487 assert(!p->cleaning && !p->pageout);
3488 if (!p->fictitious)
3489 pmap_page_protect(p->phys_page,
3490 VM_PROT_NONE);
3491 VM_PAGE_FREE(p);
3492 }
3493 }
3494 } else {
3495 vm_object_page_remove_iterate++;
3496
3497 p = (vm_page_t) queue_first(&object->memq);
3498 while (!queue_end(&object->memq, (queue_entry_t) p)) {
3499 next = (vm_page_t) queue_next(&p->listq);
3500 if ((start <= p->offset) && (p->offset < end)) {
3501 assert(!p->cleaning && !p->pageout);
3502 if (!p->fictitious)
3503 pmap_page_protect(p->phys_page,
3504 VM_PROT_NONE);
3505 VM_PAGE_FREE(p);
3506 }
3507 p = next;
3508 }
3509 }
3510 }
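/*
 * Worked example of the threshold above: removing a 4-page range
 * (atop_64(end - start) == 4) from an object with 128 resident pages
 * compares 4 against 128/16 == 8, so the per-page vm_page_lookup path
 * is taken.  Removing a 64-page range from the same object fails the
 * test (64 >= 8) and walks the object's whole page list once instead.
 */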
3511
3512
3513 /*
3514 * Routine: vm_object_coalesce
3515 * Function: Coalesces two objects backing up adjoining
3516 * regions of memory into a single object.
3517 *
3518 * returns TRUE if objects were combined.
3519 *
3520 * NOTE: Only works at the moment if the second object is NULL -
3521 * if it's not, which object do we lock first?
3522 *
3523 * Parameters:
3524 * prev_object First object to coalesce
3525 * prev_offset Offset into prev_object
3526 * next_object Second object to coalesce
3527 * next_offset Offset into next_object
3528 *
3529 * prev_size Size of reference to prev_object
3530 * next_size Size of reference to next_object
3531 *
3532 * Conditions:
3533 * The object(s) must *not* be locked. The map must be locked
3534 * to preserve the reference to the object(s).
3535 */
3536 static int vm_object_coalesce_count = 0;
3537
3538 __private_extern__ boolean_t
3539 vm_object_coalesce(
3540 register vm_object_t prev_object,
3541 vm_object_t next_object,
3542 vm_object_offset_t prev_offset,
3543 vm_object_offset_t next_offset,
3544 vm_object_size_t prev_size,
3545 vm_object_size_t next_size)
3546 {
3547 vm_object_size_t newsize;
3548
3549 #ifdef lint
3550 next_offset++;
3551 #endif /* lint */
3552
3553 if (next_object != VM_OBJECT_NULL) {
3554 return(FALSE);
3555 }
3556
3557 if (prev_object == VM_OBJECT_NULL) {
3558 return(TRUE);
3559 }
3560
3561 XPR(XPR_VM_OBJECT,
3562 "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
3563 (integer_t)prev_object, prev_offset, prev_size, next_size, 0);
3564
3565 vm_object_lock(prev_object);
3566
3567 /*
3568 * Try to collapse the object first
3569 */
3570 vm_object_collapse(prev_object);
3571
3572 /*
3573 * Can't coalesce if pages not mapped to
3574 * prev_entry may be in use in any way:
3575 * . more than one reference
3576 * . paged out
3577 * . shadows another object
3578 * . has a copy elsewhere
3579 * . paging references (pages might be in page-list)
3580 */
3581
3582 if ((prev_object->ref_count > 1) ||
3583 prev_object->pager_created ||
3584 (prev_object->shadow != VM_OBJECT_NULL) ||
3585 (prev_object->copy != VM_OBJECT_NULL) ||
3586 (prev_object->true_share != FALSE) ||
3587 (prev_object->paging_in_progress != 0)) {
3588 vm_object_unlock(prev_object);
3589 return(FALSE);
3590 }
3591
3592 vm_object_coalesce_count++;
3593
3594 /*
3595 * Remove any pages that may still be in the object from
3596 * a previous deallocation.
3597 */
3598 vm_object_page_remove(prev_object,
3599 prev_offset + prev_size,
3600 prev_offset + prev_size + next_size);
3601
3602 /*
3603 * Extend the object if necessary.
3604 */
3605 newsize = prev_offset + prev_size + next_size;
3606 if (newsize > prev_object->size) {
3607 #if MACH_PAGEMAP
3608 /*
3609 * We cannot extend an object that has existence info,
3610 * since the existence info might then fail to cover
3611 * the entire object.
3612 *
3613 * This assertion must be true because the object
3614 * has no pager, and we only create existence info
3615 * for objects with pagers.
3616 */
3617 assert(prev_object->existence_map == VM_EXTERNAL_NULL);
3618 #endif /* MACH_PAGEMAP */
3619 prev_object->size = newsize;
3620 }
3621
3622 vm_object_unlock(prev_object);
3623 return(TRUE);
3624 }
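/*
 * Sketch of the intended calling pattern for vm_object_coalesce
 * (hypothetical names and sizes).  When a new anonymous allocation
 * lands immediately after an existing one, the map code can try to
 * extend the previous object instead of allocating a new one; per the
 * conditions above, next_object must be VM_OBJECT_NULL for this to
 * succeed.
 */
#if 0	/* illustrative sketch only */
	if (vm_object_coalesce(prev_object, VM_OBJECT_NULL,
			       prev_offset, (vm_object_offset_t) 0,
			       prev_size, next_size)) {
		/* prev_object now covers the combined range; map the new
		 * region at offset prev_offset + prev_size */
	} else {
		/* fall back to a separate object for the new region */
	}
#endif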
3625
3626 /*
3627 * Attach a set of physical pages to an object, so that they can
3628 * be mapped by mapping the object. Typically used to map IO memory.
3629 *
3630 * The mapping function and its private data are used to obtain the
3631 * physical addresses for each page to be mapped.
3632 */
3633 void
3634 vm_object_page_map(
3635 vm_object_t object,
3636 vm_object_offset_t offset,
3637 vm_object_size_t size,
3638 vm_object_offset_t (*map_fn)(void *map_fn_data,
3639 vm_object_offset_t offset),
3640 void *map_fn_data) /* private to map_fn */
3641 {
3642 int num_pages;
3643 int i;
3644 vm_page_t m;
3645 vm_page_t old_page;
3646 vm_object_offset_t addr;
3647
3648 num_pages = atop_64(size);
3649
3650 for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) {
3651
3652 addr = (*map_fn)(map_fn_data, offset);
3653
3654 while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
3655 vm_page_more_fictitious();
3656
3657 vm_object_lock(object);
3658 if ((old_page = vm_page_lookup(object, offset))
3659 != VM_PAGE_NULL)
3660 {
3661 vm_page_lock_queues();
3662 vm_page_free(old_page);
3663 vm_page_unlock_queues();
3664 }
3665
3666 vm_page_init(m, addr);
3667 /* private normally requires lock_queues but since we */
3668 /* are initializing the page, its not necessary here */
3669 m->private = TRUE; /* don't free page */
3670 m->wire_count = 1;
3671 vm_page_insert(m, object, offset);
3672
3673 PAGE_WAKEUP_DONE(m);
3674 vm_object_unlock(object);
3675 }
3676 }
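/*
 * Example map_fn for vm_object_page_map above: a physically contiguous
 * device region whose base physical address is passed through
 * map_fn_data (names here are hypothetical).  Each page's physical
 * address is simply the base plus the offset supplied by
 * vm_object_page_map.
 */
#if 0	/* illustrative sketch only */
static vm_object_offset_t
contig_map_fn(
	void			*map_fn_data,	/* points at the base address */
	vm_object_offset_t	offset)
{
	return *(vm_object_offset_t *) map_fn_data + offset;
}

/*
 * Typical call, with device_object, region_size and phys_base supplied
 * by the driver:
 *
 *	vm_object_page_map(device_object, 0, region_size,
 *			   contig_map_fn, (void *) &phys_base);
 */
#endif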
3677
3678 #include <mach_kdb.h>
3679
3680 #if MACH_KDB
3681 #include <ddb/db_output.h>
3682 #include <vm/vm_print.h>
3683
3684 #define printf kdbprintf
3685
3686 extern boolean_t vm_object_cached(
3687 vm_object_t object);
3688
3689 extern void print_bitstring(
3690 char byte);
3691
3692 boolean_t vm_object_print_pages = FALSE;
3693
3694 void
3695 print_bitstring(
3696 char byte)
3697 {
3698 printf("%c%c%c%c%c%c%c%c",
3699 ((byte & (1 << 0)) ? '1' : '0'),
3700 ((byte & (1 << 1)) ? '1' : '0'),
3701 ((byte & (1 << 2)) ? '1' : '0'),
3702 ((byte & (1 << 3)) ? '1' : '0'),
3703 ((byte & (1 << 4)) ? '1' : '0'),
3704 ((byte & (1 << 5)) ? '1' : '0'),
3705 ((byte & (1 << 6)) ? '1' : '0'),
3706 ((byte & (1 << 7)) ? '1' : '0'));
3707 }
3708
3709 boolean_t
3710 vm_object_cached(
3711 register vm_object_t object)
3712 {
3713 register vm_object_t o;
3714
3715 queue_iterate(&vm_object_cached_list, o, vm_object_t, cached_list) {
3716 if (object == o) {
3717 return TRUE;
3718 }
3719 }
3720 return FALSE;
3721 }
3722
3723 #if MACH_PAGEMAP
3724 /*
3725 * vm_external_print: [ debug ]
3726 */
3727 void
3728 vm_external_print(
3729 vm_external_map_t map,
3730 vm_size_t size)
3731 {
3732 if (map == VM_EXTERNAL_NULL) {
3733 printf("0 ");
3734 } else {
3735 vm_size_t existence_size = stob(size);
3736 printf("{ size=%d, map=[", existence_size);
3737 if (existence_size > 0) {
3738 print_bitstring(map[0]);
3739 }
3740 if (existence_size > 1) {
3741 print_bitstring(map[1]);
3742 }
3743 if (existence_size > 2) {
3744 printf("...");
3745 print_bitstring(map[existence_size-1]);
3746 }
3747 printf("] }\n");
3748 }
3749 return;
3750 }
3751 #endif /* MACH_PAGEMAP */
3752
3753 int
3754 vm_follow_object(
3755 vm_object_t object)
3756 {
3757 extern db_indent;
3758
3759 int count = 0;
3760 int orig_db_indent = db_indent;
3761
3762 while (TRUE) {
3763 if (object == VM_OBJECT_NULL) {
3764 db_indent = orig_db_indent;
3765 return count;
3766 }
3767
3768 count += 1;
3769
3770 iprintf("object 0x%x", object);
3771 printf(", shadow=0x%x", object->shadow);
3772 printf(", copy=0x%x", object->copy);
3773 printf(", pager=0x%x", object->pager);
3774 printf(", ref=%d\n", object->ref_count);
3775
3776 db_indent += 2;
3777 object = object->shadow;
3778 }
3779
3780 }
3781
3782 /*
3783 * vm_object_print: [ debug ]
3784 */
3785 void
3786 vm_object_print(
3787 vm_object_t object,
3788 boolean_t have_addr,
3789 int arg_count,
3790 char *modif)
3791 {
3792 register vm_page_t p;
3793 extern db_indent;
3794 char *s;
3795
3796 register int count;
3797
3798 if (object == VM_OBJECT_NULL)
3799 return;
3800
3801 iprintf("object 0x%x\n", object);
3802
3803 db_indent += 2;
3804
3805 iprintf("size=0x%x", object->size);
3806 printf(", cluster=0x%x", object->cluster_size);
3807 printf(", frozen=0x%x", object->frozen_size);
3808 printf(", ref_count=%d\n", object->ref_count);
3809 iprintf("");
3810 #if TASK_SWAPPER
3811 printf("res_count=%d, ", object->res_count);
3812 #endif /* TASK_SWAPPER */
3813 printf("resident_page_count=%d\n", object->resident_page_count);
3814
3815 iprintf("shadow=0x%x", object->shadow);
3816 if (object->shadow) {
3817 register int i = 0;
3818 vm_object_t shadow = object;
3819 while ((shadow = shadow->shadow))
3820 i++;
3821 printf(" (depth %d)", i);
3822 }
3823 printf(", copy=0x%x", object->copy);
3824 printf(", shadow_offset=0x%x", object->shadow_offset);
3825 printf(", last_alloc=0x%x\n", object->last_alloc);
3826
3827 iprintf("pager=0x%x", object->pager);
3828 printf(", paging_offset=0x%x", object->paging_offset);
3829 printf(", pager_request=0x%x\n", object->pager_request);
3830
3831 iprintf("copy_strategy=%d[", object->copy_strategy);
3832 switch (object->copy_strategy) {
3833 case MEMORY_OBJECT_COPY_NONE:
3834 printf("copy_none");
3835 break;
3836
3837 case MEMORY_OBJECT_COPY_CALL:
3838 printf("copy_call");
3839 break;
3840
3841 case MEMORY_OBJECT_COPY_DELAY:
3842 printf("copy_delay");
3843 break;
3844
3845 case MEMORY_OBJECT_COPY_SYMMETRIC:
3846 printf("copy_symmetric");
3847 break;
3848
3849 case MEMORY_OBJECT_COPY_INVALID:
3850 printf("copy_invalid");
3851 break;
3852
3853 default:
3854 printf("?");
3855 }
3856 printf("]");
3857 printf(", absent_count=%d\n", object->absent_count);
3858
3859 iprintf("all_wanted=0x%x<", object->all_wanted);
3860 s = "";
3861 if (vm_object_wanted(object, VM_OBJECT_EVENT_INITIALIZED)) {
3862 printf("%sinit", s);
3863 s = ",";
3864 }
3865 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGER_READY)) {
3866 printf("%sready", s);
3867 s = ",";
3868 }
3869 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS)) {
3870 printf("%spaging", s);
3871 s = ",";
3872 }
3873 if (vm_object_wanted(object, VM_OBJECT_EVENT_ABSENT_COUNT)) {
3874 printf("%sabsent", s);
3875 s = ",";
3876 }
3877 if (vm_object_wanted(object, VM_OBJECT_EVENT_LOCK_IN_PROGRESS)) {
3878 printf("%slock", s);
3879 s = ",";
3880 }
3881 if (vm_object_wanted(object, VM_OBJECT_EVENT_UNCACHING)) {
3882 printf("%suncaching", s);
3883 s = ",";
3884 }
3885 if (vm_object_wanted(object, VM_OBJECT_EVENT_COPY_CALL)) {
3886 printf("%scopy_call", s);
3887 s = ",";
3888 }
3889 if (vm_object_wanted(object, VM_OBJECT_EVENT_CACHING)) {
3890 printf("%scaching", s);
3891 s = ",";
3892 }
3893 printf(">");
3894 printf(", paging_in_progress=%d\n", object->paging_in_progress);
3895
3896 iprintf("%screated, %sinit, %sready, %spersist, %strusted, %spageout, %s, %s\n",
3897 (object->pager_created ? "" : "!"),
3898 (object->pager_initialized ? "" : "!"),
3899 (object->pager_ready ? "" : "!"),
3900 (object->can_persist ? "" : "!"),
3901 (object->pager_trusted ? "" : "!"),
3902 (object->pageout ? "" : "!"),
3903 (object->internal ? "internal" : "external"),
3904 (object->temporary ? "temporary" : "permanent"));
3905 iprintf("%salive, %slock_in_progress, %slock_restart, %sshadowed, %scached, %sprivate\n",
3906 (object->alive ? "" : "!"),
3907 (object->lock_in_progress ? "" : "!"),
3908 (object->lock_restart ? "" : "!"),
3909 (object->shadowed ? "" : "!"),
3910 (vm_object_cached(object) ? "" : "!"),
3911 (object->private ? "" : "!"));
3912 iprintf("%sadvisory_pageout, %ssilent_overwrite\n",
3913 (object->advisory_pageout ? "" : "!"),
3914 (object->silent_overwrite ? "" : "!"));
3915
3916 #if MACH_PAGEMAP
3917 iprintf("existence_map=");
3918 vm_external_print(object->existence_map, object->size);
3919 #endif /* MACH_PAGEMAP */
3920 #if MACH_ASSERT
3921 iprintf("paging_object=0x%x\n", object->paging_object);
3922 #endif /* MACH_ASSERT */
3923
3924 if (vm_object_print_pages) {
3925 count = 0;
3926 p = (vm_page_t) queue_first(&object->memq);
3927 while (!queue_end(&object->memq, (queue_entry_t) p)) {
3928 if (count == 0) {
3929 iprintf("memory:=");
3930 } else if (count == 2) {
3931 printf("\n");
3932 iprintf(" ...");
3933 count = 0;
3934 } else {
3935 printf(",");
3936 }
3937 count++;
3938
3939 printf("(off=0x%X,page=0x%X)", p->offset, (integer_t) p);
3940 p = (vm_page_t) queue_next(&p->listq);
3941 }
3942 if (count != 0) {
3943 printf("\n");
3944 }
3945 }
3946 db_indent -= 2;
3947 }
3948
3949
3950 /*
3951 * vm_object_find [ debug ]
3952 *
3953 * Find all tasks which reference the given vm_object.
3954 */
3955
3956 boolean_t vm_object_find(vm_object_t object);
3957 boolean_t vm_object_print_verbose = FALSE;
3958
3959 boolean_t
3960 vm_object_find(
3961 vm_object_t object)
3962 {
3963 task_t task;
3964 vm_map_t map;
3965 vm_map_entry_t entry;
3966 processor_set_t pset = &default_pset;
3967 boolean_t found = FALSE;
3968
3969 queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
3970 map = task->map;
3971 for (entry = vm_map_first_entry(map);
3972 entry && entry != vm_map_to_entry(map);
3973 entry = entry->vme_next) {
3974
3975 vm_object_t obj;
3976
3977 /*
3978                          * For the time being, skip submaps.
3979                          * Only the kernel can have submaps,
3980                          * so unless we are interested in
3981                          * kernel objects we can simply skip
3982                          * them.  See sb/dejan/nmk18b7/src/mach_kernel/vm
3983                          * for a full solution.
3984 */
3985 if (entry->is_sub_map)
3986 continue;
3987 if (entry)
3988 obj = entry->object.vm_object;
3989 else
3990 continue;
3991
3992 while (obj != VM_OBJECT_NULL) {
3993 if (obj == object) {
3994 if (!found) {
3995 printf("TASK\t\tMAP\t\tENTRY\n");
3996 found = TRUE;
3997 }
3998 printf("0x%x\t0x%x\t0x%x\n",
3999 task, map, entry);
4000 }
4001 obj = obj->shadow;
4002 }
4003 }
4004 }
4005
4006 return(found);
4007 }
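/*
 * Illustrative sketch, not part of the original source: from the kernel
 * debugger, vm_object_find() could be wrapped like this to report when no
 * task maps the object.  "vm_object_find_example" is a hypothetical helper
 * added here only for exposition.
 */
#if 0
static void
vm_object_find_example(
	vm_object_t	object)
{
	if (!vm_object_find(object))
		printf("object 0x%x is not mapped by any task\n",
		       (integer_t) object);
}
#endif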
4008
4009 #endif /* MACH_KDB */
4010
4011 kern_return_t
4012 vm_object_populate_with_private(
4013 vm_object_t object,
4014 vm_object_offset_t offset,
4015 ppnum_t phys_page,
4016 vm_size_t size)
4017 {
4018 ppnum_t base_page;
4019 vm_object_offset_t base_offset;
4020
4021
4022 if(!object->private)
4023 return KERN_FAILURE;
4024
4025 base_page = phys_page;
4026
4027 vm_object_lock(object);
4028 if(!object->phys_contiguous) {
4029 vm_page_t m;
4030 if((base_offset = trunc_page_64(offset)) != offset) {
4031 vm_object_unlock(object);
4032 return KERN_FAILURE;
4033 }
4034 base_offset += object->paging_offset;
4035 while(size) {
4036 m = vm_page_lookup(object, base_offset);
4037 if(m != VM_PAGE_NULL) {
4038 if(m->fictitious) {
4039 vm_page_lock_queues();
4040 m->fictitious = FALSE;
4041 m->private = TRUE;
4042 m->phys_page = base_page;
4043 if(!m->busy) {
4044 m->busy = TRUE;
4045 }
4046 if(!m->absent) {
4047 m->absent = TRUE;
4048 object->absent_count++;
4049 }
4050 m->list_req_pending = TRUE;
4051 vm_page_unlock_queues();
4052 } else if (m->phys_page != base_page) {
4053 /* pmap call to clear old mapping */
4054 pmap_page_protect(m->phys_page,
4055 VM_PROT_NONE);
4056 m->phys_page = base_page;
4057 }
4058 } else {
4059 while ((m = vm_page_grab_fictitious())
4060 == VM_PAGE_NULL)
4061 vm_page_more_fictitious();
4062 vm_page_lock_queues();
4063 m->fictitious = FALSE;
4064 m->private = TRUE;
4065 m->phys_page = base_page;
4066 m->list_req_pending = TRUE;
4067 m->absent = TRUE;
4068 m->unusual = TRUE;
4069 object->absent_count++;
4070 vm_page_unlock_queues();
4071 vm_page_insert(m, object, base_offset);
4072 }
4073 base_page++; /* Go to the next physical page */
4074 base_offset += PAGE_SIZE;
4075 size -= PAGE_SIZE;
4076 }
4077 } else {
4078 /* NOTE: we should check the original settings here */
4079 /* if we have a size > zero a pmap call should be made */
4080 /* to disable the range */
4081
4082 /* pmap_? */
4083
4084 /* shadows on contiguous memory are not allowed */
4085 /* we therefore can use the offset field */
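                 /* note: the "<< 12" below appears to assume 4K pages (ppnum_t is a physical page number) */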
4086 object->shadow_offset = (vm_object_offset_t)(phys_page << 12);
4087 object->size = size;
4088 }
4089 vm_object_unlock(object);
4090 return KERN_SUCCESS;
4091 }
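/*
 * Illustrative sketch, not compiled: a caller that owns a physically
 * contiguous range could publish it through a private object roughly as
 * follows.  "first_page" and "buf_size" are hypothetical values.
 */
#if 0
static kern_return_t
populate_private_example(
	vm_object_t	object,		/* must have been created private */
	ppnum_t		first_page,
	vm_size_t	buf_size)
{
	/* offset 0, page-aligned size; fails if object->private is not set */
	return vm_object_populate_with_private(object,
					       (vm_object_offset_t) 0,
					       first_page,
					       buf_size);
}
#endif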
4092
4093 /*
4094 * memory_object_free_from_cache:
4095 *
4096 * Walk the vm_object cache list, removing and freeing vm_objects
4097  * which are backed by the pager identified by the caller (pager_id).
4098  * Remove up to "count" objects, if that many are available
4099  * in the cache.
4100 *
4101 * Walk the list at most once, return the number of vm_objects
4102 * actually freed.
4103 */
4104
4105 __private_extern__ kern_return_t
4106 memory_object_free_from_cache(
4107 host_t host,
4108 int *pager_id,
4109 int *count)
4110 {
4111
4112 int object_released = 0;
4113 int i;
4114
4115 register vm_object_t object = VM_OBJECT_NULL;
4116 vm_object_t shadow;
4117
4118 /*
4119 if(host == HOST_NULL)
4120 return(KERN_INVALID_ARGUMENT);
4121 */
4122
4123 try_again:
4124 vm_object_cache_lock();
4125
4126 queue_iterate(&vm_object_cached_list, object,
4127 vm_object_t, cached_list) {
4128 if (object->pager && (pager_id == object->pager->pager)) {
4129 vm_object_lock(object);
4130 queue_remove(&vm_object_cached_list, object,
4131 vm_object_t, cached_list);
4132 vm_object_cached_count--;
4133
4134 /*
4135 * Since this object is in the cache, we know
4136 * that it is initialized and has only a pager's
4137 * (implicit) reference. Take a reference to avoid
4138 * recursive deallocations.
4139 */
4140
4141 assert(object->pager_initialized);
4142 assert(object->ref_count == 0);
4143 object->ref_count++;
4144
4145 /*
4146 * Terminate the object.
4147 * If the object had a shadow, we let
4148 * vm_object_deallocate deallocate it.
4149 * "pageout" objects have a shadow, but
4150 * maintain a "paging reference" rather
4151 * than a normal reference.
4152 * (We are careful here to limit recursion.)
4153 */
4154 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
4155 if ((vm_object_terminate(object) == KERN_SUCCESS)
4156 && (shadow != VM_OBJECT_NULL)) {
4157 vm_object_deallocate(shadow);
4158 }
4159
4160 if(object_released++ == *count)
4161 return KERN_SUCCESS;
4162 goto try_again;
4163 }
4164 }
4165 vm_object_cache_unlock();
4166 *count = object_released;
4167 return KERN_SUCCESS;
4168 }
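/*
 * Illustrative sketch, not compiled: drop up to "want" cached objects
 * backed by a given pager id.  The host argument is currently unchecked
 * (see the commented-out HOST_NULL test above), and "my_pager_id" is a
 * hypothetical identifier.
 */
#if 0
static void
free_from_cache_example(
	int	*my_pager_id)
{
	int	want = 10;	/* free at most this many cached objects */

	(void) memory_object_free_from_cache(HOST_NULL, my_pager_id, &want);
	/* on the normal exit path "want" now holds the number actually freed */
}
#endif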
4169
4170
4171
4172 kern_return_t
4173 memory_object_create_named(
4174 memory_object_t pager,
4175 memory_object_offset_t size,
4176 memory_object_control_t *control)
4177 {
4178 vm_object_t object;
4179 vm_object_hash_entry_t entry;
4180
4181 *control = MEMORY_OBJECT_CONTROL_NULL;
4182 if (pager == MEMORY_OBJECT_NULL)
4183 return KERN_INVALID_ARGUMENT;
4184
4185 vm_object_cache_lock();
4186 entry = vm_object_hash_lookup(pager, FALSE);
4187 if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
4188 (entry->object != VM_OBJECT_NULL)) {
4189 if (entry->object->named == TRUE)
4190 panic("memory_object_create_named: caller already holds the right"); }
4191
4192 vm_object_cache_unlock();
4193 if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE))
4194 == VM_OBJECT_NULL) {
4195 return(KERN_INVALID_OBJECT);
4196 }
4197
4198 /* wait for object (if any) to be ready */
4199 if (object != VM_OBJECT_NULL) {
4200 vm_object_lock(object);
4201 object->named = TRUE;
4202 while (!object->pager_ready) {
4203 vm_object_sleep(object,
4204 VM_OBJECT_EVENT_PAGER_READY,
4205 THREAD_UNINT);
4206 }
4207 *control = object->pager_request;
4208 vm_object_unlock(object);
4209 }
4210 return (KERN_SUCCESS);
4211 }
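/*
 * Illustrative sketch, not compiled: obtain a named reference and the
 * control for a pager of known size.  "my_pager" and "my_size" are
 * hypothetical.
 */
#if 0
static kern_return_t
create_named_example(
	memory_object_t		my_pager,
	memory_object_offset_t	my_size)
{
	memory_object_control_t	control;
	kern_return_t		kr;

	kr = memory_object_create_named(my_pager, my_size, &control);
	if (kr != KERN_SUCCESS)
		return kr;
	/* "control" can now be used with the memory_object_control interfaces */
	return KERN_SUCCESS;
}
#endif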
4212
4213
4214 /*
4215 * Routine: memory_object_recover_named [user interface]
4216 * Purpose:
4217 * Attempt to recover a named reference for a VM object.
4218 * VM will verify that the object has not already started
4219 * down the termination path, and if it has, will optionally
4220 * wait for that to finish.
4221 * Returns:
4222 * KERN_SUCCESS - we recovered a named reference on the object
4223 * KERN_FAILURE - we could not recover a reference (object dead)
4224 * KERN_INVALID_ARGUMENT - bad memory object control
4225 */
4226 kern_return_t
4227 memory_object_recover_named(
4228 memory_object_control_t control,
4229 boolean_t wait_on_terminating)
4230 {
4231 vm_object_t object;
4232
4233 vm_object_cache_lock();
4234 object = memory_object_control_to_vm_object(control);
4235 if (object == VM_OBJECT_NULL) {
4236 vm_object_cache_unlock();
4237 return (KERN_INVALID_ARGUMENT);
4238 }
4239
4240 restart:
4241 vm_object_lock(object);
4242
4243 if (object->terminating && wait_on_terminating) {
4244 vm_object_cache_unlock();
4245 vm_object_wait(object,
4246 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
4247 THREAD_UNINT);
4248 vm_object_cache_lock();
4249 goto restart;
4250 }
4251
4252 if (!object->alive) {
4253 vm_object_cache_unlock();
4254 vm_object_unlock(object);
4255 return KERN_FAILURE;
4256 }
4257
4258 if (object->named == TRUE) {
4259 vm_object_cache_unlock();
4260 vm_object_unlock(object);
4261 return KERN_SUCCESS;
4262 }
4263
4264 if((object->ref_count == 0) && (!object->terminating)){
4265 queue_remove(&vm_object_cached_list, object,
4266 vm_object_t, cached_list);
4267 vm_object_cached_count--;
4268 XPR(XPR_VM_OBJECT_CACHE,
4269 "memory_object_recover_named: removing %X, head (%X, %X)\n",
4270 (integer_t)object,
4271 (integer_t)vm_object_cached_list.next,
4272 (integer_t)vm_object_cached_list.prev, 0,0);
4273 }
4274
4275 vm_object_cache_unlock();
4276
4277 object->named = TRUE;
4278 object->ref_count++;
4279 vm_object_res_reference(object);
4280 while (!object->pager_ready) {
4281 vm_object_sleep(object,
4282 VM_OBJECT_EVENT_PAGER_READY,
4283 THREAD_UNINT);
4284 }
4285 vm_object_unlock(object);
4286 return (KERN_SUCCESS);
4287 }
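/*
 * Illustrative sketch, not compiled: attempt to recover a named reference,
 * waiting out a concurrent termination instead of failing immediately.
 */
#if 0
static boolean_t
recover_named_example(
	memory_object_control_t	control)
{
	return (memory_object_recover_named(control, TRUE) == KERN_SUCCESS);
}
#endif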
4288
4289
4290 /*
4291 * vm_object_release_name:
4292 *
4293 * Enforces name semantic on memory_object reference count decrement
4294 * This routine should not be called unless the caller holds a name
4295  * reference gained through memory_object_create_named.
4296 *
4297  * If the TERMINATE_IDLE flag is set, the call returns KERN_FAILURE unless
4298  * the reference count is 1, i.e. the object is idle with the name as its
4299  * only remaining reference.
4300  * If the decision is made to proceed, the named flag is cleared and the
4301  * reference count is decremented.  If the RESPECT_CACHE
4302  * flag is set and the reference count has gone to zero, the
4303  * memory_object is checked to see if it is cacheable; otherwise, when
4304  * the reference count reaches zero, it is simply terminated.
4305 */
4306
4307 __private_extern__ kern_return_t
4308 vm_object_release_name(
4309 vm_object_t object,
4310 int flags)
4311 {
4312 vm_object_t shadow;
4313 boolean_t original_object = TRUE;
4314
4315 while (object != VM_OBJECT_NULL) {
4316
4317 /*
4318 * The cache holds a reference (uncounted) to
4319 		 * the object.  We must lock it before removing
4320 * the object.
4321 *
4322 */
4323
4324 vm_object_cache_lock();
4325 vm_object_lock(object);
4326 assert(object->alive);
4327 if(original_object)
4328 assert(object->named);
4329 assert(object->ref_count > 0);
4330
4331 /*
4332 * We have to wait for initialization before
4333 * destroying or caching the object.
4334 */
4335
4336 if (object->pager_created && !object->pager_initialized) {
4337 assert(!object->can_persist);
4338 vm_object_assert_wait(object,
4339 VM_OBJECT_EVENT_INITIALIZED,
4340 THREAD_UNINT);
4341 vm_object_unlock(object);
4342 vm_object_cache_unlock();
4343 thread_block(THREAD_CONTINUE_NULL);
4344 continue;
4345 }
4346
4347 if (((object->ref_count > 1)
4348 && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
4349 || (object->terminating)) {
4350 vm_object_unlock(object);
4351 vm_object_cache_unlock();
4352 return KERN_FAILURE;
4353 } else {
4354 if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
4355 vm_object_unlock(object);
4356 vm_object_cache_unlock();
4357 return KERN_SUCCESS;
4358 }
4359 }
4360
4361 if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
4362 (object->ref_count == 1)) {
4363 if(original_object)
4364 object->named = FALSE;
4365 vm_object_unlock(object);
4366 vm_object_cache_unlock();
4367 /* let vm_object_deallocate push this thing into */
4368 			/* the cache, if that is where it is bound */
4369 vm_object_deallocate(object);
4370 return KERN_SUCCESS;
4371 }
4372 VM_OBJ_RES_DECR(object);
4373 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
4374 if(object->ref_count == 1) {
4375 if(vm_object_terminate(object) != KERN_SUCCESS) {
4376 if(original_object) {
4377 return KERN_FAILURE;
4378 } else {
4379 return KERN_SUCCESS;
4380 }
4381 }
4382 if (shadow != VM_OBJECT_NULL) {
4383 original_object = FALSE;
4384 object = shadow;
4385 continue;
4386 }
4387 return KERN_SUCCESS;
4388 } else {
4389 object->ref_count--;
4390 assert(object->ref_count > 0);
4391 if(original_object)
4392 object->named = FALSE;
4393 vm_object_unlock(object);
4394 vm_object_cache_unlock();
4395 return KERN_SUCCESS;
4396 }
4397 }
4398 }
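/*
 * Illustrative sketch, not compiled: release a name reference only if the
 * object is otherwise idle, honoring its cacheability.
 */
#if 0
static kern_return_t
release_name_example(
	vm_object_t	object)
{
	return vm_object_release_name(object,
				      MEMORY_OBJECT_TERMINATE_IDLE |
				      MEMORY_OBJECT_RESPECT_CACHE);
}
#endif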
4399
4400
4401 __private_extern__ kern_return_t
4402 vm_object_lock_request(
4403 vm_object_t object,
4404 vm_object_offset_t offset,
4405 vm_object_size_t size,
4406 memory_object_return_t should_return,
4407 int flags,
4408 vm_prot_t prot)
4409 {
4410 vm_object_offset_t original_offset = offset;
4411 	boolean_t		should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
4412
4413 XPR(XPR_MEMORY_OBJECT,
4414 "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
4415 (integer_t)object, offset, size,
4416 (((should_return&1)<<1)|should_flush), prot);
4417
4418 /*
4419 * Check for bogus arguments.
4420 */
4421 if (object == VM_OBJECT_NULL)
4422 return (KERN_INVALID_ARGUMENT);
4423
4424 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
4425 return (KERN_INVALID_ARGUMENT);
4426
4427 size = round_page_64(size);
4428
4429 /*
4430 * Lock the object, and acquire a paging reference to
4431 * prevent the memory_object reference from being released.
4432 */
4433 vm_object_lock(object);
4434 vm_object_paging_begin(object);
4435 offset -= object->paging_offset;
4436
4437 (void)vm_object_update(object,
4438 offset, size, should_return, flags, prot);
4439
4440 vm_object_paging_end(object);
4441 vm_object_unlock(object);
4442
4443 return (KERN_SUCCESS);
4444 }
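/*
 * Illustrative sketch, not compiled: flush the dirty pages in a range of an
 * object back to its pager.  MEMORY_OBJECT_RETURN_DIRTY is assumed to be the
 * desired return policy; offset and size are hypothetical.
 */
#if 0
static kern_return_t
flush_range_example(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size)
{
	return vm_object_lock_request(object, offset, size,
				      MEMORY_OBJECT_RETURN_DIRTY,
				      MEMORY_OBJECT_DATA_FLUSH,
				      VM_PROT_NO_CHANGE);
}
#endif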
4445
4446
4447
4448 #if TASK_SWAPPER
4449 /*
4450 * vm_object_res_deallocate
4451 *
4452 * (recursively) decrement residence counts on vm objects and their shadows.
4453 * Called from vm_object_deallocate and when swapping out an object.
4454 *
4455 * The object is locked, and remains locked throughout the function,
4456 * even as we iterate down the shadow chain. Locks on intermediate objects
4457 * will be dropped, but not the original object.
4458 *
4459 * NOTE: this function used to use recursion, rather than iteration.
4460 */
4461
4462 __private_extern__ void
4463 vm_object_res_deallocate(
4464 vm_object_t object)
4465 {
4466 vm_object_t orig_object = object;
4467 /*
4468 * Object is locked so it can be called directly
4469 * from vm_object_deallocate. Original object is never
4470 * unlocked.
4471 */
4472 assert(object->res_count > 0);
4473 while (--object->res_count == 0) {
4474 assert(object->ref_count >= object->res_count);
4475 vm_object_deactivate_all_pages(object);
4476 /* iterate on shadow, if present */
4477 if (object->shadow != VM_OBJECT_NULL) {
4478 vm_object_t tmp_object = object->shadow;
4479 vm_object_lock(tmp_object);
4480 if (object != orig_object)
4481 vm_object_unlock(object);
4482 object = tmp_object;
4483 assert(object->res_count > 0);
4484 } else
4485 break;
4486 }
4487 if (object != orig_object)
4488 vm_object_unlock(object);
4489 }
4490
4491 /*
4492 * vm_object_res_reference
4493 *
4494 * Internal function to increment residence count on a vm object
4495 * and its shadows. It is called only from vm_object_reference, and
4496 * when swapping in a vm object, via vm_map_swap.
4497 *
4498 * The object is locked, and remains locked throughout the function,
4499 * even as we iterate down the shadow chain. Locks on intermediate objects
4500 * will be dropped, but not the original object.
4501 *
4502 * NOTE: this function used to use recursion, rather than iteration.
4503 */
4504
4505 __private_extern__ void
4506 vm_object_res_reference(
4507 vm_object_t object)
4508 {
4509 vm_object_t orig_object = object;
4510 /*
4511 * Object is locked, so this can be called directly
4512 * from vm_object_reference. This lock is never released.
4513 */
4514 while ((++object->res_count == 1) &&
4515 (object->shadow != VM_OBJECT_NULL)) {
4516 vm_object_t tmp_object = object->shadow;
4517
4518 assert(object->ref_count >= object->res_count);
4519 vm_object_lock(tmp_object);
4520 if (object != orig_object)
4521 vm_object_unlock(object);
4522 object = tmp_object;
4523 }
4524 if (object != orig_object)
4525 vm_object_unlock(object);
4526 assert(orig_object->ref_count >= orig_object->res_count);
4527 }
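/*
 * Illustrative sketch, not compiled: a counted reference taken while the
 * object is locked is normally paired with a residence reference, as in
 * memory_object_recover_named() above.
 */
#if 0
static void
reference_resident_example(
	vm_object_t	object)
{
	vm_object_lock(object);
	object->ref_count++;
	vm_object_res_reference(object);	/* keeps ref_count >= res_count */
	vm_object_unlock(object);
}
#endif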
4528 #endif /* TASK_SWAPPER */
4529
4530 /*
4531 * vm_object_reference:
4532 *
4533 * Gets another reference to the given object.
4534 */
4535 #ifdef vm_object_reference
4536 #undef vm_object_reference
4537 #endif
4538 __private_extern__ void
4539 vm_object_reference(
4540 register vm_object_t object)
4541 {
4542 if (object == VM_OBJECT_NULL)
4543 return;
4544
4545 vm_object_lock(object);
4546 assert(object->ref_count > 0);
4547 vm_object_reference_locked(object);
4548 vm_object_unlock(object);
4549 }
4550
4551 #ifdef MACH_BSD
4552 /*
4553 * Scale the vm_object_cache
4554 * This is required to make sure that the vm_object_cache is big
4555 * enough to effectively cache the mapped file.
4556 * This is really important with UBC as all the regular file vnodes
4557  * have a memory object associated with them.  Having this cache too
4558 * small results in rapid reclaim of vnodes and hurts performance a LOT!
4559 *
4560  * This is also needed as the number of vnodes can be dynamically scaled.
4561 */
4562 kern_return_t
4563 adjust_vm_object_cache(vm_size_t oval, vm_size_t nval)
4564 {
4565 vm_object_cached_max = nval;
4566 vm_object_cache_trim(FALSE);
4567 return (KERN_SUCCESS);
4568 }
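/*
 * Illustrative sketch, not compiled: when the BSD layer raises its vnode
 * limit it would grow the object cache to match; "new_vnode_limit" is a
 * hypothetical value computed by the caller.
 */
#if 0
static void
scale_object_cache_example(
	vm_size_t	old_vnode_limit,
	vm_size_t	new_vnode_limit)
{
	(void) adjust_vm_object_cache(old_vnode_limit, new_vnode_limit);
}
#endif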
4569 #endif /* MACH_BSD */
4570