/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_object.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module.
 */

#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/vm_param.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/queue.h>
#include <kern/xpr.h>
#include <kern/zalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/processor.h>
#include <kern/misc_protos.h>

#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, but locked by the object's
 *	lock.
 *
 *	Each object also records the memory object reference
 *	that is used by the kernel to request and write
 *	back data (the memory object, field "pager"), etc...
 *
 *	Virtual memory objects are allocated to provide
 *	zero-filled memory (vm_allocate) or map a user-defined
 *	memory object into a virtual address space (vm_map).
 *
 *	Virtual memory objects that refer to a user-defined
 *	memory object are called "permanent", because all changes
 *	made in virtual memory are reflected back to the
 *	memory manager, which may then store it permanently.
 *	Other virtual memory objects are called "temporary",
 *	meaning that changes need be written back only when
 *	necessary to reclaim pages, and that storage associated
 *	with the object can be discarded once it is no longer
 *	mapped.
 *
 *	A permanent memory object may be mapped into more
 *	than one virtual address space.  Moreover, two threads
 *	may attempt to make the first mapping of a memory
 *	object concurrently.  Only one thread is allowed to
 *	complete this mapping; all others wait until the
 *	"pager_initialized" field is asserted, indicating
 *	that the first thread has initialized all of the
 *	necessary fields in the virtual memory object structure.
 *
 *	The kernel relies on a *default memory manager* to
 *	provide backing storage for the zero-filled virtual
 *	memory objects.  The pager memory objects associated
 *	with these temporary virtual memory objects are only
 *	requested from the default memory manager when it
 *	becomes necessary.  Virtual memory objects
 *	that depend on the default memory manager are called
 *	"internal".  The "pager_created" field is provided to
 *	indicate whether these ports have ever been allocated.
 *
 *	The kernel may also create virtual memory objects to
 *	hold changed pages after a copy-on-write operation.
 *	In this case, the virtual memory object (and its
 *	backing storage -- its memory object) only contain
 *	those pages that have been changed.  The "shadow"
 *	field refers to the virtual memory object that contains
 *	the remainder of the contents.  The "shadow_offset"
 *	field indicates where in the "shadow" these contents begin.
 *	The "copy" field refers to a virtual memory object
 *	to which changed pages must be copied before changing
 *	this object, in order to implement another form
 *	of copy-on-write optimization.
 *
 *	The virtual memory object structure also records
 *	the attributes associated with its memory object.
 *	The "pager_ready", "can_persist" and "copy_strategy"
 *	fields represent those attributes.  The "cached_list"
 *	field is used in the implementation of the persistence
 *	attribute.
 *
 *	ZZZ Continue this comment.
 */
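
/*
 * Illustrative sketch (not part of the original source, not compiled):
 * how a page lookup conceptually falls through a shadow chain using the
 * "shadow" and "shadow_offset" fields described above.  This is a
 * simplified model of what vm_fault_page does; locking, busy pages and
 * the pager-backed path are deliberately omitted.
 */
#if 0
static vm_page_t
vm_object_shadow_lookup_sketch(		/* hypothetical helper */
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	vm_page_t	m;

	while (object != VM_OBJECT_NULL) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL)
			return m;		/* resident in this object */
		/* not resident here; descend to the backing object */
		offset += object->shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;	/* zero-fill or ask the pager */
}
#endif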

/* Forward declarations for internal functions. */
static kern_return_t	vm_object_terminate(
				vm_object_t	object);

extern void		vm_object_remove(
				vm_object_t	object);

static vm_object_t	vm_object_cache_trim(
				boolean_t	called_from_vm_object_deallocate);

static void		vm_object_deactivate_all_pages(
				vm_object_t	object);

static kern_return_t	vm_object_copy_call(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*_result_object);

static void		vm_object_do_collapse(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_do_bypass(
				vm_object_t	object,
				vm_object_t	backing_object);

static void		vm_object_release_pager(
				memory_object_t	pager);

static zone_t		vm_object_zone;		/* vm backing store zone */

/*
 *	All wired-down kernel memory belongs to a single virtual
 *	memory object (kernel_object) to avoid wasting data structures.
 */
static struct vm_object			kernel_object_store;
__private_extern__ vm_object_t		kernel_object = &kernel_object_store;

/*
 *	The submap object is used as a placeholder for vm_map_submap
 *	operations.  The object is declared in vm_map.c because it
 *	is exported by the vm_map module.  The storage is declared
 *	here because it must be initialized here.
 */
static struct vm_object			vm_submap_object_store;

/*
 *	Virtual memory objects are initialized from
 *	a template (see vm_object_allocate).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see _vm_object_allocate()).
 */
static struct vm_object			vm_object_template;

/*
 *	Virtual memory objects that are not referenced by
 *	any address maps, but that are allowed to persist
 *	(an attribute specified by the associated memory manager),
 *	are kept in a queue (vm_object_cached_list).
 *
 *	When an object from this queue is referenced again,
 *	for example to make another address space mapping,
 *	it must be removed from the queue.  That is, the
 *	queue contains *only* objects with zero references.
 *
 *	The kernel may choose to terminate objects from this
 *	queue in order to reclaim storage.  The current policy
 *	is to permit a fixed maximum number of unreferenced
 *	objects (vm_object_cached_max).
 *
 *	A spin lock (accessed by routines
 *	vm_object_cache_{lock,lock_try,unlock}) governs the
 *	object cache.  It must be held when objects are
 *	added to or removed from the cache (in vm_object_terminate).
 *	The routines that acquire a reference to a virtual
 *	memory object based on one of the memory object ports
 *	must also lock the cache.
 *
 *	Ideally, the object cache should be more isolated
 *	from the reference mechanism, so that the lock need
 *	not be held to make simple references.
 */
static queue_head_t	vm_object_cached_list;
static int		vm_object_cached_count = 0;
static int		vm_object_cached_high;	/* highest # cached objects */
static int		vm_object_cached_max = 512;	/* may be patched */

static decl_mutex_data(,vm_object_cached_lock_data)

#define vm_object_cache_lock()		\
		mutex_lock(&vm_object_cached_lock_data)
#define vm_object_cache_lock_try()	\
		mutex_try(&vm_object_cached_lock_data)
#define vm_object_cache_unlock()	\
		mutex_unlock(&vm_object_cached_lock_data)

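/*
 * Illustrative sketch (not part of the original source, not compiled):
 * the lock ordering implied by the macros above.  The cache lock is
 * taken first; the object lock is then acquired with vm_object_lock_try()
 * and backed off on failure, mirroring the loop vm_object_deallocate()
 * uses below to avoid deadlocking against a holder of the object lock.
 */
#if 0
	for (;;) {
		vm_object_cache_lock();
		if (vm_object_lock_try(object))
			break;			/* got both locks */
		vm_object_cache_unlock();	/* back off... */
		mutex_pause();			/* ...wait a bit and retry */
	}
#endif
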
#define VM_OBJECT_HASH_COUNT		1024
static queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];
static struct zone	*vm_object_hash_zone;

struct vm_object_hash_entry {
	queue_chain_t		hash_link;	/* hash chain link */
	memory_object_t		pager;		/* pager we represent */
	vm_object_t		object;		/* corresponding object */
	boolean_t		waiting;	/* someone waiting for
						 * termination */
};

typedef struct vm_object_hash_entry	*vm_object_hash_entry_t;
#define VM_OBJECT_HASH_ENTRY_NULL	((vm_object_hash_entry_t) 0)

#define VM_OBJECT_HASH_SHIFT	8
#define vm_object_hash(pager) \
	((((unsigned)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT)

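/*
 * Worked example of the hash above: for a (hypothetical) pager pointer
 * 0x01234500, shifting right by VM_OBJECT_HASH_SHIFT (8) discards the
 * low bits, which vary little for zone-allocated structures, leaving
 * 0x12345; modulo VM_OBJECT_HASH_COUNT (1024 = 0x400) keeps the low
 * 10 bits, selecting bucket 0x345.
 */
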
void vm_object_hash_entry_free(
	vm_object_hash_entry_t	entry);

/*
 *	vm_object_hash_lookup looks up a pager in the hashtable
 *	and returns the corresponding entry, with optional removal.
 */

static vm_object_hash_entry_t
vm_object_hash_lookup(
	memory_object_t	pager,
	boolean_t	remove_entry)
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	entry = (vm_object_hash_entry_t)queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t)entry)) {
		if (entry->pager == pager && !remove_entry)
			return(entry);
		else if (entry->pager == pager) {
			queue_remove(bucket, entry,
					vm_object_hash_entry_t, hash_link);
			return(entry);
		}

		entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link);
	}

	return(VM_OBJECT_HASH_ENTRY_NULL);
}

/*
 *	vm_object_hash_insert enters the specified
 *	pager / cache object association in the hashtable.
 */

static void
vm_object_hash_insert(
	vm_object_hash_entry_t	entry)
{
	register queue_t	bucket;

	bucket = &vm_object_hashtable[vm_object_hash(entry->pager)];

	queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link);
}

static vm_object_hash_entry_t
vm_object_hash_entry_alloc(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;

	entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone);
	entry->pager = pager;
	entry->object = VM_OBJECT_NULL;
	entry->waiting = FALSE;

	return(entry);
}

void
vm_object_hash_entry_free(
	vm_object_hash_entry_t	entry)
{
	zfree(vm_object_hash_zone, entry);
}

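/*
 * Illustrative sketch (not part of the original source, not compiled):
 * the lifecycle of a hash entry for some pager.  Insertion and removal
 * happen under the cache lock; lookup with remove_entry == TRUE both
 * finds and unlinks the entry, as vm_object_pager_wakeup() does below.
 */
#if 0
	vm_object_hash_entry_t	entry;

	vm_object_cache_lock();
	entry = vm_object_hash_entry_alloc(pager);
	vm_object_hash_insert(entry);
	vm_object_cache_unlock();
	/* ... later, when the association is torn down ... */
	vm_object_cache_lock();
	entry = vm_object_hash_lookup(pager, TRUE);	/* also removes it */
	vm_object_cache_unlock();
	if (entry != VM_OBJECT_HASH_ENTRY_NULL)
		vm_object_hash_entry_free(entry);
#endif
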
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

__private_extern__ void
_vm_object_allocate(
	vm_object_size_t	size,
	vm_object_t		object)
{
	XPR(XPR_VM_OBJECT,
		"vm_object_allocate, object 0x%X size 0x%X\n",
		(integer_t)object, size, 0,0,0);

	*object = vm_object_template;
	queue_init(&object->memq);
	queue_init(&object->msr_q);
#ifdef UPL_DEBUG
	queue_init(&object->uplq);
#endif /* UPL_DEBUG */
	vm_object_lock_init(object);
	object->size = size;
}

__private_extern__ vm_object_t
vm_object_allocate(
	vm_object_size_t	size)
{
	register vm_object_t object;

	object = (vm_object_t) zalloc(vm_object_zone);

//	dbgLog(object, size, 0, 2);	/* (TEST/DEBUG) */

	if (object != VM_OBJECT_NULL)
		_vm_object_allocate(size, object);

	return object;
}

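/*
 * Illustrative sketch (not part of the original source, not compiled):
 * typical use of vm_object_allocate().  The new object carries one
 * reference, which vm_object_deallocate() (below) eventually releases.
 */
#if 0
	vm_object_t	object;

	object = vm_object_allocate((vm_object_size_t)PAGE_SIZE);
	if (object == VM_OBJECT_NULL)
		panic("vm_object_allocate: zone exhausted");
	/* ... enter pages or map the object ... */
	vm_object_deallocate(object);	/* drop the allocation reference */
#endif
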
/*
 *	vm_object_bootstrap:
 *
 *	Initialize the VM objects module.
 */
__private_extern__ void
vm_object_bootstrap(void)
{
	register int	i;

	vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object),
				round_page_32(512*1024),
				round_page_32(12*1024),
				"vm objects");

	queue_init(&vm_object_cached_list);
	mutex_init(&vm_object_cached_lock_data, 0);

	vm_object_hash_zone =
		zinit((vm_size_t) sizeof (struct vm_object_hash_entry),
		      round_page_32(512*1024),
		      round_page_32(12*1024),
		      "vm object hash entries");

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);

	/*
	 *	Fill in a template object, for quick initialization
	 */

	/* memq; Lock; init after allocation */
	vm_object_template.size = 0;
	vm_object_template.memq_hint = VM_PAGE_NULL;
	vm_object_template.ref_count = 1;
#if	TASK_SWAPPER
	vm_object_template.res_count = 1;
#endif	/* TASK_SWAPPER */
	vm_object_template.resident_page_count = 0;
	vm_object_template.copy = VM_OBJECT_NULL;
	vm_object_template.shadow = VM_OBJECT_NULL;
	vm_object_template.shadow_offset = (vm_object_offset_t) 0;
	vm_object_template.cow_hint = ~(vm_offset_t)0;
	vm_object_template.true_share = FALSE;

	vm_object_template.pager = MEMORY_OBJECT_NULL;
	vm_object_template.paging_offset = 0;
	vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL;
	/* msr_q; init after allocation */

	vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC;
	vm_object_template.absent_count = 0;
	vm_object_template.paging_in_progress = 0;

	/* Begin bitfields */
	vm_object_template.all_wanted = 0; /* all bits FALSE */
	vm_object_template.pager_created = FALSE;
	vm_object_template.pager_initialized = FALSE;
	vm_object_template.pager_ready = FALSE;
	vm_object_template.pager_trusted = FALSE;
	vm_object_template.can_persist = FALSE;
	vm_object_template.internal = TRUE;
	vm_object_template.temporary = TRUE;
	vm_object_template.private = FALSE;
	vm_object_template.pageout = FALSE;
	vm_object_template.alive = TRUE;
	vm_object_template.purgable = VM_OBJECT_NONPURGABLE;
	vm_object_template.silent_overwrite = FALSE;
	vm_object_template.advisory_pageout = FALSE;
	vm_object_template.shadowed = FALSE;
	vm_object_template.terminating = FALSE;
	vm_object_template.shadow_severed = FALSE;
	vm_object_template.phys_contiguous = FALSE;
	vm_object_template.nophyscache = FALSE;
	/* End bitfields */

	/* cache bitfields */
	vm_object_template.wimg_bits = VM_WIMG_DEFAULT;

	/* cached_list; init after allocation */
	vm_object_template.last_alloc = (vm_object_offset_t) 0;
	vm_object_template.cluster_size = 0;
#if	MACH_PAGEMAP
	vm_object_template.existence_map = VM_EXTERNAL_NULL;
#endif	/* MACH_PAGEMAP */
#if	MACH_ASSERT
	vm_object_template.paging_object = VM_OBJECT_NULL;
#endif	/* MACH_ASSERT */

	/*
	 *	Initialize the "kernel object"
	 */

	kernel_object = &kernel_object_store;

/*
 * Note that in the following size specifications, we need to add 1 because
 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
 */

#ifdef ppc
	_vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
			kernel_object);
#else
	_vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
			kernel_object);
#endif
	kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	/*
	 *	Initialize the "submap object".  Make it as large as the
	 *	kernel object so that no limit is imposed on submap sizes.
	 */

	vm_submap_object = &vm_submap_object_store;
#ifdef ppc
	_vm_object_allocate((vm_last_addr - VM_MIN_KERNEL_ADDRESS) + 1,
			vm_submap_object);
#else
	_vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1,
			vm_submap_object);
#endif
	vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	/*
	 * Create an "extra" reference to this object so that we never
	 * try to deallocate it; zfree doesn't like to be called with
	 * non-zone memory.
	 */
	vm_object_reference(vm_submap_object);

#if	MACH_PAGEMAP
	vm_external_module_initialize();
#endif	/* MACH_PAGEMAP */
}

__private_extern__ void
vm_object_init(void)
{
	/*
	 *	Finish initializing the kernel object.
	 */
}

/* remove the typedef below when emergency work-around is taken out */
typedef struct vnode_pager {
	memory_object_t	pager;
	memory_object_t	pager_handle;	/* pager */
	memory_object_control_t	control_handle;	/* memory object's control handle */
	void		*vnode_handle;	/* vnode handle */
} *vnode_pager_t;

#define	MIGHT_NOT_CACHE_SHADOWS		1
#if	MIGHT_NOT_CACHE_SHADOWS
static int cache_shadows = TRUE;
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
__private_extern__ void
vm_object_deallocate(
	register vm_object_t	object)
{
	boolean_t	retry_cache_trim = FALSE;
	vm_object_t	shadow = VM_OBJECT_NULL;

//	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
//	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */


	while (object != VM_OBJECT_NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */
		for (;;) {
			vm_object_cache_lock();

			/*
			 * if we try to take a regular lock here
			 * we risk deadlocking against someone
			 * holding a lock on this object while
			 * trying to vm_object_deallocate a different
			 * object
			 */
			if (vm_object_lock_try(object))
				break;
			vm_object_cache_unlock();
			mutex_pause();	/* wait a bit */
		}
		assert(object->ref_count > 0);

		/*
		 *	If the object has a named reference, and only
		 *	that reference would remain, inform the pager
		 *	about the last "mapping" reference going away.
		 */
		if ((object->ref_count == 2) && (object->named)) {
			memory_object_t	pager = object->pager;

			/* Notify the Pager that there are no */
			/* more mappers for this object */

			if (pager != MEMORY_OBJECT_NULL) {
				vm_object_unlock(object);
				vm_object_cache_unlock();

				memory_object_unmap(pager);

				for (;;) {
					vm_object_cache_lock();

					/*
					 * if we try to take a regular lock here
					 * we risk deadlocking against someone
					 * holding a lock on this object while
					 * trying to vm_object_deallocate a different
					 * object
					 */
					if (vm_object_lock_try(object))
						break;
					vm_object_cache_unlock();
					mutex_pause();	/* wait a bit */
				}
				assert(object->ref_count > 0);
			}
		}

		/*
		 *	Lose the reference. If other references
		 *	remain, then we are done, unless we need
		 *	to retry a cache trim.
		 *	If it is the last reference, then keep it
		 *	until any pending initialization is completed.
		 */

		/* if the object is terminating, it cannot go into */
		/* the cache and we obviously should not call */
		/* terminate again.  */

		if ((object->ref_count > 1) || object->terminating) {
			object->ref_count--;
			vm_object_res_deallocate(object);
			vm_object_cache_unlock();

			if (object->ref_count == 1 &&
			    object->shadow != VM_OBJECT_NULL) {
				/*
				 * We don't use this VM object anymore.  We
				 * would like to collapse it into its parent(s),
				 * but we don't have any pointers back to these
				 * parent object(s).
				 * But we can try and collapse this object with
				 * its own shadows, in case these are useless
				 * too...
				 */
				vm_object_collapse(object, 0);
			}

			vm_object_unlock(object);
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
			return;
		}

		/*
		 *	We have to wait for initialization
		 *	before destroying or caching the object.
		 */

		if (object->pager_created && ! object->pager_initialized) {
			assert(! object->can_persist);
			vm_object_assert_wait(object,
					      VM_OBJECT_EVENT_INITIALIZED,
					      THREAD_UNINT);
			vm_object_unlock(object);
			vm_object_cache_unlock();
			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}

		/*
		 *	If this object can persist, then enter it in
		 *	the cache. Otherwise, terminate it.
		 *
		 *	NOTE:  Only permanent objects are cached, and
		 *	permanent objects cannot have shadows.  This
		 *	affects the residence counting logic in a minor
		 *	way (can do it in-line, mostly).
		 */

		if ((object->can_persist) && (object->alive)) {
			/*
			 *	Now it is safe to decrement reference count,
			 *	and to return if reference count is > 0.
			 */
			if (--object->ref_count > 0) {
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
				vm_object_cache_unlock();
				if (retry_cache_trim &&
				    ((object = vm_object_cache_trim(TRUE)) !=
				     VM_OBJECT_NULL)) {
					continue;
				}
				return;
			}

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	Remove shadow now if we don't
			 *	want to cache shadows.
			 */
			if (! cache_shadows) {
				shadow = object->shadow;
				object->shadow = VM_OBJECT_NULL;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 *	Enter the object onto the queue of
			 *	cached objects, and deactivate
			 *	all of its pages.
			 */
			assert(object->shadow == VM_OBJECT_NULL);
			VM_OBJ_RES_DECR(object);
			XPR(XPR_VM_OBJECT,
		      "vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
				(integer_t)object,
				(integer_t)vm_object_cached_list.next,
				(integer_t)vm_object_cached_list.prev,0,0);

			vm_object_cached_count++;
			if (vm_object_cached_count > vm_object_cached_high)
				vm_object_cached_high = vm_object_cached_count;
			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cache_unlock();
			vm_object_deactivate_all_pages(object);
			vm_object_unlock(object);

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	If we have a shadow that we need
			 *	to deallocate, do so now, remembering
			 *	to trim the cache later.
			 */
			if (! cache_shadows && shadow != VM_OBJECT_NULL) {
				object = shadow;
				retry_cache_trim = TRUE;
				continue;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 *	Trim the cache. If the cache trim
			 *	returns with a shadow for us to deallocate,
			 *	then remember to retry the cache trim
			 *	when we are done deallocating the shadow.
			 *	Otherwise, we are done.
			 */

			object = vm_object_cache_trim(TRUE);
			if (object == VM_OBJECT_NULL) {
				return;
			}
			retry_cache_trim = TRUE;

		} else {
			/*
			 *	This object is not cacheable; terminate it.
			 */
			XPR(XPR_VM_OBJECT,
	 "vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%p ref %d\n",
				(integer_t)object, object->resident_page_count,
				object->paging_in_progress,
				(void *)current_thread(),object->ref_count);

			VM_OBJ_RES_DECR(object);	/* XXX ? */
			/*
			 *	Terminate this object. If it had a shadow,
			 *	then deallocate it; otherwise, if we need
			 *	to retry a cache trim, do so now; otherwise,
			 *	we are done. "pageout" objects have a shadow,
			 *	but maintain a "paging reference" rather than
			 *	a normal reference.
			 */
			shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
			if (vm_object_terminate(object) != KERN_SUCCESS) {
				return;
			}
			if (shadow != VM_OBJECT_NULL) {
				object = shadow;
				continue;
			}
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
			return;
		}
	}
	assert(! retry_cache_trim);
}

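/*
 * Illustrative sketch (not part of the original source, not compiled):
 * reference pairing.  Every vm_object_reference() (or allocation) must
 * be balanced by exactly one vm_object_deallocate(); the last release
 * either caches or terminates the object, as decided above.
 */
#if 0
	vm_object_reference(object);	/* take a reference */
	/* ... use the object without holding its lock ... */
	vm_object_deallocate(object);	/* give it back */
#endif
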
/*
 *	Check to see whether we really need to trim
 *	down the cache. If so, remove an object from
 *	the cache, terminate it, and repeat.
 *
 *	Called with, and returns with, cache lock unlocked.
 */
vm_object_t
vm_object_cache_trim(
	boolean_t called_from_vm_object_deallocate)
{
	register vm_object_t object = VM_OBJECT_NULL;
	vm_object_t shadow;

	for (;;) {

		/*
		 *	If we no longer need to trim the cache,
		 *	then we are done.
		 */

		vm_object_cache_lock();
		if (vm_object_cached_count <= vm_object_cached_max) {
			vm_object_cache_unlock();
			return VM_OBJECT_NULL;
		}

		/*
		 *	We must trim down the cache, so remove
		 *	the first object in the cache.
		 */
		XPR(XPR_VM_OBJECT,
		"vm_object_cache_trim: removing from front of cache (%x, %x)\n",
			(integer_t)vm_object_cached_list.next,
			(integer_t)vm_object_cached_list.prev, 0, 0, 0);

		object = (vm_object_t) queue_first(&vm_object_cached_list);
		if (object == (vm_object_t) &vm_object_cached_list) {
			/*
			 * Something's wrong with the calling parameter or
			 * the value of vm_object_cached_count; just fix
			 * and return.
			 */
			if (vm_object_cached_max < 0)
				vm_object_cached_max = 0;
			vm_object_cached_count = 0;
			vm_object_cache_unlock();
			return VM_OBJECT_NULL;
		}
		vm_object_lock(object);
		queue_remove(&vm_object_cached_list, object, vm_object_t,
			     cached_list);
		vm_object_cached_count--;

		/*
		 *	Since this object is in the cache, we know
		 *	that it is initialized and has no references.
		 *	Take a reference to avoid recursive deallocations.
		 */

		assert(object->pager_initialized);
		assert(object->ref_count == 0);
		object->ref_count++;

		/*
		 *	Terminate the object.
		 *	If the object had a shadow, we let vm_object_deallocate
		 *	deallocate it. "pageout" objects have a shadow, but
		 *	maintain a "paging reference" rather than a normal
		 *	reference.
		 *	(We are careful here to limit recursion.)
		 */
		shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
		if (vm_object_terminate(object) != KERN_SUCCESS)
			continue;
		if (shadow != VM_OBJECT_NULL) {
			if (called_from_vm_object_deallocate) {
				return shadow;
			} else {
				vm_object_deallocate(shadow);
			}
		}
	}
}

boolean_t	vm_object_terminate_remove_all = FALSE;

/*
 *	Routine:	vm_object_terminate
 *	Purpose:
 *		Free all resources associated with a vm_object.
 *	In/out conditions:
 *		Upon entry, the object must be locked,
 *		and the object must have exactly one reference.
 *
 *		The shadow object reference is left alone.
 *
 *		The object must be unlocked if it is found that pages
 *		must be flushed to a backing object.  If someone
 *		manages to map the object while it is being flushed
 *		the object is returned unlocked and unchanged.  Otherwise,
 *		upon exit, the cache will be unlocked, and the
 *		object will cease to exist.
 */
static kern_return_t
vm_object_terminate(
	register vm_object_t	object)
{
	memory_object_t		pager;
	register vm_page_t	p;
	vm_object_t		shadow_object;

	XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
		(integer_t)object, object->ref_count, 0, 0, 0);

	if (!object->pageout && (!object->temporary || object->can_persist)
			&& (object->pager != NULL || object->shadow_severed)) {
		vm_object_cache_unlock();
		while (!queue_empty(&object->memq)) {
			/*
			 * Clear pager_trusted bit so that the pages get yanked
			 * out of the object instead of cleaned in place.  This
			 * prevents a deadlock in XMM and makes more sense anyway.
			 */
			object->pager_trusted = FALSE;

			p = (vm_page_t) queue_first(&object->memq);

			VM_PAGE_CHECK(p);

			if (p->busy || p->cleaning) {
				if (p->cleaning || p->absent) {
					vm_object_paging_wait(object, THREAD_UNINT);
					continue;
				} else {
					panic("vm_object_terminate.3 0x%x 0x%x", object, p);
				}
			}

			vm_page_lock_queues();
			p->busy = TRUE;
			VM_PAGE_QUEUES_REMOVE(p);
			vm_page_unlock_queues();

			if (p->absent || p->private) {

				/*
				 *	For private pages, VM_PAGE_FREE just
				 *	leaves the page structure around for
				 *	its owner to clean up.  For absent
				 *	pages, the structure is returned to
				 *	the appropriate pool.
				 */

				goto free_page;
			}

			if (p->fictitious)
				panic("vm_object_terminate.4 0x%x 0x%x", object, p);

			if (!p->dirty)
				p->dirty = pmap_is_modified(p->phys_page);

			if ((p->dirty || p->precious) && !p->error && object->alive) {
				vm_pageout_cluster(p); /* flush page */
				vm_object_paging_wait(object, THREAD_UNINT);
				XPR(XPR_VM_OBJECT,
				    "vm_object_terminate restart, object 0x%X ref %d\n",
				    (integer_t)object, object->ref_count, 0, 0, 0);
			} else {
			    free_page:
				VM_PAGE_FREE(p);
			}
		}
		vm_object_unlock(object);
		vm_object_cache_lock();
		vm_object_lock(object);
	}

	/*
	 *	Make sure the object isn't already being terminated
	 */
	if (object->terminating) {
		object->ref_count -= 1;
		assert(object->ref_count > 0);
		vm_object_cache_unlock();
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	/*
	 *	Did somebody get a reference to the object while we were
	 *	cleaning it?
	 */
	if (object->ref_count != 1) {
		object->ref_count -= 1;
		assert(object->ref_count > 0);
		vm_object_res_deallocate(object);
		vm_object_cache_unlock();
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	/*
	 *	Make sure no one can look us up now.
	 */

	object->terminating = TRUE;
	object->alive = FALSE;
	vm_object_remove(object);

	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy.  The reference we hold on the shadow must be dropped
	 *	by our caller.
	 */
	if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
	    !(object->pageout)) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = VM_OBJECT_NULL;
		vm_object_unlock(shadow_object);
	}

	/*
	 *	The pageout daemon might be playing with our pages.
	 *	Now that the object is dead, it won't touch any more
	 *	pages, but some pages might already be on their way out.
	 *	Hence, we wait until the active paging activities have ceased
	 *	before we break the association with the pager itself.
	 */
	while (object->paging_in_progress != 0) {
		vm_object_cache_unlock();
		vm_object_wait(object,
			       VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
			       THREAD_UNINT);
		vm_object_cache_lock();
		vm_object_lock(object);
	}

	pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;

	if (pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_control);
	vm_object_cache_unlock();

	object->ref_count--;
#if	TASK_SWAPPER
	assert(object->res_count == 0);
#endif	/* TASK_SWAPPER */

	assert(object->ref_count == 0);

	/*
	 *	Clean or free the pages, as appropriate.
	 *	It is possible for us to find busy/absent pages,
	 *	if some faults on this object were aborted.
	 */
	if (object->pageout) {
		assert(shadow_object != VM_OBJECT_NULL);
		assert(shadow_object == object->shadow);

		vm_pageout_object_terminate(object);

	} else if ((object->temporary && !object->can_persist) ||
		   (pager == MEMORY_OBJECT_NULL)) {
		while (!queue_empty(&object->memq)) {
			p = (vm_page_t) queue_first(&object->memq);

			VM_PAGE_CHECK(p);
			VM_PAGE_FREE(p);
		}
	} else if (!queue_empty(&object->memq)) {
		panic("vm_object_terminate: queue just emptied isn't");
	}

	assert(object->paging_in_progress == 0);
	assert(object->ref_count == 0);

	/*
	 *	If the pager has not already been released by
	 *	vm_object_destroy, we need to terminate it and
	 *	release our reference to it here.
	 */
	if (pager != MEMORY_OBJECT_NULL) {
		vm_object_unlock(object);
		vm_object_release_pager(pager);
		vm_object_lock(object);
	}

	/* kick off anyone waiting on terminating */
	object->terminating = FALSE;
	vm_object_paging_begin(object);
	vm_object_paging_end(object);
	vm_object_unlock(object);

#if	MACH_PAGEMAP
	vm_external_destroy(object->existence_map, object->size);
#endif	/* MACH_PAGEMAP */

	/*
	 *	Free the space for the object.
	 */
	zfree(vm_object_zone, object);
	return KERN_SUCCESS;
}

/*
 *	Routine:	vm_object_pager_wakeup
 *	Purpose:	Wake up anyone waiting for termination of a pager.
 */

static void
vm_object_pager_wakeup(
	memory_object_t	pager)
{
	vm_object_hash_entry_t	entry;
	boolean_t		waiting = FALSE;

	/*
	 *	If anyone was waiting for the memory_object_terminate
	 *	to be queued, wake them up now.
	 */
	vm_object_cache_lock();
	entry = vm_object_hash_lookup(pager, TRUE);
	if (entry != VM_OBJECT_HASH_ENTRY_NULL)
		waiting = entry->waiting;
	vm_object_cache_unlock();
	if (entry != VM_OBJECT_HASH_ENTRY_NULL) {
		if (waiting)
			thread_wakeup((event_t) pager);
		vm_object_hash_entry_free(entry);
	}
}

/*
 *	Routine:	vm_object_release_pager
 *	Purpose:	Terminate the pager and, upon completion,
 *			release our last reference to it.
 *			Just like memory_object_terminate, except
 *			that we wake up anyone blocked in vm_object_enter
 *			waiting for the termination message to be queued
 *			before calling memory_object_init.
 */
static void
vm_object_release_pager(
	memory_object_t	pager)
{

	/*
	 *	Terminate the pager.
	 */

	(void) memory_object_terminate(pager);

	/*
	 *	Wakeup anyone waiting for this terminate
	 */
	vm_object_pager_wakeup(pager);

	/*
	 *	Release reference to pager.
	 */
	memory_object_deallocate(pager);
}

/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		Shut down a VM object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
vm_object_destroy(
	vm_object_t		object,
	__unused kern_return_t	reason)
{
	memory_object_t		old_pager;

	if (object == VM_OBJECT_NULL)
		return(KERN_SUCCESS);

	/*
	 *	Remove the pager association immediately.
	 *
	 *	This will prevent the memory manager from further
	 *	meddling.  [If it wanted to flush data or make
	 *	other changes, it should have done so before performing
	 *	the destroy call.]
	 */

	vm_object_cache_lock();
	vm_object_lock(object);
	object->can_persist = FALSE;
	object->named = FALSE;
	object->alive = FALSE;

	/*
	 *	Rip out the pager from the vm_object now...
	 */

	vm_object_remove(object);
	old_pager = object->pager;
	object->pager = MEMORY_OBJECT_NULL;
	if (old_pager != MEMORY_OBJECT_NULL)
		memory_object_control_disable(object->pager_control);
	vm_object_cache_unlock();

	/*
	 *	Wait for the existing paging activity (that got
	 *	through before we nulled out the pager) to subside.
	 */

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_unlock(object);

	/*
	 *	Terminate the object now.
	 */
	if (old_pager != MEMORY_OBJECT_NULL) {
		vm_object_release_pager(old_pager);

		/*
		 * JMM - Release the caller's reference.  This assumes the
		 * caller had a reference to release, which is a big (but
		 * currently valid) assumption if this is driven from the
		 * vnode pager (it is holding a named reference when making
		 * this call).
		 */
		vm_object_deallocate(object);

	}
	return(KERN_SUCCESS);
}

/*
 *	vm_object_deactivate_all_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_all_pages(
	register vm_object_t	object)
{
	register vm_page_t	p;

	queue_iterate(&object->memq, p, vm_page_t, listq) {
		vm_page_lock_queues();
		if (!p->busy)
			vm_page_deactivate(p);
		vm_page_unlock_queues();
	}
}

__private_extern__ void
vm_object_deactivate_pages(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page)
{
	vm_object_t	orig_object;
	int		pages_moved = 0;
	int		pages_found = 0;

	/*
	 * entered with object lock held, acquire a paging reference to
	 * prevent the memory_object and control ports from
	 * being destroyed.
	 */
	orig_object = object;

	for (;;) {
		register vm_page_t	m;
		vm_object_offset_t	toffset;
		vm_object_size_t	tsize;

		vm_object_paging_begin(object);
		vm_page_lock_queues();

		for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {

			if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {

				pages_found++;

				if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {

					assert(!m->laundry);

					m->reference = FALSE;
					pmap_clear_reference(m->phys_page);

					if ((kill_page) && (object->internal)) {
						m->precious = FALSE;
						m->dirty = FALSE;
						pmap_clear_modify(m->phys_page);
						vm_external_state_clr(object->existence_map, toffset);
					}
					VM_PAGE_QUEUES_REMOVE(m);

					assert(!m->laundry);
					assert(m->object != kernel_object);
					assert(m->pageq.next == NULL &&
					       m->pageq.prev == NULL);
					if (m->zero_fill) {
						queue_enter_first(
							&vm_page_queue_zf,
							m, vm_page_t, pageq);
					} else {
						queue_enter_first(
							&vm_page_queue_inactive,
							m, vm_page_t, pageq);
					}

					m->inactive = TRUE;
					if (!m->fictitious)
						vm_page_inactive_count++;

					pages_moved++;
				}
			}
		}
		vm_page_unlock_queues();
		vm_object_paging_end(object);

		if (object->shadow) {
			vm_object_t	tmp_object;

			kill_page = 0;

			offset += object->shadow_offset;

			tmp_object = object->shadow;
			vm_object_lock(tmp_object);

			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}

/*
 *	Routine:	vm_object_pmap_protect
 *
 *	Purpose:
 *		Reduces the permission for all physical
 *		pages in the specified object range.
 *
 *		If removing write permission only, it is
 *		sufficient to protect only the pages in
 *		the top-level object; only those pages may
 *		have write permission.
 *
 *		If removing all access, we must follow the
 *		shadow chain from the top-level object to
 *		remove access to all pages in shadowed objects.
 *
 *		The object must *not* be locked.  The object must
 *		be temporary/internal.
 *
 *		If pmap is not NULL, this routine assumes that
 *		the only mappings for the pages are in that
 *		pmap.
 */

__private_extern__ void
vm_object_pmap_protect(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	vm_object_size_t		size,
	pmap_t				pmap,
	vm_map_offset_t			pmap_start,
	vm_prot_t			prot)
{
	if (object == VM_OBJECT_NULL)
		return;
	size = vm_object_round_page(size);
	offset = vm_object_trunc_page(offset);

	vm_object_lock(object);

	assert(object->internal);

	while (TRUE) {
		if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
			vm_object_unlock(object);
			pmap_protect(pmap, pmap_start, pmap_start + size, prot);
			return;
		}

		/*
		 * If the range is large relative to the resident page
		 * count, iterate over the resident-page list; otherwise
		 * an inverse page lookup by offset will be faster.
		 */
		if (ptoa_64(object->resident_page_count / 4) < size) {
			vm_page_t		p;
			vm_object_offset_t	end;

			end = offset + size;

			if (pmap != PMAP_NULL) {
				queue_iterate(&object->memq, p, vm_page_t, listq) {
					if (!p->fictitious &&
					    (offset <= p->offset) && (p->offset < end)) {
						vm_map_offset_t start;

						start = pmap_start + p->offset - offset;
						pmap_protect(pmap, start, start + PAGE_SIZE_64, prot);
					}
				}
			} else {
				queue_iterate(&object->memq, p, vm_page_t, listq) {
					if (!p->fictitious &&
					    (offset <= p->offset) && (p->offset < end)) {

						pmap_page_protect(p->phys_page,
								  prot & ~p->page_lock);
					}
				}
			}
		} else {
			vm_page_t		p;
			vm_object_offset_t	end;
			vm_object_offset_t	target_off;

			end = offset + size;

			if (pmap != PMAP_NULL) {
				for (target_off = offset;
				     target_off < end;
				     target_off += PAGE_SIZE) {
					p = vm_page_lookup(object, target_off);
					if (p != VM_PAGE_NULL) {
						vm_offset_t start;
						start = pmap_start +
							(vm_offset_t)(p->offset - offset);
						pmap_protect(pmap, start,
							     start + PAGE_SIZE, prot);
					}
				}
			} else {
				for (target_off = offset;
				     target_off < end; target_off += PAGE_SIZE) {
					p = vm_page_lookup(object, target_off);
					if (p != VM_PAGE_NULL) {
						pmap_page_protect(p->phys_page,
								  prot & ~p->page_lock);
					}
				}
			}
		}

		if (prot == VM_PROT_NONE) {
			/*
			 * Must follow shadow chain to remove access
			 * to pages in shadowed objects.
			 */
			register vm_object_t	next_object;

			next_object = object->shadow;
			if (next_object != VM_OBJECT_NULL) {
				offset += object->shadow_offset;
				vm_object_lock(next_object);
				vm_object_unlock(object);
				object = next_object;
			}
			else {
				/*
				 * End of chain - we are done.
				 */
				break;
			}
		}
		else {
			/*
			 * Pages in shadowed objects may never have
			 * write permission - we may stop here.
			 */
			break;
		}
	}

	vm_object_unlock(object);
}

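/*
 * Illustrative sketch (not part of the original source, not compiled):
 * stripping write permission from a range about to be shared
 * copy-on-write.  "map", "offset", "size" and "start" are placeholder
 * values; passing PMAP_NULL instead of map->pmap would force the
 * pmap_page_protect() path over each physical page.
 */
#if 0
	vm_object_pmap_protect(object,
			       offset,		/* start within the object */
			       size,		/* length of the range */
			       map->pmap,	/* the only pmap with mappings */
			       start,		/* where the range is mapped */
			       VM_PROT_READ);	/* remove write permission */
#endif
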
/*
 *	Routine:	vm_object_copy_slowly
 *
 *	Description:
 *		Copy the specified range of the source
 *		virtual memory object without using
 *		protection-based optimizations (such
 *		as copy-on-write).  The pages in the
 *		region are actually copied.
 *
 *	In/out conditions:
 *		The caller must hold a reference and a lock
 *		for the source virtual memory object.  The source
 *		object will be returned *unlocked*.
 *
 *	Results:
 *		If the copy is completed successfully, KERN_SUCCESS is
 *		returned.  If the caller asserted the interruptible
 *		argument, and an interruption occurred while waiting
 *		for a user-generated event, MACH_SEND_INTERRUPTED is
 *		returned.  Other values may be returned to indicate
 *		hard errors during the copy operation.
 *
 *		A new virtual memory object is returned in a
 *		parameter (_result_object).  The contents of this
 *		new object, starting at a zero offset, are a copy
 *		of the source memory region.  In the event of
 *		an error, this parameter will contain the value
 *		VM_OBJECT_NULL.
 */
__private_extern__ kern_return_t
vm_object_copy_slowly(
	register vm_object_t	src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	boolean_t		interruptible,
	vm_object_t		*_result_object)	/* OUT */
{
	vm_object_t		new_object;
	vm_object_offset_t	new_offset;

	vm_object_offset_t	src_lo_offset = src_offset;
	vm_object_offset_t	src_hi_offset = src_offset + size;

	XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n",
	    src_object, src_offset, size, 0, 0);

	if (size == 0) {
		vm_object_unlock(src_object);
		*_result_object = VM_OBJECT_NULL;
		return(KERN_INVALID_ARGUMENT);
	}

	/*
	 *	Prevent destruction of the source object while we copy.
	 */

	assert(src_object->ref_count > 0);
	src_object->ref_count++;
	VM_OBJ_RES_INCR(src_object);
	vm_object_unlock(src_object);

	/*
	 *	Create a new object to hold the copied pages.
	 *	A few notes:
	 *		We fill the new object starting at offset 0,
	 *		regardless of the input offset.
	 *		We don't bother to lock the new object within
	 *		this routine, since we have the only reference.
	 */

	new_object = vm_object_allocate(size);
	new_offset = 0;
	vm_object_lock(new_object);

	assert(size == trunc_page_64(size));	/* Will the loop terminate? */

	for ( ;
	    size != 0 ;
	    src_offset += PAGE_SIZE_64,
	    new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
	    ) {
		vm_page_t	new_page;
		vm_fault_return_t result;

		while ((new_page = vm_page_alloc(new_object, new_offset))
				== VM_PAGE_NULL) {
			if (!vm_page_wait(interruptible)) {
				vm_object_unlock(new_object);
				vm_object_deallocate(new_object);
				vm_object_deallocate(src_object);
				*_result_object = VM_OBJECT_NULL;
				return(MACH_SEND_INTERRUPTED);
			}
		}

		do {
			vm_prot_t	prot = VM_PROT_READ;
			vm_page_t	_result_page;
			vm_page_t	top_page;
			register
			vm_page_t	result_page;
			kern_return_t	error_code;

			vm_object_lock(src_object);
			vm_object_paging_begin(src_object);

			XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0);
			result = vm_fault_page(src_object, src_offset,
				VM_PROT_READ, FALSE, interruptible,
				src_lo_offset, src_hi_offset,
				VM_BEHAVIOR_SEQUENTIAL,
				&prot, &_result_page, &top_page,
				(int *)0,
				&error_code, FALSE, FALSE, NULL, 0);

			switch(result) {
				case VM_FAULT_SUCCESS:
					result_page = _result_page;

					/*
					 *	We don't need to hold the object
					 *	lock -- the busy page will be enough.
					 *	[We don't care about picking up any
					 *	new modifications.]
					 *
					 *	Copy the page to the new object.
					 *
					 *	POLICY DECISION:
					 *		If result_page is clean,
					 *		we could steal it instead
					 *		of copying.
					 */

					vm_object_unlock(result_page->object);
					vm_page_copy(result_page, new_page);

					/*
					 *	Let go of both pages (make them
					 *	not busy, perform wakeup, activate).
					 */

					new_page->busy = FALSE;
					new_page->dirty = TRUE;
					vm_object_lock(result_page->object);
					PAGE_WAKEUP_DONE(result_page);

					vm_page_lock_queues();
					if (!result_page->active &&
					    !result_page->inactive)
						vm_page_activate(result_page);
					vm_page_activate(new_page);
					vm_page_unlock_queues();

					/*
					 *	Release paging references and
					 *	top-level placeholder page, if any.
					 */

					vm_fault_cleanup(result_page->object,
							 top_page);

					break;

				case VM_FAULT_RETRY:
					break;

				case VM_FAULT_FICTITIOUS_SHORTAGE:
					vm_page_more_fictitious();
					break;

				case VM_FAULT_MEMORY_SHORTAGE:
					if (vm_page_wait(interruptible))
						break;
					/* fall thru */

				case VM_FAULT_INTERRUPTED:
					vm_page_free(new_page);
					vm_object_unlock(new_object);
					vm_object_deallocate(new_object);
					vm_object_deallocate(src_object);
					*_result_object = VM_OBJECT_NULL;
					return(MACH_SEND_INTERRUPTED);

				case VM_FAULT_MEMORY_ERROR:
					/*
					 * A policy choice:
					 *	(a) ignore pages that we can't
					 *	    copy
					 *	(b) return the null object if
					 *	    any page fails [chosen]
					 */

					vm_page_lock_queues();
					vm_page_free(new_page);
					vm_page_unlock_queues();
					vm_object_unlock(new_object);
					vm_object_deallocate(new_object);
					vm_object_deallocate(src_object);
					*_result_object = VM_OBJECT_NULL;
					return(error_code ? error_code :
						KERN_MEMORY_ERROR);
			}
		} while (result != VM_FAULT_SUCCESS);
	}

	/*
	 *	Lose the extra reference, and return our object.
	 */

	vm_object_unlock(new_object);
	vm_object_deallocate(src_object);
	*_result_object = new_object;
	return(KERN_SUCCESS);
}

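/*
 * Illustrative sketch (not part of the original source, not compiled):
 * calling vm_object_copy_slowly().  "src_object", "src_offset" and
 * "size" are placeholders; per the conditions above, the source must
 * be locked and referenced by the caller and comes back unlocked.
 */
#if 0
	vm_object_t	copy_object;
	kern_return_t	kr;

	vm_object_lock(src_object);
	kr = vm_object_copy_slowly(src_object, src_offset, size,
				   FALSE,		/* not interruptible */
				   &copy_object);
	if (kr != KERN_SUCCESS)
		return kr;	/* copy_object is VM_OBJECT_NULL here */
#endif
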
/*
 *	Routine:	vm_object_copy_quickly
 *
 *	Purpose:
 *		Copy the specified range of the source virtual
 *		memory object, if it can be done without waiting
 *		for user-generated events.
 *
 *	Results:
 *		If the copy is successful, the copy is returned in
 *		the arguments; otherwise, the arguments are not
 *		affected.
 *
 *	In/out conditions:
 *		The object should be unlocked on entry and exit.
 */

/*ARGSUSED*/
__private_extern__ boolean_t
vm_object_copy_quickly(
	vm_object_t		*_object,		/* INOUT */
	__unused vm_object_offset_t	offset,		/* IN */
	__unused vm_object_size_t	size,		/* IN */
	boolean_t		*_src_needs_copy,	/* OUT */
	boolean_t		*_dst_needs_copy)	/* OUT */
{
	vm_object_t	object = *_object;
	memory_object_copy_strategy_t copy_strategy;

	XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n",
	    *_object, offset, size, 0, 0);
	if (object == VM_OBJECT_NULL) {
		*_src_needs_copy = FALSE;
		*_dst_needs_copy = FALSE;
		return(TRUE);
	}

	vm_object_lock(object);

	copy_strategy = object->copy_strategy;

	switch (copy_strategy) {
	case MEMORY_OBJECT_COPY_SYMMETRIC:

		/*
		 *	Symmetric copy strategy.
		 *	Make another reference to the object.
		 *	Leave object/offset unchanged.
		 */

		assert(object->ref_count > 0);
		object->ref_count++;
		vm_object_res_reference(object);
		object->shadowed = TRUE;
		vm_object_unlock(object);

		/*
		 *	Both source and destination must make
		 *	shadows, and the source must be made
		 *	read-only if not already.
		 */

		*_src_needs_copy = TRUE;
		*_dst_needs_copy = TRUE;

		break;

	case MEMORY_OBJECT_COPY_DELAY:
		vm_object_unlock(object);
		return(FALSE);

	default:
		vm_object_unlock(object);
		return(FALSE);
	}
	return(TRUE);
}

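/*
 * Illustrative sketch (not part of the original source, not compiled):
 * acting on the needs-copy results of vm_object_copy_quickly() under
 * the symmetric strategy.  Both sides must shadow before either side
 * maps the object writable; the protection step could use
 * vm_object_pmap_protect() as defined earlier.
 */
#if 0
	boolean_t	src_needs_copy, dst_needs_copy;

	if (vm_object_copy_quickly(&object, offset, size,
				   &src_needs_copy, &dst_needs_copy)) {
		if (src_needs_copy) {
			/* write-protect existing mappings of "object" */
		}
		if (dst_needs_copy) {
			/* mark the new mapping to shadow on first write */
		}
	}
#endif
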
static int copy_call_count = 0;
static int copy_call_sleep_count = 0;
static int copy_call_restart_count = 0;

/*
 *	Routine:	vm_object_copy_call [internal]
 *
 *	Description:
 *		Copy the source object (src_object), using the
 *		user-managed copy algorithm.
 *
 *	In/out conditions:
 *		The source object must be locked on entry.  It
 *		will be *unlocked* on exit.
 *
 *	Results:
 *		If the copy is successful, KERN_SUCCESS is returned.
 *		A new object that represents the copied virtual
 *		memory is returned in a parameter (*_result_object).
 *		If the return value indicates an error, this parameter
 *		is not valid.
 */
static kern_return_t
vm_object_copy_call(
	vm_object_t		src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t		*_result_object)	/* OUT */
{
	kern_return_t	kr;
	vm_object_t	copy;
	boolean_t	check_ready = FALSE;

	/*
	 *	If a copy is already in progress, wait and retry.
	 *
	 *	XXX
	 *	Consider making this call interruptible, as Mike
	 *	intended it to be.
	 *
	 *	XXXO
	 *	Need a counter or version or something to allow
	 *	us to use the copy that the currently requesting
	 *	thread is obtaining -- is it worth adding to the
	 *	vm object structure? Depends how common this case is.
	 */
1845 copy_call_count++;
1846 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
9bccf70c 1847 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
1c79356b 1848 THREAD_UNINT);
1c79356b
A
1849 copy_call_restart_count++;
1850 }
1851
1852 /*
1853 * Indicate (for the benefit of memory_object_create_copy)
1854 * that we want a copy for src_object. (Note that we cannot
1855 * do a real assert_wait before calling memory_object_copy,
1856 * so we simply set the flag.)
1857 */
1858
1859 vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
1860 vm_object_unlock(src_object);
1861
1862 /*
1863 * Ask the memory manager to give us a memory object
1864 * which represents a copy of the src object.
1865 * The memory manager may give us a memory object
1866 * which we already have, or it may give us a
1867 * new memory object. This memory object will arrive
1868 * via memory_object_create_copy.
1869 */
1870
1871 kr = KERN_FAILURE; /* XXX need to change memory_object.defs */
1872 if (kr != KERN_SUCCESS) {
1873 return kr;
1874 }
1875
1876 /*
1877 * Wait for the copy to arrive.
1878 */
1879 vm_object_lock(src_object);
1880 while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
9bccf70c 1881 vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
1c79356b 1882 THREAD_UNINT);
1c79356b
A
1883 copy_call_sleep_count++;
1884 }
1885Retry:
1886 assert(src_object->copy != VM_OBJECT_NULL);
1887 copy = src_object->copy;
1888 if (!vm_object_lock_try(copy)) {
1889 vm_object_unlock(src_object);
1890 mutex_pause(); /* wait a bit */
1891 vm_object_lock(src_object);
1892 goto Retry;
1893 }
1894 if (copy->size < src_offset+size)
1895 copy->size = src_offset+size;
1896
1897 if (!copy->pager_ready)
1898 check_ready = TRUE;
1899
1900 /*
1901 * Return the copy.
1902 */
1903 *_result_object = copy;
1904 vm_object_unlock(copy);
1905 vm_object_unlock(src_object);
1906
1907 /* Wait for the copy to be ready. */
1908 if (check_ready == TRUE) {
1909 vm_object_lock(copy);
1910 while (!copy->pager_ready) {
9bccf70c 1911 vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
1c79356b
A
1912 }
1913 vm_object_unlock(copy);
1914 }
1915
1916 return KERN_SUCCESS;
1917}
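
/*
 * Editorial note: the "Retry:" sequence above is an instance of a
 * lock-ordering idiom used throughout this file.  When a second
 * object must be locked while another lock is already held, the
 * second lock is merely tried; on failure everything is dropped,
 * the thread pauses briefly, and the sequence restarts.  A minimal
 * sketch of the idiom:
 *
 *	Retry:
 *		vm_object_lock(first);
 *		if (!vm_object_lock_try(second)) {
 *			vm_object_unlock(first);
 *			mutex_pause();		(back off, avoid livelock)
 *			goto Retry;
 *		}
 *		(both locks are held here)
 */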
1918
0b4e3aa0
A
1919static int copy_delayed_lock_collisions = 0;
1920static int copy_delayed_max_collisions = 0;
1921static int copy_delayed_lock_contention = 0;
1922static int copy_delayed_protect_iterate = 0;
1c79356b
A
1923
1924/*
1925 * Routine: vm_object_copy_delayed [internal]
1926 *
1927 * Description:
1928 * Copy the specified virtual memory object, using
1929 * the asymmetric copy-on-write algorithm.
1930 *
1931 * In/out conditions:
55e303ae
A
1932 * The src_object must be locked on entry. It will be unlocked
1933 * on exit - so the caller must also hold a reference to it.
1c79356b
A
1934 *
1935 * This routine will not block waiting for user-generated
1936 * events. It is not interruptible.
1937 */
0b4e3aa0 1938__private_extern__ vm_object_t
1c79356b
A
1939vm_object_copy_delayed(
1940 vm_object_t src_object,
1941 vm_object_offset_t src_offset,
1942 vm_object_size_t size)
1943{
1944 vm_object_t new_copy = VM_OBJECT_NULL;
1945 vm_object_t old_copy;
1946 vm_page_t p;
55e303ae 1947 vm_object_size_t copy_size = src_offset + size;
1c79356b
A
1948
1949 int collisions = 0;
1950 /*
1951 * The user-level memory manager wants to see all of the changes
1952 * to this object, but it has promised not to make any changes on
1953 * its own.
1954 *
1955 * Perform an asymmetric copy-on-write, as follows:
1956 * Create a new object, called a "copy object" to hold
1957 * pages modified by the new mapping (i.e., the copy,
1958 * not the original mapping).
1959 * Record the original object as the backing object for
1960 * the copy object. If the original mapping does not
1961 * change a page, it may be used read-only by the copy.
1962 * Record the copy object in the original object.
1963 * When the original mapping causes a page to be modified,
1964 * it must be copied to a new page that is "pushed" to
1965 * the copy object.
1966 * Mark the new mapping (the copy object) copy-on-write.
1967 * This makes the copy object itself read-only, allowing
1968 * it to be reused if the original mapping makes no
1969 * changes, and simplifying the synchronization required
1970 * in the "push" operation described above.
1971 *
1972 * The copy-on-write is said to be asymmetric because the original
1973 * object is *not* marked copy-on-write. A copied page is pushed
1974 * to the copy object, regardless of which party attempted to modify
1975 * the page.
1976 *
1977 * Repeated asymmetric copy operations may be done. If the
1978 * original object has not been changed since the last copy, its
1979 * copy object can be reused. Otherwise, a new copy object can be
1980 * inserted between the original object and its previous copy
1981 * object. Since any copy object is read-only, this cannot
1982 * affect the contents of the previous copy object.
1983 *
1984 * Note that a copy object is higher in the object tree than the
1985 * original object; therefore, use of the copy object recorded in
1986 * the original object must be done carefully, to avoid deadlock.
1987 */
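
	/*
	 * Editorial sketch (not part of the original source) of the
	 * resulting structure: after a delayed copy of an unchanged
	 * object the old copy object is simply reused; after the
	 * source has changed, a new copy object is inserted, giving
	 *
	 *	old_copy --shadow--> new_copy --shadow--> src_object
	 *
	 * with src_object->copy == new_copy, and every copy object
	 * read-only.
	 */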
1988
1989 Retry:
1c79356b 1990
55e303ae
A
1991 /*
1992 * Wait for paging in progress.
1993 */
1994 if (!src_object->true_share)
1995 vm_object_paging_wait(src_object, THREAD_UNINT);
1996
1c79356b
A
1997 /*
1998 * See whether we can reuse the result of a previous
1999 * copy operation.
2000 */
2001
2002 old_copy = src_object->copy;
2003 if (old_copy != VM_OBJECT_NULL) {
2004 /*
2005 * Try to get the locks (out of order)
2006 */
2007 if (!vm_object_lock_try(old_copy)) {
2008 vm_object_unlock(src_object);
2009 mutex_pause();
2010
2011 /* Heisenberg Rules */
2012 copy_delayed_lock_collisions++;
2013 if (collisions++ == 0)
2014 copy_delayed_lock_contention++;
2015
2016 if (collisions > copy_delayed_max_collisions)
2017 copy_delayed_max_collisions = collisions;
2018
55e303ae 2019 vm_object_lock(src_object);
1c79356b
A
2020 goto Retry;
2021 }
2022
2023 /*
2024 * Determine whether the old copy object has
2025 * been modified.
2026 */
2027
2028 if (old_copy->resident_page_count == 0 &&
2029 !old_copy->pager_created) {
2030 /*
2031 * It has not been modified.
2032 *
2033 * Return another reference to
55e303ae
A
2034 * the existing copy-object if
2035 * we can safely grow it (if
2036 * needed).
de355530 2037 */
1c79356b 2038
55e303ae
A
2039 if (old_copy->size < copy_size) {
2040 /*
2041 * We can't perform a delayed copy if any of the
2042 * pages in the extended range are wired (because
2043 * we can't safely take write permission away from
2044 * wired pages). If the pages aren't wired, then
2045 * go ahead and protect them.
2046 */
2047 copy_delayed_protect_iterate++;
2048 queue_iterate(&src_object->memq, p, vm_page_t, listq) {
2049 if (!p->fictitious &&
2050 p->offset >= old_copy->size &&
2051 p->offset < copy_size) {
2052 if (p->wire_count > 0) {
2053 vm_object_unlock(old_copy);
2054 vm_object_unlock(src_object);
91447636
A
2055
2056 if (new_copy != VM_OBJECT_NULL) {
2057 vm_object_unlock(new_copy);
2058 vm_object_deallocate(new_copy);
2059 }
2060
55e303ae
A
2061 return VM_OBJECT_NULL;
2062 } else {
2063 pmap_page_protect(p->phys_page,
2064 (VM_PROT_ALL & ~VM_PROT_WRITE &
2065 ~p->page_lock));
2066 }
2067 }
2068 }
2069 old_copy->size = copy_size;
2070 }
2071
2072 vm_object_reference_locked(old_copy);
d7e50217
A
2073 vm_object_unlock(old_copy);
2074 vm_object_unlock(src_object);
91447636
A
2075
2076 if (new_copy != VM_OBJECT_NULL) {
2077 vm_object_unlock(new_copy);
2078 vm_object_deallocate(new_copy);
2079 }
2080
55e303ae 2081 return(old_copy);
d7e50217 2082 }
de355530
A
2083
2084 /*
2085 * Adjust the size argument so that the newly-created
2086 * copy object will be large enough to back either the
55e303ae 2087 * old copy object or the new mapping.
de355530 2088 */
55e303ae
A
2089 if (old_copy->size > copy_size)
2090 copy_size = old_copy->size;
2091
2092 if (new_copy == VM_OBJECT_NULL) {
2093 vm_object_unlock(old_copy);
2094 vm_object_unlock(src_object);
2095 new_copy = vm_object_allocate(copy_size);
2096 vm_object_lock(src_object);
2097 vm_object_lock(new_copy);
2098 goto Retry;
2099 }
2100 new_copy->size = copy_size;
1c79356b
A
2101
2102 /*
2103 * The copy-object is always made large enough to
2104 * completely shadow the original object, since
2105 * it may have several users who want to shadow
2106 * the original object at different points.
2107 */
2108
2109 assert((old_copy->shadow == src_object) &&
2110 (old_copy->shadow_offset == (vm_object_offset_t) 0));
2111
55e303ae
A
2112 } else if (new_copy == VM_OBJECT_NULL) {
2113 vm_object_unlock(src_object);
2114 new_copy = vm_object_allocate(copy_size);
2115 vm_object_lock(src_object);
2116 vm_object_lock(new_copy);
2117 goto Retry;
2118 }
2119
2120 /*
2121 * We now have the src object locked, and the new copy object
2122 * allocated and locked (and potentially the old copy locked).
2123 * Before we go any further, make sure we can still perform
2124 * a delayed copy, as the situation may have changed.
2125 *
2126 * Specifically, we can't perform a delayed copy if any of the
2127 * pages in the range are wired (because we can't safely take
2128 * write permission away from wired pages). If the pages aren't
2129 * wired, then go ahead and protect them.
2130 */
2131 copy_delayed_protect_iterate++;
2132 queue_iterate(&src_object->memq, p, vm_page_t, listq) {
2133 if (!p->fictitious && p->offset < copy_size) {
2134 if (p->wire_count > 0) {
2135 if (old_copy)
2136 vm_object_unlock(old_copy);
2137 vm_object_unlock(src_object);
2138 vm_object_unlock(new_copy);
2139 vm_object_deallocate(new_copy);
2140 return VM_OBJECT_NULL;
2141 } else {
2142 pmap_page_protect(p->phys_page,
2143 (VM_PROT_ALL & ~VM_PROT_WRITE &
2144 ~p->page_lock));
2145 }
2146 }
2147 }
2148
2149 if (old_copy != VM_OBJECT_NULL) {
1c79356b
A
2150 /*
2151 * Make the old copy-object shadow the new one.
2152 * It will receive no more pages from the original
2153 * object.
2154 */
2155
2156 src_object->ref_count--; /* remove ref. from old_copy */
2157 assert(src_object->ref_count > 0);
2158 old_copy->shadow = new_copy;
2159 assert(new_copy->ref_count > 0);
2160 new_copy->ref_count++; /* for old_copy->shadow ref. */
2161
2162#if TASK_SWAPPER
2163 if (old_copy->res_count) {
2164 VM_OBJ_RES_INCR(new_copy);
2165 VM_OBJ_RES_DECR(src_object);
2166 }
2167#endif
2168
2169 vm_object_unlock(old_copy); /* done with old_copy */
1c79356b
A
2170 }
2171
2172 /*
2173 * Point the new copy at the existing object.
2174 */
1c79356b
A
2175 new_copy->shadow = src_object;
2176 new_copy->shadow_offset = 0;
2177 new_copy->shadowed = TRUE; /* caller must set needs_copy */
2178 assert(src_object->ref_count > 0);
2179 src_object->ref_count++;
2180 VM_OBJ_RES_INCR(src_object);
2181 src_object->copy = new_copy;
55e303ae 2182 vm_object_unlock(src_object);
1c79356b
A
2183 vm_object_unlock(new_copy);
2184
1c79356b
A
2185 XPR(XPR_VM_OBJECT,
2186 "vm_object_copy_delayed: used copy object %X for source %X\n",
2187 (integer_t)new_copy, (integer_t)src_object, 0, 0, 0);
2188
2189 return(new_copy);
2190}
2191
2192/*
2193 * Routine: vm_object_copy_strategically
2194 *
2195 * Purpose:
2196 * Perform a copy according to the source object's
2197 * declared strategy. This operation may block,
2198 * and may be interrupted.
2199 */
0b4e3aa0 2200__private_extern__ kern_return_t
1c79356b
A
2201vm_object_copy_strategically(
2202 register vm_object_t src_object,
2203 vm_object_offset_t src_offset,
2204 vm_object_size_t size,
2205 vm_object_t *dst_object, /* OUT */
2206 vm_object_offset_t *dst_offset, /* OUT */
2207 boolean_t *dst_needs_copy) /* OUT */
2208{
2209 boolean_t result;
2210 boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */
2211 memory_object_copy_strategy_t copy_strategy;
2212
2213 assert(src_object != VM_OBJECT_NULL);
2214
2215 vm_object_lock(src_object);
2216
2217 /*
2218 * The copy strategy is only valid if the memory manager
2219 * is "ready". Internal objects are always ready.
2220 */
2221
2222 while (!src_object->internal && !src_object->pager_ready) {
9bccf70c 2223 wait_result_t wait_result;
1c79356b 2224
9bccf70c
A
2225 wait_result = vm_object_sleep( src_object,
2226 VM_OBJECT_EVENT_PAGER_READY,
2227 interruptible);
2228 if (wait_result != THREAD_AWAKENED) {
2229 vm_object_unlock(src_object);
1c79356b
A
2230 *dst_object = VM_OBJECT_NULL;
2231 *dst_offset = 0;
2232 *dst_needs_copy = FALSE;
2233 return(MACH_SEND_INTERRUPTED);
2234 }
1c79356b
A
2235 }
2236
2237 copy_strategy = src_object->copy_strategy;
2238
2239 /*
2240 * Use the appropriate copy strategy.
2241 */
2242
2243 switch (copy_strategy) {
55e303ae
A
2244 case MEMORY_OBJECT_COPY_DELAY:
2245 *dst_object = vm_object_copy_delayed(src_object,
2246 src_offset, size);
2247 if (*dst_object != VM_OBJECT_NULL) {
2248 *dst_offset = src_offset;
2249 *dst_needs_copy = TRUE;
2250 result = KERN_SUCCESS;
2251 break;
2252 }
2253 vm_object_lock(src_object);
2254 /* fall thru when delayed copy not allowed */
2255
1c79356b
A
2256 case MEMORY_OBJECT_COPY_NONE:
2257 result = vm_object_copy_slowly(src_object, src_offset, size,
2258 interruptible, dst_object);
2259 if (result == KERN_SUCCESS) {
2260 *dst_offset = 0;
2261 *dst_needs_copy = FALSE;
2262 }
2263 break;
2264
2265 case MEMORY_OBJECT_COPY_CALL:
2266 result = vm_object_copy_call(src_object, src_offset, size,
2267 dst_object);
2268 if (result == KERN_SUCCESS) {
2269 *dst_offset = src_offset;
2270 *dst_needs_copy = TRUE;
2271 }
2272 break;
2273
1c79356b
A
2274 case MEMORY_OBJECT_COPY_SYMMETRIC:
2275 XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n",(natural_t)src_object, src_offset, size, 0, 0);
2276 vm_object_unlock(src_object);
2277 result = KERN_MEMORY_RESTART_COPY;
2278 break;
2279
2280 default:
2281 panic("copy_strategically: bad strategy");
2282 result = KERN_INVALID_ARGUMENT;
2283 }
2284 return(result);
2285}
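
/*
 * Illustrative sketch (editorial): a hypothetical caller of
 * vm_object_copy_strategically(), showing how its OUT parameters
 * and the KERN_MEMORY_RESTART_COPY result are meant to be handled.
 * Names other than the routines are assumptions.
 *
 *	vm_object_t dst;
 *	vm_object_offset_t dst_off;
 *	boolean_t needs_copy;
 *	kern_return_t kr;
 *
 *	kr = vm_object_copy_strategically(src, off, len,
 *					  &dst, &dst_off, &needs_copy);
 *	if (kr == KERN_MEMORY_RESTART_COPY) {
 *		...source is symmetric; restart with
 *		   vm_object_copy_quickly()...
 *	} else if (kr == KERN_SUCCESS && needs_copy) {
 *		...mark the destination mapping copy-on-write...
 *	}
 */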
2286
2287/*
2288 * vm_object_shadow:
2289 *
2290 * Create a new object which is backed by the
2291 * specified existing object range. The source
2292 * object reference is deallocated.
2293 *
2294 * The new object and offset into that object
2295 * are returned in the source parameters.
2296 */
2297boolean_t vm_object_shadow_check = FALSE;
2298
0b4e3aa0 2299__private_extern__ boolean_t
1c79356b
A
2300vm_object_shadow(
2301 vm_object_t *object, /* IN/OUT */
2302 vm_object_offset_t *offset, /* IN/OUT */
2303 vm_object_size_t length)
2304{
2305 register vm_object_t source;
2306 register vm_object_t result;
2307
2308 source = *object;
2309 assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
2310
2311 /*
2312 * Determine if we really need a shadow.
2313 */
2314
2315 if (vm_object_shadow_check && source->ref_count == 1 &&
2316 (source->shadow == VM_OBJECT_NULL ||
2317 source->shadow->copy == VM_OBJECT_NULL))
2318 {
2319 source->shadowed = FALSE;
2320 return FALSE;
2321 }
2322
2323 /*
2324 * Allocate a new object with the given length
2325 */
2326
2327 if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
2328 panic("vm_object_shadow: no object for shadowing");
2329
2330 /*
2331 * The new object shadows the source object, adding
2332 * a reference to it. Our caller changes his reference
2333 * to point to the new object, removing a reference to
2334 * the source object. Net result: no change of reference
2335 * count.
2336 */
2337 result->shadow = source;
2338
2339 /*
2340 * Store the offset into the source object,
2341 * and fix up the offset into the new object.
2342 */
2343
2344 result->shadow_offset = *offset;
2345
2346 /*
2347 * Return the new object and offset.
2348 */
2349
2350 *offset = 0;
2351 *object = result;
2352 return TRUE;
2353}
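
/*
 * Illustrative sketch (editorial): vm_object_shadow() consumes the
 * reference the caller passes in *object and hands back either the
 * original object (FALSE: no shadow needed) or a new shadow (TRUE),
 * so a typical caller just keeps using the IN/OUT values:
 *
 *	vm_object_t obj = ...;			(one reference held)
 *	vm_object_offset_t off = ...;
 *
 *	if (vm_object_shadow(&obj, &off, len)) {
 *		...obj/off now name the new shadow object...
 *	}
 *	...map through obj at off in either case...
 */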
2354
2355/*
2356 * The relationship between vm_object structures and
0b4e3aa0 2357 * the memory_object requires careful synchronization.
1c79356b 2358 *
0b4e3aa0
A
2359 * All associations are created by memory_object_create_named
2360 * for external pagers and vm_object_pager_create for internal
2361 * objects as follows:
2362 *
2363 * pager: the memory_object itself, supplied by
1c79356b
A
2364 * the user requesting a mapping (or the kernel,
2365 * when initializing internal objects); the
2366 * kernel simulates holding send rights by keeping
2367 * a port reference;
0b4e3aa0 2368 *
1c79356b
A
2369 * pager_request:
2370 * the memory object control port,
2371 * created by the kernel; the kernel holds
2372 * receive (and ownership) rights to this
2373 * port, but no other references.
1c79356b
A
2374 *
2375 * When initialization is complete, the "initialized" field
2376 * is asserted. Other mappings using a particular memory object,
2377 * and any references to the vm_object gained through the
2378 * port association must wait for this initialization to occur.
2379 *
2380 * In order to allow the memory manager to set attributes before
2381 * requests (notably virtual copy operations, but also data or
2382 * unlock requests) are made, a "ready" attribute is made available.
2383 * Only the memory manager may affect the value of this attribute.
2384 * Its value does not affect critical kernel functions, such as
2385 * internal object initialization or destruction. [Furthermore,
2386 * memory objects created by the kernel are assumed to be ready
2387 * immediately; the default memory manager need not explicitly
2388 * set the "ready" attribute.]
2389 *
2390 * [Both the "initialized" and "ready" attribute wait conditions
2391 * use the "pager" field as the wait event.]
2392 *
2393 * The port associations can be broken down by any of the
2394 * following routines:
2395 * vm_object_terminate:
2396 * No references to the vm_object remain, and
2397 * the object cannot (or will not) be cached.
2398 * This is the normal case, and is done even
2399 * though one of the other cases has already been
2400 * done.
1c79356b
A
2401 * memory_object_destroy:
2402 * The memory manager has requested that the
0b4e3aa0
A
2403 * kernel relinquish references to the memory
2404 * object. [The memory manager may not want to
2405 * destroy the memory object, but may wish to
2406 * refuse or tear down existing memory mappings.]
2407 *
1c79356b
A
2408 * Each routine that breaks an association must break all of
2409 * them at once. At some later time, that routine must clear
0b4e3aa0 2410 * the pager field and release the memory object references.
1c79356b
A
2411 * [Furthermore, each routine must cope with the simultaneous
2412 * or previous operations of the others.]
2413 *
2414 * In addition to the lock on the object, the vm_object_cache_lock
0b4e3aa0
A
2415 * governs the associations. References gained through the
2416 * association require use of the cache lock.
1c79356b 2417 *
0b4e3aa0 2418 * Because the pager field may be cleared spontaneously, it
1c79356b
A
2419 * cannot be used to determine whether a memory object has
2420 * ever been associated with a particular vm_object. [This
2421 * knowledge is important to the shadow object mechanism.]
2422 * For this reason, an additional "created" attribute is
2423 * provided.
2424 *
0b4e3aa0
A
2425 * During various paging operations, the pager reference found in the
2426 * vm_object must be valid. To prevent this from being released,
1c79356b
A
2427 * (other than being removed, i.e., made null), routines may use
2428 * the vm_object_paging_begin/end routines [actually, macros].
2429 * The implementation uses the "paging_in_progress" and "wanted" fields.
0b4e3aa0 2430 * [Operations that alter the validity of the pager values include the
1c79356b
A
2431 * termination routines and vm_object_collapse.]
2432 */
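
/*
 * Editorial sketch of the paging-reference idiom described above
 * (a sketch, assuming the object is locked on entry):
 *
 *	vm_object_paging_begin(object);	    (pin the pager association)
 *	vm_object_unlock(object);
 *	...issue requests that rely on object->pager...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);	    (may wake waiting threads)
 */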
2433
0b4e3aa0 2434#if 0
91447636
A
2435static void vm_object_abort_activity(
2436 vm_object_t object);
2437
2438/*
2439 * Routine: vm_object_abort_activity [internal use only]
2440 * Purpose:
2441 * Abort paging requests pending on this object.
2442 * In/out conditions:
2443 * The object is locked on entry and exit.
2444 */
2445static void
2446vm_object_abort_activity(
2447 vm_object_t object)
2448{
2449 register
2450 vm_page_t p;
2451 vm_page_t next;
2452
2453 XPR(XPR_VM_OBJECT, "vm_object_abort_activity, object 0x%X\n",
2454 (integer_t)object, 0, 0, 0, 0);
2455
2456 /*
2457 * Abort all activity that would be waiting
2458 * for a result on this memory object.
2459 *
2460 * We could also choose to destroy all pages
2461 * that we have in memory for this object, but
2462 * we don't.
2463 */
2464
2465 p = (vm_page_t) queue_first(&object->memq);
2466 while (!queue_end(&object->memq, (queue_entry_t) p)) {
2467 next = (vm_page_t) queue_next(&p->listq);
2468
2469 /*
2470 * If it's being paged in, destroy it.
2471 * If an unlock has been requested, start it again.
2472 */
2473
2474 if (p->busy && p->absent) {
2475 VM_PAGE_FREE(p);
2476 }
2477 else {
2478 if (p->unlock_request != VM_PROT_NONE)
2479 p->unlock_request = VM_PROT_NONE;
2480 PAGE_WAKEUP(p);
2481 }
2482
2483 p = next;
2484 }
2485
2486 /*
2487 * Wake up threads waiting for the memory object to
2488 * become ready.
2489 */
2490
2491 object->pager_ready = TRUE;
2492 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
2493}
2494
0b4e3aa0
A
2495/*
2496 * Routine: vm_object_pager_dead
2497 *
2498 * Purpose:
2499 * A port is being destroyed, and the IPC kobject code
2500 * can't tell if it represents a pager port or not.
2501 * So this function is called each time it sees a port
2502 * die.
2503 * THIS IS HORRIBLY INEFFICIENT. We should only call
2504 * this routine if we had requested a notification on
2505 * the port.
2506 */
1c79356b 2507
0b4e3aa0
A
2508__private_extern__ void
2509vm_object_pager_dead(
1c79356b
A
2510 ipc_port_t pager)
2511{
2512 vm_object_t object;
2513 vm_object_hash_entry_t entry;
1c79356b
A
2514
2515 /*
2516 * Perform essentially the same operations as in vm_object_lookup,
2517 * except that this time we look up based on the memory_object
2518 * port, not the control port.
2519 */
2520 vm_object_cache_lock();
2521 entry = vm_object_hash_lookup(pager, FALSE);
2522 if (entry == VM_OBJECT_HASH_ENTRY_NULL ||
2523 entry->object == VM_OBJECT_NULL) {
2524 vm_object_cache_unlock();
2525 return;
2526 }
2527
2528 object = entry->object;
2529 entry->object = VM_OBJECT_NULL;
2530
2531 vm_object_lock(object);
2532 if (object->ref_count == 0) {
2533 XPR(XPR_VM_OBJECT_CACHE,
2534 "vm_object_destroy: removing %x from cache, head (%x, %x)\n",
2535 (integer_t)object,
2536 (integer_t)vm_object_cached_list.next,
2537 (integer_t)vm_object_cached_list.prev, 0,0);
2538
2539 queue_remove(&vm_object_cached_list, object,
2540 vm_object_t, cached_list);
2541 vm_object_cached_count--;
2542 }
2543 object->ref_count++;
2544 vm_object_res_reference(object);
2545
2546 object->can_persist = FALSE;
2547
2548 assert(object->pager == pager);
2549
2550 /*
0b4e3aa0 2551 * Remove the pager association.
1c79356b
A
2552 *
2553 * Note that the memory_object itself is dead, so
2554 * we don't bother with it.
2555 */
2556
0b4e3aa0 2557 object->pager = MEMORY_OBJECT_NULL;
1c79356b
A
2558
2559 vm_object_unlock(object);
2560 vm_object_cache_unlock();
2561
2562 vm_object_pager_wakeup(pager);
2563
2564 /*
0b4e3aa0 2565 * Release the pager reference. Note that there's no
1c79356b 2566 * point in trying the memory_object_terminate call
0b4e3aa0
A
2567 * because the memory_object itself is dead. Also
2568 * release the memory_object_control reference, since
2569 * the pager didn't do that either.
1c79356b
A
2570 */
2571
0b4e3aa0
A
2572 memory_object_deallocate(pager);
2573 memory_object_control_deallocate(object->pager_request);
2574
1c79356b
A
2575
2576 /*
2577 * Restart pending page requests
2578 */
2579 vm_object_lock(object);
1c79356b 2580 vm_object_abort_activity(object);
1c79356b
A
2581 vm_object_unlock(object);
2582
2583 /*
2584 * Lose the object reference.
2585 */
2586
2587 vm_object_deallocate(object);
2588}
0b4e3aa0 2589#endif
1c79356b
A
2590
2591/*
2592 * Routine: vm_object_enter
2593 * Purpose:
2594 * Find a VM object corresponding to the given
2595 * pager; if no such object exists, create one,
2596 * and initialize the pager.
2597 */
2598vm_object_t
2599vm_object_enter(
0b4e3aa0 2600 memory_object_t pager,
1c79356b
A
2601 vm_object_size_t size,
2602 boolean_t internal,
2603 boolean_t init,
0b4e3aa0 2604 boolean_t named)
1c79356b
A
2605{
2606 register vm_object_t object;
2607 vm_object_t new_object;
2608 boolean_t must_init;
1c79356b 2609 vm_object_hash_entry_t entry, new_entry;
1c79356b 2610
0b4e3aa0 2611 if (pager == MEMORY_OBJECT_NULL)
1c79356b
A
2612 return(vm_object_allocate(size));
2613
2614 new_object = VM_OBJECT_NULL;
2615 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
2616 must_init = init;
2617
2618 /*
2619 * Look for an object associated with this port.
2620 */
2621
1c79356b 2622 vm_object_cache_lock();
55e303ae 2623 do {
1c79356b
A
2624 entry = vm_object_hash_lookup(pager, FALSE);
2625
55e303ae
A
2626 if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
2627 if (new_object == VM_OBJECT_NULL) {
2628 /*
2629 * We must unlock to create a new object;
2630 * if we do so, we must try the lookup again.
2631 */
2632 vm_object_cache_unlock();
2633 assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
2634 new_entry = vm_object_hash_entry_alloc(pager);
2635 new_object = vm_object_allocate(size);
2636 vm_object_cache_lock();
2637 } else {
2638 /*
2639 * Lookup failed twice, and we have something
2640 * to insert; set the object.
2641 */
2642 vm_object_hash_insert(new_entry);
2643 entry = new_entry;
2644 entry->object = new_object;
2645 new_entry = VM_OBJECT_HASH_ENTRY_NULL;
2646 new_object = VM_OBJECT_NULL;
2647 must_init = TRUE;
2648 }
2649 } else if (entry->object == VM_OBJECT_NULL) {
2650 /*
2651 * If a previous object is being terminated,
2652 * we must wait for the termination message
2653 * to be queued (and lookup the entry again).
2654 */
1c79356b 2655 entry->waiting = TRUE;
55e303ae 2656 entry = VM_OBJECT_HASH_ENTRY_NULL;
1c79356b
A
2657 assert_wait((event_t) pager, THREAD_UNINT);
2658 vm_object_cache_unlock();
91447636 2659 thread_block(THREAD_CONTINUE_NULL);
1c79356b 2660 vm_object_cache_lock();
1c79356b 2661 }
55e303ae 2662 } while (entry == VM_OBJECT_HASH_ENTRY_NULL);
1c79356b
A
2663
2664 object = entry->object;
2665 assert(object != VM_OBJECT_NULL);
2666
2667 if (!must_init) {
2668 vm_object_lock(object);
1c79356b 2669 assert(!internal || object->internal);
0b4e3aa0
A
2670 if (named) {
2671 assert(!object->named);
1c79356b 2672 object->named = TRUE;
0b4e3aa0 2673 }
1c79356b
A
2674 if (object->ref_count == 0) {
2675 XPR(XPR_VM_OBJECT_CACHE,
2676 "vm_object_enter: removing %x from cache, head (%x, %x)\n",
2677 (integer_t)object,
2678 (integer_t)vm_object_cached_list.next,
2679 (integer_t)vm_object_cached_list.prev, 0,0);
2680 queue_remove(&vm_object_cached_list, object,
2681 vm_object_t, cached_list);
2682 vm_object_cached_count--;
2683 }
2684 object->ref_count++;
2685 vm_object_res_reference(object);
2686 vm_object_unlock(object);
2687
2688 VM_STAT(hits++);
2689 }
2690 assert(object->ref_count > 0);
2691
2692 VM_STAT(lookups++);
2693
2694 vm_object_cache_unlock();
2695
2696 XPR(XPR_VM_OBJECT,
2697 "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
2698 (integer_t)pager, (integer_t)object, must_init, 0, 0);
2699
2700 /*
2701 * If we raced to create a vm_object but lost, let's
2702 * throw away ours.
2703 */
2704
2705 if (new_object != VM_OBJECT_NULL)
2706 vm_object_deallocate(new_object);
2707
2708 if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
2709 vm_object_hash_entry_free(new_entry);
2710
2711 if (must_init) {
91447636 2712 memory_object_control_t control;
1c79356b
A
2713
2714 /*
2715 * Allocate request port.
2716 */
2717
91447636
A
2718 control = memory_object_control_allocate(object);
2719 assert (control != MEMORY_OBJECT_CONTROL_NULL);
1c79356b
A
2720
2721 vm_object_lock(object);
91447636 2722 assert(object != kernel_object);
1c79356b
A
2723
2724 /*
0b4e3aa0 2725 * Copy the reference we were given.
1c79356b
A
2726 */
2727
0b4e3aa0 2728 memory_object_reference(pager);
1c79356b
A
2729 object->pager_created = TRUE;
2730 object->pager = pager;
2731 object->internal = internal;
2732 object->pager_trusted = internal;
2733 if (!internal) {
2734 /* copy strategy invalid until set by memory manager */
2735 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
2736 }
91447636 2737 object->pager_control = control;
1c79356b
A
2738 object->pager_ready = FALSE;
2739
1c79356b
A
2740 vm_object_unlock(object);
2741
2742 /*
2743 * Let the pager know we're using it.
2744 */
2745
0b4e3aa0 2746 (void) memory_object_init(pager,
91447636 2747 object->pager_control,
0b4e3aa0 2748 PAGE_SIZE);
1c79356b
A
2749
2750 vm_object_lock(object);
0b4e3aa0
A
2751 if (named)
2752 object->named = TRUE;
1c79356b
A
2753 if (internal) {
2754 object->pager_ready = TRUE;
2755 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
2756 }
2757
2758 object->pager_initialized = TRUE;
2759 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
2760 } else {
2761 vm_object_lock(object);
2762 }
2763
2764 /*
2765 * [At this point, the object must be locked]
2766 */
2767
2768 /*
2769 * Wait for the work above to be done by the first
2770 * thread to map this object.
2771 */
2772
2773 while (!object->pager_initialized) {
9bccf70c 2774 vm_object_sleep(object,
1c79356b
A
2775 VM_OBJECT_EVENT_INITIALIZED,
2776 THREAD_UNINT);
1c79356b
A
2777 }
2778 vm_object_unlock(object);
2779
2780 XPR(XPR_VM_OBJECT,
2781 "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
2782 (integer_t)object, (integer_t)object->pager, internal, 0,0);
2783 return(object);
2784}
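
/*
 * Illustrative sketch (editorial): mapping code that received a
 * memory_object from user space might resolve it to a VM object as
 * follows; vm_object_enter() either finds the existing association
 * or allocates and initializes a new object.  The surrounding names
 * are assumptions.
 *
 *	vm_object_t object;
 *
 *	object = vm_object_enter(pager, map_size,
 *				 FALSE,		(not internal)
 *				 FALSE,		(no forced init)
 *				 TRUE);		(take a named reference)
 *	...object is initialized and carries a new reference...
 */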
2785
2786/*
2787 * Routine: vm_object_pager_create
2788 * Purpose:
2789 * Create a memory object for an internal object.
2790 * In/out conditions:
2791 * The object is locked on entry and exit;
2792 * it may be unlocked within this call.
2793 * Limitations:
2794 * Only one thread may be performing a
2795 * vm_object_pager_create on an object at
2796 * a time. Presumably, only the pageout
2797 * daemon will be using this routine.
2798 */
2799
2800void
2801vm_object_pager_create(
2802 register vm_object_t object)
2803{
0b4e3aa0 2804 memory_object_t pager;
1c79356b
A
2805 vm_object_hash_entry_t entry;
2806#if MACH_PAGEMAP
2807 vm_object_size_t size;
2808 vm_external_map_t map;
2809#endif /* MACH_PAGEMAP */
2810
2811 XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
2812 (integer_t)object, 0,0,0,0);
2813
91447636
A
2814 assert(object != kernel_object);
2815
1c79356b
A
2816 if (memory_manager_default_check() != KERN_SUCCESS)
2817 return;
2818
2819 /*
2820 * Prevent collapse or termination by holding a paging reference
2821 */
2822
2823 vm_object_paging_begin(object);
2824 if (object->pager_created) {
2825 /*
2826 * Someone else got to it first...
2827 * wait for them to finish initializing the ports
2828 */
2829 while (!object->pager_initialized) {
9bccf70c
A
2830 vm_object_sleep(object,
2831 VM_OBJECT_EVENT_INITIALIZED,
2832 THREAD_UNINT);
1c79356b
A
2833 }
2834 vm_object_paging_end(object);
2835 return;
2836 }
2837
2838 /*
2839 * Indicate that a memory object has been assigned
2840 * before dropping the lock, to prevent a race.
2841 */
2842
2843 object->pager_created = TRUE;
2844 object->paging_offset = 0;
2845
2846#if MACH_PAGEMAP
2847 size = object->size;
2848#endif /* MACH_PAGEMAP */
2849 vm_object_unlock(object);
2850
2851#if MACH_PAGEMAP
2852 map = vm_external_create(size);
2853 vm_object_lock(object);
2854 assert(object->size == size);
2855 object->existence_map = map;
2856 vm_object_unlock(object);
2857#endif /* MACH_PAGEMAP */
2858
2859 /*
0b4e3aa0 2860 * Create the [internal] pager, and associate it with this object.
1c79356b 2861 *
0b4e3aa0 2862 * We make the association here so that vm_object_enter()
1c79356b
A
2863 * can look up the object to complete initializing it. No
2864 * user will ever map this object.
2865 */
2866 {
0b4e3aa0 2867 memory_object_default_t dmm;
1c79356b
A
2868 vm_size_t cluster_size;
2869
0b4e3aa0
A
2870 /* acquire a reference for the default memory manager */
2871 dmm = memory_manager_default_reference(&cluster_size);
1c79356b
A
2872 assert(cluster_size >= PAGE_SIZE);
2873
2874 object->cluster_size = cluster_size; /* XXX ??? */
2875 assert(object->temporary);
2876
0b4e3aa0
A
2877 /* create our new memory object */
2878 (void) memory_object_create(dmm, object->size, &pager);
2879
2880 memory_object_default_deallocate(dmm);
1c79356b
A
2881 }
2882
2883 entry = vm_object_hash_entry_alloc(pager);
2884
2885 vm_object_cache_lock();
2886 vm_object_hash_insert(entry);
2887
2888 entry->object = object;
2889 vm_object_cache_unlock();
2890
2891 /*
0b4e3aa0 2892 * A reference was returned by
1c79356b
A
2893 * memory_object_create(), and it is
2894 * copied by vm_object_enter().
2895 */
2896
2897 if (vm_object_enter(pager, object->size, TRUE, TRUE, FALSE) != object)
2898 panic("vm_object_pager_create: mismatch");
2899
2900 /*
0b4e3aa0 2901 * Drop the reference we were passed.
1c79356b 2902 */
0b4e3aa0 2903 memory_object_deallocate(pager);
1c79356b
A
2904
2905 vm_object_lock(object);
2906
2907 /*
2908 * Release the paging reference
2909 */
2910 vm_object_paging_end(object);
2911}
2912
2913/*
2914 * Routine: vm_object_remove
2915 * Purpose:
2916 * Eliminate the pager/object association
2917 * for this pager.
2918 * Conditions:
2919 * The object cache must be locked.
2920 */
0b4e3aa0 2921__private_extern__ void
1c79356b
A
2922vm_object_remove(
2923 vm_object_t object)
2924{
0b4e3aa0 2925 memory_object_t pager;
1c79356b 2926
0b4e3aa0 2927 if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
1c79356b
A
2928 vm_object_hash_entry_t entry;
2929
0b4e3aa0 2930 entry = vm_object_hash_lookup(pager, FALSE);
1c79356b
A
2931 if (entry != VM_OBJECT_HASH_ENTRY_NULL)
2932 entry->object = VM_OBJECT_NULL;
2933 }
2934
1c79356b
A
2935}
2936
2937/*
2938 * Global variables for vm_object_collapse():
2939 *
2940 * Counts for normal collapses and bypasses.
2941 * Debugging variables, to watch or disable collapse.
2942 */
0b4e3aa0
A
2943static long object_collapses = 0;
2944static long object_bypasses = 0;
1c79356b 2945
0b4e3aa0
A
2946static boolean_t vm_object_collapse_allowed = TRUE;
2947static boolean_t vm_object_bypass_allowed = TRUE;
2948
2949static int vm_external_discarded;
2950static int vm_external_collapsed;
1c79356b 2951
91447636
A
2952unsigned long vm_object_collapse_encrypted = 0;
2953
1c79356b 2954/*
0b4e3aa0
A
2955 * Routine: vm_object_do_collapse
2956 * Purpose:
2957 * Collapse an object with the object backing it.
2958 * Pages in the backing object are moved into the
2959 * parent, and the backing object is deallocated.
2960 * Conditions:
2961 * Both objects and the cache are locked; the page
2962 * queues are unlocked.
1c79356b
A
2963 *
2964 */
0b4e3aa0 2965static void
1c79356b
A
2966vm_object_do_collapse(
2967 vm_object_t object,
2968 vm_object_t backing_object)
2969{
2970 vm_page_t p, pp;
2971 vm_object_offset_t new_offset, backing_offset;
2972 vm_object_size_t size;
2973
2974 backing_offset = object->shadow_offset;
2975 size = object->size;
2976
1c79356b
A
2977 /*
2978 * Move all in-memory pages from backing_object
2979 * to the parent. Pages that have been paged out
2980 * will be overwritten by any of the parent's
2981 * pages that shadow them.
2982 */
2983
2984 while (!queue_empty(&backing_object->memq)) {
2985
2986 p = (vm_page_t) queue_first(&backing_object->memq);
2987
2988 new_offset = (p->offset - backing_offset);
2989
2990 assert(!p->busy || p->absent);
91447636 2991
1c79356b
A
2992 /*
2993 * If the parent has a page here, or if
2994 * this page falls outside the parent,
2995 * dispose of it.
2996 *
2997 * Otherwise, move it as planned.
2998 */
2999
3000 if (p->offset < backing_offset || new_offset >= size) {
3001 VM_PAGE_FREE(p);
3002 } else {
91447636
A
3003 /*
3004 * ENCRYPTED SWAP:
3005 * The encryption key includes the "pager" and the
3006 * "paging_offset". These might not be the same in
3007 * the new object, so we can't just move an encrypted
3008 * page from one object to the other. We can't just
3009 * decrypt the page here either, because that would drop
3010 * the object lock.
3011 * The caller should check for encrypted pages before
3012 * attempting to collapse.
3013 */
3014 ASSERT_PAGE_DECRYPTED(p);
3015
1c79356b
A
3016 pp = vm_page_lookup(object, new_offset);
3017 if (pp == VM_PAGE_NULL) {
3018
3019 /*
3020 * Parent now has no page.
3021 * Move the backing object's page up.
3022 */
3023
3024 vm_page_rename(p, object, new_offset);
3025#if MACH_PAGEMAP
3026 } else if (pp->absent) {
3027
3028 /*
3029 * Parent has an absent page...
3030 * it's not being paged in, so
3031 * it must really be missing from
3032 * the parent.
3033 *
3034 * Throw out the absent page...
3035 * any faults looking for that
3036 * page will restart with the new
3037 * one.
3038 */
3039
3040 VM_PAGE_FREE(pp);
3041 vm_page_rename(p, object, new_offset);
3042#endif /* MACH_PAGEMAP */
3043 } else {
3044 assert(! pp->absent);
3045
3046 /*
3047 * Parent object has a real page.
3048 * Throw away the backing object's
3049 * page.
3050 */
3051 VM_PAGE_FREE(p);
3052 }
3053 }
3054 }
3055
55e303ae
A
3056#if !MACH_PAGEMAP
3057 assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL
3058 || (!backing_object->pager_created
3059 && backing_object->pager == MEMORY_OBJECT_NULL));
3060#else
3061 assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL);
3062#endif /* !MACH_PAGEMAP */
1c79356b 3063
0b4e3aa0 3064 if (backing_object->pager != MEMORY_OBJECT_NULL) {
1c79356b
A
3065 vm_object_hash_entry_t entry;
3066
3067 /*
3068 * Move the pager from backing_object to object.
3069 *
3070 * XXX We're only using part of the paging space
3071 * for keeps now... we ought to discard the
3072 * unused portion.
3073 */
3074
55e303ae 3075 assert(!object->paging_in_progress);
1c79356b
A
3076 object->pager = backing_object->pager;
3077 entry = vm_object_hash_lookup(object->pager, FALSE);
3078 assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
3079 entry->object = object;
3080 object->pager_created = backing_object->pager_created;
91447636 3081 object->pager_control = backing_object->pager_control;
1c79356b
A
3082 object->pager_ready = backing_object->pager_ready;
3083 object->pager_initialized = backing_object->pager_initialized;
3084 object->cluster_size = backing_object->cluster_size;
3085 object->paging_offset =
3086 backing_object->paging_offset + backing_offset;
91447636
A
3087 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
3088 memory_object_control_collapse(object->pager_control,
0b4e3aa0 3089 object);
1c79356b
A
3090 }
3091 }
3092
3093 vm_object_cache_unlock();
3094
1c79356b
A
3095#if MACH_PAGEMAP
3096 /*
3097 * If the shadow offset is 0, then use the existence map from
3098 * the backing object if there is one. If the shadow offset is
3099 * not zero, toss it.
3100 *
3101 * XXX - If the shadow offset is not 0 then a bit copy is needed
3102 * if the map is to be salvaged. For now, we just toss the
3103 * old map, giving the collapsed object no map. This means that
3104 * the pager is invoked for zero fill pages. If analysis shows
3105 * that this happens frequently and is a performance hit, then
3106 * this code should be fixed to salvage the map.
3107 */
3108 assert(object->existence_map == VM_EXTERNAL_NULL);
3109 if (backing_offset || (size != backing_object->size)) {
3110 vm_external_discarded++;
3111 vm_external_destroy(backing_object->existence_map,
3112 backing_object->size);
3113 }
3114 else {
3115 vm_external_collapsed++;
3116 object->existence_map = backing_object->existence_map;
3117 }
3118 backing_object->existence_map = VM_EXTERNAL_NULL;
3119#endif /* MACH_PAGEMAP */
3120
3121 /*
3122 * Object now shadows whatever backing_object did.
3123 * Note that the reference to backing_object->shadow
3124 * moves from within backing_object to within object.
3125 */
3126
91447636
A
3127 assert(!object->phys_contiguous);
3128 assert(!backing_object->phys_contiguous);
1c79356b 3129 object->shadow = backing_object->shadow;
91447636
A
3130 if (object->shadow) {
3131 object->shadow_offset += backing_object->shadow_offset;
3132 } else {
3133 /* no shadow, therefore no shadow offset... */
3134 object->shadow_offset = 0;
3135 }
1c79356b 3136 assert((object->shadow == VM_OBJECT_NULL) ||
55e303ae 3137 (object->shadow->copy != backing_object));
1c79356b
A
3138
3139 /*
3140 * Discard backing_object.
3141 *
3142 * Since the backing object has no pages, no
3143 * pager left, and no object references within it,
3144 * all that is necessary is to dispose of it.
3145 */
3146
3147 assert((backing_object->ref_count == 1) &&
3148 (backing_object->resident_page_count == 0) &&
3149 (backing_object->paging_in_progress == 0));
3150
1c79356b
A
3151 backing_object->alive = FALSE;
3152 vm_object_unlock(backing_object);
3153
3154 XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
3155 (integer_t)backing_object, 0,0,0,0);
3156
91447636 3157 zfree(vm_object_zone, backing_object);
1c79356b
A
3158
3159 object_collapses++;
3160}
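
/*
 * Editorial summary of the page dispositions above: for each backing
 * page p, with new_offset = p->offset - backing_offset,
 *
 *	outside the parent's window	-> VM_PAGE_FREE(p)
 *	parent has no page there	-> vm_page_rename(p, object, ...)
 *	parent page absent (pagemap)	-> free parent's page, rename p
 *	parent has a real page		-> VM_PAGE_FREE(p)
 */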
3161
0b4e3aa0 3162static void
1c79356b
A
3163vm_object_do_bypass(
3164 vm_object_t object,
3165 vm_object_t backing_object)
3166{
3167 /*
3168 * Make the parent shadow the next object
3169 * in the chain.
3170 */
3171
3172#if TASK_SWAPPER
3173 /*
3174 * Do object reference in-line to
3175 * conditionally increment shadow's
3176 * residence count. If object is not
3177 * resident, leave residence count
3178 * on shadow alone.
3179 */
3180 if (backing_object->shadow != VM_OBJECT_NULL) {
3181 vm_object_lock(backing_object->shadow);
3182 backing_object->shadow->ref_count++;
3183 if (object->res_count != 0)
3184 vm_object_res_reference(backing_object->shadow);
3185 vm_object_unlock(backing_object->shadow);
3186 }
3187#else /* TASK_SWAPPER */
3188 vm_object_reference(backing_object->shadow);
3189#endif /* TASK_SWAPPER */
3190
91447636
A
3191 assert(!object->phys_contiguous);
3192 assert(!backing_object->phys_contiguous);
1c79356b 3193 object->shadow = backing_object->shadow;
91447636
A
3194 if (object->shadow) {
3195 object->shadow_offset += backing_object->shadow_offset;
3196 } else {
3197 /* no shadow, therefore no shadow offset... */
3198 object->shadow_offset = 0;
3199 }
1c79356b
A
3200
3201 /*
3202 * Backing object might have had a copy pointer
3203 * to us. If it did, clear it.
3204 */
3205 if (backing_object->copy == object) {
3206 backing_object->copy = VM_OBJECT_NULL;
3207 }
3208
3209 /*
3210 * Drop the reference count on backing_object.
3211#if TASK_SWAPPER
3212 * Since its ref_count was at least 2, it
3213 * will not vanish; so we don't need to call
3214 * vm_object_deallocate.
3215 * [FBDP: that doesn't seem to be true any more]
3216 *
3217 * The res_count on the backing object is
3218 * conditionally decremented. It's possible
3219 * (via vm_pageout_scan) to get here with
3220 * a "swapped" object, which has a 0 res_count,
3221 * in which case, the backing object res_count
3222 * is already down by one.
3223#else
3224 * Don't call vm_object_deallocate unless
3225 * ref_count drops to zero.
3226 *
3227 * The ref_count can drop to zero here if the
3228 * backing object could be bypassed but not
3229 * collapsed, such as when the backing object
3230 * is temporary and cachable.
3231#endif
3232 */
3233 if (backing_object->ref_count > 1) {
3234 backing_object->ref_count--;
3235#if TASK_SWAPPER
3236 if (object->res_count != 0)
3237 vm_object_res_deallocate(backing_object);
3238 assert(backing_object->ref_count > 0);
3239#endif /* TASK_SWAPPER */
3240 vm_object_unlock(backing_object);
3241 } else {
3242
3243 /*
3244 * Drop locks so that we can deallocate
3245 * the backing object.
3246 */
3247
3248#if TASK_SWAPPER
3249 if (object->res_count == 0) {
3250 /* XXX get a reference for the deallocate below */
3251 vm_object_res_reference(backing_object);
3252 }
3253#endif /* TASK_SWAPPER */
3254 vm_object_unlock(object);
3255 vm_object_unlock(backing_object);
3256 vm_object_deallocate(backing_object);
3257
3258 /*
3259 * Relock object. We don't have to reverify
3260 * its state since vm_object_collapse will
3261 * do that for us as it starts at the
3262 * top of its loop.
3263 */
3264
3265 vm_object_lock(object);
3266 }
3267
3268 object_bypasses++;
3269}
0b4e3aa0 3270
1c79356b
A
3271
3272/*
3273 * vm_object_collapse:
3274 *
3275 * Perform an object collapse or an object bypass if appropriate.
3276 * The real work of collapsing and bypassing is performed in
3277 * the routines vm_object_do_collapse and vm_object_do_bypass.
3278 *
3279 * Requires that the object be locked and the page queues be unlocked.
3280 *
3281 */
91447636
A
3282static unsigned long vm_object_collapse_calls = 0;
3283static unsigned long vm_object_collapse_objects = 0;
3284static unsigned long vm_object_collapse_do_collapse = 0;
3285static unsigned long vm_object_collapse_do_bypass = 0;
0b4e3aa0 3286__private_extern__ void
1c79356b 3287vm_object_collapse(
55e303ae
A
3288 register vm_object_t object,
3289 register vm_object_offset_t hint_offset)
1c79356b
A
3290{
3291 register vm_object_t backing_object;
55e303ae
A
3292 register unsigned int rcount;
3293 register unsigned int size;
91447636
A
3294 vm_object_offset_t collapse_min_offset;
3295 vm_object_offset_t collapse_max_offset;
3296 vm_page_t page;
3297 vm_object_t original_object;
3298
3299 vm_object_collapse_calls++;
0b4e3aa0 3300
1c79356b
A
3301 if (! vm_object_collapse_allowed && ! vm_object_bypass_allowed) {
3302 return;
3303 }
3304
3305 XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
3306 (integer_t)object, 0,0,0,0);
3307
91447636
A
3308 if (object == VM_OBJECT_NULL)
3309 return;
3310
3311 original_object = object;
3312
1c79356b 3313 while (TRUE) {
91447636 3314 vm_object_collapse_objects++;
1c79356b
A
3315 /*
3316 * Verify that the conditions are right for either
3317 * collapse or bypass:
1c79356b 3318 */
1c79356b
A
3319
3320 /*
3321 * There is a backing object, and
3322 */
3323
91447636
A
3324 backing_object = object->shadow;
3325 if (backing_object == VM_OBJECT_NULL) {
3326 if (object != original_object) {
3327 vm_object_unlock(object);
3328 }
1c79356b 3329 return;
91447636 3330 }
1c79356b 3331
91447636
A
3332 /*
3333 * No pages in the object are currently
3334 * being paged out, and
3335 */
3336 if (object->paging_in_progress != 0 ||
3337 object->absent_count != 0) {
3338 /* try and collapse the rest of the shadow chain */
3339 vm_object_lock(backing_object);
3340 if (object != original_object) {
3341 vm_object_unlock(object);
3342 }
3343 object = backing_object;
3344 continue;
3345 }
3346
1c79356b
A
3347 vm_object_lock(backing_object);
3348
3349 /*
3350 * ...
3351 * The backing object is not read_only,
3352 * and no pages in the backing object are
3353 * currently being paged out.
3354 * The backing object is internal.
3355 *
3356 */
3357
3358 if (!backing_object->internal ||
3359 backing_object->paging_in_progress != 0) {
91447636
A
3360 /* try and collapse the rest of the shadow chain */
3361 if (object != original_object) {
3362 vm_object_unlock(object);
3363 }
3364 object = backing_object;
3365 continue;
1c79356b
A
3366 }
3367
3368 /*
3369 * The backing object can't be a copy-object:
3370 * the shadow_offset for the copy-object must stay
3371 * as 0. Furthermore (for the 'we have all the
3372 * pages' case), if we bypass backing_object and
3373 * just shadow the next object in the chain, old
3374 * pages from that object would then have to be copied
3375 * BOTH into the (former) backing_object and into the
3376 * parent object.
3377 */
3378 if (backing_object->shadow != VM_OBJECT_NULL &&
55e303ae 3379 backing_object->shadow->copy == backing_object) {
91447636
A
3380 /* try and collapse the rest of the shadow chain */
3381 if (object != original_object) {
3382 vm_object_unlock(object);
3383 }
3384 object = backing_object;
3385 continue;
1c79356b
A
3386 }
3387
3388 /*
3389 * We can now try to either collapse the backing
3390 * object (if the parent is the only reference to
3391 * it) or (perhaps) remove the parent's reference
3392 * to it.
1c79356b 3393 *
0b4e3aa0
A
3394 * If there is exactly one reference to the backing
3395 * object, we may be able to collapse it into the
3396 * parent.
1c79356b 3397 *
55e303ae
A
3398 * If MACH_PAGEMAP is defined:
3399 * The parent must not have a pager created for it,
3400 * since collapsing a backing_object dumps new pages
3401 * into the parent that its pager doesn't know about
3402 * (and the collapse code can't merge the existence
3403 * maps).
3404 * Otherwise:
3405 * As long as one of the objects is still not known
3406 * to the pager, we can collapse them.
1c79356b 3407 */
1c79356b 3408 if (backing_object->ref_count == 1 &&
55e303ae
A
3409 (!object->pager_created
3410#if !MACH_PAGEMAP
91447636 3411 || !backing_object->pager_created
55e303ae
A
3412#endif /*!MACH_PAGEMAP */
3413 ) && vm_object_collapse_allowed) {
1c79356b
A
3414
3415 XPR(XPR_VM_OBJECT,
91447636 3416 "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
1c79356b
A
3417 (integer_t)backing_object, (integer_t)object,
3418 (integer_t)backing_object->pager,
91447636 3419 (integer_t)backing_object->pager_control, 0);
1c79356b
A
3420
3421 /*
3422 * We need the cache lock for collapsing,
3423 * but we must not deadlock.
3424 */
3425
3426 if (! vm_object_cache_lock_try()) {
91447636
A
3427 if (object != original_object) {
3428 vm_object_unlock(object);
3429 }
1c79356b
A
3430 vm_object_unlock(backing_object);
3431 return;
3432 }
3433
91447636
A
3434 /*
3435 * ENCRYPTED SWAP
3436 * We can't collapse the object if it contains
3437 * any encrypted page, because the encryption key
3438 * includes the <object,offset> info. We can't
3439 * drop the object lock in vm_object_do_collapse()
3440 * so we can't decrypt the page there either.
3441 */
3442 if (vm_pages_encrypted) {
3443 collapse_min_offset = object->shadow_offset;
3444 collapse_max_offset =
3445 object->shadow_offset + object->size;
3446 queue_iterate(&backing_object->memq,
3447 page, vm_page_t, listq) {
3448 if (page->encrypted &&
3449 (page->offset >=
3450 collapse_min_offset) &&
3451 (page->offset <
3452 collapse_max_offset)) {
3453 /*
3454 * We found an encrypted page
3455 * in the backing object,
3456 * within the range covered
3457 * by the parent object: we can
3458 * not collapse them.
3459 */
3460 vm_object_collapse_encrypted++;
3461 vm_object_cache_unlock();
3462 goto try_bypass;
3463 }
3464 }
3465 }
3466
1c79356b
A
3467 /*
3468 * Collapse the object with its backing
3469 * object, and try again with the object's
3470 * new backing object.
3471 */
3472
3473 vm_object_do_collapse(object, backing_object);
91447636 3474 vm_object_collapse_do_collapse++;
1c79356b
A
3475 continue;
3476 }
3477
91447636 3478 try_bypass:
1c79356b
A
3479 /*
3480 * Collapsing the backing object was not possible
3481 * or permitted, so let's try bypassing it.
3482 */
3483
3484 if (! vm_object_bypass_allowed) {
91447636
A
3485 /* try and collapse the rest of the shadow chain */
3486 if (object != original_object) {
3487 vm_object_unlock(object);
3488 }
3489 object = backing_object;
3490 continue;
1c79356b
A
3491 }
3492
0b4e3aa0 3493
1c79356b 3494 /*
55e303ae
A
3495 * If the object doesn't have all its pages present,
3496 * we have to make sure no pages in the backing object
3497 * "show through" before bypassing it.
1c79356b 3498 */
55e303ae
A
3499 size = atop(object->size);
3500 rcount = object->resident_page_count;
3501 if (rcount != size) {
55e303ae
A
3502 vm_object_offset_t offset;
3503 vm_object_offset_t backing_offset;
3504 unsigned int backing_rcount;
3505 unsigned int lookups = 0;
3506
3507 /*
3508 * If the backing object has a pager but no pagemap,
3509 * then we cannot bypass it, because we don't know
3510 * what pages it has.
3511 */
3512 if (backing_object->pager_created
1c79356b 3513#if MACH_PAGEMAP
55e303ae 3514 && (backing_object->existence_map == VM_EXTERNAL_NULL)
1c79356b 3515#endif /* MACH_PAGEMAP */
55e303ae 3516 ) {
91447636
A
3517 /* try and collapse the rest of the shadow chain */
3518 if (object != original_object) {
3519 vm_object_unlock(object);
3520 }
3521 object = backing_object;
3522 continue;
55e303ae 3523 }
1c79356b 3524
55e303ae
A
3525 /*
3526 * If the object has a pager but no pagemap,
3527 * then we cannot bypass it, because we don't know
3528 * what pages it has.
3529 */
3530 if (object->pager_created
0b4e3aa0 3531#if MACH_PAGEMAP
55e303ae 3532 && (object->existence_map == VM_EXTERNAL_NULL)
0b4e3aa0 3533#endif /* MACH_PAGEMAP */
55e303ae 3534 ) {
91447636
A
3535 /* try and collapse the rest of the shadow chain */
3536 if (object != original_object) {
3537 vm_object_unlock(object);
3538 }
3539 object = backing_object;
3540 continue;
55e303ae 3541 }
0b4e3aa0 3542
55e303ae
A
3543 /*
3544 * If all of the pages in the backing object are
3545 * shadowed by the parent object, the parent
3546 * object no longer has to shadow the backing
3547 * object; it can shadow the next one in the
3548 * chain.
3549 *
3550 * If the backing object has existence info,
3551 * we must examine its existence info
3552 * as well.
3553 *
3554 */
1c79356b 3555
55e303ae
A
3556 backing_offset = object->shadow_offset;
3557 backing_rcount = backing_object->resident_page_count;
1c79356b 3558
55e303ae
A
3559#define EXISTS_IN_OBJECT(obj, off, rc) \
3560 (vm_external_state_get((obj)->existence_map, \
3561 (vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \
3562 ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
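
		/*
		 * Editorial note: EXISTS_IN_OBJECT depends on
		 * short-circuit evaluation.  The existence map is
		 * consulted first; only on a miss, and only while
		 * resident pages remain ((rc) != 0), does it pay for
		 * a vm_page_lookup().  Every attempted lookup bumps
		 * "lookups" (the throttle used below), and each
		 * resident page found consumes one unit of (rc):
		 *
		 *	map says EXISTS		-> TRUE, (rc) untouched
		 *	else if (rc): lookups++, then
		 *		page resident	-> TRUE, (rc)--
		 *		page absent	-> FALSE
		 *	else			-> FALSE
		 */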
3563
3564 /*
3565 * Check the hint location first
3566 * (since it is often the quickest way out of here).
3567 */
3568 if (object->cow_hint != ~(vm_offset_t)0)
3569 hint_offset = (vm_object_offset_t)object->cow_hint;
3570 else
3571 hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
3572 (hint_offset - 8 * PAGE_SIZE_64) : 0;
3573
3574 if (EXISTS_IN_OBJECT(backing_object, hint_offset +
3575 backing_offset, backing_rcount) &&
3576 !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
3577 /* dependency right at the hint */
3578 object->cow_hint = (vm_offset_t)hint_offset;
91447636
A
3579 /* try and collapse the rest of the shadow chain */
3580 if (object != original_object) {
3581 vm_object_unlock(object);
3582 }
3583 object = backing_object;
3584 continue;
0b4e3aa0 3585 }
55e303ae
A
3586
3587 /*
3588 * If the object's window onto the backing_object
3589 * is large compared to the number of resident
3590 * pages in the backing object, it makes sense to
3591 * walk the backing_object's resident pages first.
3592 *
3593 * NOTE: Pages may be in both the existence map and
3594 * resident. So, we can't permanently decrement
3595 * the rcount here because the second loop may
3596 * find the same pages in the backing object's
3597 * existence map that we found here and we would
3598 * double-decrement the rcount.
3600 */
3601 if (backing_rcount && size >
3602 ((backing_object->existence_map) ?
3603 backing_rcount : (backing_rcount >> 1))) {
3604 unsigned int rc = rcount;
3605 vm_page_t p;
3606
3607 backing_rcount = backing_object->resident_page_count;
3608 p = (vm_page_t)queue_first(&backing_object->memq);
3609 do {
3610 /* Until we get more than one lookup lock */
3611 if (lookups > 256) {
3612 lookups = 0;
3613 delay(1);
3614 }
3615
3616 offset = (p->offset - backing_offset);
3617 if (offset < object->size &&
3618 offset != hint_offset &&
3619 !EXISTS_IN_OBJECT(object, offset, rc)) {
3620 /* found a dependency */
3621 object->cow_hint = (vm_offset_t)offset;
91447636 3622 break;
55e303ae 3623 }
91447636 3624 p = (vm_page_t) queue_next(&p->listq);
55e303ae
A
3625
3626 } while (--backing_rcount);
91447636
A
3627 if (backing_rcount != 0 ) {
3628 /* try and collapse the rest of the shadow chain */
3629 if (object != original_object) {
3630 vm_object_unlock(object);
3631 }
3632 object = backing_object;
3633 continue;
3634 }
0b4e3aa0 3635 }
55e303ae
A
3636
3637 /*
3638 * Walk through the offsets looking for pages in the
3639 * backing object that show through to the object.
3640 */
3641 if (backing_rcount || backing_object->existence_map) {
3642 offset = hint_offset;
3643
3644 while((offset =
3645 (offset + PAGE_SIZE_64 < object->size) ?
3646 (offset + PAGE_SIZE_64) : 0) != hint_offset) {
3647
3648 /* Until we get more than one lookup lock */
3649 if (lookups > 256) {
3650 lookups = 0;
3651 delay(1);
3652 }
3653
3654 if (EXISTS_IN_OBJECT(backing_object, offset +
3655 backing_offset, backing_rcount) &&
3656 !EXISTS_IN_OBJECT(object, offset, rcount)) {
3657 /* found a dependency */
3658 object->cow_hint = (vm_offset_t)offset;
91447636 3659 break;
55e303ae
A
3660 }
3661 }
91447636
A
3662 if (offset != hint_offset) {
3663 /* try and collapse the rest of the shadow chain */
3664 if (object != original_object) {
3665 vm_object_unlock(object);
3666 }
3667 object = backing_object;
3668 continue;
3669 }
0b4e3aa0
A
3670 }
3671 }
1c79356b 3672
55e303ae
A
3673 /* reset the offset hint for any objects deeper in the chain */
3674 object->cow_hint = (vm_offset_t)0;
1c79356b
A
3675
3676 /*
3677 * All interesting pages in the backing object
3678 * already live in the parent or its pager.
3679 * Thus we can bypass the backing object.
3680 */
3681
3682 vm_object_do_bypass(object, backing_object);
91447636 3683 vm_object_collapse_do_bypass++;
3684
3685 /*
3686 * Try again with this object's new backing object.
3687 */
3688
3689 continue;
3690 }
3691
3692 if (object != original_object) {
3693 vm_object_unlock(object);
3694 }
3695}
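
/*
 * A minimal sketch (not from the original source) of the wrap-around
 * scan used above: start one page past the COW hint, wrap through
 * offset 0, and stop when the hint comes back around, so repeated
 * collapse attempts resume near the last dependency found.  The
 * visit() callback is hypothetical.
 */
static void
hint_scan_sketch(
	vm_object_offset_t	hint,
	vm_object_size_t	size,
	void			(*visit)(vm_object_offset_t))
{
	vm_object_offset_t	offset = hint;

	while ((offset = (offset + PAGE_SIZE_64 < size) ?
			 (offset + PAGE_SIZE_64) : 0) != hint) {
		(*visit)(offset);	/* every page except the hint itself */
	}
}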
3696
3697/*
3698 * Routine: vm_object_page_remove: [internal]
3699 * Purpose:
3700 * Removes all physical pages in the specified
3701 * object range from the object's list of pages.
3702 *
3703 * In/out conditions:
3704 * The object must be locked.
3705 * The object must not have paging_in_progress, usually
3706 * guaranteed by not having a pager.
3707 */
3708unsigned int vm_object_page_remove_lookup = 0;
3709unsigned int vm_object_page_remove_iterate = 0;
3710
0b4e3aa0 3711__private_extern__ void
3712vm_object_page_remove(
3713 register vm_object_t object,
3714 register vm_object_offset_t start,
3715 register vm_object_offset_t end)
3716{
3717 register vm_page_t p, next;
3718
3719 /*
3720 * One and two page removals are most popular.
3721 * The factor of 16 here is somewhat arbitrary.
3722 * It balances vm_object_lookup vs iteration.
3723 */
3724
55e303ae 3725 if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
3726 vm_object_page_remove_lookup++;
3727
3728 for (; start < end; start += PAGE_SIZE_64) {
3729 p = vm_page_lookup(object, start);
3730 if (p != VM_PAGE_NULL) {
3731 assert(!p->cleaning && !p->pageout);
3732 if (!p->fictitious)
91447636 3733 pmap_disconnect(p->phys_page);
3734 VM_PAGE_FREE(p);
3735 }
3736 }
3737 } else {
3738 vm_object_page_remove_iterate++;
3739
3740 p = (vm_page_t) queue_first(&object->memq);
3741 while (!queue_end(&object->memq, (queue_entry_t) p)) {
3742 next = (vm_page_t) queue_next(&p->listq);
3743 if ((start <= p->offset) && (p->offset < end)) {
3744 assert(!p->cleaning && !p->pageout);
3745 if (!p->fictitious)
91447636 3746 pmap_disconnect(p->phys_page);
3747 VM_PAGE_FREE(p);
3748 }
3749 p = next;
3750 }
3751 }
3752}
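
/*
 * The break-even test above, isolated as a sketch (an illustration,
 * not code from this file): removing N pages by per-page hash lookup
 * costs N probes, while walking memq costs one step per resident
 * page; the "somewhat arbitrary" factor of 16 biases toward lookup
 * because a probe is cheaper than a queue step plus a range test.
 */
static boolean_t
prefers_lookup_sketch(
	vm_object_offset_t	start,
	vm_object_offset_t	end,
	unsigned int		resident_page_count)
{
	return (atop_64(end - start) < resident_page_count / 16);
}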
3753
0b4e3aa0 3754
3755/*
3756 * Routine: vm_object_coalesce
3757 * Function: Coalesces two objects backing up adjoining
3758 * regions of memory into a single object.
3759 *
3760 * returns TRUE if objects were combined.
3761 *
3762 * NOTE: Only works at the moment if the second object is NULL -
3763 * if it's not, which object do we lock first?
3764 *
3765 * Parameters:
3766 * prev_object First object to coalesce
3767 * prev_offset Offset into prev_object
 3768 * next_object Second object to coalesce
3769 * next_offset Offset into next_object
3770 *
3771 * prev_size Size of reference to prev_object
3772 * next_size Size of reference to next_object
3773 *
3774 * Conditions:
3775 * The object(s) must *not* be locked. The map must be locked
3776 * to preserve the reference to the object(s).
3777 */
0b4e3aa0 3778static int vm_object_coalesce_count = 0;
1c79356b 3779
0b4e3aa0 3780__private_extern__ boolean_t
3781vm_object_coalesce(
3782 register vm_object_t prev_object,
3783 vm_object_t next_object,
3784 vm_object_offset_t prev_offset,
91447636 3785 __unused vm_object_offset_t next_offset,
3786 vm_object_size_t prev_size,
3787 vm_object_size_t next_size)
3788{
3789 vm_object_size_t newsize;
3790
3791#ifdef lint
3792 next_offset++;
3793#endif /* lint */
3794
3795 if (next_object != VM_OBJECT_NULL) {
3796 return(FALSE);
3797 }
3798
3799 if (prev_object == VM_OBJECT_NULL) {
3800 return(TRUE);
3801 }
3802
3803 XPR(XPR_VM_OBJECT,
3804 "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
3805 (integer_t)prev_object, prev_offset, prev_size, next_size, 0);
3806
3807 vm_object_lock(prev_object);
3808
3809 /*
3810 * Try to collapse the object first
3811 */
55e303ae 3812 vm_object_collapse(prev_object, prev_offset);
3813
3814 /*
3815 * Can't coalesce if pages not mapped to
 3816 * prev_entry may be in use in any way:
3817 * . more than one reference
3818 * . paged out
3819 * . shadows another object
3820 * . has a copy elsewhere
91447636 3821 * . is purgable
3822 * . paging references (pages might be in page-list)
3823 */
3824
3825 if ((prev_object->ref_count > 1) ||
3826 prev_object->pager_created ||
3827 (prev_object->shadow != VM_OBJECT_NULL) ||
3828 (prev_object->copy != VM_OBJECT_NULL) ||
3829 (prev_object->true_share != FALSE) ||
91447636 3830 (prev_object->purgable != VM_OBJECT_NONPURGABLE) ||
3831 (prev_object->paging_in_progress != 0)) {
3832 vm_object_unlock(prev_object);
3833 return(FALSE);
3834 }
3835
3836 vm_object_coalesce_count++;
3837
3838 /*
3839 * Remove any pages that may still be in the object from
3840 * a previous deallocation.
3841 */
3842 vm_object_page_remove(prev_object,
3843 prev_offset + prev_size,
3844 prev_offset + prev_size + next_size);
3845
3846 /*
3847 * Extend the object if necessary.
3848 */
3849 newsize = prev_offset + prev_size + next_size;
3850 if (newsize > prev_object->size) {
3851#if MACH_PAGEMAP
3852 /*
3853 * We cannot extend an object that has existence info,
3854 * since the existence info might then fail to cover
3855 * the entire object.
3856 *
3857 * This assertion must be true because the object
3858 * has no pager, and we only create existence info
3859 * for objects with pagers.
3860 */
3861 assert(prev_object->existence_map == VM_EXTERNAL_NULL);
3862#endif /* MACH_PAGEMAP */
3863 prev_object->size = newsize;
3864 }
3865
3866 vm_object_unlock(prev_object);
3867 return(TRUE);
3868}
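
/*
 * Hypothetical caller sketch, modeled on (but not copied from) the
 * vm_map allocation path: when a new range lands immediately after
 * an existing entry, try to grow that entry's object rather than
 * allocate a fresh one.  "prev_entry" and "new_size" are assumed.
 */
#if 0	/* illustrative only */
	if (vm_object_coalesce(prev_entry->object.vm_object,
			       VM_OBJECT_NULL,
			       prev_entry->offset,
			       (vm_object_offset_t) 0,
			       (vm_object_size_t) (prev_entry->vme_end -
						   prev_entry->vme_start),
			       (vm_object_size_t) new_size)) {
		prev_entry->vme_end += new_size;	/* absorb the range */
	}
#endif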
3869
3870/*
3871 * Attach a set of physical pages to an object, so that they can
3872 * be mapped by mapping the object. Typically used to map IO memory.
3873 *
3874 * The mapping function and its private data are used to obtain the
3875 * physical addresses for each page to be mapped.
3876 */
3877void
3878vm_object_page_map(
3879 vm_object_t object,
3880 vm_object_offset_t offset,
3881 vm_object_size_t size,
3882 vm_object_offset_t (*map_fn)(void *map_fn_data,
3883 vm_object_offset_t offset),
3884 void *map_fn_data) /* private to map_fn */
3885{
3886 int num_pages;
3887 int i;
3888 vm_page_t m;
3889 vm_page_t old_page;
3890 vm_object_offset_t addr;
3891
55e303ae 3892 num_pages = atop_64(size);
3893
3894 for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) {
3895
3896 addr = (*map_fn)(map_fn_data, offset);
3897
3898 while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
3899 vm_page_more_fictitious();
3900
3901 vm_object_lock(object);
3902 if ((old_page = vm_page_lookup(object, offset))
3903 != VM_PAGE_NULL)
3904 {
3905 vm_page_lock_queues();
3906 vm_page_free(old_page);
3907 vm_page_unlock_queues();
3908 }
3909
3910 vm_page_init(m, addr);
3911 /* private normally requires lock_queues but since we */
 3912 /* are initializing the page, it's not necessary here */
 3913 m->private = TRUE; /* don't free page */
3914 m->wire_count = 1;
3915 vm_page_insert(m, object, offset);
3916
3917 PAGE_WAKEUP_DONE(m);
3918 vm_object_unlock(object);
3919 }
3920}
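
/*
 * Example map_fn (an assumption for illustration, not from this
 * file): identity-map a physically contiguous window whose base
 * address is passed through map_fn_data.
 */
static vm_object_offset_t
phys_window_map_fn(
	void			*map_fn_data,
	vm_object_offset_t	offset)
{
	vm_object_offset_t	base = *(vm_object_offset_t *) map_fn_data;

	return (base + offset);	/* physical address backing this page */
}
/* typical use: vm_object_page_map(object, 0, size, phys_window_map_fn, &base); */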
3921
3922#include <mach_kdb.h>
3923
3924#if MACH_KDB
3925#include <ddb/db_output.h>
3926#include <vm/vm_print.h>
3927
3928#define printf kdbprintf
3929
3930extern boolean_t vm_object_cached(
3931 vm_object_t object);
3932
3933extern void print_bitstring(
3934 char byte);
3935
3936boolean_t vm_object_print_pages = FALSE;
3937
3938void
3939print_bitstring(
3940 char byte)
3941{
3942 printf("%c%c%c%c%c%c%c%c",
3943 ((byte & (1 << 0)) ? '1' : '0'),
3944 ((byte & (1 << 1)) ? '1' : '0'),
3945 ((byte & (1 << 2)) ? '1' : '0'),
3946 ((byte & (1 << 3)) ? '1' : '0'),
3947 ((byte & (1 << 4)) ? '1' : '0'),
3948 ((byte & (1 << 5)) ? '1' : '0'),
3949 ((byte & (1 << 6)) ? '1' : '0'),
3950 ((byte & (1 << 7)) ? '1' : '0'));
3951}
3952
3953boolean_t
3954vm_object_cached(
3955 register vm_object_t object)
3956{
3957 register vm_object_t o;
3958
3959 queue_iterate(&vm_object_cached_list, o, vm_object_t, cached_list) {
3960 if (object == o) {
3961 return TRUE;
3962 }
3963 }
3964 return FALSE;
3965}
3966
3967#if MACH_PAGEMAP
3968/*
3969 * vm_external_print: [ debug ]
3970 */
3971void
3972vm_external_print(
3973 vm_external_map_t emap,
3974 vm_size_t size)
1c79356b 3975{
91447636 3976 if (emap == VM_EXTERNAL_NULL) {
3977 printf("0 ");
3978 } else {
3979 vm_size_t existence_size = stob(size);
3980 printf("{ size=%d, map=[", existence_size);
3981 if (existence_size > 0) {
91447636 3982 print_bitstring(emap[0]);
3983 }
3984 if (existence_size > 1) {
91447636 3985 print_bitstring(emap[1]);
3986 }
3987 if (existence_size > 2) {
3988 printf("...");
91447636 3989 print_bitstring(emap[existence_size-1]);
3990 }
3991 printf("] }\n");
3992 }
3993 return;
3994}
3995#endif /* MACH_PAGEMAP */
3996
3997int
3998vm_follow_object(
3999 vm_object_t object)
4000{
4001 int count = 0;
4002 int orig_db_indent = db_indent;
1c79356b 4003
4004 while (TRUE) {
4005 if (object == VM_OBJECT_NULL) {
4006 db_indent = orig_db_indent;
4007 return count;
4008 }
1c79356b 4009
0b4e3aa0 4010 count += 1;
1c79356b 4011
4012 iprintf("object 0x%x", object);
4013 printf(", shadow=0x%x", object->shadow);
4014 printf(", copy=0x%x", object->copy);
4015 printf(", pager=0x%x", object->pager);
4016 printf(", ref=%d\n", object->ref_count);
4017
4018 db_indent += 2;
4019 object = object->shadow;
4020 }
1c79356b 4021
4022}
4023
4024/*
4025 * vm_object_print: [ debug ]
4026 */
4027void
4028vm_object_print(
4029 db_addr_t db_addr,
4030 __unused boolean_t have_addr,
4031 __unused int arg_count,
4032 __unused char *modif)
1c79356b 4033{
91447636 4034 vm_object_t object;
1c79356b 4035 register vm_page_t p;
91447636 4036 const char *s;
4037
4038 register int count;
4039
91447636 4040 object = (vm_object_t) (long) db_addr;
4041 if (object == VM_OBJECT_NULL)
4042 return;
4043
4044 iprintf("object 0x%x\n", object);
4045
4046 db_indent += 2;
4047
4048 iprintf("size=0x%x", object->size);
4049 printf(", cluster=0x%x", object->cluster_size);
91447636 4050 printf(", memq_hint=%p", object->memq_hint);
4051 printf(", ref_count=%d\n", object->ref_count);
4052 iprintf("");
4053#if TASK_SWAPPER
4054 printf("res_count=%d, ", object->res_count);
4055#endif /* TASK_SWAPPER */
4056 printf("resident_page_count=%d\n", object->resident_page_count);
4057
4058 iprintf("shadow=0x%x", object->shadow);
4059 if (object->shadow) {
4060 register int i = 0;
4061 vm_object_t shadow = object;
91447636 4062 while((shadow = shadow->shadow))
4063 i++;
4064 printf(" (depth %d)", i);
4065 }
4066 printf(", copy=0x%x", object->copy);
4067 printf(", shadow_offset=0x%x", object->shadow_offset);
4068 printf(", last_alloc=0x%x\n", object->last_alloc);
4069
4070 iprintf("pager=0x%x", object->pager);
4071 printf(", paging_offset=0x%x", object->paging_offset);
91447636 4072 printf(", pager_control=0x%x\n", object->pager_control);
4073
4074 iprintf("copy_strategy=%d[", object->copy_strategy);
4075 switch (object->copy_strategy) {
4076 case MEMORY_OBJECT_COPY_NONE:
4077 printf("copy_none");
4078 break;
4079
4080 case MEMORY_OBJECT_COPY_CALL:
4081 printf("copy_call");
4082 break;
4083
4084 case MEMORY_OBJECT_COPY_DELAY:
4085 printf("copy_delay");
4086 break;
4087
4088 case MEMORY_OBJECT_COPY_SYMMETRIC:
4089 printf("copy_symmetric");
4090 break;
4091
4092 case MEMORY_OBJECT_COPY_INVALID:
4093 printf("copy_invalid");
4094 break;
4095
4096 default:
4097 printf("?");
4098 }
4099 printf("]");
4100 printf(", absent_count=%d\n", object->absent_count);
4101
4102 iprintf("all_wanted=0x%x<", object->all_wanted);
4103 s = "";
4104 if (vm_object_wanted(object, VM_OBJECT_EVENT_INITIALIZED)) {
4105 printf("%sinit", s);
4106 s = ",";
4107 }
4108 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGER_READY)) {
4109 printf("%sready", s);
4110 s = ",";
4111 }
4112 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS)) {
4113 printf("%spaging", s);
4114 s = ",";
4115 }
4116 if (vm_object_wanted(object, VM_OBJECT_EVENT_ABSENT_COUNT)) {
4117 printf("%sabsent", s);
4118 s = ",";
4119 }
4120 if (vm_object_wanted(object, VM_OBJECT_EVENT_LOCK_IN_PROGRESS)) {
4121 printf("%slock", s);
4122 s = ",";
4123 }
4124 if (vm_object_wanted(object, VM_OBJECT_EVENT_UNCACHING)) {
4125 printf("%suncaching", s);
4126 s = ",";
4127 }
4128 if (vm_object_wanted(object, VM_OBJECT_EVENT_COPY_CALL)) {
4129 printf("%scopy_call", s);
4130 s = ",";
4131 }
4132 if (vm_object_wanted(object, VM_OBJECT_EVENT_CACHING)) {
4133 printf("%scaching", s);
4134 s = ",";
4135 }
4136 printf(">");
4137 printf(", paging_in_progress=%d\n", object->paging_in_progress);
4138
4139 iprintf("%screated, %sinit, %sready, %spersist, %strusted, %spageout, %s, %s\n",
4140 (object->pager_created ? "" : "!"),
4141 (object->pager_initialized ? "" : "!"),
4142 (object->pager_ready ? "" : "!"),
4143 (object->can_persist ? "" : "!"),
4144 (object->pager_trusted ? "" : "!"),
4145 (object->pageout ? "" : "!"),
4146 (object->internal ? "internal" : "external"),
4147 (object->temporary ? "temporary" : "permanent"));
91447636 4148 iprintf("%salive, %spurgable, %spurgable_volatile, %spurgable_empty, %sshadowed, %scached, %sprivate\n",
1c79356b 4149 (object->alive ? "" : "!"),
4150 ((object->purgable != VM_OBJECT_NONPURGABLE) ? "" : "!"),
4151 ((object->purgable == VM_OBJECT_PURGABLE_VOLATILE) ? "" : "!"),
4152 ((object->purgable == VM_OBJECT_PURGABLE_EMPTY) ? "" : "!"),
4153 (object->shadowed ? "" : "!"),
4154 (vm_object_cached(object) ? "" : "!"),
4155 (object->private ? "" : "!"));
4156 iprintf("%sadvisory_pageout, %ssilent_overwrite\n",
4157 (object->advisory_pageout ? "" : "!"),
4158 (object->silent_overwrite ? "" : "!"));
4159
4160#if MACH_PAGEMAP
4161 iprintf("existence_map=");
4162 vm_external_print(object->existence_map, object->size);
4163#endif /* MACH_PAGEMAP */
4164#if MACH_ASSERT
4165 iprintf("paging_object=0x%x\n", object->paging_object);
4166#endif /* MACH_ASSERT */
4167
4168 if (vm_object_print_pages) {
4169 count = 0;
4170 p = (vm_page_t) queue_first(&object->memq);
4171 while (!queue_end(&object->memq, (queue_entry_t) p)) {
4172 if (count == 0) {
4173 iprintf("memory:=");
4174 } else if (count == 2) {
4175 printf("\n");
4176 iprintf(" ...");
4177 count = 0;
4178 } else {
4179 printf(",");
4180 }
4181 count++;
4182
91447636 4183 printf("(off=0x%llX,page=%p)", p->offset, p);
4184 p = (vm_page_t) queue_next(&p->listq);
4185 }
4186 if (count != 0) {
4187 printf("\n");
4188 }
4189 }
4190 db_indent -= 2;
4191}
4192
4193
4194/*
4195 * vm_object_find [ debug ]
4196 *
4197 * Find all tasks which reference the given vm_object.
4198 */
4199
4200boolean_t vm_object_find(vm_object_t object);
4201boolean_t vm_object_print_verbose = FALSE;
4202
4203boolean_t
4204vm_object_find(
4205 vm_object_t object)
4206{
4207 task_t task;
4208 vm_map_t map;
4209 vm_map_entry_t entry;
4210 processor_set_t pset = &default_pset;
4211 boolean_t found = FALSE;
4212
4213 queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
4214 map = task->map;
4215 for (entry = vm_map_first_entry(map);
4216 entry && entry != vm_map_to_entry(map);
4217 entry = entry->vme_next) {
4218
4219 vm_object_t obj;
4220
4221 /*
4222 * For the time being skip submaps,
4223 * only the kernel can have submaps,
4224 * and unless we are interested in
4225 * kernel objects, we can simply skip
4226 * submaps. See sb/dejan/nmk18b7/src/mach_kernel/vm
4227 * for a full solution.
4228 */
4229 if (entry->is_sub_map)
4230 continue;
4231 if (entry)
4232 obj = entry->object.vm_object;
4233 else
4234 continue;
4235
4236 while (obj != VM_OBJECT_NULL) {
4237 if (obj == object) {
4238 if (!found) {
4239 printf("TASK\t\tMAP\t\tENTRY\n");
4240 found = TRUE;
4241 }
4242 printf("0x%x\t0x%x\t0x%x\n",
4243 task, map, entry);
4244 }
4245 obj = obj->shadow;
4246 }
4247 }
4248 }
4249
4250 return(found);
4251}
4252
4253#endif /* MACH_KDB */
4254
4255kern_return_t
4256vm_object_populate_with_private(
55e303ae 4257 vm_object_t object,
0b4e3aa0 4258 vm_object_offset_t offset,
4259 ppnum_t phys_page,
4260 vm_size_t size)
0b4e3aa0 4261{
55e303ae 4262 ppnum_t base_page;
4263 vm_object_offset_t base_offset;
4264
4265
4266 if(!object->private)
4267 return KERN_FAILURE;
4268
55e303ae 4269 base_page = phys_page;
4270
4271 vm_object_lock(object);
4272 if(!object->phys_contiguous) {
4273 vm_page_t m;
55e303ae 4274 if((base_offset = trunc_page_64(offset)) != offset) {
4275 vm_object_unlock(object);
4276 return KERN_FAILURE;
4277 }
4278 base_offset += object->paging_offset;
4279 while(size) {
4280 m = vm_page_lookup(object, base_offset);
4281 if(m != VM_PAGE_NULL) {
4282 if(m->fictitious) {
4283 vm_page_lock_queues();
4284 m->fictitious = FALSE;
4285 m->private = TRUE;
55e303ae 4286 m->phys_page = base_page;
4287 if(!m->busy) {
4288 m->busy = TRUE;
4289 }
4290 if(!m->absent) {
4291 m->absent = TRUE;
4292 object->absent_count++;
4293 }
4294 m->list_req_pending = TRUE;
4295 vm_page_unlock_queues();
55e303ae 4296 } else if (m->phys_page != base_page) {
0b4e3aa0 4297 /* pmap call to clear old mapping */
91447636 4298 pmap_disconnect(m->phys_page);
55e303ae 4299 m->phys_page = base_page;
0b4e3aa0 4300 }
4301
4302 /*
4303 * ENCRYPTED SWAP:
4304 * We're not pointing to the same
4305 * physical page any longer and the
4306 * contents of the new one are not
4307 * supposed to be encrypted.
4308 * XXX What happens to the original
 4309 * physical page? Is it lost?
4310 */
4311 m->encrypted = FALSE;
4312
4313 } else {
4314 while ((m = vm_page_grab_fictitious())
4315 == VM_PAGE_NULL)
4316 vm_page_more_fictitious();
4317 vm_page_lock_queues();
4318 m->fictitious = FALSE;
4319 m->private = TRUE;
55e303ae 4320 m->phys_page = base_page;
4321 m->list_req_pending = TRUE;
4322 m->absent = TRUE;
4323 m->unusual = TRUE;
4324 object->absent_count++;
4325 vm_page_unlock_queues();
4326 vm_page_insert(m, object, base_offset);
4327 }
55e303ae 4328 base_page++; /* Go to the next physical page */
4329 base_offset += PAGE_SIZE;
4330 size -= PAGE_SIZE;
4331 }
4332 } else {
4333 /* NOTE: we should check the original settings here */
4334 /* if we have a size > zero a pmap call should be made */
4335 /* to disable the range */
4336
4337 /* pmap_? */
4338
4339 /* shadows on contiguous memory are not allowed */
4340 /* we therefore can use the offset field */
55e303ae 4341 object->shadow_offset = (vm_object_offset_t)(phys_page << 12);
4342 object->size = size;
4343 }
4344 vm_object_unlock(object);
4345 return KERN_SUCCESS;
4346}
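
/*
 * Caller sketch (an assumed usage, not a call site in this file): a
 * driver owning "len" bytes of device memory starting at physical
 * page "first_page" attaches them to an object created with the
 * "private" bit set, so faults resolve without a pager round trip.
 */
static kern_return_t
attach_device_pages_sketch(
	vm_object_t	object,		/* object->private must be TRUE */
	ppnum_t		first_page,
	vm_size_t	len)		/* page-aligned byte count */
{
	/* fails with KERN_FAILURE if the object is not private
	 * or the offset is not page-aligned (see checks above) */
	return vm_object_populate_with_private(object,
					       (vm_object_offset_t) 0,
					       first_page, len);
}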
4347
4348/*
4349 * memory_object_free_from_cache:
4350 *
4351 * Walk the vm_object cache list, removing and freeing vm_objects
 4352 * which are backed by the pager identified by the caller (pager_id).
 4353 * Remove up to "count" objects, if that many are available
4354 * in the cache.
0b4e3aa0 4355 *
4356 * Walk the list at most once, return the number of vm_objects
4357 * actually freed.
4358 */
4359
0b4e3aa0 4360__private_extern__ kern_return_t
1c79356b 4361memory_object_free_from_cache(
91447636 4362 __unused host_t host,
0b4e3aa0 4363 int *pager_id,
4364 int *count)
4365{
4366
4367 int object_released = 0;
4368
4369 register vm_object_t object = VM_OBJECT_NULL;
4370 vm_object_t shadow;
4371
4372/*
4373 if(host == HOST_NULL)
4374 return(KERN_INVALID_ARGUMENT);
4375*/
4376
4377 try_again:
4378 vm_object_cache_lock();
4379
4380 queue_iterate(&vm_object_cached_list, object,
4381 vm_object_t, cached_list) {
0b4e3aa0 4382 if (object->pager && (pager_id == object->pager->pager)) {
4383 vm_object_lock(object);
4384 queue_remove(&vm_object_cached_list, object,
4385 vm_object_t, cached_list);
4386 vm_object_cached_count--;
4387
4388 /*
4389 * Since this object is in the cache, we know
4390 * that it is initialized and has only a pager's
4391 * (implicit) reference. Take a reference to avoid
4392 * recursive deallocations.
4393 */
4394
4395 assert(object->pager_initialized);
4396 assert(object->ref_count == 0);
4397 object->ref_count++;
4398
4399 /*
4400 * Terminate the object.
4401 * If the object had a shadow, we let
4402 * vm_object_deallocate deallocate it.
4403 * "pageout" objects have a shadow, but
4404 * maintain a "paging reference" rather
4405 * than a normal reference.
4406 * (We are careful here to limit recursion.)
4407 */
4408 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
4409 if ((vm_object_terminate(object) == KERN_SUCCESS)
4410 && (shadow != VM_OBJECT_NULL)) {
4411 vm_object_deallocate(shadow);
4412 }
4413
4414 if(object_released++ == *count)
4415 return KERN_SUCCESS;
4416 goto try_again;
4417 }
4418 }
4419 vm_object_cache_unlock();
4420 *count = object_released;
4421 return KERN_SUCCESS;
4422}
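
/*
 * The try_again loop above is the usual drop-lock-and-rescan shape:
 * vm_object_terminate() consumes the cache lock, so the walk cannot
 * trust its queue links afterward and must restart from the head.
 * Generic form of the pattern (sketch; list_lock(), matches() and
 * reap() are hypothetical):
 */
#if 0	/* illustrative only */
restart:
	list_lock(&cache);
	queue_iterate(&cache, obj, vm_object_t, cached_list) {
		if (matches(obj)) {
			reap(obj);	/* drops the cache lock */
			goto restart;
		}
	}
	list_unlock(&cache);
#endif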
4423
0b4e3aa0 4424
4425
4426kern_return_t
4427memory_object_create_named(
4428 memory_object_t pager,
4429 memory_object_offset_t size,
4430 memory_object_control_t *control)
1c79356b 4431{
4432 vm_object_t object;
4433 vm_object_hash_entry_t entry;
1c79356b 4434
4435 *control = MEMORY_OBJECT_CONTROL_NULL;
4436 if (pager == MEMORY_OBJECT_NULL)
4437 return KERN_INVALID_ARGUMENT;
1c79356b 4438
4439 vm_object_cache_lock();
4440 entry = vm_object_hash_lookup(pager, FALSE);
4441 if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
4442 (entry->object != VM_OBJECT_NULL)) {
4443 if (entry->object->named == TRUE)
4444 panic("memory_object_create_named: caller already holds the right"); }
1c79356b 4445
4446 vm_object_cache_unlock();
4447 if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE))
4448 == VM_OBJECT_NULL) {
4449 return(KERN_INVALID_OBJECT);
4450 }
4451
4452 /* wait for object (if any) to be ready */
4453 if (object != VM_OBJECT_NULL) {
4454 vm_object_lock(object);
4455 object->named = TRUE;
4456 while (!object->pager_ready) {
4457 vm_object_sleep(object,
4458 VM_OBJECT_EVENT_PAGER_READY,
4459 THREAD_UNINT);
0b4e3aa0 4460 }
91447636 4461 *control = object->pager_control;
4462 vm_object_unlock(object);
4463 }
4464 return (KERN_SUCCESS);
4465}
1c79356b 4466
1c79356b 4467
4468/*
4469 * Routine: memory_object_recover_named [user interface]
4470 * Purpose:
4471 * Attempt to recover a named reference for a VM object.
4472 * VM will verify that the object has not already started
4473 * down the termination path, and if it has, will optionally
4474 * wait for that to finish.
4475 * Returns:
4476 * KERN_SUCCESS - we recovered a named reference on the object
4477 * KERN_FAILURE - we could not recover a reference (object dead)
4478 * KERN_INVALID_ARGUMENT - bad memory object control
4479 */
4480kern_return_t
4481memory_object_recover_named(
4482 memory_object_control_t control,
4483 boolean_t wait_on_terminating)
4484{
4485 vm_object_t object;
1c79356b 4486
4487 vm_object_cache_lock();
4488 object = memory_object_control_to_vm_object(control);
4489 if (object == VM_OBJECT_NULL) {
4490 vm_object_cache_unlock();
4491 return (KERN_INVALID_ARGUMENT);
4492 }
1c79356b 4493
4494restart:
4495 vm_object_lock(object);
1c79356b 4496
4497 if (object->terminating && wait_on_terminating) {
4498 vm_object_cache_unlock();
4499 vm_object_wait(object,
4500 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
4501 THREAD_UNINT);
4502 vm_object_cache_lock();
4503 goto restart;
4504 }
4505
4506 if (!object->alive) {
4507 vm_object_cache_unlock();
4508 vm_object_unlock(object);
4509 return KERN_FAILURE;
4510 }
4511
4512 if (object->named == TRUE) {
4513 vm_object_cache_unlock();
4514 vm_object_unlock(object);
4515 return KERN_SUCCESS;
4516 }
1c79356b 4517
4518 if((object->ref_count == 0) && (!object->terminating)){
4519 queue_remove(&vm_object_cached_list, object,
4520 vm_object_t, cached_list);
4521 vm_object_cached_count--;
4522 XPR(XPR_VM_OBJECT_CACHE,
4523 "memory_object_recover_named: removing %X, head (%X, %X)\n",
4524 (integer_t)object,
4525 (integer_t)vm_object_cached_list.next,
4526 (integer_t)vm_object_cached_list.prev, 0,0);
4527 }
4528
4529 vm_object_cache_unlock();
4530
4531 object->named = TRUE;
4532 object->ref_count++;
4533 vm_object_res_reference(object);
4534 while (!object->pager_ready) {
4535 vm_object_sleep(object,
4536 VM_OBJECT_EVENT_PAGER_READY,
4537 THREAD_UNINT);
4538 }
4539 vm_object_unlock(object);
4540 return (KERN_SUCCESS);
4541}
4542
4543
4544/*
4545 * vm_object_release_name:
4546 *
4547 * Enforces name semantic on memory_object reference count decrement
4548 * This routine should not be called unless the caller holds a name
4549 * reference gained through the memory_object_create_named.
4550 *
4551 * If the TERMINATE_IDLE flag is set, the call will return if the
4552 * reference count is not 1. i.e. idle with the only remaining reference
4553 * being the name.
4554 * If the decision is made to proceed the name field flag is set to
4555 * false and the reference count is decremented. If the RESPECT_CACHE
4556 * flag is set and the reference count has gone to zero, the
4557 * memory_object is checked to see if it is cacheable otherwise when
4558 * the reference count is zero, it is simply terminated.
4559 */
4560
4561__private_extern__ kern_return_t
4562vm_object_release_name(
4563 vm_object_t object,
4564 int flags)
1c79356b 4565{
4566 vm_object_t shadow;
4567 boolean_t original_object = TRUE;
1c79356b 4568
0b4e3aa0 4569 while (object != VM_OBJECT_NULL) {
1c79356b 4570
4571 /*
4572 * The cache holds a reference (uncounted) to
 4573 * the object. We must lock it before removing
4574 * the object.
4575 *
4576 */
4577
1c79356b 4578 vm_object_cache_lock();
4579 vm_object_lock(object);
4580 assert(object->alive);
4581 if(original_object)
4582 assert(object->named);
4583 assert(object->ref_count > 0);
4584
4585 /*
4586 * We have to wait for initialization before
4587 * destroying or caching the object.
4588 */
4589
4590 if (object->pager_created && !object->pager_initialized) {
4591 assert(!object->can_persist);
4592 vm_object_assert_wait(object,
4593 VM_OBJECT_EVENT_INITIALIZED,
4594 THREAD_UNINT);
4595 vm_object_unlock(object);
4596 vm_object_cache_unlock();
9bccf70c 4597 thread_block(THREAD_CONTINUE_NULL);
0b4e3aa0 4598 continue;
4599 }
4600
4601 if (((object->ref_count > 1)
4602 && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
4603 || (object->terminating)) {
4604 vm_object_unlock(object);
4605 vm_object_cache_unlock();
4606 return KERN_FAILURE;
4607 } else {
4608 if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
4609 vm_object_unlock(object);
4610 vm_object_cache_unlock();
4611 return KERN_SUCCESS;
1c79356b 4612 }
4613 }
4614
4615 if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
4616 (object->ref_count == 1)) {
4617 if(original_object)
4618 object->named = FALSE;
1c79356b 4619 vm_object_unlock(object);
4620 vm_object_cache_unlock();
4621 /* let vm_object_deallocate push this thing into */
 4622 /* the cache, if that is where it is bound */
4623 vm_object_deallocate(object);
4624 return KERN_SUCCESS;
4625 }
4626 VM_OBJ_RES_DECR(object);
4627 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
4628 if(object->ref_count == 1) {
4629 if(vm_object_terminate(object) != KERN_SUCCESS) {
4630 if(original_object) {
4631 return KERN_FAILURE;
4632 } else {
4633 return KERN_SUCCESS;
4634 }
4635 }
4636 if (shadow != VM_OBJECT_NULL) {
4637 original_object = FALSE;
4638 object = shadow;
4639 continue;
4640 }
4641 return KERN_SUCCESS;
4642 } else {
4643 object->ref_count--;
4644 assert(object->ref_count > 0);
4645 if(original_object)
4646 object->named = FALSE;
4647 vm_object_unlock(object);
4648 vm_object_cache_unlock();
4649 return KERN_SUCCESS;
1c79356b 4650 }
1c79356b 4651 }
4652 /*NOTREACHED*/
4653 assert(0);
4654 return KERN_FAILURE;
4655}
4656
4657
4658__private_extern__ kern_return_t
4659vm_object_lock_request(
4660 vm_object_t object,
4661 vm_object_offset_t offset,
4662 vm_object_size_t size,
4663 memory_object_return_t should_return,
4664 int flags,
4665 vm_prot_t prot)
1c79356b 4666{
4667 __unused boolean_t should_flush;
4668
4669 should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
1c79356b 4670
4671 XPR(XPR_MEMORY_OBJECT,
4672 "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
4673 (integer_t)object, offset, size,
4674 (((should_return&1)<<1)|should_flush), prot);
1c79356b 4675
4676 /*
4677 * Check for bogus arguments.
4678 */
4679 if (object == VM_OBJECT_NULL)
4680 return (KERN_INVALID_ARGUMENT);
1c79356b 4681
4682 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
4683 return (KERN_INVALID_ARGUMENT);
1c79356b 4684
55e303ae 4685 size = round_page_64(size);
4686
4687 /*
4688 * Lock the object, and acquire a paging reference to
4689 * prevent the memory_object reference from being released.
4690 */
4691 vm_object_lock(object);
4692 vm_object_paging_begin(object);
4693
4694 (void)vm_object_update(object,
91447636 4695 offset, size, NULL, NULL, should_return, flags, prot);
4696
4697 vm_object_paging_end(object);
4698 vm_object_unlock(object);
4699
4700 return (KERN_SUCCESS);
4701}
4702
4703/*
4704 * Empty a purgable object by grabbing the physical pages assigned to it and
4705 * putting them on the free queue without writing them to backing store, etc.
4706 * When the pages are next touched they will be demand zero-fill pages. We
4707 * skip pages which are busy, being paged in/out, wired, etc. We do _not_
4708 * skip referenced/dirty pages, pages on the active queue, etc. We're more
4709 * than happy to grab these since this is a purgable object. We mark the
4710 * object as "empty" after reaping its pages.
4711 *
4712 * On entry the object and page queues are locked, the object must be a
4713 * purgable object with no delayed copies pending.
4714 */
4715unsigned int
4716vm_object_purge(vm_object_t object)
4717{
4718 vm_page_t p, next;
4719 unsigned int num_purged_pages;
4720 vm_page_t local_freeq;
4721 unsigned long local_freed;
4722 int purge_loop_quota;
4723/* free pages as soon as we gather PURGE_BATCH_FREE_LIMIT pages to free */
4724#define PURGE_BATCH_FREE_LIMIT 50
4725/* release page queues lock every PURGE_LOOP_QUOTA iterations */
4726#define PURGE_LOOP_QUOTA 100
4727
4728 num_purged_pages = 0;
4729 if (object->purgable == VM_OBJECT_NONPURGABLE)
4730 return num_purged_pages;
0b4e3aa0 4731
4732 object->purgable = VM_OBJECT_PURGABLE_EMPTY;
4733
4734 assert(object->copy == VM_OBJECT_NULL);
4735 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
4736 purge_loop_quota = PURGE_LOOP_QUOTA;
4737
4738 local_freeq = VM_PAGE_NULL;
4739 local_freed = 0;
4740
4741 /*
4742 * Go through the object's resident pages and try and discard them.
4743 */
4744 next = (vm_page_t)queue_first(&object->memq);
4745 while (!queue_end(&object->memq, (queue_entry_t)next)) {
4746 p = next;
4747 next = (vm_page_t)queue_next(&next->listq);
4748
4749 if (purge_loop_quota-- == 0) {
4750 /*
4751 * Avoid holding the page queues lock for too long.
4752 * Let someone else take it for a while if needed.
4753 * Keep holding the object's lock to guarantee that
4754 * the object's page list doesn't change under us
4755 * while we yield.
4756 */
4757 if (local_freeq != VM_PAGE_NULL) {
4758 /*
4759 * Flush our queue of pages to free.
4760 */
4761 vm_page_free_list(local_freeq);
4762 local_freeq = VM_PAGE_NULL;
4763 local_freed = 0;
4764 }
4765 vm_page_unlock_queues();
4766 mutex_pause();
4767 vm_page_lock_queues();
4768
4769 /* resume with the current page and a new quota */
4770 purge_loop_quota = PURGE_LOOP_QUOTA;
4771 }
4772
4773
4774 if (p->busy || p->cleaning || p->laundry ||
4775 p->list_req_pending) {
4776 /* page is being acted upon, so don't mess with it */
4777 continue;
4778 }
4779 if (p->wire_count) {
4780 /* don't discard a wired page */
4781 continue;
4782 }
4783
4784 if (p->tabled) {
4785 /* clean up the object/offset table */
4786 vm_page_remove(p);
4787 }
4788 if (p->absent) {
4789 /* update the object's count of absent pages */
4790 vm_object_absent_release(object);
4791 }
4792
4793 /* we can discard this page */
4794
 4795 /* advertise that this page is in a transition state */
4796 p->busy = TRUE;
4797
4798 if (p->no_isync == TRUE) {
4799 /* the page hasn't been mapped yet */
4800 /* (optimization to delay the i-cache sync) */
4801 } else {
4802 /* unmap the page */
4803 int refmod_state;
4804
4805 refmod_state = pmap_disconnect(p->phys_page);
4806 if (refmod_state & VM_MEM_MODIFIED) {
4807 p->dirty = TRUE;
4808 }
4809 }
4810
4811 if (p->dirty || p->precious) {
4812 /* we saved the cost of cleaning this page ! */
4813 num_purged_pages++;
4814 vm_page_purged_count++;
4815 }
4816
4817 /* remove page from active or inactive queue... */
4818 VM_PAGE_QUEUES_REMOVE(p);
4819
4820 /* ... and put it on our queue of pages to free */
4821 assert(!p->laundry);
4822 assert(p->object != kernel_object);
4823 assert(p->pageq.next == NULL &&
4824 p->pageq.prev == NULL);
4825 p->pageq.next = (queue_entry_t) local_freeq;
4826 local_freeq = p;
4827 if (++local_freed >= PURGE_BATCH_FREE_LIMIT) {
4828 /* flush our queue of pages to free */
4829 vm_page_free_list(local_freeq);
4830 local_freeq = VM_PAGE_NULL;
4831 local_freed = 0;
4832 }
4833 }
4834
4835 /* flush our local queue of pages to free one last time */
4836 if (local_freeq != VM_PAGE_NULL) {
4837 vm_page_free_list(local_freeq);
4838 local_freeq = VM_PAGE_NULL;
4839 local_freed = 0;
4840 }
4841
4842 return num_purged_pages;
4843}
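
/*
 * Caller sketch matching the entry conditions in the block comment
 * above (an assumption about usage, not a copy of a call site): both
 * locks held, object already purgable, no delayed copies pending.
 */
#if 0	/* illustrative only */
	vm_object_lock(object);
	vm_page_lock_queues();
	reclaimed = vm_object_purge(object);	/* pages freed, no I/O */
	vm_page_unlock_queues();
	vm_object_unlock(object);
#endif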
4844
4845/*
4846 * vm_object_purgable_control() allows the caller to control and investigate the
4847 * state of a purgable object. A purgable object is created via a call to
4848 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgable object will
4849 * never be coalesced with any other object -- even other purgable objects --
4850 * and will thus always remain a distinct object. A purgable object has
4851 * special semantics when its reference count is exactly 1. If its reference
4852 * count is greater than 1, then a purgable object will behave like a normal
4853 * object and attempts to use this interface will result in an error return
4854 * of KERN_INVALID_ARGUMENT.
4855 *
4856 * A purgable object may be put into a "volatile" state which will make the
 4857 * object's pages eligible to be reclaimed without paging to backing
4858 * store if the system runs low on memory. If the pages in a volatile
4859 * purgable object are reclaimed, the purgable object is said to have been
4860 * "emptied." When a purgable object is emptied the system will reclaim as
4861 * many pages from the object as it can in a convenient manner (pages already
4862 * en route to backing store or busy for other reasons are left as is). When
4863 * a purgable object is made volatile, its pages will generally be reclaimed
4864 * before other pages in the application's working set. This semantic is
4865 * generally used by applications which can recreate the data in the object
4866 * faster than it can be paged in. One such example might be media assets
4867 * which can be reread from a much faster RAID volume.
4868 *
4869 * A purgable object may be designated as "non-volatile" which means it will
4870 * behave like all other objects in the system with pages being written to and
4871 * read from backing store as needed to satisfy system memory needs. If the
4872 * object was emptied before the object was made non-volatile, that fact will
4873 * be returned as the old state of the purgable object (see
4874 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
4875 * were reclaimed as part of emptying the object will be refaulted in as
4876 * zero-fill on demand. It is up to the application to note that an object
4877 * was emptied and recreate the objects contents if necessary. When a
 4878 * was emptied and recreate the object's contents if necessary. When a
4879 * out to backing store in the immediate future. A purgable object may also
4880 * be manually emptied.
4881 *
4882 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
4883 * volatile purgable object may be queried at any time. This information may
4884 * be used as a control input to let the application know when the system is
4885 * experiencing memory pressure and is reclaiming memory.
4886 *
4887 * The specified address may be any address within the purgable object. If
4888 * the specified address does not represent any object in the target task's
4889 * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
4890 * object containing the specified address is not a purgable object, then
4891 * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
4892 * returned.
4893 *
4894 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
4895 * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
4896 * state is used to set the new state of the purgable object and return its
4897 * old state. For VM_PURGABLE_GET_STATE, the current state of the purgable
4898 * object is returned in the parameter state.
4899 *
4900 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
4901 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
4902 * the non-volatile, volatile and volatile/empty states described above.
4903 * Setting the state of a purgable object to VM_PURGABLE_EMPTY will
4904 * immediately reclaim as many pages in the object as can be conveniently
4905 * collected (some may have already been written to backing store or be
4906 * otherwise busy).
4907 *
4908 * The process of making a purgable object non-volatile and determining its
4909 * previous state is atomic. Thus, if a purgable object is made
4910 * VM_PURGABLE_NONVOLATILE and the old state is returned as
4911 * VM_PURGABLE_VOLATILE, then the purgable object's previous contents are
4912 * completely intact and will remain so until the object is made volatile
4913 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
4914 * was reclaimed while it was in a volatile state and its previous contents
4915 * have been lost.
4916 */
4917/*
4918 * The object must be locked.
4919 */
4920kern_return_t
4921vm_object_purgable_control(
4922 vm_object_t object,
4923 vm_purgable_t control,
4924 int *state)
4925{
4926 int old_state;
4927 vm_page_t p;
4928
4929 if (object == VM_OBJECT_NULL) {
4930 /*
4931 * Object must already be present or it can't be purgable.
4932 */
4933 return KERN_INVALID_ARGUMENT;
4934 }
4935
4936 /*
4937 * Get current state of the purgable object.
4938 */
4939 switch (object->purgable) {
4940 case VM_OBJECT_NONPURGABLE:
4941 return KERN_INVALID_ARGUMENT;
4942
4943 case VM_OBJECT_PURGABLE_NONVOLATILE:
4944 old_state = VM_PURGABLE_NONVOLATILE;
4945 break;
4946
4947 case VM_OBJECT_PURGABLE_VOLATILE:
4948 old_state = VM_PURGABLE_VOLATILE;
4949 break;
4950
4951 case VM_OBJECT_PURGABLE_EMPTY:
4952 old_state = VM_PURGABLE_EMPTY;
4953 break;
4954
4955 default:
4956 old_state = VM_PURGABLE_NONVOLATILE;
4957 panic("Bad state (%d) for purgable object!\n",
4958 object->purgable);
4959 /*NOTREACHED*/
4960 }
4961
 4962 /* purgable can't have delayed copies - now or in the future */
4963 assert(object->copy == VM_OBJECT_NULL);
4964 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
4965
4966 /*
4967 * Execute the desired operation.
4968 */
4969 if (control == VM_PURGABLE_GET_STATE) {
4970 *state = old_state;
4971 return KERN_SUCCESS;
4972 }
4973
4974 switch (*state) {
4975 case VM_PURGABLE_NONVOLATILE:
4976 vm_page_lock_queues();
4977 if (object->purgable != VM_OBJECT_PURGABLE_NONVOLATILE) {
4978 assert(vm_page_purgeable_count >=
4979 object->resident_page_count);
4980 vm_page_purgeable_count -= object->resident_page_count;
4981 }
4982
4983 object->purgable = VM_OBJECT_PURGABLE_NONVOLATILE;
4984
4985 /*
4986 * If the object wasn't emptied, then mark all pages of the
4987 * object as referenced in order to give them a complete turn
4988 * of the virtual memory "clock" before becoming candidates
4989 * for paging out (if the system is suffering from memory
4990 * pressure). We don't really need to set the pmap reference
4991 * bits (which would be expensive) since the software copies
4992 * are believed if they're set to true ...
4993 */
4994 if (old_state != VM_PURGABLE_EMPTY) {
4995 for (p = (vm_page_t)queue_first(&object->memq);
4996 !queue_end(&object->memq, (queue_entry_t)p);
4997 p = (vm_page_t)queue_next(&p->listq))
4998 p->reference = TRUE;
4999 }
5000
5001 vm_page_unlock_queues();
5002
5003 break;
5004
5005 case VM_PURGABLE_VOLATILE:
5006 vm_page_lock_queues();
5007
5008 if (object->purgable != VM_OBJECT_PURGABLE_VOLATILE &&
5009 object->purgable != VM_OBJECT_PURGABLE_EMPTY) {
5010 vm_page_purgeable_count += object->resident_page_count;
5011 }
5012
5013 object->purgable = VM_OBJECT_PURGABLE_VOLATILE;
5014
5015 /*
5016 * We want the newly volatile purgable object to be a
5017 * candidate for the pageout scan before other pages in the
5018 * application if the system is suffering from memory
5019 * pressure. To do this, we move a page of the object from
5020 * the active queue onto the inactive queue in order to
5021 * promote the object for early reclaim. We only need to move
5022 * a single page since the pageout scan will reap the entire
5023 * purgable object if it finds a single page in a volatile
5024 * state. Obviously we don't do this if there are no pages
5025 * associated with the object or we find a page of the object
5026 * already on the inactive queue.
5027 */
5028 for (p = (vm_page_t)queue_first(&object->memq);
5029 !queue_end(&object->memq, (queue_entry_t)p);
5030 p = (vm_page_t)queue_next(&p->listq)) {
5031 if (p->inactive) {
5032 /* already a page on the inactive queue */
5033 break;
5034 }
5035 if (p->active && !p->busy) {
5036 /* found one we can move */
5037 vm_page_deactivate(p);
5038 break;
5039 }
5040 }
5041 vm_page_unlock_queues();
5042
5043 break;
5044
5045
5046 case VM_PURGABLE_EMPTY:
5047 vm_page_lock_queues();
5048 if (object->purgable != VM_OBJECT_PURGABLE_VOLATILE &&
5049 object->purgable != VM_OBJECT_PURGABLE_EMPTY) {
5050 vm_page_purgeable_count += object->resident_page_count;
5051 }
5052 (void) vm_object_purge(object);
5053 vm_page_unlock_queues();
5054 break;
5055
5056 }
5057 *state = old_state;
5058
5059 return KERN_SUCCESS;
5060}
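
/*
 * User-level sketch of the semantics documented above, via the Mach
 * interface that ultimately reaches this routine (assumes a region
 * created with vm_allocate() and VM_FLAGS_PURGABLE; hedged, since
 * the user-visible call is declared elsewhere):
 */
#if 0	/* illustrative only */
	int		state;
	kern_return_t	kr;

	state = VM_PURGABLE_VOLATILE;		/* offer the pages up */
	kr = vm_purgable_control(mach_task_self(), address,
				 VM_PURGABLE_SET_STATE, &state);
	/* ... later, take the region back ... */
	state = VM_PURGABLE_NONVOLATILE;
	kr = vm_purgable_control(mach_task_self(), address,
				 VM_PURGABLE_SET_STATE, &state);
	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
		/* contents were reclaimed; recreate them */
	}
#endif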
5061
5062#if TASK_SWAPPER
5063/*
5064 * vm_object_res_deallocate
5065 *
5066 * (recursively) decrement residence counts on vm objects and their shadows.
5067 * Called from vm_object_deallocate and when swapping out an object.
5068 *
5069 * The object is locked, and remains locked throughout the function,
5070 * even as we iterate down the shadow chain. Locks on intermediate objects
5071 * will be dropped, but not the original object.
5072 *
5073 * NOTE: this function used to use recursion, rather than iteration.
5074 */
5075
5076__private_extern__ void
5077vm_object_res_deallocate(
5078 vm_object_t object)
5079{
5080 vm_object_t orig_object = object;
5081 /*
5082 * Object is locked so it can be called directly
5083 * from vm_object_deallocate. Original object is never
5084 * unlocked.
5085 */
5086 assert(object->res_count > 0);
5087 while (--object->res_count == 0) {
5088 assert(object->ref_count >= object->res_count);
5089 vm_object_deactivate_all_pages(object);
5090 /* iterate on shadow, if present */
5091 if (object->shadow != VM_OBJECT_NULL) {
5092 vm_object_t tmp_object = object->shadow;
5093 vm_object_lock(tmp_object);
5094 if (object != orig_object)
5095 vm_object_unlock(object);
5096 object = tmp_object;
5097 assert(object->res_count > 0);
5098 } else
5099 break;
5100 }
5101 if (object != orig_object)
1c79356b 5102 vm_object_unlock(object);
5103}
5104
5105/*
5106 * vm_object_res_reference
5107 *
5108 * Internal function to increment residence count on a vm object
5109 * and its shadows. It is called only from vm_object_reference, and
5110 * when swapping in a vm object, via vm_map_swap.
5111 *
5112 * The object is locked, and remains locked throughout the function,
5113 * even as we iterate down the shadow chain. Locks on intermediate objects
5114 * will be dropped, but not the original object.
5115 *
5116 * NOTE: this function used to use recursion, rather than iteration.
5117 */
5118
5119__private_extern__ void
5120vm_object_res_reference(
5121 vm_object_t object)
5122{
5123 vm_object_t orig_object = object;
5124 /*
5125 * Object is locked, so this can be called directly
5126 * from vm_object_reference. This lock is never released.
5127 */
5128 while ((++object->res_count == 1) &&
5129 (object->shadow != VM_OBJECT_NULL)) {
5130 vm_object_t tmp_object = object->shadow;
5131
5132 assert(object->ref_count >= object->res_count);
5133 vm_object_lock(tmp_object);
5134 if (object != orig_object)
5135 vm_object_unlock(object);
5136 object = tmp_object;
1c79356b 5137 }
5138 if (object != orig_object)
5139 vm_object_unlock(object);
5140 assert(orig_object->ref_count >= orig_object->res_count);
1c79356b 5141}
5142#endif /* TASK_SWAPPER */
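
/*
 * Both routines above use hand-over-hand (lock-coupling) traversal:
 * lock the shadow before unlocking the current object, and never
 * unlock the original.  Generic form (sketch; the node type and
 * lock()/unlock() are hypothetical):
 */
#if 0	/* illustrative only */
	node = head;				/* head stays locked */
	while (node->next != NULL) {
		next = node->next;
		lock(next);			/* couple before releasing */
		if (node != head)
			unlock(node);
		node = next;
	}
	if (node != head)
		unlock(node);			/* head is left locked */
#endif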
5143
5144/*
5145 * vm_object_reference:
5146 *
5147 * Gets another reference to the given object.
5148 */
5149#ifdef vm_object_reference
5150#undef vm_object_reference
5151#endif
5152__private_extern__ void
5153vm_object_reference(
5154 register vm_object_t object)
5155{
5156 if (object == VM_OBJECT_NULL)
5157 return;
5158
5159 vm_object_lock(object);
5160 assert(object->ref_count > 0);
5161 vm_object_reference_locked(object);
5162 vm_object_unlock(object);
5163}
5164
5165#ifdef MACH_BSD
5166/*
5167 * Scale the vm_object_cache
5168 * This is required to make sure that the vm_object_cache is big
5169 * enough to effectively cache the mapped file.
5170 * This is really important with UBC as all the regular file vnodes
 5171 * have a memory object associated with them. Having this cache too
5172 * small results in rapid reclaim of vnodes and hurts performance a LOT!
5173 *
 5174 * This is also needed as the number of vnodes can be dynamically scaled.
5175 */
5176kern_return_t
5177adjust_vm_object_cache(
5178 __unused vm_size_t oval,
5179 vm_size_t nval)
5180{
5181 vm_object_cached_max = nval;
5182 vm_object_cache_trim(FALSE);
5183 return (KERN_SUCCESS);
5184}
5185#endif /* MACH_BSD */
5186
5187
5188/*
5189 * vm_object_transpose
5190 *
5191 * This routine takes two VM objects of the same size and exchanges
5192 * their backing store.
5193 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
5194 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
5195 *
5196 * The VM objects must not be locked by caller.
5197 */
5198kern_return_t
5199vm_object_transpose(
5200 vm_object_t object1,
5201 vm_object_t object2,
5202 vm_object_size_t transpose_size)
5203{
5204 vm_object_t tmp_object;
5205 kern_return_t retval;
5206 boolean_t object1_locked, object2_locked;
5207 boolean_t object1_paging, object2_paging;
5208 vm_page_t page;
5209 vm_object_offset_t page_offset;
5210
5211 tmp_object = VM_OBJECT_NULL;
5212 object1_locked = FALSE; object2_locked = FALSE;
5213 object1_paging = FALSE; object2_paging = FALSE;
5214
5215 if (object1 == object2 ||
5216 object1 == VM_OBJECT_NULL ||
5217 object2 == VM_OBJECT_NULL) {
5218 /*
5219 * If the 2 VM objects are the same, there's
5220 * no point in exchanging their backing store.
5221 */
5222 retval = KERN_INVALID_VALUE;
5223 goto done;
5224 }
5225
5226 vm_object_lock(object1);
5227 object1_locked = TRUE;
5228 if (object1->copy || object1->shadow || object1->shadowed ||
5229 object1->purgable != VM_OBJECT_NONPURGABLE) {
5230 /*
5231 * We don't deal with copy or shadow objects (yet).
5232 */
5233 retval = KERN_INVALID_VALUE;
5234 goto done;
5235 }
5236 /*
5237 * Since we're about to mess with the object's backing store,
5238 * mark it as "paging_in_progress". Note that this is not enough
5239 * to prevent any paging activity on this object, so the caller should
5240 * have "quiesced" the objects beforehand, via a UPL operation with
5241 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
5242 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
5243 */
5244 vm_object_paging_begin(object1);
5245 object1_paging = TRUE;
5246 vm_object_unlock(object1);
5247 object1_locked = FALSE;
5248
5249 /*
5250 * Same as above for the 2nd object...
5251 */
5252 vm_object_lock(object2);
5253 object2_locked = TRUE;
5254 if (object2->copy || object2->shadow || object2->shadowed ||
5255 object2->purgable != VM_OBJECT_NONPURGABLE) {
5256 retval = KERN_INVALID_VALUE;
5257 goto done;
5258 }
5259 vm_object_paging_begin(object2);
5260 object2_paging = TRUE;
5261 vm_object_unlock(object2);
5262 object2_locked = FALSE;
5263
5264 /*
5265 * Allocate a temporary VM object to hold object1's contents
5266 * while we copy object2 to object1.
5267 */
5268 tmp_object = vm_object_allocate(transpose_size);
5269 vm_object_lock(tmp_object);
5270 vm_object_paging_begin(tmp_object);
5271 tmp_object->can_persist = FALSE;
5272
5273 /*
5274 * Since we need to lock both objects at the same time,
5275 * make sure we always lock them in the same order to
5276 * avoid deadlocks.
5277 */
5278 if (object1 < object2) {
5279 vm_object_lock(object1);
5280 vm_object_lock(object2);
5281 } else {
5282 vm_object_lock(object2);
5283 vm_object_lock(object1);
5284 }
5285 object1_locked = TRUE;
5286 object2_locked = TRUE;
5287
5288 if (object1->size != object2->size ||
5289 object1->size != transpose_size) {
5290 /*
5291 * If the 2 objects don't have the same size, we can't
5292 * exchange their backing stores or one would overflow.
5293 * If their size doesn't match the caller's
5294 * "transpose_size", we can't do it either because the
5295 * transpose operation will affect the entire span of
5296 * the objects.
5297 */
5298 retval = KERN_INVALID_VALUE;
5299 goto done;
5300 }
5301
5302
5303 /*
5304 * Transpose the lists of resident pages.
5305 */
5306 if (object1->phys_contiguous || queue_empty(&object1->memq)) {
5307 /*
5308 * No pages in object1, just transfer pages
5309 * from object2 to object1. No need to go through
5310 * an intermediate object.
5311 */
5312 while (!queue_empty(&object2->memq)) {
5313 page = (vm_page_t) queue_first(&object2->memq);
5314 vm_page_rename(page, object1, page->offset);
5315 }
5316 assert(queue_empty(&object2->memq));
5317 } else if (object2->phys_contiguous || queue_empty(&object2->memq)) {
5318 /*
5319 * No pages in object2, just transfer pages
5320 * from object1 to object2. No need to go through
5321 * an intermediate object.
5322 */
5323 while (!queue_empty(&object1->memq)) {
5324 page = (vm_page_t) queue_first(&object1->memq);
5325 vm_page_rename(page, object2, page->offset);
5326 }
5327 assert(queue_empty(&object1->memq));
5328 } else {
5329 /* transfer object1's pages to tmp_object */
5330 vm_page_lock_queues();
5331 while (!queue_empty(&object1->memq)) {
5332 page = (vm_page_t) queue_first(&object1->memq);
5333 page_offset = page->offset;
5334 vm_page_remove(page);
5335 page->offset = page_offset;
5336 queue_enter(&tmp_object->memq, page, vm_page_t, listq);
5337 }
5338 vm_page_unlock_queues();
5339 assert(queue_empty(&object1->memq));
5340 /* transfer object2's pages to object1 */
5341 while (!queue_empty(&object2->memq)) {
5342 page = (vm_page_t) queue_first(&object2->memq);
5343 vm_page_rename(page, object1, page->offset);
5344 }
5345 assert(queue_empty(&object2->memq));
5346 /* transfer tmp_object's pages to object1 */
5347 while (!queue_empty(&tmp_object->memq)) {
5348 page = (vm_page_t) queue_first(&tmp_object->memq);
5349 queue_remove(&tmp_object->memq, page,
5350 vm_page_t, listq);
5351 vm_page_insert(page, object2, page->offset);
5352 }
5353 assert(queue_empty(&tmp_object->memq));
5354 }
5355
5356 /* no need to transpose the size: they should be identical */
5357 assert(object1->size == object2->size);
5358
5359#define __TRANSPOSE_FIELD(field) \
5360MACRO_BEGIN \
5361 tmp_object->field = object1->field; \
5362 object1->field = object2->field; \
5363 object2->field = tmp_object->field; \
5364MACRO_END
5365
5366 assert(!object1->copy);
5367 assert(!object2->copy);
5368
5369 assert(!object1->shadow);
5370 assert(!object2->shadow);
5371
5372 __TRANSPOSE_FIELD(shadow_offset); /* used by phys_contiguous objects */
5373 __TRANSPOSE_FIELD(pager);
5374 __TRANSPOSE_FIELD(paging_offset);
5375
5376 __TRANSPOSE_FIELD(pager_control);
5377 /* update the memory_objects' pointers back to the VM objects */
5378 if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
5379 memory_object_control_collapse(object1->pager_control,
5380 object1);
5381 }
5382 if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
5383 memory_object_control_collapse(object2->pager_control,
5384 object2);
5385 }
5386
5387 __TRANSPOSE_FIELD(absent_count);
5388
5389 assert(object1->paging_in_progress);
5390 assert(object2->paging_in_progress);
5391
5392 __TRANSPOSE_FIELD(pager_created);
5393 __TRANSPOSE_FIELD(pager_initialized);
5394 __TRANSPOSE_FIELD(pager_ready);
5395 __TRANSPOSE_FIELD(pager_trusted);
5396 __TRANSPOSE_FIELD(internal);
5397 __TRANSPOSE_FIELD(temporary);
5398 __TRANSPOSE_FIELD(private);
5399 __TRANSPOSE_FIELD(pageout);
5400 __TRANSPOSE_FIELD(true_share);
5401 __TRANSPOSE_FIELD(phys_contiguous);
5402 __TRANSPOSE_FIELD(nophyscache);
5403 __TRANSPOSE_FIELD(last_alloc);
5404 __TRANSPOSE_FIELD(sequential);
5405 __TRANSPOSE_FIELD(cluster_size);
5406 __TRANSPOSE_FIELD(existence_map);
5407 __TRANSPOSE_FIELD(cow_hint);
5408 __TRANSPOSE_FIELD(wimg_bits);
5409
5410#undef __TRANSPOSE_FIELD
5411
5412 retval = KERN_SUCCESS;
5413
5414done:
5415 /*
5416 * Cleanup.
5417 */
5418 if (tmp_object != VM_OBJECT_NULL) {
5419 vm_object_paging_end(tmp_object);
5420 vm_object_unlock(tmp_object);
5421 /*
5422 * Re-initialize the temporary object to avoid
5423 * deallocating a real pager.
5424 */
5425 _vm_object_allocate(transpose_size, tmp_object);
5426 vm_object_deallocate(tmp_object);
5427 tmp_object = VM_OBJECT_NULL;
5428 }
5429
5430 if (object1_locked) {
5431 vm_object_unlock(object1);
5432 object1_locked = FALSE;
5433 }
5434 if (object2_locked) {
5435 vm_object_unlock(object2);
5436 object2_locked = FALSE;
5437 }
5438 if (object1_paging) {
5439 vm_object_lock(object1);
5440 vm_object_paging_end(object1);
5441 vm_object_unlock(object1);
5442 object1_paging = FALSE;
5443 }
5444 if (object2_paging) {
5445 vm_object_lock(object2);
5446 vm_object_paging_end(object2);
5447 vm_object_unlock(object2);
5448 object2_paging = FALSE;
5449 }
5450
5451 return retval;
5452}
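
/*
 * The pairwise locking step above follows the standard
 * address-ordered discipline for taking two locks of the same class
 * without deadlock.  In isolation (sketch derived from the code
 * above):
 */
static void
lock_object_pair_sketch(
	vm_object_t	a,
	vm_object_t	b)
{
	if (a < b) {
		vm_object_lock(a);
		vm_object_lock(b);
	} else {
		vm_object_lock(b);
		vm_object_lock(a);
	}
}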