/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS	50000
#define VM_OBJECT_TRACKING_BTDEPTH	7
#define VM_OBJECT_TRACKING_OP_CREATED	1
#define VM_OBJECT_TRACKING_OP_MODIFIED	2
#define VM_OBJECT_TRACKING_OP_TRUESHARE	3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;
struct vm_shared_region_slide_info;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int		interruptible;
	uint32_t	user_tag;
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */	batch_pmap_op:1,
		__vm_object_fault_info_unused_bits:26;
	int		pmap_options;
};
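
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * a fault path might fill in a vm_object_fault_info before asking
 * vm_object_cluster_size() (declared below) for a cluster size.  The
 * bounds "entry_start" and "entry_end" are hypothetical values taken
 * from the faulting map entry.
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	bzero(&fault_info, sizeof (fault_info));
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *	fault_info.lo_offset = entry_start;
 *	fault_info.hi_offset = entry_end;
 *	fault_info.cluster_size = 0;
 */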

#define vo_size				vo_un1.vou_size
#define vo_cache_pages_to_scan		vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset		vo_un2.vou_shadow_offset
#define vo_cache_ts			vo_un2.vou_cache_ts
#define vo_purgeable_owner		vo_un2.vou_purgeable_owner
#define vo_slide_info			vo_un2.vou_slide_info

struct vm_object {
	/*
	 * On 64-bit systems we pack the pointers hung off the memq.
	 * Those pointers have to be able to point back to the memq,
	 * and the packed pointers are required to be on a 64-byte
	 * boundary, which means two things for the vm_object:
	 * (1) the memq struct has to be the first element of the
	 * structure so that we can control its alignment, and
	 * (2) the vm_object must be aligned on a 64-byte boundary.
	 * For static vm_objects this is accomplished via the
	 * 'aligned' attribute; for vm_objects in the zone pool, it
	 * is accomplished by rounding the size of the vm_object
	 * element up to the nearest 64 bytes before creating the
	 * zone.
	 */
	vm_page_queue_head_t	memq;		/* Resident memory - must be first */
	lck_rw_t		Lock;		/* Synchronization */

#if DEVELOPMENT || DEBUG
	thread_t		Lock_owner;
#endif
	union {
		vm_object_size_t  vou_size;	/* Object size (only valid if internal) */
		int		  vou_cache_pages_to_scan;	/* pages yet to be visited in an
								 * external object in cache
								 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
	unsigned int		resident_page_count;
						/* number of resident pages */
	const unsigned int	wired_page_count; /* number of wired pages
						     use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int		reusable_page_count;

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	vou_cache_ts;	/* age of an external object
						 * present in cache
						 */
		task_t		vou_purgeable_owner;	/* If the purgable bits below are set
							 * to volatile/empty, this is the task
							 * that owns this purgeable object.
							 */
		struct vm_shared_region_slide_info *vou_slide_info;
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int		paging_in_progress:16,
				__object1_unused_bits:16;
	unsigned int		activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short		paging_in_progress;
	unsigned short		activity_in_progress;
#endif /* __LP64__ */
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */

	unsigned int
	/* boolean_t array */ all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */	pager_created:1,	/* Has pager been created? */
	/* boolean_t */	pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */	pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */	pager_trusted:1,	/* The pager for this object
						 * is trusted.  This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */	can_persist:1,		/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */	internal:1,		/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */	private:1,		/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */	pageout:1,		/* pageout object.  contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */	alive:1,		/* Not yet terminated */

	/* boolean_t */	purgable:2,		/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */	purgeable_only_by_kernel:1,
	/* boolean_t */	purgeable_when_ripe:1,	/* Purgeable when a token
						 * becomes ripe.
						 */
	/* boolean_t */	shadowed:1,		/* Shadow may exist */
	/* boolean_t */	true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */	terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */	named:1,		/* Enforces an internal
						 * naming convention by
						 * calling the right routines
						 * for allocation and
						 * destruction; UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */	shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */	phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */	nophyscache:1,
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */
	/* boolean_t */	_object5_unused_bits:1;

	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
	uint32_t		pages_used;
	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
						/* hold object lock when altering */
	unsigned int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					   validated; the signatures are stored
					   with the pager */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		phantom_isssd:1,
		volatile_empty:1,
		volatile_fault:1,
		all_reusable:1,
		blocked_access:1,
		set_cache_attr:1,
		object_slid:1,
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		io_tracking:1,
		no_tag_update:1,	/* */
#if CONFIG_SECLUDED_MEMORY
		eligible_for_secluded:1,
		can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
		__object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
		__object2_unused_bits:5;	/* for expansion */

	uint8_t			scan_collisions;
	vm_tag_t		wire_tag;
	uint8_t			__object4_unused_bits[2];

#if CONFIG_PHANTOM_CACHE
	uint32_t		phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif

#ifdef	VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */

	queue_chain_t		objq;	/* object queue - currently used for purgable queues */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))
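
/*
 * Illustrative sketch (an assumption, not taken from this header): a fault
 * handler can consult the macro above to refuse to zero-fill pages of an
 * object whose contents were reclaimed while it was volatile or empty.
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *		vm_object_unlock(object);
 *		return KERN_MEMORY_ERROR;
 *	}
 */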

extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
vm_object_t	compressor_object;	/* the single compressor object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */

# define	VM_MSYNC_INITIALIZED		0
# define	VM_MSYNC_SYNCHRONIZING		1
# define	VM_MSYNC_DONE			2


extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#define VM_OBJECT_WIRED(object, tag)					\
    MACRO_BEGIN								\
    assert(VM_KERN_MEMORY_NONE != (tag));				\
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);			\
    (object)->wire_tag = (tag);						\
    if (!VM_TAG_ACTIVE_UPDATE						\
	&& ((object)->purgable == VM_PURGABLE_DENY))			\
    {									\
	lck_spin_lock(&vm_objects_wired_lock);				\
	assert(!(object)->objq.next);					\
	assert(!(object)->objq.prev);					\
	queue_enter(&vm_objects_wired, (object), vm_object_t, objq);	\
	lck_spin_unlock(&vm_objects_wired_lock);			\
    }									\
    MACRO_END

#define VM_OBJECT_UNWIRED(object)					\
    MACRO_BEGIN								\
    if (!VM_TAG_ACTIVE_UPDATE						\
        && ((object)->purgable == VM_PURGABLE_DENY) && (object)->objq.next) \
    {									\
	lck_spin_lock(&vm_objects_wired_lock);				\
	queue_remove(&vm_objects_wired, (object), vm_object_t, objq);	\
	lck_spin_unlock(&vm_objects_wired_lock);			\
    }									\
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {			\
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;			\
    }									\
    MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)			\
    MACRO_BEGIN								\
    {									\
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)			\
	if (__wireddelta) {						\
	    boolean_t __overflow __assert_only =			\
	    os_add_overflow((object)->wired_page_count, __wireddelta,	\
			    (unsigned int *)(uintptr_t)&(object)->wired_page_count); \
	    assert(!__overflow);					\
	    if (!(object)->pageout && !(object)->no_tag_update) {	\
		if (__wireddelta > 0) {					\
		    assert (VM_KERN_MEMORY_NONE != (tag));		\
		    if (VM_KERN_MEMORY_NONE == __waswired) {		\
			VM_OBJECT_WIRED((object), (tag));		\
		    }							\
		    vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
		} else if (VM_KERN_MEMORY_NONE != __waswired) {		\
		    assert (VM_KERN_MEMORY_NONE != (object)->wire_tag);	\
		    vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
		    if (!(object)->wired_page_count) {			\
			VM_OBJECT_UNWIRED((object));			\
		    }							\
		}							\
	    }								\
	}								\
    }									\
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)			\
	__wireddelta += delta;						\

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)				\
	if (!m->private && !m->fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)				\
	if (!m->private && !m->fictitious) __wireddelta--;
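
/*
 * Illustrative usage sketch (an assumption, not taken from this header):
 * account for wiring one page "m" of "object" under the object lock.
 * VM_KERN_MEMORY_OSFMK stands in for whatever tag the real caller would
 * charge.
 *
 *	vm_object_lock(object);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, m);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, VM_KERN_MEMORY_OSFMK);
 *	vm_object_unlock(object);
 *
 * The START/END pair opens and closes a C block, so the "__wireddelta"
 * accumulated by the ADD/REMOVE/COUNT macros stays local to that block.
 */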

#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_yield_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
extern void		vm_object_unlock(vm_object_t);
extern boolean_t	vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		      &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
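
/*
 * Illustrative locking pattern (an assumption, not taken from this header):
 * take the lock shared for read-only inspection and upgrade it before
 * mutating.  vm_object_lock_upgrade() is expected to return FALSE when the
 * lock had to be dropped, in which case it must be retaken exclusive.
 *
 *	vm_object_lock_shared(object);
 *	... read-only inspection ...
 *	if (!vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);
 *	}
 *	vm_object_lock_assert_exclusive(object);
 *	... mutate ...
 *	vm_object_unlock(object);
 */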

/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(vm_object_size_t size);

__private_extern__ void		_vm_object_allocate(vm_object_size_t size,
						    vm_object_t object);

#if	TASK_SWAPPER

__private_extern__ void	vm_object_res_reference(
				vm_object_t		object);
__private_extern__ void	vm_object_res_deallocate(
				vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */

#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END


#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END


__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define	vm_object_reference(object)			\
MACRO_BEGIN						\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
MACRO_END

#endif	/* MACH_ASSERT */

__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot,
					int			options);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page,
					struct pmap		*pmap,
					vm_map_offset_t		pmap_offset);

__private_extern__ void		vm_object_reuse_pages(
					vm_object_t		object,
					vm_object_offset_t	start_offset,
					vm_object_offset_t	end_offset,
					boolean_t		allow_partial_reuse);

__private_extern__ void		vm_object_purge(
					vm_object_t		object,
					int			flags);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t	object,
					vm_purgable_t	control,
					int		*state);

__private_extern__ kern_return_t vm_object_get_page_counts(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					unsigned int		*resident_page_count,
					unsigned int		*dirty_page_count);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);

__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);

__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);



__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					upl_control_flags_t	flags,
					vm_tag_t		tag);

__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);

__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);



__private_extern__ vm_object_t	vm_object_memory_object_associate(
					memory_object_t		pager,
					vm_object_t		object,
					vm_object_size_t	size,
					boolean_t		check_named);


__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info,
					uint32_t		*io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

__private_extern__ void		vm_object_change_wimg_mode(
					vm_object_t		object,
					unsigned int		wimg_mode);

extern kern_return_t	adjust_vm_object_cache(
				vm_size_t	oval,
				vm_size_t	nval);

extern kern_return_t	vm_object_page_op(
				vm_object_t		object,
				vm_object_offset_t	offset,
				int			ops,
				ppnum_t			*phys_entry,
				int			*flags);

extern kern_return_t	vm_object_range_op(
				vm_object_t		object,
				vm_object_offset_t	offset_beg,
				vm_object_offset_t	offset_end,
				int			ops,
				uint32_t		*range);


__private_extern__ void		vm_object_reap_pages(
					vm_object_t	object,
					int		reap_type);
#define REAP_REAP	0
#define	REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3

#if CONFIG_FREEZE

__private_extern__ void
vm_object_compressed_freezer_pageout(
	vm_object_t object);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t	blkno;
	uint32_t	len;
	int		priority;
	struct vnode	*devvp;
	queue_chain_t	io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define VM_OBJECT_EVENT_MAX	10 /* 11 bits in "all_wanted", so 0->10 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + event),
			 interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t		object,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread())
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
			  LCK_SLEEP_PROMOTED_PRI,
			  event,
			  interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
				    (event_t)((vm_offset_t)object + event),
				    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event))
		thread_wakeup((event_t)((vm_offset_t)object + event));
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t	object,
	int		event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
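
/*
 * Illustrative pairing (an assumption, not taken from this header): a
 * waiter blocks with vm_object_sleep() until a condition holds, and the
 * thread establishing the condition calls vm_object_wakeup() under the
 * same exclusive object lock.
 *
 * Waiter:
 *	while (!object->pager_ready)
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *
 * Signaller:
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */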

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define	VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */

#define		vm_object_activity_begin(object)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_activity_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define		vm_object_paging_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_paging_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END
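
/*
 * Illustrative bracket (an assumption, not taken from this header): keep
 * the object's pager association alive across an I/O issued without the
 * object lock held.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue the request against object->pager ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */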

#define		vm_object_paging_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END

#define	vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END


#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END



#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
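
/*
 * Example, assuming 4KB pages (PAGE_MASK == 0xFFF):
 *	vm_object_round_page(0x1001) == 0x2000
 *	vm_object_round_page(0x1000) == 0x1000
 *	vm_object_trunc_page(0x1FFF) == 0x1000
 */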

extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */