/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS	50000
#define VM_OBJECT_TRACKING_BTDEPTH	7
#define VM_OBJECT_TRACKING_OP_CREATED	1
#define VM_OBJECT_TRACKING_OP_MODIFIED	2
#define VM_OBJECT_TRACKING_OP_TRUESHARE	3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;
struct vm_shared_region_slide_info;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int		interruptible;
	uint32_t	user_tag;
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	unsigned int
	/* boolean_t */	no_cache:1,
	/* boolean_t */	stealth:1,
	/* boolean_t */	io_sync:1,
	/* boolean_t */	cs_bypass:1,
	/* boolean_t */	mark_zf_absent:1,
	/* boolean_t */	batch_pmap_op:1,
		__vm_object_fault_info_unused_bits:26;
	int		pmap_options;
};

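/*
 * Illustrative sketch (not part of this interface): a fault path that
 * wants clustered pagein might fill in a vm_object_fault_info roughly
 * as below.  The surrounding variable ("object_size") is hypothetical
 * and the values are only examples.
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.user_tag = 0;
 *	fault_info.cluster_size = 0;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *	fault_info.lo_offset = 0;
 *	fault_info.hi_offset = object_size;
 *	fault_info.no_cache = FALSE;
 *	fault_info.stealth = FALSE;
 *	fault_info.pmap_options = 0;
 */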

#define vo_size				vo_un1.vou_size
#define vo_cache_pages_to_scan		vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset		vo_un2.vou_shadow_offset
#define vo_cache_ts			vo_un2.vou_cache_ts
#define vo_purgeable_owner		vo_un2.vou_purgeable_owner
#define vo_slide_info			vo_un2.vou_slide_info

struct vm_object {
	/*
	 * on 64 bit systems we pack the pointers hung off the memq.
	 * those pointers have to be able to point back to the memq.
	 * the packed pointers are required to be on a 64 byte boundary
	 * which means 2 things for the vm_object...  (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment... (2) the vm_object must be
	 * aligned on a 64 byte boundary... for static vm_object's
	 * this is accomplished via the 'aligned' attribute... for
	 * vm_object's in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64 byte size before creating the zone.
	 */
	vm_page_queue_head_t	memq;		/* Resident memory - must be first */
	lck_rw_t		Lock;		/* Synchronization */

#if DEVELOPMENT || DEBUG
	thread_t		Lock_owner;
#endif
	union {
		vm_object_size_t  vou_size;	/* Object size (only valid if internal) */
		int		  vou_cache_pages_to_scan;	/* pages yet to be visited in an
								 * external object in cache
								 */
	} vo_un1;

	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
	unsigned int		resident_page_count;
						/* number of resident pages */
	unsigned int		wired_page_count;   /* number of wired pages */
	unsigned int		reusable_page_count;

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */

	union {
		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
		clock_sec_t	   vou_cache_ts;	/* age of an external object
							 * present in cache
							 */
		task_t		   vou_purgeable_owner;	/* If the purg'a'ble bits below are set
							 * to volatile/empty, this is the task
							 * that owns this purgeable object.
							 */
		struct vm_shared_region_slide_info *vou_slide_info;
	} vo_un2;

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int		paging_in_progress:16,
				__object1_unused_bits:16;
	unsigned int		activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short		paging_in_progress;
	unsigned short		activity_in_progress;
#endif /* __LP64__ */
			/* The memory object ports are
			 * being used (e.g., for pagein
			 * or pageout) -- don't change
			 * any of these fields (i.e.,
			 * don't collapse, destroy or
			 * terminate)
			 */

	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */		pager_created:1,	/* Has pager been created? */
	/* boolean_t */		pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,	/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted. This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */		purgeable_when_ripe:1, /* Purgeable when a token
							* becomes ripe.
							*/
	/* boolean_t */		shadowed:1,	/* Shadow may exist */

	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* Enforces an internal
						 * naming convention, by
						 * calling the right routines
						 * for allocation and
						 * destruction; UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */

	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						   request queue */

	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
	uint32_t		pages_used;
	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
#if MACH_ASSERT
	struct vm_object	*paging_object;	/* object in which pages to be
						 * swapped out are temporarily
						 * put
						 */
#endif
	/* hold object lock when altering */
	unsigned	int
		wimg_bits:8,		/* cache WIMG bits         */
		code_signed:1,		/* pages are signed and should be
					   validated; the signatures are stored
					   with the pager */
		hashed:1,		/* object/pager entered in hash */
		transposed:1,		/* object was transposed with another */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		phantom_isssd:1,
		volatile_empty:1,
		volatile_fault:1,
		all_reusable:1,
		blocked_access:1,
		set_cache_attr:1,
		object_slid:1,
		purgeable_queue_type:2,
		purgeable_queue_group:3,
		io_tracking:1,
#if CONFIG_SECLUDED_MEMORY
		eligible_for_secluded:1,
		can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
		__object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
		__object2_unused_bits:5;	/* for expansion */

	uint8_t			scan_collisions;
	vm_tag_t		wire_tag;
	uint8_t			__object4_unused_bits[2];

#if CONFIG_PHANTOM_CACHE
	uint32_t		phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif

#ifdef	VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */

	queue_chain_t		objq;      /* object queue - currently used for purgable queues */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};

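/*
 * Illustrative sketch of the alignment requirement described at the top
 * of struct vm_object (an assumption shown only for illustration; the
 * variable name is hypothetical): a statically allocated object would be
 * declared with an explicit 64-byte alignment, e.g.
 *
 *	static struct vm_object my_static_object
 *		__attribute__((aligned(64)));
 *
 * Zone-allocated objects instead rely on the zone element size being
 * rounded up to a multiple of 64 bytes, as that comment notes.
 */
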
#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))

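/*
 * Hedged usage sketch: a fault handler can consult this predicate to
 * fail a fault against a purgeable object whose pages were reclaimed
 * while it was volatile (the error value shown is illustrative).
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *		vm_object_unlock(object);
 *		return KERN_MEMORY_ERROR;
 *	}
 */
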
extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
vm_object_t	compressor_object;	/* the single compressor object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */

# define	VM_MSYNC_INITIALIZED		0
# define	VM_MSYNC_SYNCHRONIZING		1
# define	VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)


extern lck_grp_t	vm_map_lck_grp;
extern lck_attr_t	vm_map_lck_attr;

/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \
	msr->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	MACRO_BEGIN							\
	lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp);	\
	kfree((msr), sizeof(struct msync_req));				\
	MACRO_END

#define msr_lock(msr)   lck_mtx_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr) lck_mtx_unlock(&(msr)->msync_req_lock)

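/*
 * Hedged usage sketch for the msync_req macros above (not a definitive
 * vm_msync() implementation): allocate a request, mark it in flight,
 * and tear it down once the sync completes.
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);
 *	msr_lock(msr);
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	msr_unlock(msr);
 *	... queue the request and wait for it to reach VM_MSYNC_DONE ...
 *	msync_req_free(msr);
 */
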
#define VM_OBJECT_WIRED(object)						\
    MACRO_BEGIN								\
    if ((object)->purgable == VM_PURGABLE_DENY)				\
    {									\
	lck_spin_lock(&vm_objects_wired_lock);				\
	assert(!(object)->objq.next);					\
	assert(!(object)->objq.prev);					\
	queue_enter(&vm_objects_wired, (object), vm_object_t, objq);	\
	lck_spin_unlock(&vm_objects_wired_lock);			\
    }									\
    MACRO_END

#define VM_OBJECT_UNWIRED(object)					\
    MACRO_BEGIN								\
    (object)->wire_tag = VM_KERN_MEMORY_NONE;				\
    if (((object)->purgable == VM_PURGABLE_DENY) && (object)->objq.next) \
    {									\
	lck_spin_lock(&vm_objects_wired_lock);				\
	queue_remove(&vm_objects_wired, (object), vm_object_t, objq);	\
	lck_spin_unlock(&vm_objects_wired_lock);			\
    }									\
    MACRO_END


#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;
extern lck_attr_t	compressor_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern boolean_t	_vm_object_lock_try(vm_object_t);
extern boolean_t	vm_object_lock_avoid(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
extern void		vm_object_unlock(vm_object_t);
extern boolean_t	vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     (((object) == compressor_object) ?			\
		      &compressor_object_lck_attr :			\
		      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object)		\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */


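/*
 * Illustrative locking pattern (an assumption about typical use, not a
 * contract stated in this file): readers take the lock shared and
 * upgrade only when they must modify the object; if the upgrade fails
 * the shared lock has been dropped, so the caller re-acquires it
 * exclusively and revalidates.
 *
 *	vm_object_lock_shared(object);
 *	... read-only inspection ...
 *	if (!vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);
 *		... revalidate state before modifying ...
 *	}
 *	... modify the object ...
 *	vm_object_unlock(object);
 */
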
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(vm_object_size_t size);

__private_extern__ void		_vm_object_allocate(vm_object_size_t size,
						    vm_object_t object);

#if	TASK_SWAPPER

__private_extern__ void	vm_object_res_reference(
				vm_object_t		object);
__private_extern__ void	vm_object_res_deallocate(
				vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define	VM_OBJ_RES_INCR(object)
#define	VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */

#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END


#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END


__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define	vm_object_reference(object)			\
MACRO_BEGIN						\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
MACRO_END

#endif	/* MACH_ASSERT */

__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_pmap_protect_options(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot,
					int			options);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page,
					boolean_t		reusable_page,
					struct pmap		*pmap,
					vm_map_offset_t		pmap_offset);

__private_extern__ void		vm_object_reuse_pages(
					vm_object_t		object,
					vm_object_offset_t	start_offset,
					vm_object_offset_t	end_offset,
					boolean_t		allow_partial_reuse);

__private_extern__ void		vm_object_purge(
					vm_object_t		object,
					int			flags);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t		object,
					vm_purgable_t		control,
					int			*state);

__private_extern__ kern_return_t vm_object_get_page_counts(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					unsigned int		*resident_page_count,
					unsigned int		*dirty_page_count);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);

__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);

__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);



__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_compressor_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_object_offset_t	(*map_fn)
					(void *, vm_object_offset_t),
				void			*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
				vm_object_t		object,
				vm_object_offset_t	offset,
				upl_size_t		size,
				upl_t			*upl,
				upl_page_info_t		*page_info,
				unsigned int		*count,
				upl_control_flags_t	flags);

__private_extern__ kern_return_t vm_object_transpose(
				vm_object_t		object1,
				vm_object_t		object2,
				vm_object_size_t	transpose_size);

__private_extern__ boolean_t vm_object_sync(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				boolean_t		should_flush,
				boolean_t		should_return,
				boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_object_offset_t	*error_offset,
				int			*io_errno,
				memory_object_return_t	should_return,
				int			flags,
				vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				memory_object_return_t	should_return,
				int			flags,
				vm_prot_t		prot);



__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);


__private_extern__ void	vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info,
					uint32_t		*io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

__private_extern__ void vm_object_change_wimg_mode(
					vm_object_t		object,
					unsigned int		wimg_mode);

extern kern_return_t	adjust_vm_object_cache(
					vm_size_t		oval,
					vm_size_t		nval);

extern kern_return_t	vm_object_page_op(
					vm_object_t		object,
					vm_object_offset_t	offset,
					int			ops,
					ppnum_t			*phys_entry,
					int			*flags);

extern kern_return_t	vm_object_range_op(
					vm_object_t		object,
					vm_object_offset_t	offset_beg,
					vm_object_offset_t	offset_end,
					int			ops,
					uint32_t		*range);


__private_extern__ void		vm_object_reap_pages(
					vm_object_t	object,
					int		reap_type);
#define REAP_REAP	0
#define	REAP_TERMINATE	1
#define REAP_PURGEABLE	2
#define REAP_DATA_FLUSH	3


#if CONFIG_FREEZE

__private_extern__ void
vm_object_compressed_freezer_pageout(
	vm_object_t object);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t	blkno;
	uint32_t	len;
	int		priority;
	struct vnode	*devvp;
	queue_chain_t	io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define VM_OBJECT_EVENT_UNBLOCKED		8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

#define VM_OBJECT_EVENT_MAX	10	/* 11 bits in "all_wanted", so 0->10 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + event),
			 interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t		object,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread())
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
			  LCK_SLEEP_PROMOTED_PRI,
			  event,
			  interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t		object,
	int			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
				    (event_t)((vm_offset_t)object + event),
				    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t		object,
	int			event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event))
		thread_wakeup((event_t)((vm_offset_t)object + event));
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t		object,
	int			event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t		object,
	int			event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
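
/*
 * Hedged usage sketch of the event primitives above: a caller holding
 * the object lock exclusively that needs the pager typically loops on
 * vm_object_sleep() until "pager_ready" is set (interruptibility and
 * error handling are simplified here).
 *
 *	while (!object->pager_ready) {
 *		wait_result_t wr;
 *
 *		wr = vm_object_sleep(object,
 *				     VM_OBJECT_EVENT_PAGER_READY,
 *				     THREAD_UNINT);
 *		if (wr != THREAD_AWAKENED)
 *			break;
 *	}
 */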

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
		   (object)->activity_in_progress);			\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define	VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */

#define		vm_object_activity_begin(object)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_activity_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	MACRO_END

#define		vm_object_paging_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define		vm_object_paging_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}								\
	MACRO_END

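/*
 * Illustrative pairing of the counters above (an assumption about
 * typical use, not a contract stated in this file): code that starts
 * paging I/O against an object brackets it so that collapse and
 * termination paths using vm_object_paging_wait() drain first.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *
 *	... issue the pagein or pageout ...
 *
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */
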
#define	vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	       (object)->activity_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END

#define	vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	MACRO_END


#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
			/*XXX break; */					\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END



#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))

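/*
 * Example (assuming 4 KB pages, i.e. PAGE_MASK == 0xFFF):
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 */
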
extern void	vm_object_cache_add(vm_object_t);
extern void	vm_object_cache_remove(vm_object_t);
extern int	vm_object_cache_evict(int, int);

#endif	/* _VM_VM_OBJECT_H_ */