/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_BTDEPTH      7
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
        int                interruptible;
        uint32_t           user_tag;
        vm_size_t          cluster_size;
        vm_behavior_t      behavior;
        vm_object_offset_t lo_offset;
        vm_object_offset_t hi_offset;
        unsigned int
        /* boolean_t */ no_cache:1,
        /* boolean_t */ stealth:1,
        /* boolean_t */ io_sync:1,
        /* boolean_t */ cs_bypass:1,
        /* boolean_t */ pmap_cs_associated:1,
        /* boolean_t */ mark_zf_absent:1,
        /* boolean_t */ batch_pmap_op:1,
        /* boolean_t */ resilient_media:1,
        /* boolean_t */ no_copy_on_read:1,
            __vm_object_fault_info_unused_bits:23;
        int                pmap_options;
};


#define vo_size                 vo_un1.vou_size
#define vo_cache_pages_to_scan  vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset        vo_un2.vou_shadow_offset
#define vo_cache_ts             vo_un2.vou_cache_ts
#define vo_owner                vo_un2.vou_owner

struct vm_object {
        /*
         * On 64-bit systems we pack the pointers hung off the memq.
         * Those pointers have to be able to point back to the memq.
         * The packed pointers are required to be on a 64-byte boundary,
         * which means two things for the vm_object: (1) the memq
         * struct has to be the first element of the structure so that
         * we can control its alignment; (2) the vm_object must be
         * aligned on a 64-byte boundary.  For static vm_objects,
         * this is accomplished via the 'aligned' attribute; for
         * vm_objects in the zone pool, it is accomplished by
         * rounding the size of the vm_object element to the nearest
         * 64-byte size before creating the zone.
         */
        vm_page_queue_head_t    memq;           /* Resident memory - must be first */
        lck_rw_t                Lock;           /* Synchronization */

#if DEVELOPMENT || DEBUG
        thread_t                Lock_owner;
#endif
        union {
                vm_object_size_t vou_size;      /* Object size (only valid if internal) */
                int              vou_cache_pages_to_scan; /* pages yet to be visited in an
                                                           * external object in cache
                                                           */
        } vo_un1;

        struct vm_page          *memq_hint;
        int                     ref_count;      /* Number of references */
        unsigned int            resident_page_count;
                                                /* number of resident pages */
        unsigned int            wired_page_count; /* number of wired pages
                                                   * use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
        unsigned int            reusable_page_count;

        struct vm_object        *copy;          /* Object that should receive
                                                 * a copy of my changed pages,
                                                 * for copy_delay, or just the
                                                 * temporary object that
                                                 * shadows this object, for
                                                 * copy_call.
                                                 */
        struct vm_object        *shadow;        /* My shadow */
        memory_object_t         pager;          /* Where to get data */

        union {
                vm_object_offset_t vou_shadow_offset; /* Offset into shadow */
                clock_sec_t        vou_cache_ts; /* age of an external object
                                                  * present in cache
                                                  */
                task_t             vou_owner;   /* If the object is purgeable
                                                 * or has a "ledger_tag", this
                                                 * is the task that owns it.
                                                 */
        } vo_un2;

        vm_object_offset_t      paging_offset;  /* Offset into memory object */
        memory_object_control_t pager_control;  /* Where data comes back */

        memory_object_copy_strategy_t
                                copy_strategy;  /* How to handle data copy */

#if __LP64__
        /*
         * Some user processes (mostly VirtualMachine software) take a large
         * number of UPLs (via IOMemoryDescriptors) to wire pages in large
         * VM objects and overflow the 16-bit "activity_in_progress" counter.
         * Since we never enforced any limit there, let's give them 32 bits
         * for backwards compatibility's sake.
         */
        unsigned int            paging_in_progress:16,
                                __object1_unused_bits:16;
        unsigned int            activity_in_progress;
#else /* __LP64__ */
        /*
         * On 32-bit platforms, enlarging "activity_in_progress" would increase
         * the size of "struct vm_object".  Since we don't know of any actual
         * overflow of these counters on these platforms, let's keep the
         * counters as 16-bit integers.
         */
        unsigned short          paging_in_progress;
        unsigned short          activity_in_progress;
#endif /* __LP64__ */
        /* The memory object ports are
         * being used (e.g., for pagein
         * or pageout) -- don't change
         * any of these fields (i.e.,
         * don't collapse, destroy or
         * terminate)
         */

        unsigned int
        /* boolean_t array */ all_wanted:11,    /* Bit array of "want to be
                                                 * awakened" notations.  See
                                                 * VM_OBJECT_EVENT_* items
                                                 * below */
        /* boolean_t */ pager_created:1,        /* Has pager been created? */
        /* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
        /* boolean_t */ pager_ready:1,          /* Will pager take requests? */

        /* boolean_t */ pager_trusted:1,        /* The pager for this object
                                                 * is trusted.  This is true for
                                                 * all internal objects (backed
                                                 * by the default pager)
                                                 */
        /* boolean_t */ can_persist:1,          /* The kernel may keep the data
                                                 * for this object (and rights
                                                 * to the memory object) after
                                                 * all address map references
                                                 * are deallocated?
                                                 */
        /* boolean_t */ internal:1,             /* Created by the kernel (and
                                                 * therefore, managed by the
                                                 * default memory manager)
                                                 */
        /* boolean_t */ private:1,              /* magic device_pager object,
                                                 * holds private pages only */
        /* boolean_t */ pageout:1,              /* pageout object. contains
                                                 * private pages that refer to
                                                 * a real memory object. */
        /* boolean_t */ alive:1,                /* Not yet terminated */

        /* boolean_t */ purgable:2,             /* Purgable state.  See
                                                 * VM_PURGABLE_*
                                                 */
        /* boolean_t */ purgeable_only_by_kernel:1,
        /* boolean_t */ purgeable_when_ripe:1,  /* Purgeable when a token
                                                 * becomes ripe.
                                                 */
        /* boolean_t */ shadowed:1,             /* Shadow may exist */
        /* boolean_t */ true_share:1,
                                                /* This object is mapped
                                                 * in more than one place
                                                 * and hence cannot be
                                                 * coalesced */
        /* boolean_t */ terminating:1,
                                                /* Allows vm_object_lookup
                                                 * and vm_object_deallocate
                                                 * to special case their
                                                 * behavior when they are
                                                 * called as a result of
                                                 * page cleaning during
                                                 * object termination
                                                 */
        /* boolean_t */ named:1,                /* Enforces an internal
                                                 * naming convention: by
                                                 * calling the right routines
                                                 * for allocation and
                                                 * destruction, UBC references
                                                 * against the vm_object are
                                                 * checked.
                                                 */
        /* boolean_t */ shadow_severed:1,
                                                /* When a permanent object
                                                 * backing a COW goes away
                                                 * unexpectedly.  This bit
                                                 * allows vm_fault to return
                                                 * an error rather than a
                                                 * zero filled page.
                                                 */
        /* boolean_t */ phys_contiguous:1,
                                                /* Memory is wired and
                                                 * guaranteed physically
                                                 * contiguous.  However
                                                 * it is not device memory
                                                 * and obeys normal virtual
                                                 * memory rules w.r.t pmap
                                                 * access bits.
                                                 */
        /* boolean_t */ nophyscache:1,
                                                /* When mapped at the
                                                 * pmap level, don't allow
                                                 * primary caching. (for
                                                 * I/O)
                                                 */
        /* boolean_t */ _object5_unused_bits:1;

        queue_chain_t           cached_list;    /* Attachment point for the
                                                 * list of objects cached as a
                                                 * result of their can_persist
                                                 * value
                                                 */
        /*
         * the following fields are not protected by any locks
         * they are updated via atomic compare and swap
         */
        vm_object_offset_t      last_alloc;     /* last allocation offset */
        vm_offset_t             cow_hint;       /* last page present in */
                                                /* shadow but not in object */
        int                     sequential;     /* sequential access size */

        uint32_t                pages_created;
        uint32_t                pages_used;
        /* hold object lock when altering */
        unsigned int
            wimg_bits:8,                /* cache WIMG bits */
            code_signed:1,              /* pages are signed and should be
                                         * validated; the signatures are stored
                                         * with the pager */
            transposed:1,               /* object was transposed with another */
            mapping_in_progress:1,      /* pager being mapped/unmapped */
            phantom_isssd:1,
            volatile_empty:1,
            volatile_fault:1,
            all_reusable:1,
            blocked_access:1,
            set_cache_attr:1,
            object_is_shared_cache:1,
            purgeable_queue_type:2,
            purgeable_queue_group:3,
            io_tracking:1,
            no_tag_update:1,            /* */
#if CONFIG_SECLUDED_MEMORY
            eligible_for_secluded:1,
            can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
            __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
            access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
            __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
            vo_ledger_tag:3,
            vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
        uint32_t                access_tracking_reads;
        uint32_t                access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

        uint8_t                 scan_collisions;
        uint8_t                 __object4_unused_bits[1];
        vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
        uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
        queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef VM_PIP_DEBUG
        /*
         * Keep track of the stack traces for the first holders
         * of a "paging_in_progress" reference for this VM object.
         */
#define VM_PIP_DEBUG_STACK_FRAMES 25    /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS     10    /* track that many references */
        struct __pip_backtrace {
                void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
        } pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */

        queue_chain_t           objq;           /* object queue - currently used for purgable queues */
        queue_chain_t           task_objq;      /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
        queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
        void                    *purgeable_owner_bt[16];
        task_t                  vo_purgeable_volatilizer; /* who made it volatile? */
        void                    *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
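
/*
 * Usage sketch (illustrative, not part of this header's API): a
 * statically allocated vm_object must honor the 64-byte alignment
 * requirement described at the top of the structure, e.g.:
 *
 *	static struct vm_object my_object_store	// hypothetical name
 *		__attribute__((aligned(64)));
 *
 * Zone-allocated vm_objects get this alignment from the rounded zone
 * element size instead.
 */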

/*
 * TRUE when a page fault on this object should fail: the object is
 * purgeable, currently VOLATILE or EMPTY, and was marked to fault on
 * access while volatile ("volatile_fault").
 */
#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                 \
	((object)->volatile_fault &&                            \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||         \
	  (object)->purgable == VM_PURGABLE_EMPTY))

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern
vm_object_t     kernel_object;          /* the single kernel object */

extern
vm_object_t     compressor_object;      /* the single compressor object */

extern
unsigned int    vm_object_absent_max;   /* maximum number of absent pages
                                         * at a time for each object */

#define VM_MSYNC_INITIALIZED    0
#define VM_MSYNC_SYNCHRONIZING  1
#define VM_MSYNC_DONE           2


extern lck_grp_t  vm_map_lck_grp;
extern lck_attr_t vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object) \
	MACRO_BEGIN \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next); \
	assert(!(object)->wired_objq.prev); \
	queue_enter(&vm_objects_wired, (object), \
	    vm_object_t, wired_objq); \
	lck_spin_unlock(&vm_objects_wired_lock); \
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object) \
	MACRO_BEGIN \
	if ((object)->wired_objq.next) { \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object), \
	            vm_object_t, wired_objq); \
	        lck_spin_unlock(&vm_objects_wired_lock); \
	} \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag) \
	MACRO_BEGIN \
	assert(VM_KERN_MEMORY_NONE != (tag)); \
	assert(VM_KERN_MEMORY_NONE == (object)->wire_tag); \
	(object)->wire_tag = (tag); \
	if (!VM_TAG_ACTIVE_UPDATE) { \
	        VM_OBJECT_WIRED_ENQUEUE((object)); \
	} \
	MACRO_END

#define VM_OBJECT_UNWIRED(object) \
	MACRO_BEGIN \
	if (!VM_TAG_ACTIVE_UPDATE) { \
	        VM_OBJECT_WIRED_DEQUEUE((object)); \
	} \
	if (VM_KERN_MEMORY_NONE != (object)->wire_tag) { \
	        vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \
	        (object)->wire_tag = VM_KERN_MEMORY_NONE; \
	} \
	MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object) \
	MACRO_BEGIN \
	{ \
	        int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag) \
	        if (__wireddelta) { \
	                boolean_t __overflow __assert_only = \
	                    os_add_overflow((object)->wired_page_count, __wireddelta, \
	                        &(object)->wired_page_count); \
	                assert(!__overflow); \
	                if (!(object)->pageout && !(object)->no_tag_update) { \
	                        if (__wireddelta > 0) { \
	                                assert(VM_KERN_MEMORY_NONE != (tag)); \
	                                if (VM_KERN_MEMORY_NONE == __waswired) { \
	                                        VM_OBJECT_WIRED((object), (tag)); \
	                                } \
	                                vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
	                        } else if (VM_KERN_MEMORY_NONE != __waswired) { \
	                                assert(VM_KERN_MEMORY_NONE != (object)->wire_tag); \
	                                vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
	                                if (!(object)->wired_page_count) { \
	                                        VM_OBJECT_UNWIRED((object)); \
	                                } \
	                        } \
	                } \
	        } \
	} \
	MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta) \
	__wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m) \
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m) \
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
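
/*
 * Illustrative call pattern for the wired-page-update block (a sketch;
 * the real call sites live in the VM implementation files, not here):
 * START opens a C block declaring the delta, the ADD/REMOVE/COUNT
 * macros accumulate into it, and END applies the delta to the object's
 * wired_page_count and to the tag's ledger.
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, mem);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 */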



#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t  vm_object_lck_grp;
extern lck_attr_t vm_object_lck_attr;
extern lck_attr_t kernel_object_lck_attr;
extern lck_attr_t compressor_object_lck_attr;

extern vm_object_t vm_pageout_scan_wants_object;

extern void      vm_object_lock(vm_object_t);
extern bool      vm_object_lock_check_contended(vm_object_t);
extern boolean_t vm_object_lock_try(vm_object_t);
extern boolean_t _vm_object_lock_try(vm_object_t);
extern boolean_t vm_object_lock_avoid(vm_object_t);
extern void      vm_object_lock_shared(vm_object_t);
extern boolean_t vm_object_lock_yield_shared(vm_object_t);
extern boolean_t vm_object_lock_try_shared(vm_object_t);
extern void      vm_object_unlock(vm_object_t);
extern boolean_t vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object) \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \
	    (((object) == kernel_object || \
	      (object) == vm_submap_object) ? \
	     &kernel_object_lck_attr : \
	     (((object) == compressor_object) ? \
	      &compressor_object_lck_attr : \
	      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
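
/*
 * Typical locking pattern (a sketch, assuming the usual read/write lock
 * upgrade semantics: vm_object_lock_upgrade() returns FALSE when it had
 * to drop the shared hold, in which case the caller re-takes the lock
 * exclusively and re-validates anything observed before):
 *
 *	vm_object_lock_shared(object);
 *	... read-only inspection ...
 *	if (!vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);	// upgrade failed: lock was dropped
 *		... re-validate state ...
 *	}
 *	... mutate ...
 *	vm_object_unlock(object);
 */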


/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void         vm_object_bootstrap(void);

__private_extern__ void         vm_object_reaper_init(void);

__private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size);

__private_extern__ void         _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

#if TASK_SWAPPER

__private_extern__ void         vm_object_res_reference(
	vm_object_t object);
__private_extern__ void         vm_object_res_deallocate(
	vm_object_t object);
#define VM_OBJ_RES_INCR(object) (object)->res_count++
#define VM_OBJ_RES_DECR(object) (object)->res_count--

#else /* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif /* TASK_SWAPPER */

#define vm_object_reference_locked(object) \
	MACRO_BEGIN \
	vm_object_t RLObject = (object); \
	vm_object_lock_assert_exclusive(object); \
	assert((RLObject)->ref_count > 0); \
	(RLObject)->ref_count++; \
	assert((RLObject)->ref_count > 1); \
	vm_object_res_reference(RLObject); \
	MACRO_END


#define vm_object_reference_shared(object) \
	MACRO_BEGIN \
	vm_object_t RLObject = (object); \
	vm_object_lock_assert_shared(object); \
	assert((RLObject)->ref_count > 0); \
	OSAddAtomic(1, &(RLObject)->ref_count); \
	assert((RLObject)->ref_count > 0); \
	/* XXX we would need an atomic version of the following ... */ \
	vm_object_res_reference(RLObject); \
	MACRO_END


__private_extern__ void         vm_object_reference(
	vm_object_t object);

#if !MACH_ASSERT

#define vm_object_reference(object) \
	MACRO_BEGIN \
	vm_object_t RObject = (object); \
	if (RObject) { \
	        vm_object_lock_shared(RObject); \
	        vm_object_reference_shared(RObject); \
	        vm_object_unlock(RObject); \
	} \
	MACRO_END

#endif /* MACH_ASSERT */

__private_extern__ void         vm_object_deallocate(
	vm_object_t object);

__private_extern__ kern_return_t vm_object_release_name(
	vm_object_t object,
	int flags);

__private_extern__ void         vm_object_pmap_protect(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	pmap_t pmap,
	vm_map_size_t pmap_page_size,
	vm_map_offset_t pmap_start,
	vm_prot_t prot);

__private_extern__ void         vm_object_pmap_protect_options(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	pmap_t pmap,
	vm_map_size_t pmap_page_size,
	vm_map_offset_t pmap_start,
	vm_prot_t prot,
	int options);

__private_extern__ void         vm_object_page_remove(
	vm_object_t object,
	vm_object_offset_t start,
	vm_object_offset_t end);

__private_extern__ void         vm_object_deactivate_pages(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	boolean_t kill_page,
	boolean_t reusable_page,
	struct pmap *pmap,
	/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t pmap_offset);

__private_extern__ void         vm_object_reuse_pages(
	vm_object_t object,
	vm_object_offset_t start_offset,
	vm_object_offset_t end_offset,
	boolean_t allow_partial_reuse);

__private_extern__ uint64_t     vm_object_purge(
	vm_object_t object,
	int flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t object,
	vm_purgable_t control,
	int *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	unsigned int *resident_page_count,
	unsigned int *dirty_page_count);

__private_extern__ boolean_t    vm_object_coalesce(
	vm_object_t prev_object,
	vm_object_t next_object,
	vm_object_offset_t prev_offset,
	vm_object_offset_t next_offset,
	vm_object_size_t prev_size,
	vm_object_size_t next_size);

__private_extern__ boolean_t    vm_object_shadow(
	vm_object_t *object,
	vm_object_offset_t *offset,
	vm_object_size_t length);

__private_extern__ void         vm_object_collapse(
	vm_object_t object,
	vm_object_offset_t offset,
	boolean_t can_bypass);

__private_extern__ boolean_t    vm_object_copy_quickly(
	vm_object_t *_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	boolean_t *_src_needs_copy,
	boolean_t *_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
	vm_object_t src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	vm_object_t *dst_object,
	vm_object_offset_t *dst_offset,
	boolean_t *dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
	vm_object_t src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	boolean_t interruptible,
	vm_object_t *_result_object);

__private_extern__ vm_object_t  vm_object_copy_delayed(
	vm_object_t src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	boolean_t src_object_shared);



__private_extern__ kern_return_t vm_object_destroy(
	vm_object_t object,
	kern_return_t reason);

__private_extern__ void         vm_object_pager_create(
	vm_object_t object);

__private_extern__ void         vm_object_compressor_pager_create(
	vm_object_t object);

__private_extern__ void         vm_object_page_map(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	vm_object_offset_t (*map_fn)
	(void *, vm_object_offset_t),
	void *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t object,
	vm_object_offset_t offset,
	upl_size_t size,
	upl_t *upl,
	upl_page_info_t *page_info,
	unsigned int *count,
	upl_control_flags_t flags,
	vm_tag_t tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t object1,
	vm_object_t object2,
	vm_object_size_t transpose_size);

__private_extern__ boolean_t    vm_object_sync(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	boolean_t should_flush,
	boolean_t should_return,
	boolean_t should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	vm_object_offset_t *error_offset,
	int *io_errno,
	memory_object_return_t should_return,
	int flags,
	vm_prot_t prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	memory_object_return_t should_return,
	int flags,
	vm_prot_t prot);



__private_extern__ vm_object_t  vm_object_memory_object_associate(
	memory_object_t pager,
	vm_object_t object,
	vm_object_size_t size,
	boolean_t check_named);


__private_extern__ void         vm_object_cluster_size(
	vm_object_t object,
	vm_object_offset_t *start,
	vm_size_t *length,
	vm_object_fault_info_t fault_info,
	uint32_t *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t object,
	vm_object_offset_t offset,
	ppnum_t phys_page,
	vm_size_t size);

__private_extern__ void         vm_object_change_wimg_mode(
	vm_object_t object,
	unsigned int wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t object,
	vm_object_offset_t offset,
	int ops,
	ppnum_t *phys_entry,
	int *flags);

extern kern_return_t vm_object_range_op(
	vm_object_t object,
	vm_object_offset_t offset_beg,
	vm_object_offset_t offset_end,
	int ops,
	uint32_t *range);


__private_extern__ void         vm_object_reap_pages(
	vm_object_t object,
	int reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	queue_chain_t   io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 * Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS        4
#define VM_OBJECT_EVENT_UNCACHING               5
#define VM_OBJECT_EVENT_COPY_CALL               6
#define VM_OBJECT_EVENT_CACHING                 7
#define VM_OBJECT_EVENT_UNBLOCKED               8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9

#define VM_OBJECT_EVENT_MAX                     10 /* 11 bits in "all_wanted", so 0->10 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t object,
	int event,
	wait_interrupt_t interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t object,
	int event,
	wait_interrupt_t interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t object,
	event_t event,
	wait_interrupt_t interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread()) {
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	}
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t object,
	int event,
	wait_interrupt_t interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t object,
	int event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
	}
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t object,
	int event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t object,
	int event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
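
/*
 * Sketch of the event protocol (illustrative; pager_ready paired with
 * VM_OBJECT_EVENT_PAGER_READY is one such flag/event pairing): a waiter
 * checks the condition under the exclusive object lock and sleeps on
 * the matching event; whoever clears the condition wakes the waiters.
 *
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *		    THREAD_UNINT);
 *	}
 *
 * and on the other side:
 *
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */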

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object) \
	MACRO_BEGIN \
	int pip = ((object)->paging_in_progress + \
	    (object)->activity_in_progress); \
	if (pip < VM_PIP_DEBUG_MAX_REFS) { \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	            VM_PIP_DEBUG_STACK_FRAMES); \
	} \
	MACRO_END
#else /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */

#define vm_object_activity_begin(object) \
	MACRO_BEGIN \
	vm_object_lock_assert_exclusive((object)); \
	VM_PIP_DEBUG_BEGIN((object)); \
	(object)->activity_in_progress++; \
	if ((object)->activity_in_progress == 0) { \
	        panic("vm_object_activity_begin(%p): overflow\n", (object));\
	} \
	MACRO_END

#define vm_object_activity_end(object) \
	MACRO_BEGIN \
	vm_object_lock_assert_exclusive((object)); \
	if ((object)->activity_in_progress == 0) { \
	        panic("vm_object_activity_end(%p): underflow\n", (object));\
	} \
	(object)->activity_in_progress--; \
	if ((object)->paging_in_progress == 0 && \
	    (object)->activity_in_progress == 0) \
	        vm_object_wakeup((object), \
	            VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	MACRO_END

#define vm_object_paging_begin(object) \
	MACRO_BEGIN \
	vm_object_lock_assert_exclusive((object)); \
	VM_PIP_DEBUG_BEGIN((object)); \
	(object)->paging_in_progress++; \
	if ((object)->paging_in_progress == 0) { \
	        panic("vm_object_paging_begin(%p): overflow\n", (object));\
	} \
	MACRO_END

#define vm_object_paging_end(object) \
	MACRO_BEGIN \
	vm_object_lock_assert_exclusive((object)); \
	if ((object)->paging_in_progress == 0) { \
	        panic("vm_object_paging_end(%p): underflow\n", (object));\
	} \
	(object)->paging_in_progress--; \
	if ((object)->paging_in_progress == 0) { \
	        vm_object_wakeup((object), \
	            VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
	        if ((object)->activity_in_progress == 0) \
	                vm_object_wakeup((object), \
	                    VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	} \
	MACRO_END
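
/*
 * Illustrative bracket pattern (a sketch of how callers typically use
 * the paging counter; the real call sites live in the VM implementation
 * files): hold a paging_in_progress reference across an operation during
 * which the object must not be collapsed, destroyed, or terminated,
 * dropping the object lock while the pager works.
 *
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue the pager request ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 */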

#define vm_object_paging_wait(object, interruptible) \
	MACRO_BEGIN \
	vm_object_lock_assert_exclusive((object)); \
	while ((object)->paging_in_progress != 0 || \
	    (object)->activity_in_progress != 0) { \
	        wait_result_t _wr; \
	\
	        _wr = vm_object_sleep((object), \
	            VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
	            (interruptible)); \
	\
	        /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	        /*XXX break; */ \
	} \
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible) \
	MACRO_BEGIN \
	vm_object_lock_assert_exclusive((object)); \
	while ((object)->paging_in_progress != 0) { \
	        wait_result_t _wr; \
	\
	        _wr = vm_object_sleep((object), \
	            VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, \
	            (interruptible)); \
	\
	        /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	        /*XXX break; */ \
	} \
	MACRO_END


#define vm_object_mapping_begin(object) \
	MACRO_BEGIN \
	vm_object_lock_assert_exclusive((object)); \
	assert(!(object)->mapping_in_progress); \
	(object)->mapping_in_progress = TRUE; \
	MACRO_END

#define vm_object_mapping_end(object) \
	MACRO_BEGIN \
	vm_object_lock_assert_exclusive((object)); \
	assert((object)->mapping_in_progress); \
	(object)->mapping_in_progress = FALSE; \
	vm_object_wakeup((object), \
	    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS); \
	MACRO_END

#define vm_object_mapping_wait(object, interruptible) \
	MACRO_BEGIN \
	vm_object_lock_assert_exclusive((object)); \
	while ((object)->mapping_in_progress) { \
	        wait_result_t _wr; \
	\
	        _wr = vm_object_sleep((object), \
	            VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
	            (interruptible)); \
	        /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	        /*XXX break; */ \
	} \
	assert(!(object)->mapping_in_progress); \
	MACRO_END



#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
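
/*
 * Worked example (assuming 4K pages, i.e. PAGE_MASK == 0xFFF):
 * vm_object_trunc_page(0x1234) == 0x1000 and
 * vm_object_round_page(0x1234) == 0x2000; offsets already on a page
 * boundary are unchanged by both.
 */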

extern void     vm_object_cache_add(vm_object_t);
extern void     vm_object_cache_remove(vm_object_t);
extern int      vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER(object) \
	((((object)->purgable == VM_PURGABLE_DENY && \
	   (object)->vo_ledger_tag == 0) || \
	  (object)->vo_owner == TASK_NULL) \
	 ? TASK_NULL            /* not owned */ \
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED) \
	    ? kernel_task       /* disowned -> kernel */ \
	    : (object)->vo_owner)) /* explicit owner */
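
/*
 * Worked example (illustrative): a purgeable object whose vo_owner is a
 * task yields that task; one disowned via VM_OBJECT_OWNER_DISOWNED is
 * billed to kernel_task; a non-purgeable object with no ledger tag and
 * no owner reports TASK_NULL.
 */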

extern void     vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);

// LP64todo: all the current tools are 32bit, obviously never worked for 64b
// so probably should be a real 32b ID vs. ptr.
// Current users just check for equality
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))

#endif /* _VM_VM_OBJECT_H_ */