/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#if MACH_PAGEMAP
#include <vm/vm_external.h>
#endif /* MACH_PAGEMAP */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t	Virtual memory object.
 */
struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	decl_mutex_data(,	Lock)		/* Synchronization */

	vm_object_size_t	size;		/* Object size (only valid
						 * if internal)
						 */
	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if TASK_SWAPPER
	int			res_count;	/* Residency references (swap) */
#endif /* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */
	vm_object_offset_t	shadow_offset;	/* Offset into shadow */

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

	unsigned int		absent_count;	/* The number of pages that
						 * have been requested but
						 * not filled.  That is, the
						 * number of pages for which
						 * the "absent" attribute is
						 * asserted.
						 */

	unsigned int		paging_in_progress;
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */
	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */		pager_created:1,     /* Has pager been created? */
	/* boolean_t */		pager_initialized:1, /* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,	/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1, /* The pager for this object
						 * is trusted.  This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated.
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object.  contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_OBJECT_PURGABLE_*
						 * items below.
						 */
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		silent_overwrite:1,
						/* Allow full page overwrite
						 * without data_request if
						 * page is absent */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* Enforces an internal
						 * naming convention: by
						 * calling the right routines
						 * for allocation and
						 * destruction, UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t. pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching.  (for
						 * I/O)
						 */

	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						 * request queue */

	vm_object_offset_t	last_alloc;	/* last allocation offset */
	vm_object_offset_t	sequential;	/* sequential access size */
	vm_size_t		cluster_size;	/* size of paging cluster */
#if MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif /* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in
						 * shadow but not in object */
#if MACH_ASSERT
	struct vm_object	*paging_object;	/* object whose pages to be
						 * swapped out are temporarily
						 * put in the current object
						 */
#endif
	/* hold object lock when altering */
	unsigned int				/* cache WIMG bits */
		wimg_bits:8,			/* wimg plus some expansion */
		not_in_use:24;
#ifdef UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* UPL_DEBUG */
};
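
/*
 * Illustrative sketch (not part of this header's API; locking elided):
 * the shadow/shadow_offset fields chain copy-on-write objects together.
 * A lookup that misses in an object continues in its shadow at
 * (offset + shadow_offset), roughly as vm_fault does:
 *
 *	vm_object_t		obj = object;
 *	vm_object_offset_t	off = offset;
 *	vm_page_t		m;
 *
 *	while (obj != VM_OBJECT_NULL &&
 *	       (m = vm_page_lookup(obj, off)) == VM_PAGE_NULL) {
 *		off += obj->shadow_offset;	-- translate into shadow
 *		obj = obj->shadow;		-- and descend one level
 *	}
 */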

#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END

#define VM_PAGE_INSERT(page, object)					\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = (object);				\
	queue_enter(&__object->memq, __page, vm_page_t, listq);		\
	__object->memq_hint = __page;					\
	MACRO_END
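
/*
 * Illustrative use of the two macros above (a sketch; assumes the
 * caller already holds the object lock, as every real caller must,
 * since both macros touch memq and memq_hint unsynchronized):
 *
 *	vm_object_lock(object);
 *	VM_PAGE_INSERT(mem, object);	-- also seeds memq_hint
 *	...
 *	VM_PAGE_REMOVE(mem);		-- repairs memq_hint if it pointed here
 *	vm_object_unlock(object);
 */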

__private_extern__
vm_object_t	kernel_object;		/* the single kernel object */

__private_extern__
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */

# define VM_MSYNC_INITIALIZED		0
# define VM_MSYNC_SYNCHRONIZING	1
# define VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;	/* object request queue */
	queue_chain_t		req_q;	/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;	/* back pointer */
	decl_mutex_data(, msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)

/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	mutex_init(&(msr)->msync_req_lock, 0);				\
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	(kfree((msr), sizeof(struct msync_req)))

#define msr_lock(msr)	mutex_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)	mutex_unlock(&(msr)->msync_req_lock)
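
/*
 * Sketch of a msync_req lifecycle using the macros above (error
 * handling and queue manipulation elided; the flag transitions
 * follow the VM_MSYNC_* states defined earlier):
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);		-- flag starts INITIALIZED
 *	msr_lock(msr);
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	msr_unlock(msr);
 *	... pager completes and sets the flag to VM_MSYNC_DONE ...
 *	msync_req_free(msr);
 */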

/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void);

__private_extern__ void		vm_object_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);

__private_extern__ void		_vm_object_allocate(vm_object_size_t size,
					vm_object_t object);

#if TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(
					vm_object_t		object);
__private_extern__ void		vm_object_res_deallocate(
					vm_object_t		object);
#define VM_OBJ_RES_INCR(object)	(object)->res_count++
#define VM_OBJ_RES_DECR(object)	(object)->res_count--

#else /* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif /* TASK_SWAPPER */

#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	vm_object_res_reference(RLObject);		\
	MACRO_END


__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if !MACH_ASSERT

#define vm_object_reference(object)			\
	MACRO_BEGIN					\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock(RObject);		\
		vm_object_reference_locked(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
	MACRO_END

#endif /* MACH_ASSERT */

__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);
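
/*
 * Reference counting sketch: references taken with
 * vm_object_reference() must be balanced by vm_object_deallocate();
 * when ref_count drops to zero the object is terminated, or cached
 * if can_persist allows it:
 *
 *	vm_object_reference(object);	-- object cannot go away now
 *	... use object ...
 *	vm_object_deallocate(object);	-- may terminate or cache it
 */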

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page);

__private_extern__ unsigned int	vm_object_purge(
					vm_object_t		object);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t	object,
					vm_purgable_t	control,
					int		*state);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);

__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset);

__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					int			interruptible,
					vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size);

__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t	object,
					kern_return_t	reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t	object);

__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					int			flags);

__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);

__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);

/*
 * Purgable object state.
 */

#define VM_OBJECT_NONPURGABLE		0	/* not a purgable object */
#define VM_OBJECT_PURGABLE_NONVOLATILE	1	/* non-volatile purgable object */
#define VM_OBJECT_PURGABLE_VOLATILE	2	/* volatile (but intact) purgable object */
#define VM_OBJECT_PURGABLE_EMPTY	3	/* volatile purgable object that has been emptied */
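
/*
 * Sketch (assuming the VM_PURGABLE_SET_STATE control and VM_PURGABLE_*
 * state constants from <mach/vm_purgable.h>): moving an object between
 * the states tracked by the `purgable' bits above.
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kern_return_t kr;
 *
 *	kr = vm_object_purgable_control(object, VM_PURGABLE_SET_STATE, &state);
 *	... object's pages may now be reclaimed under memory pressure ...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = vm_object_purgable_control(object, VM_PURGABLE_SET_STATE, &state);
 *	-- on return, `state' reports the prior state (EMPTY if it was purged)
 */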

__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

__private_extern__ kern_return_t adjust_vm_object_cache(
					vm_size_t	oval,
					vm_size_t	nval);

/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED		0
#define VM_OBJECT_EVENT_PAGER_READY		1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define VM_OBJECT_EVENT_ABSENT_COUNT		3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define VM_OBJECT_EVENT_UNCACHING		5
#define VM_OBJECT_EVENT_COPY_CALL		6
#define VM_OBJECT_EVENT_CACHING			7

#define vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	vm_object_unlock(object),					\
	thread_block(THREAD_CONTINUE_NULL))

#define thread_sleep_vm_object(object, event, interruptible)		\
	thread_sleep_mutex((event_t)(event), &(object)->Lock, (interruptible))

#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
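
/*
 * Event-wait pattern (sketch): a waiter records interest in an event
 * via all_wanted and sleeps; whoever changes the state wakes all
 * waiters.  vm_object_sleep() drops the object lock across the block
 * and re-acquires it before returning, so the loop re-tests safely.
 * E.g., waiting for the pager to come ready:
 *
 *	-- waiter (object locked):
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *	}
 *
 *	-- initializer (object locked):
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */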

/*
 *	Routines implemented as macros
 */

#define vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	(object)->paging_in_progress++;					\
	MACRO_END

#define vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	assert((object)->paging_in_progress != 0);			\
	if (--(object)->paging_in_progress == 0) {			\
		vm_object_wakeup(object,				\
			VM_OBJECT_EVENT_PAGING_IN_PROGRESS);		\
	}								\
	MACRO_END

#define vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END
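
/*
 * paging_in_progress protocol (sketch): page I/O is bracketed by
 * begin/end so that operations such as collapse or terminate can
 * drain it with vm_object_paging_wait():
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue the pagein/pageout without the object lock held ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);	-- wakes waiters on the last one
 *	vm_object_unlock(object);
 */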

#define vm_object_absent_assert_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_assert_wait(	(object),				\
				VM_OBJECT_EVENT_ABSENT_COUNT,		\
				(interruptible));			\
	MACRO_END


#define vm_object_absent_release(object)				\
	MACRO_BEGIN							\
	(object)->absent_count--;					\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_ABSENT_COUNT);			\
	MACRO_END

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)	mutex_init(&(object)->Lock, 0)
#define vm_object_lock(object)		mutex_lock(&(object)->Lock)
#define vm_object_unlock(object)	mutex_unlock(&(object)->Lock)
#define vm_object_lock_try(object)	mutex_try(&(object)->Lock)
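
/*
 * The object lock is a plain mutex; a non-blocking attempt, useful on
 * lock-ordering-sensitive paths, looks like this (sketch):
 *
 *	if (vm_object_lock_try(object)) {
 *		... short critical section ...
 *		vm_object_unlock(object);
 *	}
 */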

#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
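
/*
 * Worked example with 4K pages (PAGE_MASK == 0xFFF):
 *
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 *	vm_object_round_page(0x1000) == 0x1000	-- already aligned
 */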

#endif /* _VM_VM_OBJECT_H_ */