/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:		vm_object.h
 *	Author:		Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:		1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#if MACH_PAGEMAP
#include <vm/vm_external.h>
#endif	/* MACH_PAGEMAP */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int		interruptible;
	uint32_t	user_tag;
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
	boolean_t	no_cache;
};


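/*
 * Illustrative sketch (not compiled): how a caller might fill in a
 * vm_object_fault_info before asking vm_object_cluster_size() for a
 * cluster.  The particular values used here (THREAD_UNINT,
 * VM_BEHAVIOR_DEFAULT, zero tag) are only plausible defaults chosen for
 * the example, not a statement of what the fault path actually passes.
 */
#if 0	/* example only */
static void
example_fill_fault_info(struct vm_object_fault_info *info,
			vm_map_offset_t lo, vm_map_offset_t hi)
{
	info->interruptible = THREAD_UNINT;	/* don't abort the wait on signals */
	info->user_tag = 0;			/* no VM_MEMORY_* tag */
	info->cluster_size = 0;			/* let the VM pick the cluster size */
	info->behavior = VM_BEHAVIOR_DEFAULT;	/* no madvise-style access hint */
	info->lo_offset = lo;			/* clipping bounds used for clustering */
	info->hi_offset = hi;
	info->no_cache = FALSE;
}
#endif	/* example only */
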
struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	vm_object_size_t	size;		/* Object size (only valid
						 * if internal)
						 */
	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap)*/
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */
	vm_object_offset_t	shadow_offset;	/* Offset into shadow */

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

	int			paging_in_progress;
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */
	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below */
	/* boolean_t */		pager_created:1, /* Has pager been created? */
	/* boolean_t */		pager_initialized:1, /* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,	/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted. This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_PURGABLE_*
						 */
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		silent_overwrite:1,
						/* Allow full page overwrite
						 * without data_request if
						 * page is absent */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* The object follows an
						 * internal naming convention:
						 * by calling the right
						 * routines for allocation and
						 * destruction, UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */



	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						   request queue */

	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
	uint32_t		pages_used;
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in     */
						/* shadow but not in object */
#if	MACH_ASSERT
	struct vm_object	*paging_object;	/* object in which pages to
						 * be swapped out are
						 * temporarily put
						 */
#endif
	/* hold object lock when altering */
	unsigned	int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					   validated; the signatures are stored
					   with the pager */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		not_in_use:22;		/* for expansion */

#ifdef	UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* UPL_DEBUG */

#ifdef	VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */

	queue_chain_t		objq;	/* object queue - currently used for purgable queues */
};

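/*
 * Illustrative sketch (not compiled): resolving an offset through the
 * copy-on-write shadow chain.  Each level is reached via the "shadow"
 * pointer and "shadow_offset" is added at every hop, as described by the
 * field comments above.  VM_OBJECT_NULL/vm_object_t come from the other
 * VM headers; locking is omitted here, while real callers walk the chain
 * with the appropriate object locks held.
 */
#if 0	/* example only */
static vm_object_t
example_walk_shadow_chain(vm_object_t object, vm_object_offset_t *offset)
{
	while (object->shadow != VM_OBJECT_NULL) {
		*offset += object->shadow_offset;	/* translate into the backing object */
		object = object->shadow;
	}
	return object;		/* bottom-most object backing *offset */
}
#endif	/* example only */
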
/*
 * Unlink a page from its object's resident-page queue, first repointing
 * the object's memq_hint at a neighbouring page (or clearing it) when the
 * hint currently refers to the page being removed.
 */
#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END

/*
 * Link a page onto an object's resident-page queue and make it the new
 * memq_hint, since a freshly inserted page is a likely next lookup.
 */
#define VM_PAGE_INSERT(page, object)				\
	MACRO_BEGIN						\
	vm_page_t __page = (page);				\
	vm_object_t __object = (object);			\
	queue_enter(&__object->memq, __page, vm_page_t, listq); \
	__object->memq_hint = __page;				\
	MACRO_END
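
/*
 * Illustrative sketch (not compiled): moving a resident page from one
 * object to another with the hint-maintaining macros above.  Both object
 * locks are assumed to be held exclusively; real code also updates
 * resident_page_count and other bookkeeping that is elided here.
 */
#if 0	/* example only */
static void
example_move_page(vm_page_t m, vm_object_t from_object, vm_object_t to_object,
		  vm_object_offset_t new_offset)
{
	assert(m->object == from_object);
	VM_PAGE_REMOVE(m);		/* unlink from from_object->memq, fix memq_hint */
	m->object = to_object;
	m->offset = new_offset;
	VM_PAGE_INSERT(m, to_object);	/* link into to_object->memq, set memq_hint */
}
#endif	/* example only */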

__private_extern__
vm_object_t	kernel_object;		/* the single kernel object */

__private_extern__
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */

# define	VM_MSYNC_INITIALIZED		0
# define	VM_MSYNC_SYNCHRONIZING		1
# define	VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_mutex_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)

/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	mutex_init(&(msr)->msync_req_lock, 0);				\
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	(kfree((msr), sizeof(struct msync_req)))

#define msr_lock(msr)	mutex_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)	mutex_unlock(&(msr)->msync_req_lock)
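
/*
 * Illustrative sketch (not compiled): typical lifetime of an msync_req
 * using the allocation and locking macros above.  The queueing onto the
 * object's msr_q and the wait for VM_MSYNC_DONE are elided.
 */
#if 0	/* example only */
static void
example_msync_req_lifecycle(vm_object_t object, vm_object_offset_t offset,
			    vm_object_size_t length)
{
	msync_req_t msr;

	msync_req_alloc(msr);		/* kalloc + mutex_init + flag = INITIALIZED */

	msr_lock(msr);
	msr->offset = offset;
	msr->length = length;
	msr->object = object;
	msr->flag = VM_MSYNC_SYNCHRONIZING;
	msr_unlock(msr);

	/* ... queue msr on object->msr_q, wait until flag == VM_MSYNC_DONE ... */

	msync_req_free(msr);		/* kfree the request */
}
#endif	/* example only */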

/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void		vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);

__private_extern__ void		_vm_object_allocate(vm_object_size_t size,
					vm_object_t object);

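/*
 * Illustrative sketch (not compiled): creating an anonymous (internal)
 * object and balancing its reference count with the routines declared in
 * this header.  vm_object_reference() and vm_object_deallocate() are
 * declared further below.
 */
#if 0	/* example only */
static void
example_object_lifecycle(void)
{
	vm_object_t object;

	object = vm_object_allocate((vm_object_size_t)PAGE_SIZE);	/* returns with one reference */

	vm_object_reference(object);	/* take an extra reference */
	/* ... map or otherwise use the object ... */
	vm_object_deallocate(object);	/* drop the extra reference */

	vm_object_deallocate(object);	/* drop the last reference; the object
					 * may then be cached or terminated */
}
#endif	/* example only */
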
#if	TASK_SWAPPER

__private_extern__ void	vm_object_res_reference(
				vm_object_t		object);
__private_extern__ void	vm_object_res_deallocate(
				vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define	VM_OBJ_RES_INCR(object)
#define	VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */

#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END


#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, (SInt32 *)&(RLObject)->ref_count);		\
	assert((RLObject)->ref_count > 1);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END


__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define	vm_object_reference(object)			\
MACRO_BEGIN						\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock(RObject);		\
		vm_object_reference_locked(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
MACRO_END

#endif	/* MACH_ASSERT */

__private_extern__ void		vm_object_deallocate(
					vm_object_t	object);

__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t	object,
					int		flags);

__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);

__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page);

__private_extern__ unsigned int	vm_object_purge(
					vm_object_t		object);

__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t		object,
					vm_purgable_t		control,
					int			*state);

__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);

__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);

__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					int			interruptible,
					vm_object_t		*_result_object);

__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);



__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t		object,
					kern_return_t		reason);

__private_extern__ void		vm_object_pager_create(
					vm_object_t		object);

__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					int			flags);

__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);

__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);

__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);



__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);


__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info);

__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);

extern kern_return_t	adjust_vm_object_cache(
					vm_size_t		oval,
					vm_size_t		nval);

extern kern_return_t	vm_object_page_op(
					vm_object_t		object,
					vm_object_offset_t	offset,
					int			ops,
					ppnum_t			*phys_entry,
					int			*flags);

extern kern_return_t	vm_object_range_op(
					vm_object_t		object,
					vm_object_offset_t	offset_beg,
					vm_object_offset_t	offset_end,
					int			ops,
					int			*range);

/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7

#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	vm_object_unlock(object),					\
	thread_block(THREAD_CONTINUE_NULL))

#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))

#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))

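/*
 * Illustrative sketch (not compiled): the event protocol built on the
 * macros above.  A waiter records its interest in an event and sleeps
 * holding the object lock (vm_object_sleep drops it and re-acquires it);
 * whoever later changes the corresponding state calls vm_object_wakeup().
 * The pager-ready wait below mirrors the pattern used by the fault path.
 */
#if 0	/* example only */
static void
example_wait_for_pager_ready(vm_object_t object)
{
	vm_object_lock(object);
	while (!object->pager_ready) {
		/* marks PAGER_READY wanted, sleeps, re-takes the object lock */
		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
	}
	vm_object_unlock(object);
}

static void
example_mark_pager_ready(vm_object_t object)
{
	vm_object_lock(object);
	object->pager_ready = TRUE;
	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);	/* only wakes if wanted */
	vm_object_unlock(object);
}
#endif	/* example only */
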
/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	if ((object)->paging_in_progress < VM_PIP_DEBUG_MAX_REFS) {	\
		int pip = (object)->paging_in_progress;			\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */

#define	vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	MACRO_END

#define	vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress > 0);			\
	if (--(object)->paging_in_progress == 0) {			\
		vm_object_wakeup(object,				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	}								\
	MACRO_END

#define	vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t  _wr;					\
									\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END
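
/*
 * Illustrative sketch (not compiled): the paging_in_progress protocol.
 * Taking a paging reference keeps the object from being collapsed,
 * destroyed or terminated while the lock is dropped for pager I/O, and
 * vm_object_paging_wait() lets another thread wait for that activity to
 * drain.
 */
#if 0	/* example only */
static void
example_pager_io(vm_object_t object)
{
	vm_object_lock(object);
	vm_object_paging_begin(object);	/* pin the object against collapse/termination */
	vm_object_unlock(object);

	/* ... issue the pagein/pageout request without holding the object lock ... */

	vm_object_lock(object);
	vm_object_paging_end(object);	/* wakes PAGING_IN_PROGRESS waiters when it hits zero */
	vm_object_unlock(object);
}
#endif	/* example only */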


#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
				      (interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END



#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
			&kernel_object_lck_attr :			\
			&vm_object_lck_attr))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	lck_rw_try_lock_exclusive(&(object)->Lock)
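
/*
 * Illustrative sketch (not compiled): upgrading a shared object lock to
 * exclusive.  With the lck_rw_* primitives used above, a failed upgrade
 * drops the lock entirely rather than leaving it held shared, so the
 * caller must re-take it exclusively and revalidate whatever it examined
 * under the shared lock.
 */
#if 0	/* example only */
static void
example_lock_upgrade(vm_object_t object)
{
	vm_object_lock_shared(object);
	/* ... read-only inspection of the object ... */

	if (!vm_object_lock_upgrade(object)) {
		/* upgrade failed: the lock was dropped, not left shared */
		vm_object_lock(object);
		/* ... revalidate state before modifying ... */
	}
	/* exclusive here; safe to modify the object */
	vm_object_unlock(object);
}
#endif	/* example only */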

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if DEBUG
#define vm_object_lock_assert_held(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else	/* DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif	/* DEBUG */

#define vm_object_round_page(x)	(((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x)	((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))

#endif	/* _VM_VM_OBJECT_H_ */