]> git.saurik.com Git - apple/xnu.git/blob - osfmk/mach/memory_object_types.h
96abd8026c3263b42c98f6cf8720f398b1d75ef8
[apple/xnu.git] / osfmk / mach / memory_object_types.h
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: memory_object.h
60 * Author: Michael Wayne Young
61 *
62 * External memory management interface definition.
63 */
64
65 #ifndef _MACH_MEMORY_OBJECT_TYPES_H_
66 #define _MACH_MEMORY_OBJECT_TYPES_H_
67
68 /*
69 * User-visible types used in the external memory
70 * management interface:
71 */
72
73 #include <mach/port.h>
74 #include <mach/message.h>
75 #include <mach/vm_prot.h>
76 #include <mach/vm_sync.h>
77 #include <mach/vm_types.h>
78 #include <mach/machine/vm_types.h>
79
80 #include <sys/cdefs.h>
81
82 #define VM_64_BIT_DATA_OBJECTS
83
84 typedef unsigned long long memory_object_offset_t;
85 typedef unsigned long long memory_object_size_t;
86 typedef natural_t memory_object_cluster_size_t;
87 typedef natural_t * memory_object_fault_info_t;
88
89 typedef unsigned long long vm_object_id_t;
90
91
92 /*
93 * Temporary until real EMMI version gets re-implemented
94 */
95
96 #ifdef KERNEL_PRIVATE
97
98 struct memory_object_pager_ops; /* forward declaration */
99
/*
 * In-kernel handle for a memory object (the pager backing a VM
 * object).  The leading pad word(s) overlay the port's
 * ipc_object_header (see inline comment), and mo_pager_ops points
 * at the constant operation vector for this pager type.
 */
typedef struct memory_object {
	unsigned int	_pad1;	/* struct ipc_object_header */
#ifdef __LP64__
	unsigned int	_pad2;	/* pad to natural boundary */
#endif
	const struct memory_object_pager_ops	*mo_pager_ops;
} *memory_object_t;
107
/*
 * In-kernel handle for a memory object's control structure:
 * connects back to the kernel's VM object.  moc_ikot overlays the
 * ipc_object_header of the control port (see inline comment).
 */
typedef struct memory_object_control {
	unsigned int	moc_ikot;	/* struct ipc_object_header */
#ifdef __LP64__
	unsigned int	_pad;		/* pad to natural boundary */
#endif
	struct vm_object	*moc_object;
} *memory_object_control_t;
115
/*
 * Operation vector implemented by each pager type (the in-kernel
 * EMMI entry points).  A memory_object_t reaches its vector through
 * mo_pager_ops; memory_object_pager_name names the pager type.
 * NOTE(review): member order is part of the kernel ABI -- do not
 * reorder.
 */
typedef const struct memory_object_pager_ops {
	/* reference counting on the memory object itself */
	void (*memory_object_reference)(
		memory_object_t mem_obj);
	void (*memory_object_deallocate)(
		memory_object_t mem_obj);
	/* attach the object to its control structure */
	kern_return_t (*memory_object_init)(
		memory_object_t mem_obj,
		memory_object_control_t mem_control,
		memory_object_cluster_size_t size);
	kern_return_t (*memory_object_terminate)(
		memory_object_t mem_obj);
	/* request that the pager supply data for the given range
	 * (fault_info carries per-fault hints) */
	kern_return_t (*memory_object_data_request)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t length,
		vm_prot_t desired_access,
		memory_object_fault_info_t fault_info);
	/* hand data back to the pager; resid_offset/io_error report
	 * partial completion and I/O failure to the caller */
	kern_return_t (*memory_object_data_return)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size,
		memory_object_offset_t *resid_offset,
		int *io_error,
		boolean_t dirty,
		boolean_t kernel_copy,
		int upl_flags);
	kern_return_t (*memory_object_data_initialize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_cluster_size_t size);
	kern_return_t (*memory_object_data_unlock)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_prot_t desired_access);
	/* synchronize a range per sync_flags (see <mach/vm_sync.h>) */
	kern_return_t (*memory_object_synchronize)(
		memory_object_t mem_obj,
		memory_object_offset_t offset,
		memory_object_size_t size,
		vm_sync_t sync_flags);
	/* map / final-unmap notifications for the object */
	kern_return_t (*memory_object_map)(
		memory_object_t mem_obj,
		vm_prot_t prot);
	kern_return_t (*memory_object_last_unmap)(
		memory_object_t mem_obj);
	kern_return_t (*memory_object_data_reclaim)(
		memory_object_t mem_obj,
		boolean_t reclaim_backing_store);
	const char *memory_object_pager_name;
} * memory_object_pager_ops_t;
166
167 #else /* KERNEL_PRIVATE */
168
169 typedef mach_port_t memory_object_t;
170 typedef mach_port_t memory_object_control_t;
171
172 #endif /* KERNEL_PRIVATE */
173
174 typedef memory_object_t *memory_object_array_t;
175 /* A memory object ... */
176 /* Used by the kernel to retrieve */
177 /* or store data */
178
179 typedef mach_port_t memory_object_name_t;
180 /* Used to describe the memory ... */
181 /* object in vm_regions() calls */
182
183 typedef mach_port_t memory_object_default_t;
184 /* Registered with the host ... */
185 /* for creating new internal objects */
186
187 #define MEMORY_OBJECT_NULL ((memory_object_t) 0)
188 #define MEMORY_OBJECT_CONTROL_NULL ((memory_object_control_t) 0)
189 #define MEMORY_OBJECT_NAME_NULL ((memory_object_name_t) 0)
190 #define MEMORY_OBJECT_DEFAULT_NULL ((memory_object_default_t) 0)
191
192
193 typedef int memory_object_copy_strategy_t;
194 /* How memory manager handles copy: */
195 #define MEMORY_OBJECT_COPY_NONE 0
196 /* ... No special support */
197 #define MEMORY_OBJECT_COPY_CALL 1
198 /* ... Make call on memory manager */
199 #define MEMORY_OBJECT_COPY_DELAY 2
200 /* ... Memory manager doesn't
201 * change data externally.
202 */
203 #define MEMORY_OBJECT_COPY_TEMPORARY 3
204 /* ... Memory manager doesn't
205 * change data externally, and
206 * doesn't need to see changes.
207 */
208 #define MEMORY_OBJECT_COPY_SYMMETRIC 4
209 /* ... Memory manager doesn't
210 * change data externally,
211 * doesn't need to see changes,
212 * and object will not be
213 * multiply mapped.
214 *
215 * XXX
216 * Not yet safe for non-kernel use.
217 */
218
219 #define MEMORY_OBJECT_COPY_INVALID 5
220 /* ... An invalid copy strategy,
221 * for external objects which
222 * have not been initialized.
223 * Allows copy_strategy to be
224 * examined without also
225 * examining pager_ready and
226 * internal.
227 */
228
229 typedef int memory_object_return_t;
230 /* Which pages to return to manager
231 this time (lock_request) */
232 #define MEMORY_OBJECT_RETURN_NONE 0
233 /* ... don't return any. */
234 #define MEMORY_OBJECT_RETURN_DIRTY 1
235 /* ... only dirty pages. */
236 #define MEMORY_OBJECT_RETURN_ALL 2
237 /* ... dirty and precious pages. */
238 #define MEMORY_OBJECT_RETURN_ANYTHING 3
239 /* ... any resident page. */
240
241 /*
242 * Data lock request flags
243 */
244
245 #define MEMORY_OBJECT_DATA_FLUSH 0x1
246 #define MEMORY_OBJECT_DATA_NO_CHANGE 0x2
247 #define MEMORY_OBJECT_DATA_PURGE 0x4
248 #define MEMORY_OBJECT_COPY_SYNC 0x8
249 #define MEMORY_OBJECT_DATA_SYNC 0x10
250 #define MEMORY_OBJECT_IO_SYNC 0x20
251 #define MEMORY_OBJECT_DATA_FLUSH_ALL 0x40
252
253 /*
254 * Types for the memory object flavor interfaces
255 */
256
257 #define MEMORY_OBJECT_INFO_MAX (1024)
258 typedef int *memory_object_info_t;
259 typedef int memory_object_flavor_t;
260 typedef int memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX];
261
262
263 #define MEMORY_OBJECT_PERFORMANCE_INFO 11
264 #define MEMORY_OBJECT_ATTRIBUTE_INFO 14
265 #define MEMORY_OBJECT_BEHAVIOR_INFO 15
266
267 #ifdef PRIVATE
268
269 #define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10
270 #define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12
271
/* Legacy behavior-info payload, kept for the
 * OLD_MEMORY_OBJECT_BEHAVIOR_INFO flavor. */
struct old_memory_object_behave_info {
	memory_object_copy_strategy_t	copy_strategy;
	boolean_t			temporary;
	boolean_t			invalidate;
};
277
/* Legacy attribute payload, kept for the
 * OLD_MEMORY_OBJECT_ATTRIBUTE_INFO flavor. */
struct old_memory_object_attr_info {		/* old attr list */
	boolean_t			object_ready;
	boolean_t			may_cache;
	memory_object_copy_strategy_t	copy_strategy;
};
283
284 typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t;
285 typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t;
286 typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t;
287 typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t;
288
289 #define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \
290 (sizeof(old_memory_object_behave_info_data_t)/sizeof(int)))
291 #define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \
292 (sizeof(old_memory_object_attr_info_data_t)/sizeof(int)))
293
294 #ifdef KERNEL
295
296 __BEGIN_DECLS
297 extern void memory_object_reference(memory_object_t object);
298 extern void memory_object_deallocate(memory_object_t object);
299
300 extern void memory_object_default_reference(memory_object_default_t);
301 extern void memory_object_default_deallocate(memory_object_default_t);
302
303 extern void memory_object_control_reference(memory_object_control_t control);
304 extern void memory_object_control_deallocate(memory_object_control_t control);
305 extern int memory_object_control_uiomove(memory_object_control_t, memory_object_offset_t, void *, int, int, int, int);
306 __END_DECLS
307
308 #endif /* KERNEL */
309
310 #endif /* PRIVATE */
311
/* Payload for the MEMORY_OBJECT_PERFORMANCE_INFO flavor. */
struct memory_object_perf_info {
	memory_object_cluster_size_t	cluster_size;
	boolean_t			may_cache;
};
316
/* Payload for the MEMORY_OBJECT_ATTRIBUTE_INFO flavor. */
struct memory_object_attr_info {
	memory_object_copy_strategy_t	copy_strategy;
	memory_object_cluster_size_t	cluster_size;
	boolean_t			may_cache_object;
	boolean_t			temporary;
};
323
/* Payload for the MEMORY_OBJECT_BEHAVIOR_INFO flavor. */
struct memory_object_behave_info {
	memory_object_copy_strategy_t	copy_strategy;
	boolean_t			temporary;
	boolean_t			invalidate;
	boolean_t			silent_overwrite;
	boolean_t			advisory_pageout;
};
331
332
333 typedef struct memory_object_behave_info *memory_object_behave_info_t;
334 typedef struct memory_object_behave_info memory_object_behave_info_data_t;
335
336 typedef struct memory_object_perf_info *memory_object_perf_info_t;
337 typedef struct memory_object_perf_info memory_object_perf_info_data_t;
338
339 typedef struct memory_object_attr_info *memory_object_attr_info_t;
340 typedef struct memory_object_attr_info memory_object_attr_info_data_t;
341
342 #define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \
343 (sizeof(memory_object_behave_info_data_t)/sizeof(int)))
344 #define MEMORY_OBJECT_PERF_INFO_COUNT ((mach_msg_type_number_t) \
345 (sizeof(memory_object_perf_info_data_t)/sizeof(int)))
346 #define MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \
347 (sizeof(memory_object_attr_info_data_t)/sizeof(int)))
348
/*
 * Evaluates non-zero when 'f' is not one of the supported memory
 * object info flavors.  The argument and the whole expansion are
 * parenthesized so the macro composes safely inside larger
 * expressions at the call site.
 * NOTE: 'f' may be evaluated up to five times -- avoid arguments
 * with side effects.
 */
#define invalid_memory_object_flavor(f)					\
	((f) != MEMORY_OBJECT_ATTRIBUTE_INFO &&				\
	 (f) != MEMORY_OBJECT_PERFORMANCE_INFO &&			\
	 (f) != OLD_MEMORY_OBJECT_BEHAVIOR_INFO &&			\
	 (f) != MEMORY_OBJECT_BEHAVIOR_INFO &&				\
	 (f) != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO)
355
356
357 /*
358 * Used to support options on memory_object_release_name call
359 */
360 #define MEMORY_OBJECT_TERMINATE_IDLE 0x1
361 #define MEMORY_OBJECT_RESPECT_CACHE 0x2
362 #define MEMORY_OBJECT_RELEASE_NO_OP 0x4
363
364
365 /* named entry processor mapping options */
366 /* enumerated */
367 #define MAP_MEM_NOOP 0
368 #define MAP_MEM_COPYBACK 1
369 #define MAP_MEM_IO 2
370 #define MAP_MEM_WTHRU 3
371 #define MAP_MEM_WCOMB 4 /* Write combining mode */
372 /* aka store gather */
373 #define MAP_MEM_INNERWBACK 5
374
/*
 * Pack/unpack the named-entry processor caching mode (MAP_MEM_*
 * enumeration) stored in the top byte of a flags word.
 */
#define GET_MAP_MEM(flags)	\
	((((unsigned int)(flags)) >> 24) & 0xFF)

/*
 * Store 'caching' into the top byte of 'flags', preserving the low
 * 24 bits.  Expands to a parenthesized assignment expression with
 * no trailing semicolon, so the call site's own ';' terminates it
 * (the previous trailing ';' broke use inside if/else without
 * braces).
 */
#define SET_MAP_MEM(caching, flags)	\
	((flags) = ((((unsigned int)(caching)) << 24) \
		& 0xFF000000) | ((flags) & 0xFFFFFF))
381
382 /* leave room for vm_prot bits */
383 #define MAP_MEM_ONLY 0x010000 /* change processor caching */
384 #define MAP_MEM_NAMED_CREATE 0x020000 /* create extant object */
385 #define MAP_MEM_PURGABLE 0x040000 /* create a purgable VM object */
386 #define MAP_MEM_NAMED_REUSE 0x080000 /* reuse provided entry if identical */
387 #define MAP_MEM_USE_DATA_ADDR 0x100000 /* preserve address of data, rather than base of page */
388 #define MAP_MEM_VM_COPY 0x200000 /* make a copy of a VM range */
389 #define MAP_MEM_VM_SHARE 0x400000 /* extract a VM range for remap */
390
391 #ifdef KERNEL
392
393 /*
394 * Universal Page List data structures
395 *
396 * A UPL describes a bounded set of physical pages
397 * associated with some range of an object or map
398 * and a snapshot of the attributes associated with
399 * each of those pages.
400 */
401 #ifdef PRIVATE
402 #define MAX_UPL_TRANSFER 256
403 #define MAX_UPL_SIZE 8192
404
/*
 * Per-page entry of a Universal Page List: the page's physical page
 * number plus a snapshot of its state bits.  Outside of xnu proper
 * the state word is opaque; use the upl_page_xxx() accessors.
 * NOTE(review): layout is ABI -- keep field/bit order unchanged.
 */
struct upl_page_info {
	ppnum_t		phys_addr;	/* physical page index number */
	unsigned int
#ifdef XNU_KERNEL_PRIVATE
			pageout:1,	/* page is to be removed on commit */
			absent:1,	/* No valid data in this page */
			dirty:1,	/* Page must be cleaned (O) */
			precious:1,	/* must be cleaned, we have only copy */
			device:1,	/* no page data, mapped dev memory */
			speculative:1,	/* page is valid, but not yet accessed */
			cs_validated:1,	/* CODE SIGNING: page was validated */
			cs_tainted:1,	/* CODE SIGNING: page is tainted */
			needed:1,	/* page should be left in cache on abort */
			:0;		/* force to long boundary */
#else
			opaque;		/* use upl_page_xxx() accessor funcs */
#endif /* XNU_KERNEL_PRIVATE */
};
423
424 #else
425
/* Opaque form of the per-page UPL entry exported to non-PRIVATE
 * kernel clients; contents are accessible only through the
 * upl_page_xxx() accessor functions. */
struct upl_page_info {
	unsigned int	opaque[2];	/* use upl_page_xxx() accessor funcs */
};
429
430 #endif /* PRIVATE */
431
432 typedef struct upl_page_info upl_page_info_t;
433 typedef upl_page_info_t *upl_page_info_array_t;
434 typedef upl_page_info_array_t upl_page_list_ptr_t;
435
436 typedef uint32_t upl_offset_t; /* page-aligned byte offset */
437 typedef uint32_t upl_size_t; /* page-aligned byte size */
438
439 /* upl invocation flags */
440 /* top nibble is used by super upl */
441
442 #define UPL_FLAGS_NONE 0x00000000
443 #define UPL_COPYOUT_FROM 0x00000001
444 #define UPL_PRECIOUS 0x00000002
445 #define UPL_NO_SYNC 0x00000004
446 #define UPL_CLEAN_IN_PLACE 0x00000008
447 #define UPL_NOBLOCK 0x00000010
448 #define UPL_RET_ONLY_DIRTY 0x00000020
449 #define UPL_SET_INTERNAL 0x00000040
450 #define UPL_QUERY_OBJECT_TYPE 0x00000080
451 #define UPL_RET_ONLY_ABSENT 0x00000100 /* used only for COPY_FROM = FALSE */
452 #define UPL_FILE_IO 0x00000200
453 #define UPL_SET_LITE 0x00000400
454 #define UPL_SET_INTERRUPTIBLE 0x00000800
455 #define UPL_SET_IO_WIRE 0x00001000
456 #define UPL_FOR_PAGEOUT 0x00002000
457 #define UPL_WILL_BE_DUMPED 0x00004000
458 #define UPL_FORCE_DATA_SYNC 0x00008000
459 /* continued after the ticket bits... */
460
461 #define UPL_PAGE_TICKET_MASK 0x000F0000
462 #define UPL_PAGE_TICKET_SHIFT 16
463
464 /* ... flags resume here */
465 #define UPL_BLOCK_ACCESS 0x00100000
466 #define UPL_ENCRYPT 0x00200000
467 #define UPL_NOZEROFILL 0x00400000
468 #define UPL_WILL_MODIFY 0x00800000 /* caller will modify the pages */
469
470 #define UPL_NEED_32BIT_ADDR 0x01000000
471 #define UPL_UBC_MSYNC 0x02000000
472 #define UPL_UBC_PAGEOUT 0x04000000
473 #define UPL_UBC_PAGEIN 0x08000000
474 #define UPL_REQUEST_SET_DIRTY 0x10000000
475 #define UPL_REQUEST_NO_FAULT 0x20000000 /* fail if pages not all resident */
476 #define UPL_NOZEROFILLIO 0x40000000 /* allow non zerofill pages present */
477 #define UPL_REQUEST_FORCE_COHERENCY 0x80000000
478
479 /* UPL flags known by this kernel */
480 #define UPL_VALID_FLAGS 0xFFFFFFFF
481
482
483 /* upl abort error flags */
484 #define UPL_ABORT_RESTART 0x1
485 #define UPL_ABORT_UNAVAILABLE 0x2
486 #define UPL_ABORT_ERROR 0x4
487 #define UPL_ABORT_FREE_ON_EMPTY 0x8 /* only implemented in wrappers */
488 #define UPL_ABORT_DUMP_PAGES 0x10
489 #define UPL_ABORT_NOTIFY_EMPTY 0x20
490 /* deprecated: #define UPL_ABORT_ALLOW_ACCESS 0x40 */
491 #define UPL_ABORT_REFERENCE 0x80
492
493 /* upl pages check flags */
494 #define UPL_CHECK_DIRTY 0x1
495
496
497 /*
498 * upl pagein/pageout flags
499 *
500 *
501 * when I/O is issued from this UPL it should be done synchronously
502 */
503 #define UPL_IOSYNC 0x1
504
505 /*
506 * the passed in UPL should not have either a commit or abort
507 * applied to it by the underlying layers... the site that
508 * created the UPL is responsible for cleaning it up.
509 */
510 #define UPL_NOCOMMIT 0x2
511
512 /*
513 * turn off any speculative read-ahead applied at the I/O layer
514 */
515 #define UPL_NORDAHEAD 0x4
516
517 /*
518 * pageout request is targeting a real file
519 * as opposed to a swap file.
520 */
521
522 #define UPL_VNODE_PAGER 0x8
523 /*
524 * this pageout is being originated as part of an explicit
525 * memory synchronization operation... no speculative clustering
526 * should be applied, only the range specified should be pushed.
527 */
528 #define UPL_MSYNC 0x10
529
530 /*
531 *
532 */
533 #define UPL_PAGING_ENCRYPTED 0x20
534
535 /*
536 * this pageout is being originated as part of an explicit
537 * memory synchronization operation that is checking for I/O
 538  * errors and taking its own action... if an error occurs,
539 * just abort the pages back into the cache unchanged
540 */
541 #define UPL_KEEPCACHED 0x40
542
543 /*
544 * this pageout originated from within cluster_io to deal
545 * with a dirty page that hasn't yet been seen by the FS
546 * that backs it... tag it so that the FS can take the
547 * appropriate action w/r to its locking model since the
548 * pageout will reenter the FS for the same file currently
549 * being handled in this context.
550 */
551 #define UPL_NESTED_PAGEOUT 0x80
552
553 /*
554 * we've detected a sequential access pattern and
555 * we are speculatively and aggressively pulling
556 * pages in... do not count these as real PAGEINs
557 * w/r to our hard throttle maintenance
558 */
559 #define UPL_IOSTREAMING 0x100
560
561 /*
562 * Currently, it's only used for the swap pagein path.
563 * Since the swap + compressed pager layer manage their
564 * pages, these pages are not marked "absent" i.e. these
565 * are "valid" pages. The pagein path will _not_ issue an
566 * I/O (correctly) for valid pages. So, this flag is used
567 * to override that logic in the vnode I/O path.
568 */
569 #define UPL_IGNORE_VALID_PAGE_CHECK 0x200
570
571
572
573 /* upl commit flags */
574 #define UPL_COMMIT_FREE_ON_EMPTY 0x1 /* only implemented in wrappers */
575 #define UPL_COMMIT_CLEAR_DIRTY 0x2
576 #define UPL_COMMIT_SET_DIRTY 0x4
577 #define UPL_COMMIT_INACTIVATE 0x8
578 #define UPL_COMMIT_NOTIFY_EMPTY 0x10
579 /* deprecated: #define UPL_COMMIT_ALLOW_ACCESS 0x20 */
580 #define UPL_COMMIT_CS_VALIDATED 0x40
581 #define UPL_COMMIT_CLEAR_PRECIOUS 0x80
582 #define UPL_COMMIT_SPECULATE 0x100
583 #define UPL_COMMIT_FREE_ABSENT 0x200
584 #define UPL_COMMIT_WRITTEN_BY_KERNEL 0x400
585
586 #define UPL_COMMIT_KERNEL_ONLY_FLAGS (UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT)
587
588 /* flags for return of state from vm_map_get_upl, vm_upl address space */
589 /* based call */
590 #define UPL_DEV_MEMORY 0x1
591 #define UPL_PHYS_CONTIG 0x2
592
593
594 /*
595 * Flags for the UPL page ops routine. This routine is not exported
596 * out of the kernel at the moment and so the defs live here.
597 */
598 #define UPL_POP_DIRTY 0x1
599 #define UPL_POP_PAGEOUT 0x2
600 #define UPL_POP_PRECIOUS 0x4
601 #define UPL_POP_ABSENT 0x8
602 #define UPL_POP_BUSY 0x10
603
604 #define UPL_POP_PHYSICAL 0x10000000
605 #define UPL_POP_DUMP 0x20000000
606 #define UPL_POP_SET 0x40000000
607 #define UPL_POP_CLR 0x80000000
608
609 /*
610 * Flags for the UPL range op routine. This routine is not exported
 611  * out of the kernel at the moment and so the defs live here.
612 */
613 /*
614 * UPL_ROP_ABSENT: Returns the extent of the range presented which
615 * is absent, starting with the start address presented
616 */
617 #define UPL_ROP_ABSENT 0x01
618 /*
619 * UPL_ROP_PRESENT: Returns the extent of the range presented which
620 * is present (i.e. resident), starting with the start address presented
621 */
622 #define UPL_ROP_PRESENT 0x02
623 /*
624 * UPL_ROP_DUMP: Dump the pages which are found in the target object
625 * for the target range.
626 */
627 #define UPL_ROP_DUMP 0x04
628
629 #ifdef PRIVATE
630
/* access macros for upl_t */
/*
 * NOTE(review): every macro below evaluates its 'upl' (and 'index')
 * argument more than once -- do not pass expressions with side
 * effects.  A slot with phys_addr == 0 is treated as "no page
 * present": the predicates yield FALSE and the set/clear macros do
 * nothing for such slots.
 */

#define UPL_DEVICE_PAGE(upl) \
	(((upl)[0].phys_addr != 0) ? ((upl)[0].device) : FALSE)

#define UPL_PAGE_PRESENT(upl, index) \
	((upl)[(index)].phys_addr != 0)

#define UPL_PHYS_PAGE(upl, index) \
	((upl)[(index)].phys_addr)

#define UPL_SPECULATIVE_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].speculative) : FALSE)

#define UPL_DIRTY_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE)

#define UPL_PRECIOUS_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].precious) : FALSE)

#define UPL_VALID_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? (!((upl)[(index)].absent)) : FALSE)

#define UPL_PAGEOUT_PAGE(upl, index) \
	(((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].pageout) : FALSE)

#define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?	  \
	 ((upl)[(index)].pageout = TRUE) : FALSE)

#define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \
	(((upl)[(index)].phys_addr != 0) ?	  \
	 ((upl)[(index)].pageout = FALSE) : FALSE)

/* modifier macros for upl_t */

#define UPL_SET_CS_VALIDATED(upl, index, value) \
	((upl)[(index)].cs_validated = ((value) ? TRUE : FALSE))

#define UPL_SET_CS_TAINTED(upl, index, value) \
	((upl)[(index)].cs_tainted = ((value) ? TRUE : FALSE))
672
673 /* The call prototyped below is used strictly by UPL_GET_INTERNAL_PAGE_LIST */
674
675 extern vm_size_t upl_offset_to_pagelist;
676 extern vm_size_t upl_get_internal_pagelist_offset(void);
677 extern void* upl_get_internal_vectorupl(upl_t);
678 extern upl_page_info_t* upl_get_internal_vectorupl_pagelist(upl_t);
679
/*Use this variant to get the UPL's page list iff:*/
/*- the upl being passed in is already part of a vector UPL*/
/*- the page list you want is that of this "sub-upl" and not that of the entire vector-upl*/
/*
 * NOTE(review): on first use this lazily caches the pagelist offset
 * in the global 'upl_offset_to_pagelist' (assignment side effect
 * inside the macro), then locates the page list by adding that
 * offset to the upl pointer.
 */

#define UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl) \
	((upl_page_info_t *)((upl_offset_to_pagelist == 0) ?  \
	(uintptr_t)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \
	(uintptr_t)upl + (unsigned int)upl_offset_to_pagelist))

/* UPL_GET_INTERNAL_PAGE_LIST is only valid on internal objects where the */
/* list request was made with the UPL_INTERNAL flag */

/* For a vector UPL, return the vector's own page list; otherwise
 * fall back to the same offset computation as the SIMPLE variant. */
#define UPL_GET_INTERNAL_PAGE_LIST(upl) \
	((upl_get_internal_vectorupl(upl) != NULL ) ? (upl_get_internal_vectorupl_pagelist(upl)) : \
	((upl_page_info_t *)((upl_offset_to_pagelist == 0) ?  \
	(uintptr_t)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \
	(uintptr_t)upl + (unsigned int)upl_offset_to_pagelist)))
698
699 __BEGIN_DECLS
700
701 extern ppnum_t upl_phys_page(upl_page_info_t *upl, int index);
702 extern boolean_t upl_device_page(upl_page_info_t *upl);
703 extern boolean_t upl_speculative_page(upl_page_info_t *upl, int index);
704 extern void upl_clear_dirty(upl_t upl, boolean_t value);
705 extern void upl_set_referenced(upl_t upl, boolean_t value);
706 extern void upl_range_needed(upl_t upl, int index, int count);
707
708 __END_DECLS
709
710 #endif /* PRIVATE */
711
712 __BEGIN_DECLS
713
714 extern boolean_t upl_page_present(upl_page_info_t *upl, int index);
715 extern boolean_t upl_dirty_page(upl_page_info_t *upl, int index);
716 extern boolean_t upl_valid_page(upl_page_info_t *upl, int index);
717 extern void upl_deallocate(upl_t upl);
718
719 __END_DECLS
720
721 #endif /* KERNEL */
722
723 #endif /* _MACH_MEMORY_OBJECT_TYPES_H_ */