/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */
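
/*
 * Illustrative include order (a sketch; vm/vm_map.h stands in for
 * whatever other VM headers a client happens to pull in first):
 *
 *	#include <mach/mach_types.h>
 *	#include <vm/vm_map.h>
 *	#include <vm/vm_protos.h>	// last, per the note above
 */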

/*
 * iokit
 */
extern kern_return_t device_data_action(
    uintptr_t device_handle,
    ipc_port_t device_pager,
    vm_prot_t protection,
    vm_object_offset_t offset,
    vm_size_t size);

extern kern_return_t device_close(
    uintptr_t device_handle);

extern boolean_t vm_swap_files_pinned(void);

/*
 * osfmk
 */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
    ipc_port_t sright,
    ipc_space_t space);
extern task_t port_name_to_task(
    mach_port_name_t name);
extern task_t port_name_to_task_inspect(
    mach_port_name_t name);
extern void ipc_port_release_send(
    ipc_port_t port);
#endif /* _IPC_IPC_PORT_H_ */

extern ipc_space_t get_task_ipcspace(
    task_t t);

#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */

/* Miscellaneous loose-end VM declarations */

extern vm_map_t kalloc_map;
extern vm_size_t msg_ool_size_small;
extern vm_map_t zone_map;

extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern kern_return_t vm_map_purgable_control(
    vm_map_t map,
    vm_map_offset_t address,
    vm_purgable_t control,
    int *state);

#if MACH_ASSERT
extern void vm_map_pmap_check_ledgers(
    pmap_t pmap,
    ledger_t ledger,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t
vnode_pager_get_object_vnode(
    memory_object_t mem_obj,
    uintptr_t *vnodeaddr,
    uint32_t *vid);

#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
#endif

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
	vm_map_t target_task,
	upl_t upl,
	vm_address_t *address
);

extern kern_return_t vm_upl_unmap
(
	vm_map_t target_task,
	upl_t upl
);
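
/*
 * Illustrative pairing (a sketch, not code from this file): map a UPL
 * into the kernel map, touch its pages, then unmap it.  "upl" is
 * assumed to have been created elsewhere (e.g. via ubc_create_upl()).
 *
 *	vm_address_t addr;
 *	if (vm_upl_map(kernel_map, upl, &addr) == KERN_SUCCESS) {
 *		... access the pages at addr ...
 *		(void) vm_upl_unmap(kernel_map, upl);
 *	}
 */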

extern kern_return_t vm_region_object_create
(
	vm_map_t target_task,
	vm_size_t size,
	ipc_port_t *object_handle
);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end);
#endif /* CONFIG_CODE_DECRYPTION */

struct vm_shared_region_slide_info;
extern kern_return_t vm_map_shared_region(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info);
extern void shared_region_pager_bootstrap(void);
extern memory_object_t shared_region_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info);

struct vnode;
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);

#if __arm64__ || (__ARM_ARCH_7K__ >= 2)
#define SIXTEENK_PAGE_SIZE 0x4000
#define SIXTEENK_PAGE_MASK 0x3FFF
#define SIXTEENK_PAGE_SHIFT 14
#endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */
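
/*
 * These constants satisfy SIXTEENK_PAGE_SIZE == (1 << SIXTEENK_PAGE_SHIFT)
 * and SIXTEENK_PAGE_MASK == (SIXTEENK_PAGE_SIZE - 1), so the usual
 * round-up of an address to a 16K boundary is:
 *
 *	addr16k = (addr + SIXTEENK_PAGE_MASK) & ~SIXTEENK_PAGE_MASK;
 *
 * e.g. 0x4001 rounds up to 0x8000.
 */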

#if __arm64__
#define FOURK_PAGE_SIZE 0x1000
#define FOURK_PAGE_MASK 0xFFF
#define FOURK_PAGE_SHIFT 12

extern unsigned int page_shift_user32;

#define VM_MAP_DEBUG_FOURK MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
extern void fourk_pager_bootstrap(void);
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
    memory_object_t mem_obj,
    boolean_t overwrite,
    int index,
    vm_object_t new_backing_object,
    vm_object_offset_t new_backing_offset,
    vm_object_t *old_backing_object,
    vm_object_offset_t *old_backing_offset);
#endif /* __arm64__ */

/*
 * bsd
 */
struct vnode;
extern void *upl_get_internal_page_list(
    upl_t upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

extern void vnode_pager_was_dirtied(
    struct vnode *,
    vm_object_offset_t,
    vm_object_offset_t);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
    struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
    struct vnode *);
extern uint32_t vnode_pager_isinuse(
    struct vnode *);
extern boolean_t vnode_pager_isSSD(
    struct vnode *);
extern void vnode_pager_throttle(
    void);
extern uint32_t vnode_pager_return_throttle_io_limit(
    struct vnode *,
    uint32_t *);
extern kern_return_t vnode_pager_get_name(
    struct vnode *vp,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
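
/*
 * Illustrative call (a sketch; the buffer sizes are arbitrary choices):
 *
 *	char path[1024], name[256];
 *	boolean_t truncated;
 *	kern_return_t kr = vnode_pager_get_name(vp, path, sizeof(path),
 *	    name, sizeof(name), &truncated);
 *	if (kr == KERN_SUCCESS && truncated) {
 *		... the full path did not fit in the buffer ...
 *	}
 */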
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
    struct vnode *vp,
    struct timespec *mtime,
    struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
    struct vnode *vp,
    void **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
    struct vnode *devvp,
    uint64_t blkno,
    uint32_t len,
    int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* optype values for the vnode_pager_cs_check_validation_bitmap routine */
#define CS_BITMAP_SET 1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3

#endif /* CHECK_CS_VALIDATION_BITMAP */
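
/*
 * Illustrative use (a sketch): query whether a page's validation bit
 * is set, without modifying the bitmap:
 *
 *	kern_return_t kr = vnode_pager_cs_check_validation_bitmap(
 *	    mem_obj, offset, CS_BITMAP_CHECK);
 *
 * (The routine itself is declared further below.)
 */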

extern void vnode_pager_bootstrap(void);
extern kern_return_t
vnode_pager_data_unlock(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
    memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
    memory_object_t,
    memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
    memory_object_t,
    uintptr_t *);
#endif

extern void vnode_pager_dirtied(
    memory_object_t,
    vm_object_offset_t,
    vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
    memory_object_t,
    boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
    memory_object_t mem_obj,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
    memory_object_t mem_obj,
    struct timespec *mtime,
    struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t ubc_cs_check_validation_bitmap(
    struct vnode *vp,
    memory_object_offset_t offset,
    int optype);

extern kern_return_t vnode_pager_data_request(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t vnode_pager_data_initialize(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern void vnode_pager_reference(
    memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
extern kern_return_t vnode_pager_map(
    memory_object_t mem_obj,
    vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
    memory_object_t mem_obj);
extern void vnode_pager_deallocate(
    memory_object_t);
extern kern_return_t vnode_pager_terminate(
    memory_object_t);
extern void vnode_pager_vrele(
    struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
    memory_object_t);

extern int ubc_map(
    struct vnode *vp,
    int flags);
extern void ubc_unmap(
    struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
    memory_object_t device,
    memory_object_offset_t offset,
    ppnum_t page_num,
    vm_size_t size);
extern memory_object_t device_pager_setup(
    memory_object_t,
    uintptr_t,
    vm_size_t,
    int);
extern void device_pager_bootstrap(void);
extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);
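
/*
 * Illustrative flow (a sketch; "device_handle", "dev_size", "flags" and
 * "page_num" are hypothetical driver-side values): create a device
 * pager and populate one page of its object:
 *
 *	memory_object_t pager;
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL, device_handle,
 *	    dev_size, flags);
 *	if (pager != MEMORY_OBJECT_NULL) {
 *		(void) device_pager_populate_object(pager, 0, page_num,
 *		    PAGE_SIZE);
 *	}
 */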

extern kern_return_t pager_map_to_phys_contiguous(
    memory_object_control_t object,
    memory_object_offset_t offset,
    addr64_t base_vaddr,
    vm_size_t size);

extern kern_return_t memory_object_create_named(
    memory_object_t pager,
    memory_object_offset_t size,
    memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
    struct macx_triggers_args *args);

extern int macx_swapinfo(
    memory_object_size_t *total_p,
    memory_object_size_t *avail_p,
    vm_size_t *pagesize_p,
    boolean_t *encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
    vm_map_t map,
    vm_map_offset_t start_unnest,
    vm_map_offset_t end_unnest,
    boolean_t is_nested_map,
    vm_map_offset_t lowest_unnestable_addr);

struct proc;
struct proc *current_proc(void);
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

#define CS_VALIDATE_TAINTED 0x00000001
#define CS_VALIDATE_NX 0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    vm_size_t size,
    unsigned *result);
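
/*
 * Illustrative check (a sketch): validate a run of pages and inspect
 * the bitmask returned through "result":
 *
 *	unsigned result = 0;
 *	boolean_t ok = cs_validate_range(vp, pager, offset, data, size,
 *	    &result);
 *	if (!ok || (result & CS_VALIDATE_TAINTED)) {
 *		... treat the pages as code-signing invalid ...
 *	}
 */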
#if PMAP_CS
extern kern_return_t cs_associate_blob_with_mapping(
    void *pmap,
    vm_map_offset_t start,
    vm_map_size_t size,
    vm_object_offset_t offset,
    void *blobs_p);
#endif /* PMAP_CS */

extern kern_return_t memory_entry_purgeable_control_internal(
    ipc_port_t entry_port,
    vm_purgable_t control,
    int *state);

extern kern_return_t memory_entry_access_tracking_internal(
    ipc_port_t entry_port,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);

extern kern_return_t mach_memory_entry_purgable_control(
    ipc_port_t entry_port,
    vm_purgable_t control,
    int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
    ipc_port_t entry_port,
    unsigned int *resident_page_count,
    unsigned int *dirty_page_count);

extern kern_return_t mach_memory_entry_page_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset,
    int ops,
    ppnum_t *phys_entry,
    int *flags);

extern kern_return_t mach_memory_entry_range_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset_beg,
    vm_object_offset_t offset_end,
    int ops,
    int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
    struct vm_named_entry **user_entry_p,
    ipc_port_t *user_handle_p);

extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

#define VM_TOGGLE_CLEAR 0
#define VM_TOGGLE_SET 1
#define VM_TOGGLE_GETVALUE 999
int vm_toggle_entry_reuse(int, int *);
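
/*
 * Illustrative use (a sketch): read the current entry-reuse setting
 * without changing it:
 *
 *	int value;
 *	(void) vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &value);
 */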

#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define SWAP_READ 0x00000001 /* Read buffer. */
#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
    memory_object_size_t,
    memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc *, boolean_t);
#endif /* CONFIG_JETSAM */

/* The object purger: purges the next eligible object from memory. */
/* Returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
void vm_purgeable_nonvolatile_owner_update(task_t owner,
    int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
    int delta);
void vm_owned_objects_disown(task_t task);


struct trim_list {
	uint64_t tl_offset;
	uint64_t tl_length;
	struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
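
/*
 * Illustrative use (a sketch): trim two extents in a single call by
 * chaining trim_list nodes; the offsets and lengths are arbitrary:
 *
 *	struct trim_list t1 = { 0x200000, 0x10000, NULL };
 *	struct trim_list t0 = { 0x100000, 0x8000, &t1 };
 *	(void) vnode_trim_list(vp, &t0, FALSE);
 */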

#define MAX_SWAPFILENAME_LEN 1024
#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NUL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

struct vm_counters {
	unsigned int do_collapse_compressor;
	unsigned int do_collapse_compressor_pages;
	unsigned int do_collapse_terminate;
	unsigned int do_collapse_terminate_failure;
	unsigned int should_cow_but_wired;
	unsigned int create_upl_extra_cow;
	unsigned int create_upl_extra_cow_pages;
	unsigned int create_upl_lookup_failure_write;
	unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
	int eligible_for_secluded;
	int grab_success_free;
	int grab_success_other;
	int grab_failure_locked;
	int grab_failure_state;
	int grab_failure_dirty;
	int grab_for_iokit;
	int grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

/* boot-args */
extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
#if 11
extern int secluded_for_fbdp;
#endif

extern uint64_t vm_page_secluded_drain(void);
extern void memory_object_mark_eligible_for_secluded(
    memory_object_control_t control,
    boolean_t eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */

extern kern_return_t mach_make_memory_entry_internal(
    vm_map_t target_map,
    memory_object_size_t *size,
    memory_object_offset_t offset,
    vm_prot_t permission,
    vm_named_entry_kernel_flags_t vmne_kflags,
    ipc_port_t *object_handle,
    ipc_port_t parent_handle);

#define roundup(x, y) ((((x) % (y)) == 0) ? \
	(x) : ((x) + ((y) - ((x) % (y)))))
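
/*
 * Worked example: roundup(3000, 4096) == 4096, and
 * roundup(8192, 4096) == 8192 (already a multiple, returned unchanged).
 * Note that the macro evaluates both arguments more than once, so they
 * must be side-effect free, and y must be nonzero.
 */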

#ifdef __cplusplus
}
#endif

/*
 * Flags for the VM swapper/reclaimer.
 * Used by vm_swap_consider_defragment()
 * to force defrag/reclaim by the swap
 * GC thread.
 */
#define VM_SWAP_FLAGS_NONE 0
#define VM_SWAP_FLAGS_FORCE_DEFRAG 1
#define VM_SWAP_FLAGS_FORCE_RECLAIM 2

#if __arm64__
/*
 * Flags to control the behavior of
 * the legacy footprint entitlement.
 */
#define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE (1)
#define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT (2)
#define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE (3)

#endif /* __arm64__ */

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */