/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */
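
/*
 * Illustrative sketch (not part of the original header): per the note above,
 * a hypothetical consumer would include this header after everything else,
 * e.g.:
 *
 *      #include <mach/mach_types.h>
 *      #include <vm/vm_map.h>
 *      #include <vm/vm_protos.h>       // include last
 */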

/*
 * iokit
 */
extern kern_return_t device_data_action(
        uintptr_t device_handle,
        ipc_port_t device_pager,
        vm_prot_t protection,
        vm_object_offset_t offset,
        vm_size_t size);

extern kern_return_t device_close(
        uintptr_t device_handle);

extern boolean_t vm_swap_files_pinned(void);

/*
 * osfmk
 */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
        ipc_port_t sright,
        ipc_space_t space);
extern task_t port_name_to_task(
        mach_port_name_t name);
extern task_t port_name_to_task_inspect(
        mach_port_name_t name);
extern void ipc_port_release_send(
        ipc_port_t port);
#endif /* _IPC_IPC_PORT_H_ */

extern ipc_space_t get_task_ipcspace(
        task_t t);

#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */

/* Some loose-ends VM stuff */

extern vm_map_t kalloc_map;
extern vm_size_t msg_ool_size_small;
extern vm_map_t zone_map;

extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern kern_return_t vm_map_purgable_control(
        vm_map_t map,
        vm_map_offset_t address,
        vm_purgable_t control,
        int *state);

#if MACH_ASSERT
extern void vm_map_pmap_check_ledgers(
        pmap_t pmap,
        ledger_t ledger,
        int pid,
        char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t
vnode_pager_get_object_vnode(
        memory_object_t mem_obj,
        uintptr_t *vnodeaddr,
        uint32_t *vid);

#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
#endif

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
        vm_map_t target_task,
        upl_t upl,
        vm_address_t *address
);

extern kern_return_t vm_upl_unmap
(
        vm_map_t target_task,
        upl_t upl
);

extern kern_return_t vm_region_object_create
(
        vm_map_t target_task,
        vm_size_t size,
        ipc_port_t *object_handle
);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
        vm_map_t map,
        vm_map_offset_t start,
        vm_map_offset_t end,
        vm_object_offset_t crypto_backing_offset,
        struct pager_crypt_info *crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(
        vm_object_t backing_object,
        vm_object_offset_t backing_offset,
        vm_object_offset_t crypto_backing_offset,
        struct pager_crypt_info *crypt_info,
        vm_object_offset_t crypto_start,
        vm_object_offset_t crypto_end);
#endif /* CONFIG_CODE_DECRYPTION */

struct vm_shared_region_slide_info;
extern kern_return_t vm_map_shared_region(
        vm_map_t map,
        vm_map_offset_t start,
        vm_map_offset_t end,
        vm_object_offset_t backing_offset,
        struct vm_shared_region_slide_info *slide_info);
extern void shared_region_pager_bootstrap(void);
extern memory_object_t shared_region_pager_setup(
        vm_object_t backing_object,
        vm_object_offset_t backing_offset,
        struct vm_shared_region_slide_info *slide_info);

struct vnode;
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);

#if __arm64__ || (__ARM_ARCH_7K__ >= 2)
#define SIXTEENK_PAGE_SIZE  0x4000
#define SIXTEENK_PAGE_MASK  0x3FFF
#define SIXTEENK_PAGE_SHIFT 14
#endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */

#if __arm64__
#define FOURK_PAGE_SIZE  0x1000
#define FOURK_PAGE_MASK  0xFFF
#define FOURK_PAGE_SHIFT 12

extern unsigned int page_shift_user32;

#define VM_MAP_DEBUG_FOURK MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
extern void fourk_pager_bootstrap(void);
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
        memory_object_t mem_obj,
        boolean_t overwrite,
        int index,
        vm_object_t new_backing_object,
        vm_object_offset_t new_backing_offset,
        vm_object_t *old_backing_object,
        vm_object_offset_t *old_backing_offset);
#endif /* __arm64__ */

/*
 * bsd
 */
struct vnode;
extern void *upl_get_internal_page_list(
        upl_t upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

extern void vnode_pager_was_dirtied(
        struct vnode *,
        vm_object_offset_t,
        vm_object_offset_t);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
        struct vnode *, upl_t,
        upl_offset_t, vm_object_offset_t,
        upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
        struct vnode *, upl_t,
        upl_offset_t, vm_object_offset_t,
        upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
        struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
        struct vnode *);
extern uint32_t vnode_pager_isinuse(
        struct vnode *);
extern boolean_t vnode_pager_isSSD(
        struct vnode *);
extern void vnode_pager_throttle(
        void);
extern uint32_t vnode_pager_return_throttle_io_limit(
        struct vnode *,
        uint32_t *);
extern kern_return_t vnode_pager_get_name(
        struct vnode *vp,
        char *pathname,
        vm_size_t pathname_len,
        char *filename,
        vm_size_t filename_len,
        boolean_t *truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
        struct vnode *vp,
        struct timespec *mtime,
        struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
        struct vnode *vp,
        void **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
        struct vnode *devvp,
        uint64_t blkno,
        uint32_t len,
        int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* optype values for the CS validation bitmap routines declared below */
#define CS_BITMAP_SET   1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3

#endif /* CHECK_CS_VALIDATION_BITMAP */

extern void vnode_pager_bootstrap(void);
extern kern_return_t
vnode_pager_data_unlock(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_size_t size,
        vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
        memory_object_t,
        memory_object_control_t,
        memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
        memory_object_t,
        memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
        memory_object_t,
        uintptr_t *);
#endif

extern void vnode_pager_dirtied(
        memory_object_t,
        vm_object_offset_t,
        vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
        memory_object_t,
        uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
        memory_object_t,
        boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
        memory_object_t,
        uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
        memory_object_t mem_obj,
        char *pathname,
        vm_size_t pathname_len,
        char *filename,
        vm_size_t filename_len,
        boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
        memory_object_t mem_obj,
        struct timespec *mtime,
        struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */
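
/*
 * Usage sketch (illustrative, not part of the original header): the optype
 * argument takes one of the CS_BITMAP_* values defined earlier, e.g. marking
 * an offset as validated and later re-checking it (the return-value semantics
 * shown here are an assumption):
 *
 *      kern_return_t kr;
 *
 *      kr = vnode_pager_cs_check_validation_bitmap(mem_obj, offset, CS_BITMAP_SET);
 *      ...
 *      kr = vnode_pager_cs_check_validation_bitmap(mem_obj, offset, CS_BITMAP_CHECK);
 *      if (kr == KERN_SUCCESS) {
 *              // offset is presumably still marked as validated
 *      }
 */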

extern kern_return_t ubc_cs_check_validation_bitmap(
        struct vnode *vp,
        memory_object_offset_t offset,
        int optype);

extern kern_return_t vnode_pager_data_request(
        memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        vm_prot_t,
        memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
        memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        memory_object_offset_t *,
        int *,
        boolean_t,
        boolean_t,
        int);
extern kern_return_t vnode_pager_data_initialize(
        memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t);
extern void vnode_pager_reference(
        memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_size_t length,
        vm_sync_t sync_flags);
extern kern_return_t vnode_pager_map(
        memory_object_t mem_obj,
        vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
        memory_object_t mem_obj);
extern void vnode_pager_deallocate(
        memory_object_t);
extern kern_return_t vnode_pager_terminate(
        memory_object_t);
extern void vnode_pager_vrele(
        struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
        memory_object_t);

extern int ubc_map(
        struct vnode *vp,
        int flags);
extern void ubc_unmap(
        struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
        memory_object_control_t,
        memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        vm_prot_t,
        memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        memory_object_offset_t *,
        int *,
        boolean_t,
        boolean_t,
        int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
        memory_object_offset_t,
        memory_object_size_t,
        vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
        memory_object_offset_t,
        memory_object_size_t,
        vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
        memory_object_t device,
        memory_object_offset_t offset,
        ppnum_t page_num,
        vm_size_t size);
extern memory_object_t device_pager_setup(
        memory_object_t,
        uintptr_t,
        vm_size_t,
        int);
extern void device_pager_bootstrap(void);
extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);

extern kern_return_t pager_map_to_phys_contiguous(
        memory_object_control_t object,
        memory_object_offset_t offset,
        addr64_t base_vaddr,
        vm_size_t size);

extern kern_return_t memory_object_create_named(
        memory_object_t pager,
        memory_object_offset_t size,
        memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
        struct macx_triggers_args *args);

extern int macx_swapinfo(
        memory_object_size_t *total_p,
        memory_object_size_t *avail_p,
        vm_size_t *pagesize_p,
        boolean_t *encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
        vm_map_t map,
        vm_map_offset_t start_unnest,
        vm_map_offset_t end_unnest,
        boolean_t is_nested_map,
        vm_map_offset_t lowest_unnestable_addr);

struct proc;
struct proc *current_proc(void);
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

#define CS_VALIDATE_TAINTED 0x00000001
#define CS_VALIDATE_NX      0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
        memory_object_t pager,
        memory_object_offset_t offset,
        const void *data,
        vm_size_t size,
        unsigned *result);
#if PMAP_CS
extern kern_return_t cs_associate_blob_with_mapping(
        void *pmap,
        vm_map_offset_t start,
        vm_map_size_t size,
        vm_object_offset_t offset,
        void *blobs_p);
#endif /* PMAP_CS */

extern kern_return_t memory_entry_purgeable_control_internal(
        ipc_port_t entry_port,
        vm_purgable_t control,
        int *state);

extern kern_return_t memory_entry_access_tracking_internal(
        ipc_port_t entry_port,
        int *access_tracking,
        uint32_t *access_tracking_reads,
        uint32_t *access_tracking_writes);

extern kern_return_t mach_memory_entry_purgable_control(
        ipc_port_t entry_port,
        vm_purgable_t control,
        int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
        ipc_port_t entry_port,
        unsigned int *resident_page_count,
        unsigned int *dirty_page_count);

extern kern_return_t mach_memory_entry_page_op(
        ipc_port_t entry_port,
        vm_object_offset_t offset,
        int ops,
        ppnum_t *phys_entry,
        int *flags);

extern kern_return_t mach_memory_entry_range_op(
        ipc_port_t entry_port,
        vm_object_offset_t offset_beg,
        vm_object_offset_t offset_end,
        int ops,
        int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
        struct vm_named_entry **user_entry_p,
        ipc_port_t *user_handle_p);

extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

#define VM_TOGGLE_CLEAR    0
#define VM_TOGGLE_SET      1
#define VM_TOGGLE_GETVALUE 999
int vm_toggle_entry_reuse(int, int*);

#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define SWAP_READ  0x00000001 /* Read buffer. */
#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
        memory_object_size_t,
        memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc*, boolean_t);
#endif /* CONFIG_JETSAM */

/*
 * The object purger: purges the next eligible object from memory.
 * Returns TRUE if an object was purged, otherwise FALSE.
 */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
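
/*
 * Usage sketch (illustrative, not part of the original header): purge until
 * no eligible object remains; the group argument of 0 is only an example
 * value:
 *
 *      while (vm_purgeable_object_purge_one_unlocked(0) == TRUE) {
 *              continue;       // purged one object, try for another
 *      }
 */
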
void vm_purgeable_nonvolatile_owner_update(task_t owner,
        int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
        int delta);
void vm_owned_objects_disown(task_t task);


struct trim_list {
        uint64_t tl_offset;
        uint64_t tl_length;
        struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
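
/*
 * Usage sketch (illustrative, not part of the original header): build a small
 * singly linked list of extents and hand it to vnode_trim_list(); the offsets,
 * lengths, and route_only value are example values, and "vp" stands for a
 * struct vnode pointer the caller already holds:
 *
 *      struct trim_list tl1 = { .tl_offset = 0x100000, .tl_length = 0x2000, .tl_next = NULL };
 *      struct trim_list tl0 = { .tl_offset = 0x4000,   .tl_length = 0x1000, .tl_next = &tl1 };
 *      u_int32_t error;
 *
 *      error = vnode_trim_list(vp, &tl0, FALSE);
 */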

#define MAX_SWAPFILENAME_LEN   1024
#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

struct vm_counters {
        unsigned int do_collapse_compressor;
        unsigned int do_collapse_compressor_pages;
        unsigned int do_collapse_terminate;
        unsigned int do_collapse_terminate_failure;
        unsigned int should_cow_but_wired;
        unsigned int create_upl_extra_cow;
        unsigned int create_upl_extra_cow_pages;
        unsigned int create_upl_lookup_failure_write;
        unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
        int eligible_for_secluded;
        int grab_success_free;
        int grab_success_other;
        int grab_failure_locked;
        int grab_failure_state;
        int grab_failure_dirty;
        int grab_for_iokit;
        int grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

/* boot-args */
extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
#if 11
extern int secluded_for_fbdp;
#endif

extern uint64_t vm_page_secluded_drain(void);
extern void memory_object_mark_eligible_for_secluded(
        memory_object_control_t control,
        boolean_t eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */

extern kern_return_t mach_make_memory_entry_internal(
        vm_map_t target_map,
        memory_object_size_t *size,
        memory_object_offset_t offset,
        vm_prot_t permission,
        vm_named_entry_kernel_flags_t vmne_kflags,
        ipc_port_t *object_handle,
        ipc_port_t parent_handle);

#define roundup(x, y) ((((x) % (y)) == 0) ? \
        (x) : ((x) + ((y) - ((x) % (y)))))
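
/*
 * Worked examples (illustrative, not part of the original header): roundup()
 * rounds x up to the next multiple of y and leaves exact multiples unchanged:
 *
 *      roundup(10, 4)          == 12
 *      roundup(8, 4)           == 8
 *      roundup(0x2f00, 0x1000) == 0x3000
 */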

#ifdef __cplusplus
}
#endif

/*
 * Flags for the VM swapper/reclaimer.
 * Used by vm_swap_consider_defragment() to force
 * a defrag/reclaim pass by the swap GC thread.
 */
#define VM_SWAP_FLAGS_NONE          0
#define VM_SWAP_FLAGS_FORCE_DEFRAG  1
#define VM_SWAP_FLAGS_FORCE_RECLAIM 2
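
/*
 * Usage sketch (illustrative; vm_swap_consider_defragment() is referenced in
 * the comment above but its signature is not declared in this header, so the
 * call shape below is an assumption): a caller forcing a defragmentation pass
 * might do
 *
 *      vm_swap_consider_defragment(VM_SWAP_FLAGS_FORCE_DEFRAG);
 *
 * while VM_SWAP_FLAGS_NONE leaves the decision to the GC thread's own
 * heuristics.
 */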

#if __arm64__
/*
 * Flags to control the behavior of
 * the legacy footprint entitlement.
 */
#define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE         (1)
#define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT     (2)
#define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE (3)

#endif /* __arm64__ */

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */