]> git.saurik.com Git - apple/xnu.git/blame - osfmk/vm/vm_protos.h
xnu-3789.21.4.tar.gz
[apple/xnu.git] / osfmk / vm / vm_protos.h
CommitLineData
91447636 1/*
2d21ac55 2 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
91447636 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
91447636 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
91447636
A
27 */
28
29#ifdef XNU_KERNEL_PRIVATE
30
31#ifndef _VM_VM_PROTOS_H_
32#define _VM_VM_PROTOS_H_
33
34#include <mach/mach_types.h>
35#include <kern/kern_types.h>
36
39037602
A
37#ifdef __cplusplus
38extern "C" {
39#endif
40
91447636
A
41/*
42 * This file contains various type definitions and routine prototypes
43 * that are needed to avoid compilation warnings for VM code (in osfmk,
44 * default_pager and bsd).
45 * Most of these should eventually go into more appropriate header files.
46 *
47 * Include it after all other header files since it doesn't include any
48 * type definitions and it works around some conflicts with other header
49 * files.
50 */
51
52/*
53 * iokit
54 */
55extern kern_return_t device_data_action(
b0d623f7 56 uintptr_t device_handle,
91447636
A
57 ipc_port_t device_pager,
58 vm_prot_t protection,
59 vm_object_offset_t offset,
60 vm_size_t size);
61
62extern kern_return_t device_close(
b0d623f7 63 uintptr_t device_handle);
91447636 64
39037602 65extern boolean_t vm_swap_files_pinned(void);
91447636
A
66
67/*
68 * osfmk
69 */
91447636
A
70#ifndef _IPC_IPC_PORT_H_
71extern mach_port_name_t ipc_port_copyout_send(
72 ipc_port_t sright,
73 ipc_space_t space);
74extern task_t port_name_to_task(
75 mach_port_name_t name);
76#endif /* _IPC_IPC_PORT_H_ */
77
78extern ipc_space_t get_task_ipcspace(
79 task_t t);
80
39037602 81#if CONFIG_MEMORYSTATUS
3e170ce0 82extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
39037602 83#endif /* CONFIG_MEMORYSTATUS */
3e170ce0 84
91447636
A
85/* Some loose-ends VM stuff */
86
87extern vm_map_t kalloc_map;
88extern vm_size_t msg_ool_size_small;
89extern vm_map_t zone_map;
90
91extern void consider_machine_adjust(void);
91447636
A
92extern vm_map_offset_t get_map_min(vm_map_t);
93extern vm_map_offset_t get_map_max(vm_map_t);
94extern vm_map_size_t get_vmmap_size(vm_map_t);
39037602 95#if CONFIG_COREDUMP
91447636 96extern int get_vmmap_entries(vm_map_t);
39037602
A
97#endif
98extern int get_map_nentries(vm_map_t);
91447636 99
3e170ce0 100extern vm_map_offset_t vm_map_page_mask(vm_map_t);
39236c6e 101
39037602 102#if CONFIG_COREDUMP
91447636 103extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
39037602 104#endif
91447636
A
105
106/*
107 * VM routines that used to be published to
108 * user space, and are now restricted to the kernel.
109 *
110 * They should eventually go away entirely -
111 * to be replaced with standard vm_map() and
112 * vm_deallocate() calls.
113 */
114
115extern kern_return_t vm_upl_map
116(
117 vm_map_t target_task,
118 upl_t upl,
119 vm_address_t *address
120);
121
122extern kern_return_t vm_upl_unmap
123(
124 vm_map_t target_task,
125 upl_t upl
126);
127
128extern kern_return_t vm_region_object_create
129(
130 vm_map_t target_task,
131 vm_size_t size,
132 ipc_port_t *object_handle
133);
134
135extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
136extern mach_vm_offset_t mach_get_vm_end(vm_map_t);
137
593a1d5f 138#if CONFIG_CODE_DECRYPTION
3e170ce0 139#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
39037602
A
140#if VM_MAP_DEBUG_APPLE_PROTECT
141extern int vm_map_debug_apple_protect;
142#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
593a1d5f 143struct pager_crypt_info;
0c530ab8 144extern kern_return_t vm_map_apple_protected(
3e170ce0
A
145 vm_map_t map,
146 vm_map_offset_t start,
147 vm_map_offset_t end,
148 vm_object_offset_t crypto_backing_offset,
149 struct pager_crypt_info *crypt_info);
0c530ab8 150extern void apple_protect_pager_bootstrap(void);
3e170ce0
A
151extern memory_object_t apple_protect_pager_setup(
152 vm_object_t backing_object,
153 vm_object_offset_t backing_offset,
154 vm_object_offset_t crypto_backing_offset,
155 struct pager_crypt_info *crypt_info,
156 vm_object_offset_t crypto_start,
157 vm_object_offset_t crypto_end);
593a1d5f 158#endif /* CONFIG_CODE_DECRYPTION */
0c530ab8 159
b0d623f7
A
160struct vnode;
161extern void swapfile_pager_bootstrap(void);
162extern memory_object_t swapfile_pager_setup(struct vnode *vp);
163extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
164
3e170ce0
A
165#if __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS))
166#define SIXTEENK_PAGE_SIZE 0x4000
167#define SIXTEENK_PAGE_MASK 0x3FFF
168#define SIXTEENK_PAGE_SHIFT 14
169#endif /* __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) */
170
91447636
A
171
172/*
173 * bsd
174 */
175struct vnode;
91447636
A
176extern void *upl_get_internal_page_list(
177 upl_t upl);
b0d623f7 178
fe8ab488 179extern void vnode_setswapmount(struct vnode *);
3e170ce0 180extern int64_t vnode_getswappin_avail(struct vnode *);
fe8ab488 181
91447636
A
182typedef int pager_return_t;
183extern pager_return_t vnode_pagein(
184 struct vnode *, upl_t,
b0d623f7
A
185 upl_offset_t, vm_object_offset_t,
186 upl_size_t, int, int *);
91447636
A
187extern pager_return_t vnode_pageout(
188 struct vnode *, upl_t,
b0d623f7
A
189 upl_offset_t, vm_object_offset_t,
190 upl_size_t, int, int *);
6d2010ae 191extern uint32_t vnode_trim (struct vnode *, int64_t offset, unsigned long len);
91447636
A
192extern memory_object_t vnode_pager_setup(
193 struct vnode *, memory_object_t);
194extern vm_object_offset_t vnode_pager_get_filesize(
195 struct vnode *);
b0d623f7
A
196extern uint32_t vnode_pager_isinuse(
197 struct vnode *);
6d2010ae
A
198extern boolean_t vnode_pager_isSSD(
199 struct vnode *);
200extern void vnode_pager_throttle(
201 void);
39236c6e 202extern uint32_t vnode_pager_return_throttle_io_limit(
b0d623f7 203 struct vnode *,
39236c6e 204 uint32_t *);
15129b1c 205extern kern_return_t vnode_pager_get_name(
0c530ab8
A
206 struct vnode *vp,
207 char *pathname,
15129b1c
A
208 vm_size_t pathname_len,
209 char *filename,
210 vm_size_t filename_len,
211 boolean_t *truncated_path_p);
212struct timespec;
213extern kern_return_t vnode_pager_get_mtime(
0c530ab8 214 struct vnode *vp,
15129b1c
A
215 struct timespec *mtime,
216 struct timespec *cs_mtime);
2d21ac55
A
217extern kern_return_t vnode_pager_get_cs_blobs(
218 struct vnode *vp,
219 void **blobs);
6d2010ae 220
fe8ab488
A
221#if CONFIG_IOSCHED
222void vnode_pager_issue_reprioritize_io(
223 struct vnode *devvp,
224 uint64_t blkno,
225 uint32_t len,
226 int priority);
227#endif
228
6d2010ae
A
229#if CHECK_CS_VALIDATION_BITMAP
230/* used by the vnode_pager_cs_validation_bitmap routine*/
231#define CS_BITMAP_SET 1
232#define CS_BITMAP_CLEAR 2
233#define CS_BITMAP_CHECK 3
234
235#endif /* CHECK_CS_VALIDATION_BITMAP */
b0d623f7 236
39236c6e 237extern void vnode_pager_bootstrap(void);
91447636
A
238extern kern_return_t
239vnode_pager_data_unlock(
240 memory_object_t mem_obj,
241 memory_object_offset_t offset,
b0d623f7 242 memory_object_size_t size,
91447636
A
243 vm_prot_t desired_access);
244extern kern_return_t vnode_pager_init(
245 memory_object_t,
246 memory_object_control_t,
b0d623f7 247 memory_object_cluster_size_t);
91447636
A
248extern kern_return_t vnode_pager_get_object_size(
249 memory_object_t,
250 memory_object_offset_t *);
fe8ab488
A
251
252#if CONFIG_IOSCHED
253extern kern_return_t vnode_pager_get_object_devvp(
254 memory_object_t,
255 uintptr_t *);
256#endif
257
b0d623f7
A
258extern kern_return_t vnode_pager_get_isinuse(
259 memory_object_t,
260 uint32_t *);
6d2010ae
A
261extern kern_return_t vnode_pager_get_isSSD(
262 memory_object_t,
263 boolean_t *);
39236c6e 264extern kern_return_t vnode_pager_get_throttle_io_limit(
b0d623f7 265 memory_object_t,
39236c6e 266 uint32_t *);
15129b1c 267extern kern_return_t vnode_pager_get_object_name(
0c530ab8
A
268 memory_object_t mem_obj,
269 char *pathname,
15129b1c
A
270 vm_size_t pathname_len,
271 char *filename,
272 vm_size_t filename_len,
273 boolean_t *truncated_path_p);
274extern kern_return_t vnode_pager_get_object_mtime(
0c530ab8 275 memory_object_t mem_obj,
15129b1c
A
276 struct timespec *mtime,
277 struct timespec *cs_mtime);
6d2010ae
A
278
279#if CHECK_CS_VALIDATION_BITMAP
280extern kern_return_t vnode_pager_cs_check_validation_bitmap(
281 memory_object_t mem_obj,
282 memory_object_offset_t offset,
283 int optype);
284#endif /*CHECK_CS_VALIDATION_BITMAP*/
285
286extern kern_return_t ubc_cs_check_validation_bitmap (
287 struct vnode *vp,
288 memory_object_offset_t offset,
289 int optype);
290
91447636
A
291extern kern_return_t vnode_pager_data_request(
292 memory_object_t,
2d21ac55 293 memory_object_offset_t,
b0d623f7 294 memory_object_cluster_size_t,
2d21ac55
A
295 vm_prot_t,
296 memory_object_fault_info_t);
91447636
A
297extern kern_return_t vnode_pager_data_return(
298 memory_object_t,
299 memory_object_offset_t,
b0d623f7 300 memory_object_cluster_size_t,
91447636
A
301 memory_object_offset_t *,
302 int *,
303 boolean_t,
304 boolean_t,
305 int);
306extern kern_return_t vnode_pager_data_initialize(
307 memory_object_t,
308 memory_object_offset_t,
b0d623f7 309 memory_object_cluster_size_t);
91447636
A
310extern void vnode_pager_reference(
311 memory_object_t mem_obj);
312extern kern_return_t vnode_pager_synchronize(
313 memory_object_t mem_obj,
314 memory_object_offset_t offset,
b0d623f7 315 memory_object_size_t length,
91447636 316 vm_sync_t sync_flags);
593a1d5f
A
317extern kern_return_t vnode_pager_map(
318 memory_object_t mem_obj,
319 vm_prot_t prot);
320extern kern_return_t vnode_pager_last_unmap(
91447636
A
321 memory_object_t mem_obj);
322extern void vnode_pager_deallocate(
323 memory_object_t);
324extern kern_return_t vnode_pager_terminate(
325 memory_object_t);
326extern void vnode_pager_vrele(
327 struct vnode *vp);
328extern void vnode_pager_release_from_cache(
329 int *);
39037602
A
330extern struct vnode *vnode_pager_lookup_vnode(
331 memory_object_t);
332
593a1d5f
A
333extern int ubc_map(
334 struct vnode *vp,
335 int flags);
91447636
A
336extern void ubc_unmap(
337 struct vnode *vp);
338
b0d623f7
A
339struct vm_map_entry;
340extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);
341
91447636
A
342extern void device_pager_reference(memory_object_t);
343extern void device_pager_deallocate(memory_object_t);
344extern kern_return_t device_pager_init(memory_object_t,
345 memory_object_control_t,
b0d623f7 346 memory_object_cluster_size_t);
91447636
A
347extern kern_return_t device_pager_terminate(memory_object_t);
348extern kern_return_t device_pager_data_request(memory_object_t,
349 memory_object_offset_t,
b0d623f7 350 memory_object_cluster_size_t,
2d21ac55
A
351 vm_prot_t,
352 memory_object_fault_info_t);
91447636
A
353extern kern_return_t device_pager_data_return(memory_object_t,
354 memory_object_offset_t,
b0d623f7 355 memory_object_cluster_size_t,
0c530ab8
A
356 memory_object_offset_t *,
357 int *,
91447636
A
358 boolean_t,
359 boolean_t,
360 int);
361extern kern_return_t device_pager_data_initialize(memory_object_t,
362 memory_object_offset_t,
b0d623f7 363 memory_object_cluster_size_t);
91447636
A
364extern kern_return_t device_pager_data_unlock(memory_object_t,
365 memory_object_offset_t,
b0d623f7 366 memory_object_size_t,
91447636
A
367 vm_prot_t);
368extern kern_return_t device_pager_synchronize(memory_object_t,
369 memory_object_offset_t,
b0d623f7 370 memory_object_size_t,
91447636 371 vm_sync_t);
593a1d5f
A
372extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
373extern kern_return_t device_pager_last_unmap(memory_object_t);
91447636
A
374extern kern_return_t device_pager_populate_object(
375 memory_object_t device,
376 memory_object_offset_t offset,
377 ppnum_t page_num,
378 vm_size_t size);
379extern memory_object_t device_pager_setup(
380 memory_object_t,
b0d623f7 381 uintptr_t,
91447636
A
382 vm_size_t,
383 int);
39236c6e 384extern void device_pager_bootstrap(void);
39037602 385extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);
39236c6e
A
386
387extern kern_return_t pager_map_to_phys_contiguous(
388 memory_object_control_t object,
389 memory_object_offset_t offset,
390 addr64_t base_vaddr,
391 vm_size_t size);
91447636
A
392
393extern kern_return_t memory_object_create_named(
394 memory_object_t pager,
395 memory_object_offset_t size,
396 memory_object_control_t *control);
397
b0d623f7
A
398struct macx_triggers_args;
399extern int mach_macx_triggers(
400 struct macx_triggers_args *args);
91447636
A
401
402extern int macx_swapinfo(
403 memory_object_size_t *total_p,
404 memory_object_size_t *avail_p,
405 vm_size_t *pagesize_p,
406 boolean_t *encrypted_p);
407
2d21ac55 408extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
39037602
A
409extern void log_unnest_badness(
410 vm_map_t map,
411 vm_map_offset_t start_unnest,
412 vm_map_offset_t end_unnest,
413 boolean_t is_nested_map,
414 vm_map_offset_t lowest_unnestable_addr);
b0d623f7 415
39236c6e 416struct proc;
b0d623f7 417extern int cs_allow_invalid(struct proc *p);
39037602 418extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);
c18c124e
A
419
420#define CS_VALIDATE_TAINTED 0x00000001
421#define CS_VALIDATE_NX 0x00000002
39037602
A
422extern boolean_t cs_validate_range(struct vnode *vp,
423 memory_object_t pager,
424 memory_object_offset_t offset,
425 const void *data,
426 vm_size_t size,
427 unsigned *result);
2d21ac55
A
428
429extern kern_return_t mach_memory_entry_purgable_control(
430 ipc_port_t entry_port,
431 vm_purgable_t control,
432 int *state);
433
39236c6e
A
434extern kern_return_t mach_memory_entry_get_page_counts(
435 ipc_port_t entry_port,
436 unsigned int *resident_page_count,
437 unsigned int *dirty_page_count);
438
2d21ac55
A
439extern kern_return_t mach_memory_entry_page_op(
440 ipc_port_t entry_port,
441 vm_object_offset_t offset,
442 int ops,
443 ppnum_t *phys_entry,
444 int *flags);
445
446extern kern_return_t mach_memory_entry_range_op(
447 ipc_port_t entry_port,
448 vm_object_offset_t offset_beg,
449 vm_object_offset_t offset_end,
450 int ops,
451 int *range);
452
453extern void mach_memory_entry_port_release(ipc_port_t port);
454extern void mach_destroy_memory_entry(ipc_port_t port);
455extern kern_return_t mach_memory_entry_allocate(
456 struct vm_named_entry **user_entry_p,
457 ipc_port_t *user_handle_p);
458
459extern void vm_paging_map_init(void);
0c530ab8 460
b0d623f7
A
461extern int macx_backing_store_compaction(int flags);
462extern unsigned int mach_vm_ctl_page_free_wanted(void);
463
fe8ab488 464extern int no_paging_space_action(void);
6d2010ae
A
465
466#define VM_TOGGLE_CLEAR 0
467#define VM_TOGGLE_SET 1
468#define VM_TOGGLE_GETVALUE 999
469int vm_toggle_entry_reuse(int, int*);
39236c6e
A
470
471#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
472#define SWAP_READ 0x00000001 /* Read buffer. */
473#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */
474
475extern void vm_compressor_pager_init(void);
476extern kern_return_t compressor_memory_object_create(
22ba694c 477 memory_object_size_t,
39236c6e
A
478 memory_object_t *);
479
3e170ce0
A
480extern boolean_t vm_compressor_low_on_space(void);
481extern int vm_swap_low_on_space(void);
482
fe8ab488
A
483#if CONFIG_JETSAM
484extern int proc_get_memstat_priority(struct proc*, boolean_t);
485#endif /* CONFIG_JETSAM */
486
39236c6e
A
487/* the object purger. purges the next eligible object from memory. */
488/* returns TRUE if an object was purged, otherwise FALSE. */
489boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
fe8ab488 490void vm_purgeable_disown(task_t task);
39236c6e
A
491
/*
 * A single extent in a singly-linked list of regions to be trimmed,
 * consumed by vnode_trim_list() below (TRIM/UNMAP of swapfile extents).
 */
struct trim_list {
	uint64_t	tl_offset;	/* NOTE(review): presumably byte offset of the extent — confirm against vnode_trim_list() callers */
	uint64_t	tl_length;	/* NOTE(review): presumably extent length in bytes — confirm */
	struct trim_list *tl_next;	/* next extent in the list, or NULL at the tail */
};
497
fe8ab488
A
498u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
499
500#define MAX_SWAPFILENAME_LEN 1024
501#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */
502
503extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];
504
/*
 * Debug/telemetry counters for assorted VM events. The field names
 * indicate object-collapse outcomes (compressor path, terminate path)
 * and extra copy-on-write work during UPL creation; the counters are
 * maintained elsewhere in the VM code — this header only declares the
 * layout and the single global instance.
 */
struct vm_counters {
	unsigned int	do_collapse_compressor;		/* collapses resolved via the compressor */
	unsigned int	do_collapse_compressor_pages;	/* pages moved in those collapses */
	unsigned int	do_collapse_terminate;		/* collapses resolved by terminating the object */
	unsigned int	do_collapse_terminate_failure;	/* terminate-path collapses that failed */
	unsigned int	should_cow_but_wired;		/* COW wanted but skipped: pages wired */
	unsigned int	create_upl_extra_cow;		/* UPL creation forced an extra COW */
	unsigned int	create_upl_extra_cow_pages;	/* pages copied for those extra COWs */
	unsigned int	create_upl_lookup_failure_write;	/* UPL write-path lookup failures */
	unsigned int	create_upl_lookup_failure_copy;	/* UPL copy-path lookup failures */
};
/* Single global instance; defined in the VM implementation, not here. */
extern struct vm_counters vm_counters;
39236c6e 517
39037602
A
518#if CONFIG_SECLUDED_MEMORY
/*
 * Statistics for the secluded page pool (CONFIG_SECLUDED_MEMORY):
 * how many pages became eligible for seclusion, and the outcomes of
 * attempts to grab a secluded page (success from the free list or
 * elsewhere, plus the distinct failure reasons) including grabs made
 * on behalf of IOKit.
 * NOTE(review): field meanings inferred from names — confirm against
 * the code in osfmk/vm that updates vm_page_secluded.
 */
struct vm_page_secluded_data {
	int	eligible_for_secluded;	/* pages marked eligible for the secluded queue */
	int	grab_success_free;	/* grabs satisfied from the free list */
	int	grab_success_other;	/* grabs satisfied from some other queue */
	int	grab_failure_locked;	/* grab failed: page was locked */
	int	grab_failure_state;	/* grab failed: page in wrong state */
	int	grab_failure_dirty;	/* grab failed: page was dirty */
	int	grab_for_iokit;		/* grab attempts made for IOKit */
	int	grab_for_iokit_success;	/* ...and how many of those succeeded */
};
/* Single global instance; defined in the VM implementation, not here. */
extern struct vm_page_secluded_data vm_page_secluded;
530
531extern int num_tasks_can_use_secluded_mem;
532
533/* boot-args */
534extern int secluded_for_apps;
535extern int secluded_for_iokit;
536extern int secluded_for_filecache;
537#if 11
538extern int secluded_for_fbdp;
539#endif
540
541/*
542 * "secluded_aging_policy" controls the aging of secluded pages:
543 *
544 * SECLUDED_AGING_FIFO
545 * When a page eligible for the secluded queue is activated or
546 * deactivated, it is inserted in the secluded queue.
547 * When it get pushed out of the secluded queue, it gets freed.
548 *
549 * SECLUDED_AGING_ALONG_ACTIVE
550 * When a page eligible for the secluded queue is activated, it is
551 * inserted in the secluded queue.
552 * When it gets pushed out of the secluded queue, its "referenced" bit
553 * is reset and it is inserted in the inactive queue.
554 *
555 * SECLUDED_AGING_AFTER_INACTIVE
556 * A page eligible for the secluded queue first makes its way through the
557 * active and inactive queues.
558 * When it is pushed out of the inactive queue without being re-activated,
559 * it is inserted in the secluded queue instead of being reclaimed.
560 * When it is pushed out of the secluded queue, it is either freed if it
561 * hasn't been re-referenced, or re-activated if it has been re-referenced.
562 *
563 * SECLUDED_AGING_BEFORE_ACTIVE
564 * A page eligible for the secluded queue will first make its way through
565 * the secluded queue. When it gets pushed out of the secluded queue (by
566 * new secluded pages), it goes back to the normal aging path, through the
567 * active queue and then the inactive queue.
568 */
569extern int secluded_aging_policy;
570#define SECLUDED_AGING_FIFO 0
571#define SECLUDED_AGING_ALONG_ACTIVE 1
572#define SECLUDED_AGING_AFTER_INACTIVE 2
573#define SECLUDED_AGING_BEFORE_ACTIVE 3
574
575extern void memory_object_mark_eligible_for_secluded(
576 memory_object_control_t control,
577 boolean_t eligible_for_secluded);
578
579#endif /* CONFIG_SECLUDED_MEMORY */
580
581#ifdef __cplusplus
582}
583#endif
584
91447636
A
585#endif /* _VM_VM_PROTOS_H_ */
586
587#endif /* XNU_KERNEL_PRIVATE */