/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */

/*
 * iokit
 */
extern kern_return_t device_data_action(
    uintptr_t device_handle,
    ipc_port_t device_pager,
    vm_prot_t protection,
    vm_object_offset_t offset,
    vm_size_t size);

extern kern_return_t device_close(
    uintptr_t device_handle);

extern boolean_t vm_swap_files_pinned(void);

/*
 * osfmk
 */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
    ipc_port_t sright,
    ipc_space_t space);
extern task_t port_name_to_task(
    mach_port_name_t name);
extern task_t port_name_to_task_inspect(
    mach_port_name_t name);
extern void ipc_port_release_send(
    ipc_port_t port);
#endif /* _IPC_IPC_PORT_H_ */

extern ipc_space_t get_task_ipcspace(
    task_t t);

#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */

/* Some loose-ends VM stuff */

extern vm_map_t kalloc_map;
extern vm_size_t msg_ool_size_small;
extern vm_map_t zone_map;

extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern kern_return_t vm_map_purgable_control(
    vm_map_t map,
    vm_map_offset_t address,
    vm_purgable_t control,
    int *state);
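
/*
 * Usage sketch (example only): a kernel-internal caller can mark a purgeable
 * mapping volatile through vm_map_purgable_control().  VM_PURGABLE_SET_STATE
 * and VM_PURGABLE_VOLATILE come from <mach/vm_purgable.h>; the address must
 * lie in a purgeable VM object for the request to succeed.
 */
#if 0 /* usage sketch, not compiled */
static kern_return_t
example_mark_volatile(vm_map_t map, vm_map_offset_t addr)
{
    int state = VM_PURGABLE_VOLATILE;

    return vm_map_purgable_control(map, addr,
        VM_PURGABLE_SET_STATE, &state);
}
#endif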

#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
#endif

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
    vm_map_t target_task,
    upl_t upl,
    vm_address_t *address
);

extern kern_return_t vm_upl_unmap
(
    vm_map_t target_task,
    upl_t upl
);
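
/*
 * Usage sketch (example only): vm_upl_map() and vm_upl_unmap() are used as a
 * pair -- map a UPL that was created elsewhere into a map, work through the
 * returned address, then unmap it.
 */
#if 0 /* usage sketch, not compiled */
static kern_return_t
example_with_upl_mapped(vm_map_t map, upl_t upl)
{
    vm_address_t addr = 0;
    kern_return_t kr;

    kr = vm_upl_map(map, upl, &addr);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    /* ... access the UPL's pages through addr ... */
    return vm_upl_unmap(map, upl);
}
#endif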

extern kern_return_t vm_region_object_create
(
    vm_map_t target_task,
    vm_size_t size,
    ipc_port_t *object_handle
);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end);
#endif /* CONFIG_CODE_DECRYPTION */

struct vnode;
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);

#if __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS))
#define SIXTEENK_PAGE_SIZE 0x4000
#define SIXTEENK_PAGE_MASK 0x3FFF
#define SIXTEENK_PAGE_SHIFT 14
#endif /* __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) */
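
/*
 * The three constants above are related: SIXTEENK_PAGE_SIZE equals
 * 1 << SIXTEENK_PAGE_SHIFT (0x4000), and SIXTEENK_PAGE_MASK equals
 * SIXTEENK_PAGE_SIZE - 1, so an offset can be rounded up to a 16K boundary
 * with (off + SIXTEENK_PAGE_MASK) & ~(vm_object_offset_t)SIXTEENK_PAGE_MASK.
 */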

#if __arm64__
#define FOURK_PAGE_SIZE 0x1000
#define FOURK_PAGE_MASK 0xFFF
#define FOURK_PAGE_SHIFT 12

extern unsigned int page_shift_user32;

#define VM_MAP_DEBUG_FOURK MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
extern void fourk_pager_bootstrap(void);
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
    memory_object_t mem_obj,
    boolean_t overwrite,
    int index,
    vm_object_t new_backing_object,
    vm_object_offset_t new_backing_offset,
    vm_object_t *old_backing_object,
    vm_object_offset_t *old_backing_offset);
#endif /* __arm64__ */

/*
 * bsd
 */
struct vnode;
extern void *upl_get_internal_page_list(
    upl_t upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
    struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
    struct vnode *);
extern uint32_t vnode_pager_isinuse(
    struct vnode *);
extern boolean_t vnode_pager_isSSD(
    struct vnode *);
extern void vnode_pager_throttle(
    void);
extern uint32_t vnode_pager_return_throttle_io_limit(
    struct vnode *,
    uint32_t *);
extern kern_return_t vnode_pager_get_name(
    struct vnode *vp,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
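
/*
 * Usage sketch (example only, buffer sizes are arbitrary): retrieving the
 * path and file name associated with a vnode; the routine reports whether
 * the path had to be truncated to fit.
 */
#if 0 /* usage sketch, not compiled */
static void
example_log_vnode_name(struct vnode *vp)
{
    char path[1024];
    char name[256];
    boolean_t truncated = FALSE;

    if (vnode_pager_get_name(vp, path, sizeof(path),
            name, sizeof(name), &truncated) == KERN_SUCCESS) {
        /* ... use path and name, noting whether "truncated" is set ... */
    }
}
#endif
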
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
    struct vnode *vp,
    struct timespec *mtime,
    struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
    struct vnode *vp,
    void **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
    struct vnode *devvp,
    uint64_t blkno,
    uint32_t len,
    int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_validation_bitmap routine */
#define CS_BITMAP_SET 1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3

#endif /* CHECK_CS_VALIDATION_BITMAP */

extern void vnode_pager_bootstrap(void);
extern kern_return_t
vnode_pager_data_unlock(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
    memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
    memory_object_t,
    memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
    memory_object_t,
    uintptr_t *);
#endif

extern kern_return_t vnode_pager_get_isinuse(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
    memory_object_t,
    boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
    memory_object_t mem_obj,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
    memory_object_t mem_obj,
    struct timespec *mtime,
    struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t ubc_cs_check_validation_bitmap(
    struct vnode *vp,
    memory_object_offset_t offset,
    int optype);
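
/*
 * Usage sketch (example only): when CHECK_CS_VALIDATION_BITMAP is configured,
 * the CS_BITMAP_* values defined earlier select the operation for both bitmap
 * routines.  The return-code semantics used here (KERN_SUCCESS when the bit
 * for the offset is set) are an assumption of this sketch.
 */
#if 0 /* usage sketch, not compiled */
static boolean_t
example_page_was_validated(struct vnode *vp, memory_object_offset_t off)
{
    return ubc_cs_check_validation_bitmap(vp, off, CS_BITMAP_CHECK)
        == KERN_SUCCESS;
}
#endif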

extern kern_return_t vnode_pager_data_request(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t vnode_pager_data_initialize(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern void vnode_pager_reference(
    memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
extern kern_return_t vnode_pager_map(
    memory_object_t mem_obj,
    vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
    memory_object_t mem_obj);
extern void vnode_pager_deallocate(
    memory_object_t);
extern kern_return_t vnode_pager_terminate(
    memory_object_t);
extern void vnode_pager_vrele(
    struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
    memory_object_t);

extern int ubc_map(
    struct vnode *vp,
    int flags);
extern void ubc_unmap(
    struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
    memory_object_t device,
    memory_object_offset_t offset,
    ppnum_t page_num,
    vm_size_t size);
extern memory_object_t device_pager_setup(
    memory_object_t,
    uintptr_t,
    vm_size_t,
    int);
extern void device_pager_bootstrap(void);
extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);

extern kern_return_t pager_map_to_phys_contiguous(
    memory_object_control_t object,
    memory_object_offset_t offset,
    addr64_t base_vaddr,
    vm_size_t size);

extern kern_return_t memory_object_create_named(
    memory_object_t pager,
    memory_object_offset_t size,
    memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
    struct macx_triggers_args *args);

extern int macx_swapinfo(
    memory_object_size_t *total_p,
    memory_object_size_t *avail_p,
    vm_size_t *pagesize_p,
    boolean_t *encrypted_p);
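
/*
 * Usage sketch (example only): macx_swapinfo() reports total and available
 * swap figures plus the swap page size, so the amount of swap in use is
 * simply total minus available (in whatever units the implementation uses).
 */
#if 0 /* usage sketch, not compiled */
static memory_object_size_t
example_swap_in_use(void)
{
    memory_object_size_t total = 0, avail = 0;
    vm_size_t pagesize = 0;
    boolean_t encrypted = FALSE;

    if (macx_swapinfo(&total, &avail, &pagesize, &encrypted) != 0) {
        return 0;
    }
    return total - avail;
}
#endif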

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
    vm_map_t map,
    vm_map_offset_t start_unnest,
    vm_map_offset_t end_unnest,
    boolean_t is_nested_map,
    vm_map_offset_t lowest_unnestable_addr);

struct proc;
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

#define CS_VALIDATE_TAINTED 0x00000001
#define CS_VALIDATE_NX 0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    vm_size_t size,
    unsigned *result);
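
/*
 * Usage sketch (example only): a caller of cs_validate_range() checks the
 * CS_VALIDATE_* bits returned in "result" -- here, treating a range as clean
 * only if validation succeeded and the tainted bit is clear.
 */
#if 0 /* usage sketch, not compiled */
static boolean_t
example_range_is_clean(struct vnode *vp, memory_object_t pager,
    memory_object_offset_t off, const void *data, vm_size_t size)
{
    unsigned result = 0;

    if (!cs_validate_range(vp, pager, off, data, size, &result)) {
        return FALSE; /* validation itself failed */
    }
    return (result & CS_VALIDATE_TAINTED) == 0;
}
#endif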

extern kern_return_t memory_entry_purgeable_control_internal(
    ipc_port_t entry_port,
    vm_purgable_t control,
    int *state);

extern kern_return_t mach_memory_entry_purgable_control(
    ipc_port_t entry_port,
    vm_purgable_t control,
    int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
    ipc_port_t entry_port,
    unsigned int *resident_page_count,
    unsigned int *dirty_page_count);
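
/*
 * Usage sketch (example only): querying how many pages backing a named
 * memory entry are resident and how many of those are dirty.
 */
#if 0 /* usage sketch, not compiled */
static boolean_t
example_entry_has_dirty_pages(ipc_port_t entry_port)
{
    unsigned int resident = 0;
    unsigned int dirty = 0;

    if (mach_memory_entry_get_page_counts(entry_port,
            &resident, &dirty) != KERN_SUCCESS) {
        return FALSE;
    }
    return dirty != 0;
}
#endif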

extern kern_return_t mach_memory_entry_page_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset,
    int ops,
    ppnum_t *phys_entry,
    int *flags);

extern kern_return_t mach_memory_entry_range_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset_beg,
    vm_object_offset_t offset_end,
    int ops,
    int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
    struct vm_named_entry **user_entry_p,
    ipc_port_t *user_handle_p);

extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

#define VM_TOGGLE_CLEAR 0
#define VM_TOGGLE_SET 1
#define VM_TOGGLE_GETVALUE 999
int vm_toggle_entry_reuse(int, int*);
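
/*
 * Usage sketch (example only): the VM_TOGGLE_* values above are passed as the
 * first argument of vm_toggle_entry_reuse().  This sketch assumes that
 * VM_TOGGLE_GETVALUE reports the current setting through the int pointer
 * without changing it.
 */
#if 0 /* usage sketch, not compiled */
static int
example_query_entry_reuse(void)
{
    int current = 0;

    (void) vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &current);
    return current;
}
#endif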

#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define SWAP_READ 0x00000001 /* Read buffer. */
#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
    memory_object_size_t,
    memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc*, boolean_t);
#endif /* CONFIG_JETSAM */

/* the object purger. purges the next eligible object from memory. */
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
void vm_purgeable_disown(task_t task);

struct trim_list {
    uint64_t tl_offset;
    uint64_t tl_length;
    struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
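
/*
 * Usage sketch (example only, extent values are placeholders): callers chain
 * trim_list nodes (offset/length extents) together and pass the head of the
 * list to vnode_trim_list().
 */
#if 0 /* usage sketch, not compiled */
static void
example_trim_two_extents(struct vnode *vp)
{
    struct trim_list second = { 0x20000, 0x4000, NULL };
    struct trim_list first  = { 0x00000, 0x8000, &second };

    (void) vnode_trim_list(vp, &first, FALSE);
}
#endif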

#define MAX_SWAPFILENAME_LEN 1024
#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

struct vm_counters {
    unsigned int do_collapse_compressor;
    unsigned int do_collapse_compressor_pages;
    unsigned int do_collapse_terminate;
    unsigned int do_collapse_terminate_failure;
    unsigned int should_cow_but_wired;
    unsigned int create_upl_extra_cow;
    unsigned int create_upl_extra_cow_pages;
    unsigned int create_upl_lookup_failure_write;
    unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
    int eligible_for_secluded;
    int grab_success_free;
    int grab_success_other;
    int grab_failure_locked;
    int grab_failure_state;
    int grab_failure_dirty;
    int grab_for_iokit;
    int grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

/* boot-args */
extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
#if 11
extern int secluded_for_fbdp;
#endif

extern void memory_object_mark_eligible_for_secluded(
    memory_object_control_t control,
    boolean_t eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */

#ifdef __cplusplus
}
#endif

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */