/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */

/*
 * iokit
 */
extern kern_return_t device_data_action(
    uintptr_t device_handle,
    ipc_port_t device_pager,
    vm_prot_t protection,
    vm_object_offset_t offset,
    vm_size_t size);

extern kern_return_t device_close(
    uintptr_t device_handle);

extern boolean_t vm_swap_files_pinned(void);

/*
 * osfmk
 */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
    ipc_port_t sright,
    ipc_space_t space);
extern task_t port_name_to_task(
    mach_port_name_t name);
extern task_t port_name_to_task_inspect(
    mach_port_name_t name);
#endif /* _IPC_IPC_PORT_H_ */

extern ipc_space_t get_task_ipcspace(
    task_t t);

#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */

/* Some loose-ends VM stuff */

extern vm_map_t kalloc_map;
extern vm_size_t msg_ool_size_small;
extern vm_map_t zone_map;

extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
#endif

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
    vm_map_t target_task,
    upl_t upl,
    vm_address_t *address
);

extern kern_return_t vm_upl_unmap
(
    vm_map_t target_task,
    upl_t upl
);

extern kern_return_t vm_region_object_create
(
    vm_map_t target_task,
    vm_size_t size,
    ipc_port_t *object_handle
);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end);
#endif /* CONFIG_CODE_DECRYPTION */

struct vnode;
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);

#if __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS))
#define SIXTEENK_PAGE_SIZE 0x4000
#define SIXTEENK_PAGE_MASK 0x3FFF
#define SIXTEENK_PAGE_SHIFT 14
#endif /* __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) */
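
/*
 * Editor's note (illustrative, not part of the original header): the three
 * constants above are mutually consistent, with SIXTEENK_PAGE_SIZE ==
 * (1 << SIXTEENK_PAGE_SHIFT) and SIXTEENK_PAGE_MASK == (SIXTEENK_PAGE_SIZE - 1),
 * so the usual truncate/round-up idioms apply to a hypothetical
 * vm_map_offset_t addr:
 *
 *     trunc to 16K:  addr & ~((vm_map_offset_t)SIXTEENK_PAGE_MASK)
 *     round up:      (addr + SIXTEENK_PAGE_MASK) & ~((vm_map_offset_t)SIXTEENK_PAGE_MASK)
 */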


/*
 * bsd
 */
struct vnode;
extern void *upl_get_internal_page_list(
    upl_t upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
    struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
    struct vnode *);
extern uint32_t vnode_pager_isinuse(
    struct vnode *);
extern boolean_t vnode_pager_isSSD(
    struct vnode *);
extern void vnode_pager_throttle(
    void);
extern uint32_t vnode_pager_return_throttle_io_limit(
    struct vnode *,
    uint32_t *);
extern kern_return_t vnode_pager_get_name(
    struct vnode *vp,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
    struct vnode *vp,
    struct timespec *mtime,
    struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
    struct vnode *vp,
    void **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
    struct vnode *devvp,
    uint64_t blkno,
    uint32_t len,
    int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* operation codes used by the vnode_pager_cs_check_validation_bitmap routine */
#define CS_BITMAP_SET 1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3

#endif /* CHECK_CS_VALIDATION_BITMAP */

extern void vnode_pager_bootstrap(void);
extern kern_return_t
vnode_pager_data_unlock(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
    memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
    memory_object_t,
    memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
    memory_object_t,
    uintptr_t *);
#endif

extern kern_return_t vnode_pager_get_isinuse(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
    memory_object_t,
    boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
    memory_object_t mem_obj,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
    memory_object_t mem_obj,
    struct timespec *mtime,
    struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */
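
/*
 * Editor's usage sketch (illustrative; "mem_obj" and "page_offset" are
 * hypothetical caller-side values, and the KERN_SUCCESS-on-hit convention
 * is an assumption, not confirmed by this header): with
 * CHECK_CS_VALIDATION_BITMAP configured, a caller could record a cleanly
 * validated page and test for it later using the CS_BITMAP_* op codes:
 *
 *     (void) vnode_pager_cs_check_validation_bitmap(mem_obj, page_offset,
 *         CS_BITMAP_SET);
 *     ...
 *     if (vnode_pager_cs_check_validation_bitmap(mem_obj, page_offset,
 *             CS_BITMAP_CHECK) == KERN_SUCCESS) {
 *         // page was previously marked as validated
 *     }
 */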

extern kern_return_t ubc_cs_check_validation_bitmap(
    struct vnode *vp,
    memory_object_offset_t offset,
    int optype);

extern kern_return_t vnode_pager_data_request(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t vnode_pager_data_initialize(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern void vnode_pager_reference(
    memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
extern kern_return_t vnode_pager_map(
    memory_object_t mem_obj,
    vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
    memory_object_t mem_obj);
extern void vnode_pager_deallocate(
    memory_object_t);
extern kern_return_t vnode_pager_terminate(
    memory_object_t);
extern void vnode_pager_vrele(
    struct vnode *vp);
extern void vnode_pager_release_from_cache(
    int *);
extern struct vnode *vnode_pager_lookup_vnode(
    memory_object_t);

extern int ubc_map(
    struct vnode *vp,
    int flags);
extern void ubc_unmap(
    struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
    memory_object_t device,
    memory_object_offset_t offset,
    ppnum_t page_num,
    vm_size_t size);
extern memory_object_t device_pager_setup(
    memory_object_t,
    uintptr_t,
    vm_size_t,
    int);
extern void device_pager_bootstrap(void);
extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);

extern kern_return_t pager_map_to_phys_contiguous(
    memory_object_control_t object,
    memory_object_offset_t offset,
    addr64_t base_vaddr,
    vm_size_t size);

extern kern_return_t memory_object_create_named(
    memory_object_t pager,
    memory_object_offset_t size,
    memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
    struct macx_triggers_args *args);

extern int macx_swapinfo(
    memory_object_size_t *total_p,
    memory_object_size_t *avail_p,
    vm_size_t *pagesize_p,
    boolean_t *encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
    vm_map_t map,
    vm_map_offset_t start_unnest,
    vm_map_offset_t end_unnest,
    boolean_t is_nested_map,
    vm_map_offset_t lowest_unnestable_addr);

struct proc;
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

#define CS_VALIDATE_TAINTED 0x00000001
#define CS_VALIDATE_NX 0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    vm_size_t size,
    unsigned *result);
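
/*
 * Editor's usage sketch (illustrative; variable names are hypothetical):
 * cs_validate_range() returns its overall verdict as a boolean and reports
 * per-range findings through the "result" out-parameter as a bitmask of
 * the CS_VALIDATE_* flags defined above:
 *
 *     unsigned cs_result = 0;
 *     boolean_t validated = cs_validate_range(vp, pager, offset,
 *         data, size, &cs_result);
 *     if (validated && (cs_result & CS_VALIDATE_TAINTED)) {
 *         // assumption: a validated-but-tainted page gets special handling
 *     }
 */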

extern kern_return_t mach_memory_entry_purgable_control(
    ipc_port_t entry_port,
    vm_purgable_t control,
    int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
    ipc_port_t entry_port,
    unsigned int *resident_page_count,
    unsigned int *dirty_page_count);

extern kern_return_t mach_memory_entry_page_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset,
    int ops,
    ppnum_t *phys_entry,
    int *flags);

extern kern_return_t mach_memory_entry_range_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset_beg,
    vm_object_offset_t offset_end,
    int ops,
    int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
    struct vm_named_entry **user_entry_p,
    ipc_port_t *user_handle_p);

extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

#define VM_TOGGLE_CLEAR 0
#define VM_TOGGLE_SET 1
#define VM_TOGGLE_GETVALUE 999
int vm_toggle_entry_reuse(int, int *);
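
/*
 * Editor's usage sketch (illustrative; the NULL-for-set convention is a
 * guess, not confirmed by this header): VM_TOGGLE_GETVALUE reads the
 * current setting through the int pointer, while VM_TOGGLE_SET and
 * VM_TOGGLE_CLEAR change it:
 *
 *     int cur = 0;
 *     (void) vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &cur);
 *     if (cur == VM_TOGGLE_CLEAR) {
 *         (void) vm_toggle_entry_reuse(VM_TOGGLE_SET, NULL);
 *     }
 */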

#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define SWAP_READ 0x00000001 /* Read buffer. */
#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
    memory_object_size_t,
    memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc *, boolean_t);
#endif /* CONFIG_JETSAM */

/*
 * The object purger: purges the next eligible object from memory.
 * Returns TRUE if an object was purged, otherwise FALSE.
 */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
void vm_purgeable_disown(task_t task);
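
/*
 * Editor's usage sketch (illustrative; purging group 0 and below is a
 * hypothetical policy choice): per the purger comment above, the routine
 * returns TRUE only when it actually purged an object, so a caller can
 * loop until it reports no further progress:
 *
 *     while (vm_purgeable_object_purge_one_unlocked(0) == TRUE) {
 *         continue;  // keep going while eligible objects remain
 *     }
 */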

struct trim_list {
    uint64_t tl_offset;
    uint64_t tl_length;
    struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);

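/*
 * Editor's usage sketch (illustrative; the stack-allocated nodes, the
 * extent values, and the route_only == FALSE choice are all assumptions):
 * discontiguous trim extents are chained through tl_next and submitted
 * with a single call:
 *
 *     struct trim_list tail = { .tl_offset = 0x100000, .tl_length = 0x10000,
 *         .tl_next = NULL };
 *     struct trim_list head = { .tl_offset = 0x4000, .tl_length = 0x8000,
 *         .tl_next = &tail };
 *     (void) vnode_trim_list(vp, &head, FALSE);
 */
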
#define MAX_SWAPFILENAME_LEN 1024
#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NUL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

struct vm_counters {
    unsigned int do_collapse_compressor;
    unsigned int do_collapse_compressor_pages;
    unsigned int do_collapse_terminate;
    unsigned int do_collapse_terminate_failure;
    unsigned int should_cow_but_wired;
    unsigned int create_upl_extra_cow;
    unsigned int create_upl_extra_cow_pages;
    unsigned int create_upl_lookup_failure_write;
    unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
    int eligible_for_secluded;
    int grab_success_free;
    int grab_success_other;
    int grab_failure_locked;
    int grab_failure_state;
    int grab_failure_dirty;
    int grab_for_iokit;
    int grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

/* boot-args */
extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
#if 11
extern int secluded_for_fbdp;
#endif

/*
 * "secluded_aging_policy" controls the aging of secluded pages:
 *
 * SECLUDED_AGING_FIFO
 *     When a page eligible for the secluded queue is activated or
 *     deactivated, it is inserted in the secluded queue.
 *     When it gets pushed out of the secluded queue, it gets freed.
 *
 * SECLUDED_AGING_ALONG_ACTIVE
 *     When a page eligible for the secluded queue is activated, it is
 *     inserted in the secluded queue.
 *     When it gets pushed out of the secluded queue, its "referenced" bit
 *     is reset and it is inserted in the inactive queue.
 *
 * SECLUDED_AGING_AFTER_INACTIVE
 *     A page eligible for the secluded queue first makes its way through the
 *     active and inactive queues.
 *     When it is pushed out of the inactive queue without being re-activated,
 *     it is inserted in the secluded queue instead of being reclaimed.
 *     When it is pushed out of the secluded queue, it is either freed if it
 *     hasn't been re-referenced, or re-activated if it has been re-referenced.
 *
 * SECLUDED_AGING_BEFORE_ACTIVE
 *     A page eligible for the secluded queue will first make its way through
 *     the secluded queue. When it gets pushed out of the secluded queue (by
 *     new secluded pages), it goes back to the normal aging path, through the
 *     active queue and then the inactive queue.
 */
extern int secluded_aging_policy;
#define SECLUDED_AGING_FIFO 0
#define SECLUDED_AGING_ALONG_ACTIVE 1
#define SECLUDED_AGING_AFTER_INACTIVE 2
#define SECLUDED_AGING_BEFORE_ACTIVE 3

extern void memory_object_mark_eligible_for_secluded(
    memory_object_control_t control,
    boolean_t eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

#ifdef __cplusplus
}
#endif

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */