/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29 #ifdef XNU_KERNEL_PRIVATE
30
31 #ifndef _VM_VM_PROTOS_H_
32 #define _VM_VM_PROTOS_H_
33
34 #include <mach/mach_types.h>
35 #include <kern/kern_types.h>
36
37 #ifdef __cplusplus
38 extern "C" {
39 #endif
40
41 /*
42 * This file contains various type definitions and routine prototypes
43 * that are needed to avoid compilation warnings for VM code (in osfmk,
44 * default_pager and bsd).
45 * Most of these should eventually go into more appropriate header files.
46 *
47 * Include it after all other header files since it doesn't include any
48 * type definitions and it works around some conflicts with other header
49 * files.
50 */
51
52 /*
53 * iokit
54 */
55 extern kern_return_t device_data_action(
56 uintptr_t device_handle,
57 ipc_port_t device_pager,
58 vm_prot_t protection,
59 vm_object_offset_t offset,
60 vm_size_t size);
61
62 extern kern_return_t device_close(
63 uintptr_t device_handle);
64
65 extern boolean_t vm_swap_files_pinned(void);
66
67 /*
68 * osfmk
69 */
70 #ifndef _IPC_IPC_PORT_H_
71 extern mach_port_name_t ipc_port_copyout_send(
72 ipc_port_t sright,
73 ipc_space_t space);
74 extern task_t port_name_to_task(
75 mach_port_name_t name);
76 extern task_t port_name_to_task_inspect(
77 mach_port_name_t name);
78 extern void ipc_port_release_send(
79 ipc_port_t port);
80 #endif /* _IPC_IPC_PORT_H_ */
81
82 extern ipc_space_t get_task_ipcspace(
83 task_t t);
84
85 #if CONFIG_MEMORYSTATUS
86 extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
87 #endif /* CONFIG_MEMORYSTATUS */
88
89 /* Some loose-ends VM stuff */
90
91 extern vm_map_t kalloc_map;
92 extern vm_size_t msg_ool_size_small;
93 extern vm_map_t zone_map;
94
95 extern void consider_machine_adjust(void);
96 extern vm_map_offset_t get_map_min(vm_map_t);
97 extern vm_map_offset_t get_map_max(vm_map_t);
98 extern vm_map_size_t get_vmmap_size(vm_map_t);
99 #if CONFIG_COREDUMP
100 extern int get_vmmap_entries(vm_map_t);
101 #endif
102 extern int get_map_nentries(vm_map_t);
103
104 extern vm_map_offset_t vm_map_page_mask(vm_map_t);
105
106 extern kern_return_t vm_map_purgable_control(
107 vm_map_t map,
108 vm_map_offset_t address,
109 vm_purgable_t control,
110 int *state);
111
112 extern kern_return_t
113 vnode_pager_get_object_vnode(
114 memory_object_t mem_obj,
115 uintptr_t * vnodeaddr,
116 uint32_t * vid);
117
118 #if CONFIG_COREDUMP
119 extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
120 #endif
121
122 /*
123 * VM routines that used to be published to
124 * user space, and are now restricted to the kernel.
125 *
126 * They should eventually go away entirely -
127 * to be replaced with standard vm_map() and
128 * vm_deallocate() calls.
129 */
130
131 extern kern_return_t vm_upl_map
132 (
133 vm_map_t target_task,
134 upl_t upl,
135 vm_address_t *address
136 );
137
138 extern kern_return_t vm_upl_unmap
139 (
140 vm_map_t target_task,
141 upl_t upl
142 );
143
144 extern kern_return_t vm_region_object_create
145 (
146 vm_map_t target_task,
147 vm_size_t size,
148 ipc_port_t *object_handle
149 );
150
151 extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
152 extern mach_vm_offset_t mach_get_vm_end(vm_map_t);
153
154 #if CONFIG_CODE_DECRYPTION
155 #define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
156 #if VM_MAP_DEBUG_APPLE_PROTECT
157 extern int vm_map_debug_apple_protect;
158 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
159 struct pager_crypt_info;
160 extern kern_return_t vm_map_apple_protected(
161 vm_map_t map,
162 vm_map_offset_t start,
163 vm_map_offset_t end,
164 vm_object_offset_t crypto_backing_offset,
165 struct pager_crypt_info *crypt_info);
166 extern void apple_protect_pager_bootstrap(void);
167 extern memory_object_t apple_protect_pager_setup(
168 vm_object_t backing_object,
169 vm_object_offset_t backing_offset,
170 vm_object_offset_t crypto_backing_offset,
171 struct pager_crypt_info *crypt_info,
172 vm_object_offset_t crypto_start,
173 vm_object_offset_t crypto_end);
174 #endif /* CONFIG_CODE_DECRYPTION */
175
176 struct vnode;
177 extern void swapfile_pager_bootstrap(void);
178 extern memory_object_t swapfile_pager_setup(struct vnode *vp);
179 extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
180
181 #if __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS))
182 #define SIXTEENK_PAGE_SIZE 0x4000
183 #define SIXTEENK_PAGE_MASK 0x3FFF
184 #define SIXTEENK_PAGE_SHIFT 14
185 #endif /* __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) */
186
187 #if __arm64__
188 #define FOURK_PAGE_SIZE 0x1000
189 #define FOURK_PAGE_MASK 0xFFF
190 #define FOURK_PAGE_SHIFT 12
191
192 extern unsigned int page_shift_user32;
193
194 #define VM_MAP_DEBUG_FOURK MACH_ASSERT
195 #if VM_MAP_DEBUG_FOURK
196 extern int vm_map_debug_fourk;
197 #endif /* VM_MAP_DEBUG_FOURK */
198 extern void fourk_pager_bootstrap(void);
199 extern memory_object_t fourk_pager_create(void);
200 extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
201 extern kern_return_t fourk_pager_populate(
202 memory_object_t mem_obj,
203 boolean_t overwrite,
204 int index,
205 vm_object_t new_backing_object,
206 vm_object_offset_t new_backing_offset,
207 vm_object_t *old_backing_object,
208 vm_object_offset_t *old_backing_offset);
209 #endif /* __arm64__ */
210
211 /*
212 * bsd
213 */
214 struct vnode;
215 extern void *upl_get_internal_page_list(
216 upl_t upl);
217
218 extern void vnode_setswapmount(struct vnode *);
219 extern int64_t vnode_getswappin_avail(struct vnode *);
220
221 typedef int pager_return_t;
222 extern pager_return_t vnode_pagein(
223 struct vnode *, upl_t,
224 upl_offset_t, vm_object_offset_t,
225 upl_size_t, int, int *);
226 extern pager_return_t vnode_pageout(
227 struct vnode *, upl_t,
228 upl_offset_t, vm_object_offset_t,
229 upl_size_t, int, int *);
230 extern uint32_t vnode_trim (struct vnode *, int64_t offset, unsigned long len);
231 extern memory_object_t vnode_pager_setup(
232 struct vnode *, memory_object_t);
233 extern vm_object_offset_t vnode_pager_get_filesize(
234 struct vnode *);
235 extern uint32_t vnode_pager_isinuse(
236 struct vnode *);
237 extern boolean_t vnode_pager_isSSD(
238 struct vnode *);
239 extern void vnode_pager_throttle(
240 void);
241 extern uint32_t vnode_pager_return_throttle_io_limit(
242 struct vnode *,
243 uint32_t *);
244 extern kern_return_t vnode_pager_get_name(
245 struct vnode *vp,
246 char *pathname,
247 vm_size_t pathname_len,
248 char *filename,
249 vm_size_t filename_len,
250 boolean_t *truncated_path_p);
251 struct timespec;
252 extern kern_return_t vnode_pager_get_mtime(
253 struct vnode *vp,
254 struct timespec *mtime,
255 struct timespec *cs_mtime);
256 extern kern_return_t vnode_pager_get_cs_blobs(
257 struct vnode *vp,
258 void **blobs);
259
260 #if CONFIG_IOSCHED
261 void vnode_pager_issue_reprioritize_io(
262 struct vnode *devvp,
263 uint64_t blkno,
264 uint32_t len,
265 int priority);
266 #endif
267
268 #if CHECK_CS_VALIDATION_BITMAP
269 /* used by the vnode_pager_cs_validation_bitmap routine*/
270 #define CS_BITMAP_SET 1
271 #define CS_BITMAP_CLEAR 2
272 #define CS_BITMAP_CHECK 3
273
274 #endif /* CHECK_CS_VALIDATION_BITMAP */
275
276 extern void vnode_pager_bootstrap(void);
277 extern kern_return_t
278 vnode_pager_data_unlock(
279 memory_object_t mem_obj,
280 memory_object_offset_t offset,
281 memory_object_size_t size,
282 vm_prot_t desired_access);
283 extern kern_return_t vnode_pager_init(
284 memory_object_t,
285 memory_object_control_t,
286 memory_object_cluster_size_t);
287 extern kern_return_t vnode_pager_get_object_size(
288 memory_object_t,
289 memory_object_offset_t *);
290
291 #if CONFIG_IOSCHED
292 extern kern_return_t vnode_pager_get_object_devvp(
293 memory_object_t,
294 uintptr_t *);
295 #endif
296
297 extern kern_return_t vnode_pager_get_isinuse(
298 memory_object_t,
299 uint32_t *);
300 extern kern_return_t vnode_pager_get_isSSD(
301 memory_object_t,
302 boolean_t *);
303 extern kern_return_t vnode_pager_get_throttle_io_limit(
304 memory_object_t,
305 uint32_t *);
306 extern kern_return_t vnode_pager_get_object_name(
307 memory_object_t mem_obj,
308 char *pathname,
309 vm_size_t pathname_len,
310 char *filename,
311 vm_size_t filename_len,
312 boolean_t *truncated_path_p);
313 extern kern_return_t vnode_pager_get_object_mtime(
314 memory_object_t mem_obj,
315 struct timespec *mtime,
316 struct timespec *cs_mtime);
317
318 #if CHECK_CS_VALIDATION_BITMAP
319 extern kern_return_t vnode_pager_cs_check_validation_bitmap(
320 memory_object_t mem_obj,
321 memory_object_offset_t offset,
322 int optype);
323 #endif /*CHECK_CS_VALIDATION_BITMAP*/
324
325 extern kern_return_t ubc_cs_check_validation_bitmap (
326 struct vnode *vp,
327 memory_object_offset_t offset,
328 int optype);
329
330 extern kern_return_t vnode_pager_data_request(
331 memory_object_t,
332 memory_object_offset_t,
333 memory_object_cluster_size_t,
334 vm_prot_t,
335 memory_object_fault_info_t);
336 extern kern_return_t vnode_pager_data_return(
337 memory_object_t,
338 memory_object_offset_t,
339 memory_object_cluster_size_t,
340 memory_object_offset_t *,
341 int *,
342 boolean_t,
343 boolean_t,
344 int);
345 extern kern_return_t vnode_pager_data_initialize(
346 memory_object_t,
347 memory_object_offset_t,
348 memory_object_cluster_size_t);
349 extern void vnode_pager_reference(
350 memory_object_t mem_obj);
351 extern kern_return_t vnode_pager_synchronize(
352 memory_object_t mem_obj,
353 memory_object_offset_t offset,
354 memory_object_size_t length,
355 vm_sync_t sync_flags);
356 extern kern_return_t vnode_pager_map(
357 memory_object_t mem_obj,
358 vm_prot_t prot);
359 extern kern_return_t vnode_pager_last_unmap(
360 memory_object_t mem_obj);
361 extern void vnode_pager_deallocate(
362 memory_object_t);
363 extern kern_return_t vnode_pager_terminate(
364 memory_object_t);
365 extern void vnode_pager_vrele(
366 struct vnode *vp);
367 extern struct vnode *vnode_pager_lookup_vnode(
368 memory_object_t);
369
370 extern int ubc_map(
371 struct vnode *vp,
372 int flags);
373 extern void ubc_unmap(
374 struct vnode *vp);
375
376 struct vm_map_entry;
377 extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);
378
379 extern void device_pager_reference(memory_object_t);
380 extern void device_pager_deallocate(memory_object_t);
381 extern kern_return_t device_pager_init(memory_object_t,
382 memory_object_control_t,
383 memory_object_cluster_size_t);
384 extern kern_return_t device_pager_terminate(memory_object_t);
385 extern kern_return_t device_pager_data_request(memory_object_t,
386 memory_object_offset_t,
387 memory_object_cluster_size_t,
388 vm_prot_t,
389 memory_object_fault_info_t);
390 extern kern_return_t device_pager_data_return(memory_object_t,
391 memory_object_offset_t,
392 memory_object_cluster_size_t,
393 memory_object_offset_t *,
394 int *,
395 boolean_t,
396 boolean_t,
397 int);
398 extern kern_return_t device_pager_data_initialize(memory_object_t,
399 memory_object_offset_t,
400 memory_object_cluster_size_t);
401 extern kern_return_t device_pager_data_unlock(memory_object_t,
402 memory_object_offset_t,
403 memory_object_size_t,
404 vm_prot_t);
405 extern kern_return_t device_pager_synchronize(memory_object_t,
406 memory_object_offset_t,
407 memory_object_size_t,
408 vm_sync_t);
409 extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
410 extern kern_return_t device_pager_last_unmap(memory_object_t);
411 extern kern_return_t device_pager_populate_object(
412 memory_object_t device,
413 memory_object_offset_t offset,
414 ppnum_t page_num,
415 vm_size_t size);
416 extern memory_object_t device_pager_setup(
417 memory_object_t,
418 uintptr_t,
419 vm_size_t,
420 int);
421 extern void device_pager_bootstrap(void);
422 extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);
423
424 extern kern_return_t pager_map_to_phys_contiguous(
425 memory_object_control_t object,
426 memory_object_offset_t offset,
427 addr64_t base_vaddr,
428 vm_size_t size);
429
430 extern kern_return_t memory_object_create_named(
431 memory_object_t pager,
432 memory_object_offset_t size,
433 memory_object_control_t *control);
434
435 struct macx_triggers_args;
436 extern int mach_macx_triggers(
437 struct macx_triggers_args *args);
438
439 extern int macx_swapinfo(
440 memory_object_size_t *total_p,
441 memory_object_size_t *avail_p,
442 vm_size_t *pagesize_p,
443 boolean_t *encrypted_p);
444
445 extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
446 extern void log_unnest_badness(
447 vm_map_t map,
448 vm_map_offset_t start_unnest,
449 vm_map_offset_t end_unnest,
450 boolean_t is_nested_map,
451 vm_map_offset_t lowest_unnestable_addr);
452
453 struct proc;
454 extern int cs_allow_invalid(struct proc *p);
455 extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);
456
457 #define CS_VALIDATE_TAINTED 0x00000001
458 #define CS_VALIDATE_NX 0x00000002
459 extern boolean_t cs_validate_range(struct vnode *vp,
460 memory_object_t pager,
461 memory_object_offset_t offset,
462 const void *data,
463 vm_size_t size,
464 unsigned *result);
465
466 extern kern_return_t memory_entry_purgeable_control_internal(
467 ipc_port_t entry_port,
468 vm_purgable_t control,
469 int *state);
470
471 extern kern_return_t mach_memory_entry_purgable_control(
472 ipc_port_t entry_port,
473 vm_purgable_t control,
474 int *state);
475
476 extern kern_return_t mach_memory_entry_get_page_counts(
477 ipc_port_t entry_port,
478 unsigned int *resident_page_count,
479 unsigned int *dirty_page_count);
480
481 extern kern_return_t mach_memory_entry_page_op(
482 ipc_port_t entry_port,
483 vm_object_offset_t offset,
484 int ops,
485 ppnum_t *phys_entry,
486 int *flags);
487
488 extern kern_return_t mach_memory_entry_range_op(
489 ipc_port_t entry_port,
490 vm_object_offset_t offset_beg,
491 vm_object_offset_t offset_end,
492 int ops,
493 int *range);
494
495 extern void mach_memory_entry_port_release(ipc_port_t port);
496 extern void mach_destroy_memory_entry(ipc_port_t port);
497 extern kern_return_t mach_memory_entry_allocate(
498 struct vm_named_entry **user_entry_p,
499 ipc_port_t *user_handle_p);
500
501 extern void vm_paging_map_init(void);
502
503 extern int macx_backing_store_compaction(int flags);
504 extern unsigned int mach_vm_ctl_page_free_wanted(void);
505
506 extern int no_paging_space_action(void);
507
508 #define VM_TOGGLE_CLEAR 0
509 #define VM_TOGGLE_SET 1
510 #define VM_TOGGLE_GETVALUE 999
511 int vm_toggle_entry_reuse(int, int*);
512
513 #define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
514 #define SWAP_READ 0x00000001 /* Read buffer. */
515 #define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */
516
517 extern void vm_compressor_pager_init(void);
518 extern kern_return_t compressor_memory_object_create(
519 memory_object_size_t,
520 memory_object_t *);
521
522 extern boolean_t vm_compressor_low_on_space(void);
523 extern boolean_t vm_compressor_out_of_space(void);
524 extern int vm_swap_low_on_space(void);
525 void do_fastwake_warmup_all(void);
526 #if CONFIG_JETSAM
527 extern int proc_get_memstat_priority(struct proc*, boolean_t);
528 #endif /* CONFIG_JETSAM */
529
530 /* the object purger. purges the next eligible object from memory. */
531 /* returns TRUE if an object was purged, otherwise FALSE. */
532 boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
533 void vm_purgeable_disown(task_t task);
534
535 struct trim_list {
536 uint64_t tl_offset;
537 uint64_t tl_length;
538 struct trim_list *tl_next;
539 };
540
541 u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
542
543 #define MAX_SWAPFILENAME_LEN 1024
544 #define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */
545
546 extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];
547
548 struct vm_counters {
549 unsigned int do_collapse_compressor;
550 unsigned int do_collapse_compressor_pages;
551 unsigned int do_collapse_terminate;
552 unsigned int do_collapse_terminate_failure;
553 unsigned int should_cow_but_wired;
554 unsigned int create_upl_extra_cow;
555 unsigned int create_upl_extra_cow_pages;
556 unsigned int create_upl_lookup_failure_write;
557 unsigned int create_upl_lookup_failure_copy;
558 };
559 extern struct vm_counters vm_counters;
560
561 #if CONFIG_SECLUDED_MEMORY
562 struct vm_page_secluded_data {
563 int eligible_for_secluded;
564 int grab_success_free;
565 int grab_success_other;
566 int grab_failure_locked;
567 int grab_failure_state;
568 int grab_failure_dirty;
569 int grab_for_iokit;
570 int grab_for_iokit_success;
571 };
572 extern struct vm_page_secluded_data vm_page_secluded;
573
574 extern int num_tasks_can_use_secluded_mem;
575
576 /* boot-args */
577 extern int secluded_for_apps;
578 extern int secluded_for_iokit;
579 extern int secluded_for_filecache;
580 #if 11
581 extern int secluded_for_fbdp;
582 #endif
583
584 extern void memory_object_mark_eligible_for_secluded(
585 memory_object_control_t control,
586 boolean_t eligible_for_secluded);
587
588 #endif /* CONFIG_SECLUDED_MEMORY */
589
590 #define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */
591
592 #ifdef __cplusplus
593 }
594 #endif
595
596 #endif /* _VM_VM_PROTOS_H_ */
597
598 #endif /* XNU_KERNEL_PRIVATE */