/*
 * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */

/*
 * iokit
 */
extern kern_return_t device_data_action(
    uintptr_t device_handle,
    ipc_port_t device_pager,
    vm_prot_t protection,
    vm_object_offset_t offset,
    vm_size_t size);

extern kern_return_t device_close(
    uintptr_t device_handle);

extern boolean_t vm_swap_files_pinned(void);

/*
 * osfmk
 */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
    ipc_port_t sright,
    ipc_space_t space);
extern mach_port_name_t ipc_port_copyout_send_pinned(
    ipc_port_t sright,
    ipc_space_t space);
extern task_t port_name_to_task(
    mach_port_name_t name);
extern task_t port_name_to_task_read(
    mach_port_name_t name);
extern task_t port_name_to_task_name(
    mach_port_name_t name);
extern void ipc_port_release_send(
    ipc_port_t port);
#endif /* _IPC_IPC_PORT_H_ */

extern ipc_space_t get_task_ipcspace(
    task_t t);

#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */

/* Some loose-ends VM stuff */

extern vm_map_t kalloc_map;
extern vm_size_t msg_ool_size_small;

extern kern_return_t vm_tests(void);
extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_task_page_size(task_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern kern_return_t vm_map_purgable_control(
    vm_map_t map,
    vm_map_offset_t address,
    vm_purgable_t control,
    int *state);
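
/*
 * Example: querying the purgeable state of a mapped region. This is an
 * illustrative sketch only; it assumes VM_PURGABLE_GET_STATE and the
 * VM_PURGABLE_* state values from <mach/vm_purgable.h>, and a valid
 * map/address pair:
 *
 *     int state = VM_PURGABLE_NONVOLATILE;
 *     kern_return_t kr;
 *
 *     kr = vm_map_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
 *     if (kr == KERN_SUCCESS && state == VM_PURGABLE_VOLATILE) {
 *             // the region's contents may be reclaimed at any time
 *     }
 */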

#if MACH_ASSERT
extern void vm_map_pmap_check_ledgers(
    pmap_t pmap,
    ledger_t ledger,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t vnode_pager_get_object_vnode(
    memory_object_t mem_obj,
    uintptr_t *vnodeaddr,
    uint32_t *vid);

#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va);
#endif

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map(
    vm_map_t target_task,
    upl_t upl,
    vm_address_t *address);

extern kern_return_t vm_upl_unmap(
    vm_map_t target_task,
    upl_t upl);

extern kern_return_t vm_region_object_create(
    vm_map_t target_task,
    vm_size_t size,
    ipc_port_t *object_handle);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    uint32_t cryptid);
extern memory_object_t apple_protect_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end,
    boolean_t cache_pager);
#endif /* CONFIG_CODE_DECRYPTION */

struct vm_shared_region_slide_info;
extern kern_return_t vm_map_shared_region(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info);

extern memory_object_t shared_region_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info,
    uint64_t jop_key);
#if __has_feature(ptrauth_calls)
extern memory_object_t shared_region_pager_match(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info,
    uint64_t jop_key);
extern void shared_region_key_alloc(
    char *shared_region_id,
    bool inherit,
    uint64_t inherited_key);
extern void shared_region_key_dealloc(
    char *shared_region_id);
extern uint64_t generate_jop_key(void);
extern void shared_region_pager_match_task_key(memory_object_t memobj, task_t task);
#endif /* __has_feature(ptrauth_calls) */
extern bool vm_shared_region_is_reslide(struct task *task);

struct vnode;
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);

#if __arm64__ || (__ARM_ARCH_7K__ >= 2)
#define SIXTEENK_PAGE_SIZE  0x4000
#define SIXTEENK_PAGE_MASK  0x3FFF
#define SIXTEENK_PAGE_SHIFT 14
#endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */

#define FOURK_PAGE_SIZE  0x1000
#define FOURK_PAGE_MASK  0xFFF
#define FOURK_PAGE_SHIFT 12
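
/*
 * Example of the mask/shift arithmetic these constants support
 * (illustrative sketch; addr is assumed to be a 64-bit address):
 *
 *     trunc = addr & ~(uint64_t)FOURK_PAGE_MASK;                      // round down to a 4K boundary
 *     round = (addr + FOURK_PAGE_MASK) & ~(uint64_t)FOURK_PAGE_MASK;  // round up to a 4K boundary
 *     pgnum = addr >> FOURK_PAGE_SHIFT;                               // 4K page index
 */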

#if __arm64__

extern unsigned int page_shift_user32;

#define VM_MAP_DEBUG_FOURK MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
    memory_object_t mem_obj,
    boolean_t overwrite,
    int index,
    vm_object_t new_backing_object,
    vm_object_offset_t new_backing_offset,
    vm_object_t *old_backing_object,
    vm_object_offset_t *old_backing_offset);
#endif /* __arm64__ */

/*
 * bsd
 */
struct vnode;
extern void *upl_get_internal_page_list(
    upl_t upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

extern void vnode_pager_was_dirtied(
    struct vnode *,
    vm_object_offset_t,
    vm_object_offset_t);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
    struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
    struct vnode *);
extern uint32_t vnode_pager_isinuse(
    struct vnode *);
extern boolean_t vnode_pager_isSSD(
    struct vnode *);
extern void vnode_pager_throttle(void);
extern uint32_t vnode_pager_return_throttle_io_limit(
    struct vnode *,
    uint32_t *);
extern kern_return_t vnode_pager_get_name(
    struct vnode *vp,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
    struct vnode *vp,
    struct timespec *mtime,
    struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
    struct vnode *vp,
    void **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
    struct vnode *devvp,
    uint64_t blkno,
    uint32_t len,
    int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_validation_bitmap routine */
#define CS_BITMAP_SET   1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t vnode_pager_data_unlock(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
    memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
    memory_object_t,
    memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
    memory_object_t,
    uintptr_t *);
#endif

extern void vnode_pager_dirtied(
    memory_object_t,
    vm_object_offset_t,
    vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
    memory_object_t,
    boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
    memory_object_t mem_obj,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
    memory_object_t mem_obj,
    struct timespec *mtime,
    struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t ubc_cs_check_validation_bitmap(
    struct vnode *vp,
    memory_object_offset_t offset,
    int optype);

extern kern_return_t vnode_pager_data_request(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t vnode_pager_data_initialize(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern void vnode_pager_reference(
    memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
extern kern_return_t vnode_pager_map(
    memory_object_t mem_obj,
    vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
    memory_object_t mem_obj);
extern void vnode_pager_deallocate(
    memory_object_t);
extern kern_return_t vnode_pager_terminate(
    memory_object_t);
extern void vnode_pager_vrele(
    struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
    memory_object_t);

extern int ubc_map(
    struct vnode *vp,
    int flags);
extern void ubc_unmap(
    struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
    memory_object_t device,
    memory_object_offset_t offset,
    ppnum_t page_num,
    vm_size_t size);
extern memory_object_t device_pager_setup(
    memory_object_t,
    uintptr_t,
    vm_size_t,
    int);

extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);

extern kern_return_t pager_map_to_phys_contiguous(
    memory_object_control_t object,
    memory_object_offset_t offset,
    addr64_t base_vaddr,
    vm_size_t size);

extern kern_return_t memory_object_create_named(
    memory_object_t pager,
    memory_object_offset_t size,
    memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
    struct macx_triggers_args *args);

extern int macx_swapinfo(
    memory_object_size_t *total_p,
    memory_object_size_t *avail_p,
    vm_size_t *pagesize_p,
    boolean_t *encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
    vm_map_t map,
    vm_map_offset_t start_unnest,
    vm_map_offset_t end_unnest,
    boolean_t is_nested_map,
    vm_map_offset_t lowest_unnestable_addr);

struct proc;
struct proc *current_proc(void);
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

#define CS_VALIDATE_TAINTED 0x00000001
#define CS_VALIDATE_NX      0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    vm_size_t size,
    unsigned *result);
extern void cs_validate_page(
    struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    int *validated_p,
    int *tainted_p,
    int *nx_p);

extern kern_return_t memory_entry_purgeable_control_internal(
    ipc_port_t entry_port,
    vm_purgable_t control,
    int *state);

extern kern_return_t memory_entry_access_tracking_internal(
    ipc_port_t entry_port,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);

extern kern_return_t mach_memory_entry_purgable_control(
    ipc_port_t entry_port,
    vm_purgable_t control,
    int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
    ipc_port_t entry_port,
    unsigned int *resident_page_count,
    unsigned int *dirty_page_count);

extern kern_return_t mach_memory_entry_phys_page_offset(
    ipc_port_t entry_port,
    vm_object_offset_t *offset_p);

extern kern_return_t mach_memory_entry_map_size(
    ipc_port_t entry_port,
    vm_map_t map,
    memory_object_offset_t offset,
    memory_object_offset_t size,
    mach_vm_size_t *map_size);

extern kern_return_t vm_map_range_physical_size(
    vm_map_t map,
    vm_map_address_t start,
    mach_vm_size_t size,
    mach_vm_size_t *phys_size);

extern kern_return_t mach_memory_entry_page_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset,
    int ops,
    ppnum_t *phys_entry,
    int *flags);

extern kern_return_t mach_memory_entry_range_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset_beg,
    vm_object_offset_t offset_end,
    int ops,
    int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
    struct vm_named_entry **user_entry_p,
    ipc_port_t *user_handle_p);
extern vm_object_t vm_named_entry_to_vm_object(
    vm_named_entry_t named_entry);
extern kern_return_t vm_named_entry_from_vm_object(
    vm_named_entry_t named_entry,
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    vm_prot_t prot);

extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

/*
 * Counts updated by revalidate_text_page().
 */
extern unsigned int vmtc_total;        /* total # of text page corruptions detected */
extern unsigned int vmtc_undiagnosed;  /* of those, the number that couldn't be diagnosed */
extern unsigned int vmtc_not_eligible; /* failed to correct, due to page attributes */
extern unsigned int vmtc_copyin_fail;  /* of undiagnosed, copyin failure count */
extern unsigned int vmtc_not_found;    /* of diagnosed, no error found - code signing error? */
extern unsigned int vmtc_one_bit_flip; /* of diagnosed, single bit errors */
#define MAX_TRACK_POWER2 9             /* of diagnosed, counts of 1, 2, 4, ... bytes corrupted */
extern unsigned int vmtc_byte_counts[MAX_TRACK_POWER2 + 1];
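
/*
 * Presumed bucket semantics (an assumption based on the comment above,
 * not stated elsewhere in this header): index i counts diagnosed
 * corruptions of 2^i bytes, with the final bucket (MAX_TRACK_POWER2)
 * also absorbing anything larger.
 */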

extern kern_return_t revalidate_text_page(task_t, vm_map_offset_t);

#define VM_TOGGLE_CLEAR    0
#define VM_TOGGLE_SET      1
#define VM_TOGGLE_GETVALUE 999
int vm_toggle_entry_reuse(int, int *);
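
/*
 * Example usage (illustrative sketch; it assumes the int * argument is
 * only used to report the current value back to the caller):
 *
 *     int reuse_enabled = 0;
 *
 *     vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &reuse_enabled);
 *     if (!reuse_enabled) {
 *             vm_toggle_entry_reuse(VM_TOGGLE_SET, &reuse_enabled);
 *     }
 */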

#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define SWAP_READ  0x00000001 /* Read buffer. */
#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
    memory_object_size_t,
    memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc *, boolean_t);
#endif /* CONFIG_JETSAM */

/*
 * The object purger: purges the next eligible object from memory.
 * Returns TRUE if an object was purged, otherwise FALSE.
 */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
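
/*
 * Example: draining all currently eligible purgeable objects
 * (illustrative sketch; the exact semantics of force_purge_below_group
 * are assumed here, with 0 taken to mean "purge only what is already
 * eligible"):
 *
 *     while (vm_purgeable_object_purge_one_unlocked(0)) {
 *             continue;
 *     }
 */
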
void vm_purgeable_nonvolatile_owner_update(task_t owner,
    int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
    int delta);
void vm_owned_objects_disown(task_t task);

struct trim_list {
    uint64_t tl_offset;
    uint64_t tl_length;
    struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);

#define MAX_SWAPFILENAME_LEN   1024
#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

struct vm_counters {
    unsigned int do_collapse_compressor;
    unsigned int do_collapse_compressor_pages;
    unsigned int do_collapse_terminate;
    unsigned int do_collapse_terminate_failure;
    unsigned int should_cow_but_wired;
    unsigned int create_upl_extra_cow;
    unsigned int create_upl_extra_cow_pages;
    unsigned int create_upl_lookup_failure_write;
    unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
    int eligible_for_secluded;
    int grab_success_free;
    int grab_success_other;
    int grab_failure_locked;
    int grab_failure_state;
    int grab_failure_dirty;
    int grab_for_iokit;
    int grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

/* boot-args */
extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
#if 11
extern int secluded_for_fbdp;
#endif

extern uint64_t vm_page_secluded_drain(void);
extern void memory_object_mark_eligible_for_secluded(
    memory_object_control_t control,
    boolean_t eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */

extern kern_return_t mach_make_memory_entry_internal(
    vm_map_t target_map,
    memory_object_size_t *size,
    memory_object_offset_t offset,
    vm_prot_t permission,
    vm_named_entry_kernel_flags_t vmne_kflags,
    ipc_port_t *object_handle,
    ipc_port_t parent_handle);

extern kern_return_t memory_entry_check_for_adjustment(
    vm_map_t src_map,
    ipc_port_t port,
    vm_map_offset_t *overmap_start,
    vm_map_offset_t *overmap_end);

#define roundup(x, y) ((((x) % (y)) == 0) ? \
	(x) : ((x) + ((y) - ((x) % (y)))))
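
/*
 * Worked example: roundup(0x3001, 0x1000) == 0x4000, while the
 * already-aligned roundup(0x3000, 0x1000) == 0x3000. Note that the
 * macro evaluates both arguments more than once and requires y != 0,
 * so avoid passing expressions with side effects.
 */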

#ifdef __cplusplus
}
#endif

/*
 * Flags for the VM swapper/reclaimer.
 * Used by vm_swap_consider_defragment()
 * to force defrag/reclaim by the swap
 * GC thread.
 */
#define VM_SWAP_FLAGS_NONE          0
#define VM_SWAP_FLAGS_FORCE_DEFRAG  1
#define VM_SWAP_FLAGS_FORCE_RECLAIM 2

#if __arm64__
/*
 * Flags to control the behavior of
 * the legacy footprint entitlement.
 */
#define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE         (1)
#define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT     (2)
#define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE (3)

#endif /* __arm64__ */

#if MACH_ASSERT
struct proc;
extern struct proc *current_proc(void);
extern int proc_pid(struct proc *);
extern char *proc_best_name(struct proc *);
struct thread;
extern uint64_t thread_tid(struct thread *);
extern int debug4k_filter;
extern int debug4k_proc_filter;
extern char debug4k_proc_name[];
extern const char *debug4k_category_name[];

#define __DEBUG4K(category, fmt, ...)                                   \
	MACRO_BEGIN                                                     \
	int __category = (category);                                    \
	struct thread *__t = NULL;                                      \
	struct proc *__p = NULL;                                        \
	const char *__pname = "?";                                      \
	boolean_t __do_log = FALSE;                                     \
	                                                                \
	if ((1 << __category) & debug4k_filter) {                       \
	        __do_log = TRUE;                                        \
	} else if (((1 << __category) & debug4k_proc_filter) &&         \
	    debug4k_proc_name[0] != '\0') {                             \
	        __p = current_proc();                                   \
	        if (__p != NULL) {                                      \
	                __pname = proc_best_name(__p);                  \
	        }                                                       \
	        if (!strcmp(debug4k_proc_name, __pname)) {              \
	                __do_log = TRUE;                                \
	        }                                                       \
	}                                                               \
	if (__do_log) {                                                 \
	        if (__p == NULL) {                                      \
	                __p = current_proc();                           \
	                if (__p != NULL) {                              \
	                        __pname = proc_best_name(__p);          \
	                }                                               \
	        }                                                       \
	        __t = current_thread();                                 \
	        printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt,     \
	            debug4k_category_name[__category],                  \
	            __p ? proc_pid(__p) : 0,                            \
	            __pname,                                            \
	            __t,                                                \
	            thread_tid(__t),                                    \
	            __FUNCTION__,                                       \
	            __LINE__,                                           \
	            ##__VA_ARGS__);                                     \
	}                                                               \
	MACRO_END

#define __DEBUG4K_ERROR    0
#define __DEBUG4K_LIFE     1
#define __DEBUG4K_LOAD     2
#define __DEBUG4K_FAULT    3
#define __DEBUG4K_COPY     4
#define __DEBUG4K_SHARE    5
#define __DEBUG4K_ADJUST   6
#define __DEBUG4K_PMAP     7
#define __DEBUG4K_MEMENTRY 8
#define __DEBUG4K_IOKIT    9
#define __DEBUG4K_UPL      10
#define __DEBUG4K_EXC      11
#define __DEBUG4K_VFS      12

#define DEBUG4K_ERROR(...)    __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__)
#define DEBUG4K_LIFE(...)     __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__)
#define DEBUG4K_LOAD(...)     __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__)
#define DEBUG4K_FAULT(...)    __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__)
#define DEBUG4K_COPY(...)     __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__)
#define DEBUG4K_SHARE(...)    __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__)
#define DEBUG4K_ADJUST(...)   __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__)
#define DEBUG4K_PMAP(...)     __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__)
#define DEBUG4K_MEMENTRY(...) __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__)
#define DEBUG4K_IOKIT(...)    __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__)
#define DEBUG4K_UPL(...)      __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__)
#define DEBUG4K_EXC(...)      __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__)
#define DEBUG4K_VFS(...)      __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__)
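
/*
 * Example usage (illustrative values): each wrapper takes a
 * printf-style format string plus arguments; output is emitted only
 * when the category's bit is set in debug4k_filter, or when the bit is
 * set in debug4k_proc_filter and the current process name matches
 * debug4k_proc_name:
 *
 *     DEBUG4K_FAULT("map %p vaddr 0x%llx prot 0x%x\n",
 *         map, (uint64_t)vaddr, prot);
 */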

#else /* MACH_ASSERT */

#define DEBUG4K_ERROR(...)
#define DEBUG4K_LIFE(...)
#define DEBUG4K_LOAD(...)
#define DEBUG4K_FAULT(...)
#define DEBUG4K_COPY(...)
#define DEBUG4K_SHARE(...)
#define DEBUG4K_ADJUST(...)
#define DEBUG4K_PMAP(...)
#define DEBUG4K_MEMENTRY(...)
#define DEBUG4K_IOKIT(...)
#define DEBUG4K_UPL(...)
#define DEBUG4K_EXC(...)
#define DEBUG4K_VFS(...)

#endif /* MACH_ASSERT */

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */