/*
 * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

#ifdef __cplusplus
extern "C" {
#endif
/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files, since it does not pull in
 * any type definitions of its own and it works around some conflicts
 * with other header files.
 */

/*
 * iokit
 */
extern kern_return_t device_data_action(
    uintptr_t device_handle,
    ipc_port_t device_pager,
    vm_prot_t protection,
    vm_object_offset_t offset,
    vm_size_t size);

extern kern_return_t device_close(
    uintptr_t device_handle);

extern boolean_t vm_swap_files_pinned(void);

/*
 * osfmk
 */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
    ipc_port_t sright,
    ipc_space_t space);
extern task_t port_name_to_task(
    mach_port_name_t name);
extern task_t port_name_to_task_name(
    mach_port_name_t name);
extern void ipc_port_release_send(
    ipc_port_t port);
#endif /* _IPC_IPC_PORT_H_ */

extern ipc_space_t get_task_ipcspace(
    task_t t);

#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */

/* Some loose-ends VM stuff */

extern vm_map_t kalloc_map;
extern vm_size_t msg_ool_size_small;

extern kern_return_t vm_tests(void);
extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern kern_return_t vm_map_purgable_control(
    vm_map_t map,
    vm_map_offset_t address,
    vm_purgable_t control,
    int *state);
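/*
 * Example (an illustrative sketch; VM_PURGABLE_SET_STATE and
 * VM_PURGABLE_VOLATILE come from <mach/vm_purgable.h>, and "map" and
 * "addr" are hypothetical locals): make a purgeable region volatile so
 * the VM system may reclaim it under pressure.
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kern_return_t kr;
 *	kr = vm_map_purgable_control(map, addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 */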

#if MACH_ASSERT
extern void vm_map_pmap_check_ledgers(
    pmap_t pmap,
    ledger_t ledger,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t
vnode_pager_get_object_vnode(
    memory_object_t mem_obj,
    uintptr_t * vnodeaddr,
    uint32_t * vid);

#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va);
#endif

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map(
    vm_map_t target_task,
    upl_t upl,
    vm_address_t *address);

extern kern_return_t vm_upl_unmap(
    vm_map_t target_task,
    upl_t upl);

extern kern_return_t vm_region_object_create(
    vm_map_t target_task,
    vm_size_t size,
    ipc_port_t *object_handle);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    uint32_t cryptid);
extern memory_object_t apple_protect_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    vm_object_offset_t crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t crypto_start,
    vm_object_offset_t crypto_end);
#endif /* CONFIG_CODE_DECRYPTION */

struct vm_shared_region_slide_info;
extern kern_return_t vm_map_shared_region(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info);

extern memory_object_t shared_region_pager_setup(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info,
    uint64_t jop_key);
#if __has_feature(ptrauth_calls)
extern memory_object_t shared_region_pager_match(
    vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info,
    uint64_t jop_key);
extern void shared_region_key_alloc(
    char *shared_region_id,
    bool inherit,
    uint64_t inherited_key);
extern void shared_region_key_dealloc(
    char *shared_region_id);
extern uint64_t generate_jop_key(void);
extern void shared_region_pager_match_task_key(memory_object_t memobj, task_t task);
#endif /* __has_feature(ptrauth_calls) */
extern bool vm_shared_region_is_reslide(struct task *task);

struct vnode;
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);

#if __arm64__ || (__ARM_ARCH_7K__ >= 2)
#define SIXTEENK_PAGE_SIZE 0x4000
#define SIXTEENK_PAGE_MASK 0x3FFF
#define SIXTEENK_PAGE_SHIFT 14
#endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */

#define FOURK_PAGE_SIZE 0x1000
#define FOURK_PAGE_MASK 0xFFF
#define FOURK_PAGE_SHIFT 12
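/*
 * Example: rounding a length up to a 16K page boundary with the mask
 * constants above (an illustrative sketch; "len" is a hypothetical
 * variable, and SIXTEENK_PAGE_MASK is only defined on the
 * configurations guarded above):
 *
 *	vm_size_t rounded = (len + SIXTEENK_PAGE_MASK) & ~SIXTEENK_PAGE_MASK;
 *
 * e.g. len == 0x4001 yields rounded == 0x8000 (two 16K pages).
 */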

#if __arm64__

extern unsigned int page_shift_user32;

#define VM_MAP_DEBUG_FOURK MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
    memory_object_t mem_obj,
    boolean_t overwrite,
    int index,
    vm_object_t new_backing_object,
    vm_object_offset_t new_backing_offset,
    vm_object_t *old_backing_object,
    vm_object_offset_t *old_backing_offset);
#endif /* __arm64__ */

/*
 * bsd
 */
struct vnode;
extern void *upl_get_internal_page_list(
    upl_t upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

extern void vnode_pager_was_dirtied(
    struct vnode *,
    vm_object_offset_t,
    vm_object_offset_t);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
    struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
    struct vnode *);
extern uint32_t vnode_pager_isinuse(
    struct vnode *);
extern boolean_t vnode_pager_isSSD(
    struct vnode *);
extern void vnode_pager_throttle(
    void);
extern uint32_t vnode_pager_return_throttle_io_limit(
    struct vnode *,
    uint32_t *);
extern kern_return_t vnode_pager_get_name(
    struct vnode *vp,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
    struct vnode *vp,
    struct timespec *mtime,
    struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
    struct vnode *vp,
    void **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
    struct vnode *devvp,
    uint64_t blkno,
    uint32_t len,
    int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* Operation types for the vnode_pager_cs_check_validation_bitmap routine. */
#define CS_BITMAP_SET 1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3

#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t
vnode_pager_data_unlock(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
    memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
    memory_object_t,
    memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
    memory_object_t,
    uintptr_t *);
#endif

extern void vnode_pager_dirtied(
    memory_object_t,
    vm_object_offset_t,
    vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
    memory_object_t,
    boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
    memory_object_t mem_obj,
    char *pathname,
    vm_size_t pathname_len,
    char *filename,
    vm_size_t filename_len,
    boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
    memory_object_t mem_obj,
    struct timespec *mtime,
    struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t ubc_cs_check_validation_bitmap(
    struct vnode *vp,
    memory_object_offset_t offset,
    int optype);
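/*
 * Example (an illustrative sketch; the return-value semantics are
 * assumed from the optype names above, not documented here): ask
 * whether the page at "offset" is already recorded as validated in the
 * vnode's code-signing bitmap, and record it if not.
 *
 *	kr = ubc_cs_check_validation_bitmap(vp, offset, CS_BITMAP_CHECK);
 *	if (kr != KERN_SUCCESS) {
 *		... validate the page contents, then ...
 *		(void) ubc_cs_check_validation_bitmap(vp, offset,
 *		    CS_BITMAP_SET);
 *	}
 */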

extern kern_return_t vnode_pager_data_request(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t vnode_pager_data_initialize(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern void vnode_pager_reference(
    memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
extern kern_return_t vnode_pager_map(
    memory_object_t mem_obj,
    vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
    memory_object_t mem_obj);
extern void vnode_pager_deallocate(
    memory_object_t);
extern kern_return_t vnode_pager_terminate(
    memory_object_t);
extern void vnode_pager_vrele(
    struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
    memory_object_t);

extern int ubc_map(
    struct vnode *vp,
    int flags);
extern void ubc_unmap(
    struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
    memory_object_t device,
    memory_object_offset_t offset,
    ppnum_t page_num,
    vm_size_t size);
extern memory_object_t device_pager_setup(
    memory_object_t,
    uintptr_t,
    vm_size_t,
    int);
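/*
 * Example (an illustrative sketch of how a device pager is typically
 * wired up; "device_handle", "dev_size" and "flags" are hypothetical,
 * and the flag values are defined elsewhere):
 *
 *	memory_object_t pager;
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL,
 *	    (uintptr_t)device_handle, dev_size, flags);
 *	...
 *	(void) device_pager_populate_object(pager, offset, page_num,
 *	    PAGE_SIZE);
 *
 * Page faults on mappings backed by this pager call back into IOKit
 * via device_data_action(), declared near the top of this file.
 */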

extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);

extern kern_return_t pager_map_to_phys_contiguous(
    memory_object_control_t object,
    memory_object_offset_t offset,
    addr64_t base_vaddr,
    vm_size_t size);

extern kern_return_t memory_object_create_named(
    memory_object_t pager,
    memory_object_offset_t size,
    memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
    struct macx_triggers_args *args);

extern int macx_swapinfo(
    memory_object_size_t *total_p,
    memory_object_size_t *avail_p,
    vm_size_t *pagesize_p,
    boolean_t *encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
    vm_map_t map,
    vm_map_offset_t start_unnest,
    vm_map_offset_t end_unnest,
    boolean_t is_nested_map,
    vm_map_offset_t lowest_unnestable_addr);

struct proc;
struct proc *current_proc(void);
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

#define CS_VALIDATE_TAINTED 0x00000001
#define CS_VALIDATE_NX 0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    vm_size_t size,
    unsigned *result);
extern void cs_validate_page(
    struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    int *validated_p,
    int *tainted_p,
    int *nx_p);
#if PMAP_CS
extern kern_return_t cs_associate_blob_with_mapping(
    void *pmap,
    vm_map_offset_t start,
    vm_map_size_t size,
    vm_object_offset_t offset,
    void *blobs_p);
#endif /* PMAP_CS */

extern kern_return_t memory_entry_purgeable_control_internal(
    ipc_port_t entry_port,
    vm_purgable_t control,
    int *state);

extern kern_return_t memory_entry_access_tracking_internal(
    ipc_port_t entry_port,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);

extern kern_return_t mach_memory_entry_purgable_control(
    ipc_port_t entry_port,
    vm_purgable_t control,
    int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
    ipc_port_t entry_port,
    unsigned int *resident_page_count,
    unsigned int *dirty_page_count);

extern kern_return_t mach_memory_entry_phys_page_offset(
    ipc_port_t entry_port,
    vm_object_offset_t *offset_p);

extern kern_return_t mach_memory_entry_map_size(
    ipc_port_t entry_port,
    vm_map_t map,
    memory_object_offset_t offset,
    memory_object_offset_t size,
    mach_vm_size_t *map_size);

extern kern_return_t vm_map_range_physical_size(
    vm_map_t map,
    vm_map_address_t start,
    mach_vm_size_t size,
    mach_vm_size_t *phys_size);

extern kern_return_t mach_memory_entry_page_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset,
    int ops,
    ppnum_t *phys_entry,
    int *flags);

extern kern_return_t mach_memory_entry_range_op(
    ipc_port_t entry_port,
    vm_object_offset_t offset_beg,
    vm_object_offset_t offset_end,
    int ops,
    int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
    struct vm_named_entry **user_entry_p,
    ipc_port_t *user_handle_p);
extern vm_object_t vm_named_entry_to_vm_object(
    vm_named_entry_t named_entry);
extern kern_return_t vm_named_entry_from_vm_object(
    vm_named_entry_t named_entry,
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    vm_prot_t prot);

extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

#define VM_TOGGLE_CLEAR 0
#define VM_TOGGLE_SET 1
#define VM_TOGGLE_GETVALUE 999
int vm_toggle_entry_reuse(int, int*);
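/*
 * Example (an illustrative sketch; the use of the second argument for
 * the SET/CLEAR operations is assumed, not documented here): read the
 * current map-entry-reuse toggle.
 *
 *	int reuse = 0;
 *	(void) vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &reuse);
 */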

#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define SWAP_READ 0x00000001 /* Read buffer. */
#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
    memory_object_size_t,
    memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc*, boolean_t);
#endif /* CONFIG_JETSAM */

/*
 * The object purger: purges the next eligible object from memory.
 * Returns TRUE if an object was purged, otherwise FALSE.
 */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
void vm_purgeable_nonvolatile_owner_update(task_t owner,
    int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
    int delta);
void vm_owned_objects_disown(task_t task);
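/*
 * Example (an illustrative sketch; passing 0 as the group threshold is
 * an assumption, not documented here): keep invoking the purger until
 * it reports that nothing eligible remains.
 *
 *	while (vm_purgeable_object_purge_one_unlocked(0)) {
 *		continue;
 *	}
 */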


struct trim_list {
    uint64_t tl_offset;
    uint64_t tl_length;
    struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
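/*
 * Example (an illustrative sketch; "vp", the extents and the
 * route_only value are hypothetical): chain two extents and trim them
 * in a single call.
 *
 *	struct trim_list second = { 0x20000, 0x8000, NULL };
 *	struct trim_list first  = { 0x00000, 0x8000, &second };
 *	u_int32_t err = vnode_trim_list(vp, &first, FALSE);
 */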

#define MAX_SWAPFILENAME_LEN 1024
#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

struct vm_counters {
    unsigned int do_collapse_compressor;
    unsigned int do_collapse_compressor_pages;
    unsigned int do_collapse_terminate;
    unsigned int do_collapse_terminate_failure;
    unsigned int should_cow_but_wired;
    unsigned int create_upl_extra_cow;
    unsigned int create_upl_extra_cow_pages;
    unsigned int create_upl_lookup_failure_write;
    unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
    int eligible_for_secluded;
    int grab_success_free;
    int grab_success_other;
    int grab_failure_locked;
    int grab_failure_state;
    int grab_failure_dirty;
    int grab_for_iokit;
    int grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

/* boot-args */
extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
#if 11
extern int secluded_for_fbdp;
#endif

extern uint64_t vm_page_secluded_drain(void);
extern void memory_object_mark_eligible_for_secluded(
    memory_object_control_t control,
    boolean_t eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */

extern kern_return_t mach_make_memory_entry_internal(
    vm_map_t target_map,
    memory_object_size_t *size,
    memory_object_offset_t offset,
    vm_prot_t permission,
    vm_named_entry_kernel_flags_t vmne_kflags,
    ipc_port_t *object_handle,
    ipc_port_t parent_handle);

extern kern_return_t
memory_entry_check_for_adjustment(
    vm_map_t src_map,
    ipc_port_t port,
    vm_map_offset_t *overmap_start,
    vm_map_offset_t *overmap_end);

#define roundup(x, y) ((((x) % (y)) == 0) ? \
    (x) : ((x) + ((y) - ((x) % (y)))))
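/*
 * Example: roundup(10, 4) == 12 and roundup(12, 4) == 12. Note that
 * the macro evaluates both "x" and "y" more than once, so arguments
 * with side effects (e.g. roundup(i++, 4)) must be avoided.
 */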

#ifdef __cplusplus
}
#endif

/*
 * Flags for the VM swapper/reclaimer.
 * Used by vm_swap_consider_defragment()
 * to force defrag/reclaim by the swap
 * GC thread.
 */
#define VM_SWAP_FLAGS_NONE 0
#define VM_SWAP_FLAGS_FORCE_DEFRAG 1
#define VM_SWAP_FLAGS_FORCE_RECLAIM 2
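/*
 * Example (a sketch that assumes vm_swap_consider_defragment() accepts
 * these flags combined into a single int, which this header does not
 * itself declare):
 *
 *	vm_swap_consider_defragment(VM_SWAP_FLAGS_FORCE_DEFRAG |
 *	    VM_SWAP_FLAGS_FORCE_RECLAIM);
 */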

#if __arm64__
/*
 * Flags to control the behavior of
 * the legacy footprint entitlement.
 */
#define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE (1)
#define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT (2)
#define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE (3)

#endif /* __arm64__ */

#if MACH_ASSERT
struct proc;
extern struct proc *current_proc(void);
extern int proc_pid(struct proc *);
extern char *proc_best_name(struct proc *);
struct thread;
extern uint64_t thread_tid(struct thread *);
extern int debug4k_filter;
extern int debug4k_proc_filter;
extern char debug4k_proc_name[];
extern const char *debug4k_category_name[];

#define __DEBUG4K(category, fmt, ...) \
    MACRO_BEGIN \
    int __category = (category); \
    struct thread *__t = NULL; \
    struct proc *__p = NULL; \
    const char *__pname = "?"; \
    boolean_t __do_log = FALSE; \
    \
    if ((1 << __category) & debug4k_filter) { \
        __do_log = TRUE; \
    } else if (((1 << __category) & debug4k_proc_filter) && \
        debug4k_proc_name[0] != '\0') { \
        __p = current_proc(); \
        if (__p != NULL) { \
            __pname = proc_best_name(__p); \
        } \
        if (!strcmp(debug4k_proc_name, __pname)) { \
            __do_log = TRUE; \
        } \
    } \
    if (__do_log) { \
        if (__p == NULL) { \
            __p = current_proc(); \
            if (__p != NULL) { \
                __pname = proc_best_name(__p); \
            } \
        } \
        __t = current_thread(); \
        printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt, \
            debug4k_category_name[__category], \
            __p ? proc_pid(__p) : 0, \
            __pname, \
            __t, \
            thread_tid(__t), \
            __FUNCTION__, \
            __LINE__, \
            ##__VA_ARGS__); \
    } \
    MACRO_END

#define __DEBUG4K_ERROR 0
#define __DEBUG4K_LIFE 1
#define __DEBUG4K_LOAD 2
#define __DEBUG4K_FAULT 3
#define __DEBUG4K_COPY 4
#define __DEBUG4K_SHARE 5
#define __DEBUG4K_ADJUST 6
#define __DEBUG4K_PMAP 7
#define __DEBUG4K_MEMENTRY 8
#define __DEBUG4K_IOKIT 9
#define __DEBUG4K_UPL 10
#define __DEBUG4K_EXC 11
#define __DEBUG4K_VFS 12

#define DEBUG4K_ERROR(...) __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__)
#define DEBUG4K_LIFE(...) __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__)
#define DEBUG4K_LOAD(...) __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__)
#define DEBUG4K_FAULT(...) __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__)
#define DEBUG4K_COPY(...) __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__)
#define DEBUG4K_SHARE(...) __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__)
#define DEBUG4K_ADJUST(...) __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__)
#define DEBUG4K_PMAP(...) __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__)
#define DEBUG4K_MEMENTRY(...) __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__)
#define DEBUG4K_IOKIT(...) __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__)
#define DEBUG4K_UPL(...) __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__)
#define DEBUG4K_EXC(...) __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__)
#define DEBUG4K_VFS(...) __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__)
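/*
 * Example (an illustrative sketch; "map" and "start" are hypothetical
 * locals): emits a line tagged with the current proc, thread, function
 * and line number whenever the "fault" category is enabled via
 * debug4k_filter or debug4k_proc_filter.
 *
 *	DEBUG4K_FAULT("map %p start 0x%llx\n", map, (uint64_t)start);
 */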

#else /* MACH_ASSERT */

#define DEBUG4K_ERROR(...)
#define DEBUG4K_LIFE(...)
#define DEBUG4K_LOAD(...)
#define DEBUG4K_FAULT(...)
#define DEBUG4K_COPY(...)
#define DEBUG4K_SHARE(...)
#define DEBUG4K_ADJUST(...)
#define DEBUG4K_PMAP(...)
#define DEBUG4K_MEMENTRY(...)
#define DEBUG4K_IOKIT(...)
#define DEBUG4K_UPL(...)
#define DEBUG4K_EXC(...)
#define DEBUG4K_VFS(...)

#endif /* MACH_ASSERT */

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */