/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>
/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */
extern kern_return_t device_data_action(
    uintptr_t               device_handle,
    ipc_port_t              device_pager,
    vm_prot_t               protection,
    vm_object_offset_t      offset,
    vm_size_t               size);

extern kern_return_t device_close(
    uintptr_t               device_handle);

extern boolean_t vm_swap_files_pinned(void);
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
    ipc_port_t      sright,
    ipc_space_t     space);
extern task_t port_name_to_task(
    mach_port_name_t name);
extern task_t port_name_to_task_inspect(
    mach_port_name_t name);
extern void ipc_port_release_send(
    ipc_port_t      port);
#endif /* _IPC_IPC_PORT_H_ */
extern ipc_space_t get_task_ipcspace(
    task_t          t);

#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb;   /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */
/* Some loose-ends VM stuff */

extern vm_map_t         kalloc_map;
extern vm_size_t        msg_ool_size_small;
extern vm_map_t         zone_map;

extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);
extern kern_return_t vm_map_purgable_control(
    vm_map_t                map,
    vm_map_offset_t         address,
    vm_purgable_t           control,
    int                     *state);
extern kern_return_t vnode_pager_get_object_vnode(
    memory_object_t         mem_obj,
    uintptr_t               *vnodeaddr,
    uint32_t                *vid);
extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */
extern kern_return_t vm_upl_map(
    vm_map_t                target_task,
    upl_t                   upl,
    vm_address_t            *address);

extern kern_return_t vm_upl_unmap(
    vm_map_t                target_task,
    upl_t                   upl);

extern kern_return_t vm_region_object_create(
    vm_map_t                target_task,
    vm_size_t               size,
    ipc_port_t              *object_handle);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);
#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT      MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
    vm_map_t                map,
    vm_map_offset_t         start,
    vm_map_offset_t         end,
    vm_object_offset_t      crypto_backing_offset,
    struct pager_crypt_info *crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(
    vm_object_t             backing_object,
    vm_object_offset_t      backing_offset,
    vm_object_offset_t      crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t      crypto_start,
    vm_object_offset_t      crypto_end);
#endif /* CONFIG_CODE_DECRYPTION */
struct vm_shared_region_slide_info;
extern kern_return_t vm_map_shared_region(
    vm_map_t                map,
    vm_map_offset_t         start,
    vm_map_offset_t         end,
    vm_object_offset_t      backing_offset,
    struct vm_shared_region_slide_info *slide_info);
extern void shared_region_pager_bootstrap(void);
extern memory_object_t shared_region_pager_setup(
    vm_object_t             backing_object,
    vm_object_offset_t      backing_offset,
    struct vm_shared_region_slide_info *slide_info);
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
#if __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS))
#define SIXTEENK_PAGE_SIZE      0x4000
#define SIXTEENK_PAGE_MASK      0x3FFF
#define SIXTEENK_PAGE_SHIFT     14
#endif /* __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) */

#if __arm64__
#define FOURK_PAGE_SIZE         0x1000
#define FOURK_PAGE_MASK         0xFFF
#define FOURK_PAGE_SHIFT        12
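
/*
 * Illustrative sketch (not from the original header): these constants
 * follow the standard size/mask/shift identities (SIZE == 1 << SHIFT,
 * MASK == SIZE - 1), so page rounding is a pure bit operation, e.g.:
 *
 *	addr &= ~((vm_map_offset_t)FOURK_PAGE_MASK);         // truncate to a 4K boundary
 *	pgnum = (unsigned int)(offset >> FOURK_PAGE_SHIFT);  // 4K page index
 */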
extern unsigned int page_shift_user32;

#define VM_MAP_DEBUG_FOURK      MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
extern void fourk_pager_bootstrap(void);
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
    memory_object_t         mem_obj,
    boolean_t               overwrite,
    int                     index,
    vm_object_t             new_backing_object,
    vm_object_offset_t      new_backing_offset,
    vm_object_t             *old_backing_object,
    vm_object_offset_t      *old_backing_offset);
#endif /* __arm64__ */
extern void *upl_get_internal_page_list(upl_t upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

extern void vnode_pager_was_dirtied(
    struct vnode *,
    vm_object_offset_t,
    vm_object_offset_t);
typedef int pager_return_t;
extern pager_return_t vnode_pagein(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
    struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
    struct vnode *);
extern uint32_t vnode_pager_isinuse(
    struct vnode *);
extern boolean_t vnode_pager_isSSD(
    struct vnode *);
extern void vnode_pager_throttle(
    void);
extern uint32_t vnode_pager_return_throttle_io_limit(
    struct vnode *,
    uint32_t *);
extern kern_return_t vnode_pager_get_name(
    struct vnode    *vp,
    char            *pathname,
    vm_size_t       pathname_len,
    char            *filename,
    vm_size_t       filename_len,
    boolean_t       *truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
    struct vnode    *vp,
    struct timespec *mtime,
    struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
    struct vnode    *vp,
    void            **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
    struct vnode    *devvp,
    uint64_t        blkno,
    uint32_t        len,
    int             priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_validation_bitmap routine */
#define CS_BITMAP_SET   1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3

#endif /* CHECK_CS_VALIDATION_BITMAP */
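
/*
 * Illustrative usage sketch (an assumption, not from the original
 * header): the CS_BITMAP_* op codes select the operation performed by
 * vnode_pager_cs_check_validation_bitmap(), declared below, e.g.:
 *
 *	kr = vnode_pager_cs_check_validation_bitmap(mem_obj, offset,
 *	                                            CS_BITMAP_CHECK);
 */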
extern void vnode_pager_bootstrap(void);
extern kern_return_t vnode_pager_data_unlock(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    memory_object_size_t    size,
    vm_prot_t               desired_access);
extern kern_return_t vnode_pager_init(
    memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
    memory_object_t,
    memory_object_offset_t *);
#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
    memory_object_t,
    uintptr_t *);
#endif

extern void vnode_pager_dirtied(
    memory_object_t,
    vm_object_offset_t,
    vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
    memory_object_t,
    boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
    memory_object_t mem_obj,
    char            *pathname,
    vm_size_t       pathname_len,
    char            *filename,
    vm_size_t       filename_len,
    boolean_t       *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
    memory_object_t mem_obj,
    struct timespec *mtime,
    struct timespec *cs_mtime);
#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    int                     optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t ubc_cs_check_validation_bitmap(
    struct vnode            *vp,
    memory_object_offset_t  offset,
    int                     optype);
extern kern_return_t vnode_pager_data_request(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t vnode_pager_data_initialize(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern void vnode_pager_reference(
    memory_object_t         mem_obj);
extern kern_return_t vnode_pager_synchronize(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    memory_object_size_t    length,
    vm_sync_t               sync_flags);
extern kern_return_t vnode_pager_map(
    memory_object_t         mem_obj,
    vm_prot_t               prot);
extern kern_return_t vnode_pager_last_unmap(
    memory_object_t         mem_obj);
extern void vnode_pager_deallocate(
    memory_object_t);
extern kern_return_t vnode_pager_terminate(
    memory_object_t);
extern void vnode_pager_vrele(
    struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
    memory_object_t);

extern void ubc_unmap(
    struct vnode *vp);

extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);
extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
    memory_object_t         device,
    memory_object_offset_t  offset,
    ppnum_t                 page_num,
    vm_size_t               size);
extern memory_object_t device_pager_setup(
    memory_object_t,
    uintptr_t,
    vm_size_t,
    int);
extern void device_pager_bootstrap(void);
extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);
extern kern_return_t pager_map_to_phys_contiguous(
    memory_object_control_t object,
    memory_object_offset_t  offset,
    addr64_t                base_vaddr,
    vm_size_t               size);

extern kern_return_t memory_object_create_named(
    memory_object_t         pager,
    memory_object_offset_t  size,
    memory_object_control_t *control);
struct macx_triggers_args;
extern int mach_macx_triggers(
    struct macx_triggers_args *args);

extern int macx_swapinfo(
    memory_object_size_t    *total_p,
    memory_object_size_t    *avail_p,
    vm_size_t               *pagesize_p,
    boolean_t               *encrypted_p);
extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
    vm_map_t                map,
    vm_map_offset_t         start_unnest,
    vm_map_offset_t         end_unnest,
    boolean_t               is_nested_map,
    vm_map_offset_t         lowest_unnestable_addr);
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

#define CS_VALIDATE_TAINTED     0x00000001
#define CS_VALIDATE_NX          0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    vm_size_t size,
    unsigned *result);

extern kern_return_t cs_associate_blob_with_mapping(
    void                    *pmap,
    vm_map_offset_t         start,
    vm_map_size_t           size,
    vm_object_offset_t      offset,
    void                    *blobs_p);
extern kern_return_t memory_entry_purgeable_control_internal(
    ipc_port_t      entry_port,
    vm_purgable_t   control,
    int             *state);

extern kern_return_t memory_entry_access_tracking_internal(
    ipc_port_t      entry_port,
    int             *access_tracking,
    uint32_t        *access_tracking_reads,
    uint32_t        *access_tracking_writes);

extern kern_return_t mach_memory_entry_purgable_control(
    ipc_port_t      entry_port,
    vm_purgable_t   control,
    int             *state);

extern kern_return_t mach_memory_entry_get_page_counts(
    ipc_port_t      entry_port,
    unsigned int    *resident_page_count,
    unsigned int    *dirty_page_count);

extern kern_return_t mach_memory_entry_page_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset,
    int                     ops,
    ppnum_t                 *phys_entry,
    int                     *flags);

extern kern_return_t mach_memory_entry_range_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset_beg,
    vm_object_offset_t      offset_end,
    int                     ops,
    int                     *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
    struct vm_named_entry   **user_entry_p,
    ipc_port_t              *user_handle_p);
extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

#define VM_TOGGLE_CLEAR         0
#define VM_TOGGLE_SET           1
#define VM_TOGGLE_GETVALUE      999
int vm_toggle_entry_reuse(int, int*);
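
/*
 * Illustrative sketch (an assumption, not from the original header):
 * VM_TOGGLE_GETVALUE acts as a sentinel that reads the current setting
 * instead of changing it:
 *
 *	int cur;
 *	vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &cur);  // query
 *	vm_toggle_entry_reuse(VM_TOGGLE_SET, &cur);       // enable
 */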
#define SWAP_WRITE      0x00000000      /* Write buffer (pseudo flag). */
#define SWAP_READ       0x00000001      /* Read buffer. */
#define SWAP_ASYNC      0x00000002      /* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
    memory_object_size_t,
    memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc *, boolean_t);
#endif /* CONFIG_JETSAM */
/* the object purger. purges the next eligible object from memory. */
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
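
/*
 * Illustrative call pattern (an assumption, not from the original
 * header): keep purging eligible volatile objects until none remain;
 * here 0 is taken to mean "no purge-group restriction" in this sketch:
 *
 *	while (vm_purgeable_object_purge_one_unlocked(0) == TRUE)
 *		continue;
 */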
void vm_purgeable_disown(task_t task);
void vm_purgeable_nonvolatile_owner_update(task_t owner,
    int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
    int delta);
struct trim_list {
    uint64_t        tl_offset;
    uint64_t        tl_length;
    struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
#define MAX_SWAPFILENAME_LEN    1024
#define SWAPFILENAME_INDEX_LEN  2       /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];
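
/*
 * Illustrative note (an assumption, not from the original header):
 * swap files are conventionally named by appending a numeric index to
 * the base name, with SWAPFILENAME_INDEX_LEN bounding the suffix width:
 *
 *	char name[MAX_SWAPFILENAME_LEN + 1];
 *	snprintf(name, sizeof(name), "%s%d", swapfilename, swapfile_index);
 */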
struct vm_counters {
    unsigned int    do_collapse_compressor;
    unsigned int    do_collapse_compressor_pages;
    unsigned int    do_collapse_terminate;
    unsigned int    do_collapse_terminate_failure;
    unsigned int    should_cow_but_wired;
    unsigned int    create_upl_extra_cow;
    unsigned int    create_upl_extra_cow_pages;
    unsigned int    create_upl_lookup_failure_write;
    unsigned int    create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;
#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
    int     eligible_for_secluded;
    int     grab_success_free;
    int     grab_success_other;
    int     grab_failure_locked;
    int     grab_failure_state;
    int     grab_failure_dirty;
    int     grab_for_iokit;
    int     grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
extern int secluded_for_fbdp;

extern void memory_object_mark_eligible_for_secluded(
    memory_object_control_t control,
    boolean_t               eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */
#define MAX_PAGE_RANGE_QUERY    (1ULL * 1024 * 1024 * 1024)     /* 1 GB */

extern kern_return_t mach_make_memory_entry_internal(
    vm_map_t                target_map,
    memory_object_size_t    *size,
    memory_object_offset_t  offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_handle);
#define roundup(x, y)   ((((x) % (y)) == 0) ? \
                        (x) : ((x) + ((y) - ((x) % (y)))))
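
/*
 * Worked example (illustrative): roundup(10, 4) == 12 and
 * roundup(8, 4) == 8. Like most macro versions of roundup, this
 * evaluates x and y more than once, so arguments with side effects
 * (e.g. roundup(x++, 4)) must be avoided.
 */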
/*
 * Flags for the VM swapper/reclaimer.
 * Used by vm_swap_consider_defragment()
 * to force defrag/reclaim by the swap
 * GC thread.
 */
#define VM_SWAP_FLAGS_NONE              0
#define VM_SWAP_FLAGS_FORCE_DEFRAG      1
#define VM_SWAP_FLAGS_FORCE_RECLAIM     2
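
/*
 * Illustrative sketch (an assumption, not from the original header):
 * one of these flags is passed to vm_swap_consider_defragment(), named
 * in the comment above, to request a forced pass:
 *
 *	vm_swap_consider_defragment(VM_SWAP_FLAGS_FORCE_DEFRAG);
 */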
#endif  /* _VM_VM_PROTOS_H_ */

#endif  /* XNU_KERNEL_PRIVATE */