/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */
extern kern_return_t device_data_action(
	uintptr_t               device_handle,
	ipc_port_t              device_pager,
	vm_prot_t               protection,
	vm_object_offset_t      offset,
	vm_size_t               size);

extern kern_return_t device_close(
	uintptr_t               device_handle);

extern boolean_t vm_swap_files_pinned(void);
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
	ipc_port_t      sright,
	ipc_space_t     space);
extern task_t port_name_to_task(
	mach_port_name_t name);
#endif /* _IPC_IPC_PORT_H_ */
extern ipc_space_t get_task_ipcspace(
	task_t t);
#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb;       /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */
/* Some loose-ends VM stuff */

extern vm_map_t kalloc_map;
extern vm_size_t msg_ool_size_small;
extern vm_map_t zone_map;

extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_vmmap_entries(vm_map_t);
extern int get_map_nentries(vm_map_t);
extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
	vm_map_t                target_task,
	upl_t                   upl,
	vm_address_t            *address
);

extern kern_return_t vm_upl_unmap
(
	vm_map_t                target_task,
	upl_t                   upl
);
extern kern_return_t vm_region_object_create
(
	vm_map_t                target_task,
	vm_size_t               size,
	ipc_port_t              *object_handle
);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);
#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_object_offset_t      crypto_backing_offset,
	struct pager_crypt_info *crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(
	vm_object_t             backing_object,
	vm_object_offset_t      backing_offset,
	vm_object_offset_t      crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t      crypto_start,
	vm_object_offset_t      crypto_end);
#endif /* CONFIG_CODE_DECRYPTION */
struct vnode;

extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
#if __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS))
#define SIXTEENK_PAGE_SIZE      0x4000
#define SIXTEENK_PAGE_MASK      0x3FFF
#define SIXTEENK_PAGE_SHIFT     14
#endif /* __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) */
extern void *upl_get_internal_page_list(
	upl_t upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);
typedef int pager_return_t;
extern pager_return_t vnode_pagein(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
	struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
	struct vnode *);
extern uint32_t vnode_pager_isinuse(
	struct vnode *);
extern boolean_t vnode_pager_isSSD(
	struct vnode *);
extern void vnode_pager_throttle(void);
extern uint32_t vnode_pager_return_throttle_io_limit(
	struct vnode *,
	uint32_t *);
extern kern_return_t vnode_pager_get_name(
	struct vnode    *vp,
	char            *pathname,
	vm_size_t       pathname_len,
	char            *filename,
	vm_size_t       filename_len,
	boolean_t       *truncated_path_p);
extern kern_return_t vnode_pager_get_mtime(
	struct vnode    *vp,
	struct timespec *mtime,
	struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
	struct vnode    *vp,
	void            **blobs);

void vnode_pager_issue_reprioritize_io(
	struct vnode    *devvp,
	uint64_t        blkno,
	uint32_t        len,
	int             priority);
#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_validation_bitmap routine */
#define CS_BITMAP_SET   1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3
#endif /* CHECK_CS_VALIDATION_BITMAP */
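/*
 * Illustrative sketch (not from the original header): querying the
 * validation bitmap via vnode_pager_cs_check_validation_bitmap(),
 * declared further below, assuming "mem_obj" and "offset" are in hand:
 *
 *	if (vnode_pager_cs_check_validation_bitmap(mem_obj, offset,
 *	    CS_BITMAP_CHECK) == KERN_SUCCESS) {
 *		// the page at "offset" was previously validated
 *	}
 */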
extern void vnode_pager_bootstrap(void);
extern kern_return_t vnode_pager_data_unlock(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_size_t    size,
	vm_prot_t               desired_access);
extern kern_return_t vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);
extern kern_return_t vnode_pager_get_object_devvp(
	memory_object_t,
	uintptr_t *);
extern kern_return_t vnode_pager_get_isinuse(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
	memory_object_t,
	boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
	memory_object_t mem_obj,
	char            *pathname,
	vm_size_t       pathname_len,
	char            *filename,
	vm_size_t       filename_len,
	boolean_t       *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
	memory_object_t mem_obj,
	struct timespec *mtime,
	struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	int                     optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */
extern kern_return_t ubc_cs_check_validation_bitmap(
	struct vnode            *vp,
	memory_object_offset_t  offset,
	int                     optype);
extern kern_return_t vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	vm_prot_t,
	memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	memory_object_offset_t *,
	int *,
	boolean_t,
	boolean_t,
	int);
extern kern_return_t vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t);
extern void vnode_pager_reference(
	memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_size_t    length,
	vm_sync_t               sync_flags);
extern kern_return_t vnode_pager_map(
	memory_object_t mem_obj,
	vm_prot_t       prot);
extern kern_return_t vnode_pager_last_unmap(
	memory_object_t mem_obj);
extern void vnode_pager_deallocate(
	memory_object_t);
extern kern_return_t vnode_pager_terminate(
	memory_object_t);
extern void vnode_pager_vrele(
	struct vnode *vp);
extern void vnode_pager_release_from_cache(
	int *);
extern struct vnode *vnode_pager_lookup_vnode(
	memory_object_t);

extern void ubc_unmap(
	struct vnode *vp);

extern struct vm_object *find_vnode_object(
	struct vm_map_entry *entry);
extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
	memory_object_control_t,
	memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	vm_prot_t,
	memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	memory_object_offset_t *,
	int *,
	boolean_t,
	boolean_t,
	int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
	memory_object_offset_t,
	memory_object_size_t,
	vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
	memory_object_offset_t,
	memory_object_size_t,
	vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
	memory_object_t         device,
	memory_object_offset_t  offset,
	ppnum_t                 page_num,
	vm_size_t               size);
extern memory_object_t device_pager_setup(
	memory_object_t,
	uintptr_t,
	vm_size_t,
	int);
extern void device_pager_bootstrap(void);
extern boolean_t is_device_pager_ops(
	const struct memory_object_pager_ops *pager_ops);

extern kern_return_t pager_map_to_phys_contiguous(
	memory_object_control_t object,
	memory_object_offset_t  offset,
	addr64_t                base_vaddr,
	vm_size_t               size);
extern kern_return_t memory_object_create_named(
	memory_object_t         pager,
	memory_object_offset_t  size,
	memory_object_control_t *control);
struct macx_triggers_args;
extern int mach_macx_triggers(
	struct macx_triggers_args *args);

extern int macx_swapinfo(
	memory_object_size_t    *total_p,
	memory_object_size_t    *avail_p,
	vm_size_t               *pagesize_p,
	boolean_t               *encrypted_p);
extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
	vm_map_t                map,
	vm_map_offset_t         start_unnest,
	vm_map_offset_t         end_unnest,
	boolean_t               is_nested_map,
	vm_map_offset_t         lowest_unnestable_addr);
struct proc;
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

#define CS_VALIDATE_TAINTED     0x00000001
#define CS_VALIDATE_NX          0x00000002
extern boolean_t cs_validate_range(
	struct vnode            *vp,
	memory_object_t         pager,
	memory_object_offset_t  offset,
	const void              *data,
	vm_size_t               size,
	unsigned                *result);
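/*
 * Illustrative sketch (not from the original header): a caller checking
 * the result bits, assuming "vp", "pager", "offset" and a mapped page at
 * "kaddr" are already set up:
 *
 *	unsigned validate_result = 0;
 *	if (cs_validate_range(vp, pager, offset,
 *	    (const void *)kaddr, PAGE_SIZE, &validate_result)) {
 *		if (validate_result & CS_VALIDATE_TAINTED) {
 *			// page contents do not match the code signature
 *		}
 *		if (validate_result & CS_VALIDATE_NX) {
 *			// page must not be mapped executable
 *		}
 *	}
 */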
extern kern_return_t mach_memory_entry_purgable_control(
	ipc_port_t      entry_port,
	vm_purgable_t   control,
	int             *state);

extern kern_return_t mach_memory_entry_get_page_counts(
	ipc_port_t      entry_port,
	unsigned int    *resident_page_count,
	unsigned int    *dirty_page_count);

extern kern_return_t mach_memory_entry_page_op(
	ipc_port_t      entry_port,
	vm_object_offset_t offset,
	int             ops,
	ppnum_t         *phys_entry,
	int             *flags);

extern kern_return_t mach_memory_entry_range_op(
	ipc_port_t      entry_port,
	vm_object_offset_t offset_beg,
	vm_object_offset_t offset_end,
	int             ops,
	int             *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
	struct vm_named_entry   **user_entry_p,
	ipc_port_t              *user_handle_p);
extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);
#define VM_TOGGLE_CLEAR         0
#define VM_TOGGLE_SET           1
#define VM_TOGGLE_GETVALUE      999
int vm_toggle_entry_reuse(int, int *);
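/*
 * Illustrative sketch (not from the original header): reading the current
 * setting without modifying it, assuming VM_TOGGLE_GETVALUE is the query
 * operation:
 *
 *	int reuse_value = 0;
 *	vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &reuse_value);
 */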
#define SWAP_WRITE      0x00000000      /* Write buffer (pseudo flag). */
#define SWAP_READ      0x00000001      /* Read buffer. */
#define SWAP_ASYNC      0x00000002      /* Start I/O, do not wait. */
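/*
 * These flags combine bitwise; for example, a non-blocking read would
 * (hypothetically) be requested as:
 *
 *	int swap_flags = SWAP_READ | SWAP_ASYNC;
 */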
extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
	memory_object_size_t,
	memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);

#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc *, boolean_t);
#endif /* CONFIG_JETSAM */
/* the object purger. purges the next eligible object from memory. */
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
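/*
 * Illustrative sketch (not from the original header): draining all
 * eligible objects, assuming a "force_purge_below_group" of 0 means no
 * forced purging:
 *
 *	while (vm_purgeable_object_purge_one_unlocked(0)) {
 *		// loop while the previous call purged an object
 *	}
 */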
void vm_purgeable_disown(task_t task);
struct trim_list {
	uint64_t        tl_offset;
	uint64_t        tl_length;
	struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(
	struct vnode    *vp,
	struct trim_list *tl,
	boolean_t       route_only);
#define MAX_SWAPFILENAME_LEN    1024
#define SWAPFILENAME_INDEX_LEN  2       /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];
struct vm_counters {
	unsigned int    do_collapse_compressor;
	unsigned int    do_collapse_compressor_pages;
	unsigned int    do_collapse_terminate;
	unsigned int    do_collapse_terminate_failure;
	unsigned int    should_cow_but_wired;
	unsigned int    create_upl_extra_cow;
	unsigned int    create_upl_extra_cow_pages;
	unsigned int    create_upl_lookup_failure_write;
	unsigned int    create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;
#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
	int     eligible_for_secluded;
	int     grab_success_free;
	int     grab_success_other;
	int     grab_failure_locked;
	int     grab_failure_state;
	int     grab_failure_dirty;
	int     grab_for_iokit;
	int     grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;
extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
extern int secluded_for_fbdp;
542 * "secluded_aging_policy" controls the aging of secluded pages:
544 * SECLUDED_AGING_FIFO
545 * When a page eligible for the secluded queue is activated or
546 * deactivated, it is inserted in the secluded queue.
547 * When it get pushed out of the secluded queue, it gets freed.
549 * SECLUDED_AGING_ALONG_ACTIVE
550 * When a page eligible for the secluded queue is activated, it is
551 * inserted in the secluded queue.
552 * When it gets pushed out of the secluded queue, its "referenced" bit
553 * is reset and it is inserted in the inactive queue.
555 * SECLUDED_AGING_AFTER_INACTIVE
556 * A page eligible for the secluded queue first makes its way through the
557 * active and inactive queues.
558 * When it is pushed out of the inactive queue without being re-activated,
559 * it is inserted in the secluded queue instead of being reclaimed.
560 * When it is pushed out of the secluded queue, it is either freed if it
561 * hasn't been re-referenced, or re-activated if it has been re-referenced.
563 * SECLUDED_AGING_BEFORE_ACTIVE
564 * A page eligible for the secluded queue will first make its way through
565 * the secluded queue. When it gets pushed out of the secluded queue (by
566 * new secluded pages), it goes back to the normal aging path, through the
567 * active queue and then the inactive queue.
569 extern int secluded_aging_policy
;
570 #define SECLUDED_AGING_FIFO 0
571 #define SECLUDED_AGING_ALONG_ACTIVE 1
572 #define SECLUDED_AGING_AFTER_INACTIVE 2
573 #define SECLUDED_AGING_BEFORE_ACTIVE 3
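/*
 * Illustrative sketch (not from the original header): how pageout code
 * might dispatch on the policy when evicting a page from the secluded
 * queue, following the descriptions in the comment above:
 *
 *	switch (secluded_aging_policy) {
 *	case SECLUDED_AGING_FIFO:
 *		// free the page immediately
 *		break;
 *	case SECLUDED_AGING_ALONG_ACTIVE:
 *		// clear "referenced" and move to the inactive queue
 *		break;
 *	case SECLUDED_AGING_AFTER_INACTIVE:
 *		// free if not re-referenced, otherwise re-activate
 *		break;
 *	case SECLUDED_AGING_BEFORE_ACTIVE:
 *		// send back through the active/inactive aging path
 *		break;
 *	}
 */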
extern void memory_object_mark_eligible_for_secluded(
	memory_object_control_t control,
	boolean_t               eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */