/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef	XNU_KERNEL_PRIVATE

#ifndef	_VM_VM_PROTOS_H_
#define	_VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>
/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */
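/*
 * Illustrative sketch (not part of the original header): a VM source file
 * following the include-order note above would typically list its includes
 * like this, pulling in this file last.
 *
 *	#include <mach/mach_types.h>
 *	#include <vm/vm_map.h>
 *	#include <vm/vm_protos.h>	// include last, per the note above
 */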
extern kern_return_t device_data_action(
	uintptr_t		device_handle,
	ipc_port_t		device_pager,
	vm_prot_t		protection,
	vm_object_offset_t	offset,
	vm_size_t		size);

extern kern_return_t device_close(
	uintptr_t		device_handle);
extern int	start_def_pager(
	char	*bs_device);

extern int	default_pager_init_flag;
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
	ipc_port_t	sright,
	ipc_space_t	space);
extern task_t port_name_to_task(
	mach_port_name_t name);
#endif /* _IPC_IPC_PORT_H_ */
extern ipc_space_t get_task_ipcspace(
	task_t	t);
/* Some loose-ends VM stuff */
extern vm_map_t		kalloc_map;
extern vm_size_t	msg_ool_size_small;
extern vm_map_t		zone_map;

extern void consider_machine_adjust(void);
extern pmap_t get_map_pmap(vm_map_t);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_vmmap_entries(vm_map_t);

int vm_map_page_mask(vm_map_t);

extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */
extern kern_return_t vm_upl_map
(
	vm_map_t		target_task,
	upl_t			upl,
	vm_address_t		*address
);

extern kern_return_t vm_upl_unmap
(
	vm_map_t		target_task,
	upl_t			upl
);

extern kern_return_t vm_region_object_create
(
	vm_map_t		target_task,
	vm_size_t		size,
	ipc_port_t		*object_handle
);
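/*
 * Illustrative sketch (not part of the original header, and not the
 * longer-term vm_map()/vm_deallocate() replacement mentioned above):
 * mapping a UPL into the kernel map with the legacy interface declared
 * here, then tearing the mapping down again.  The upl is assumed to have
 * been created elsewhere, and error handling is elided.
 *
 *	vm_address_t	addr = 0;
 *	kern_return_t	kr;
 *
 *	kr = vm_upl_map(kernel_map, upl, &addr);
 *	if (kr == KERN_SUCCESS) {
 *		// ... touch the pages mapped at "addr" ...
 *		(void) vm_upl_unmap(kernel_map, upl);
 *	}
 */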
extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);
#if CONFIG_CODE_DECRYPTION
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
	vm_map_t		map,
	vm_map_offset_t		start,
	vm_map_offset_t		end,
	struct pager_crypt_info	*crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(vm_object_t backing_object,
						  struct pager_crypt_info *crypt_info);
#endif	/* CONFIG_CODE_DECRYPTION */
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
extern void vnode_pager_shutdown(void);
extern void *upl_get_internal_page_list(
	upl_t upl);

typedef int pager_return_t;
extern pager_return_t	vnode_pagein(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern pager_return_t	vnode_pageout(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
	struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
	struct vnode *);
extern uint32_t vnode_pager_isinuse(
	struct vnode *);
extern boolean_t vnode_pager_isSSD(
	struct vnode *);
extern void vnode_pager_throttle(
	void);
extern uint32_t vnode_pager_return_throttle_io_limit(
	struct vnode *,
	uint32_t *);
extern kern_return_t vnode_pager_get_pathname(
	struct vnode	*vp,
	char		*pathname,
	vm_size_t	*length_p);
extern kern_return_t vnode_pager_get_filename(
	struct vnode	*vp,
	const char	**filename);
extern kern_return_t vnode_pager_get_cs_blobs(
	struct vnode	*vp,
	void		**blobs);
#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_validation_bitmap routine */
#define CS_BITMAP_SET	1
#define CS_BITMAP_CLEAR	2
#define CS_BITMAP_CHECK	3

#endif /* CHECK_CS_VALIDATION_BITMAP */
extern void vnode_pager_bootstrap(void);
extern kern_return_t vnode_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	size,
	vm_prot_t		desired_access);
extern kern_return_t vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);
extern kern_return_t vnode_pager_get_isinuse(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
	memory_object_t,
	boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_object_pathname(
	memory_object_t	mem_obj,
	char		*pathname,
	vm_size_t	*length_p);
extern kern_return_t vnode_pager_get_object_filename(
	memory_object_t	mem_obj,
	const char	**filename);
extern kern_return_t vnode_pager_get_object_cs_blobs(
	memory_object_t	mem_obj,
	void		**blobs);
#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	int			optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */
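/*
 * Illustrative sketch (not part of the original header), assuming the
 * kernel is built with CHECK_CS_VALIDATION_BITMAP: recording that a page
 * backed by "mem_obj" at "offset" passed code-signature validation, and
 * later asking whether it was already validated, using the CS_BITMAP_*
 * operations defined earlier.  The exact return-value semantics of the
 * check operation are an assumption here.
 *
 *	// after successfully validating the page:
 *	(void) vnode_pager_cs_check_validation_bitmap(mem_obj, offset,
 *						       CS_BITMAP_SET);
 *
 *	// later, query whether that page was already validated:
 *	kern_return_t kr;
 *	kr = vnode_pager_cs_check_validation_bitmap(mem_obj, offset,
 *						    CS_BITMAP_CHECK);
 */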
extern kern_return_t ubc_cs_check_validation_bitmap(
	struct vnode		*vp,
	memory_object_offset_t	offset,
	int			optype);
extern kern_return_t vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	vm_prot_t,
	memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	memory_object_offset_t *,
	int *,
	boolean_t,
	boolean_t,
	int);
extern kern_return_t vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t);
extern void vnode_pager_reference(
	memory_object_t		mem_obj);
extern kern_return_t vnode_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	vm_sync_t		sync_flags);
extern kern_return_t vnode_pager_map(
	memory_object_t		mem_obj,
	vm_prot_t		prot);
extern kern_return_t vnode_pager_last_unmap(
	memory_object_t		mem_obj);
extern void vnode_pager_deallocate(
	memory_object_t);
extern kern_return_t vnode_pager_terminate(
	memory_object_t);
extern void vnode_pager_vrele(
	struct vnode *vp);
extern void vnode_pager_release_from_cache(
	int *);
extern void ubc_unmap(
	struct vnode *vp);
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);
extern void dp_memory_object_reference(memory_object_t);
extern void dp_memory_object_deallocate(memory_object_t);
#ifndef _memory_object_server_
extern kern_return_t dp_memory_object_init(memory_object_t,
					   memory_object_control_t,
					   memory_object_cluster_size_t);
extern kern_return_t dp_memory_object_terminate(memory_object_t);
extern kern_return_t dp_memory_object_data_request(memory_object_t,
						    memory_object_offset_t,
						    memory_object_cluster_size_t,
						    vm_prot_t,
						    memory_object_fault_info_t);
extern kern_return_t dp_memory_object_data_return(memory_object_t,
						   memory_object_offset_t,
						   memory_object_cluster_size_t,
						   memory_object_offset_t *,
						   int *,
						   boolean_t,
						   boolean_t,
						   int);
extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
						       memory_object_offset_t,
						       memory_object_cluster_size_t);
extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
						   memory_object_offset_t,
						   memory_object_size_t,
						   vm_prot_t);
extern kern_return_t dp_memory_object_synchronize(memory_object_t,
						   memory_object_offset_t,
						   memory_object_size_t,
						   vm_sync_t);
extern kern_return_t dp_memory_object_map(memory_object_t,
					   vm_prot_t);
extern kern_return_t dp_memory_object_last_unmap(memory_object_t);
#endif /* _memory_object_server_ */
#ifndef _memory_object_default_server_
extern kern_return_t default_pager_memory_object_create(
	memory_object_default_t,
	vm_size_t,
	memory_object_t *);
#endif /* _memory_object_default_server_ */

#if CONFIG_FREEZE
extern unsigned int default_pager_swap_pages_free(void);
struct default_freezer_handle;

__private_extern__ void default_freezer_init(void);

__private_extern__ struct default_freezer_handle *default_freezer_handle_allocate(void);

__private_extern__ kern_return_t
default_freezer_handle_init(
	struct default_freezer_handle *df_handle);

__private_extern__ void
default_freezer_handle_deallocate(
	struct default_freezer_handle *df_handle);

__private_extern__ void
default_freezer_pageout(
	struct default_freezer_handle *df_handle);

__private_extern__ kern_return_t
default_freezer_pack(
	unsigned int	*purgeable_count,
	unsigned int	*wired_count,
	unsigned int	*clean_count,
	unsigned int	*dirty_count,
	unsigned int	dirty_budget,
	boolean_t	*shared,
	vm_object_t	src_object,
	struct default_freezer_handle *df_handle);

__private_extern__ kern_return_t
default_freezer_unpack(
	struct default_freezer_handle *df_handle);

__private_extern__ void
default_freezer_pack_page(
	vm_page_t	p,
	struct default_freezer_handle *df_handle);

#endif /* CONFIG_FREEZE */
extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
				       memory_object_control_t,
				       memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
					       memory_object_offset_t,
					       memory_object_cluster_size_t,
					       vm_prot_t,
					       memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
					      memory_object_offset_t,
					      memory_object_cluster_size_t,
					      memory_object_offset_t *,
					      int *,
					      boolean_t,
					      boolean_t,
					      int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
						  memory_object_offset_t,
						  memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
					      memory_object_offset_t,
					      memory_object_size_t,
					      vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
					      memory_object_offset_t,
					      memory_object_size_t,
					      vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size);
extern memory_object_t device_pager_setup(
	memory_object_t,
	uintptr_t,
	vm_size_t,
	int);
extern void device_pager_bootstrap(void);
extern kern_return_t pager_map_to_phys_contiguous(
	memory_object_control_t	object,
	memory_object_offset_t	offset,
	addr64_t		base_vaddr,
	vm_size_t		size);
extern kern_return_t memory_object_create_named(
	memory_object_t		pager,
	memory_object_offset_t	size,
	memory_object_control_t	*control);

struct macx_triggers_args;
extern int mach_macx_triggers(
	struct macx_triggers_args *args);
extern int macx_swapinfo(
	memory_object_size_t	*total_p,
	memory_object_size_t	*avail_p,
	vm_size_t		*pagesize_p,
	boolean_t		*encrypted_p);
extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(vm_map_t, vm_map_offset_t, vm_map_offset_t);

extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr);
extern boolean_t cs_validate_page(void *blobs,
				  memory_object_t pager,
				  memory_object_offset_t offset,
				  const void *data,
				  boolean_t *tainted);
extern kern_return_t mach_memory_entry_purgable_control(
	ipc_port_t	entry_port,
	vm_purgable_t	control,
	int		*state);

extern kern_return_t mach_memory_entry_get_page_counts(
	ipc_port_t	entry_port,
	unsigned int	*resident_page_count,
	unsigned int	*dirty_page_count);
extern kern_return_t mach_memory_entry_page_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags);
extern kern_return_t mach_memory_entry_range_op(
	ipc_port_t		entry_port,
	vm_object_offset_t	offset_beg,
	vm_object_offset_t	offset_end,
	int			ops,
	int			*range);
extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
	struct vm_named_entry	**user_entry_p,
	ipc_port_t		*user_handle_p);
extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern void no_paging_space_action(void);
#define VM_TOGGLE_CLEAR		0
#define VM_TOGGLE_SET		1
#define VM_TOGGLE_GETVALUE	999
int vm_toggle_entry_reuse(int, int*);
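/*
 * Illustrative sketch (not part of the original header): querying the
 * current entry-reuse setting with VM_TOGGLE_GETVALUE.  That the int *
 * out-parameter receives the current value is an assumption here.
 *
 *	int current = 0;
 *	(void) vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &current);
 */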
#define SWAP_WRITE	0x00000000	/* Write buffer (pseudo flag). */
#define SWAP_READ	0x00000001	/* Read buffer. */
#define SWAP_ASYNC	0x00000002	/* Start I/O, do not wait. */
extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
	vm_size_t,
	memory_object_t *);
/* the object purger. purges the next eligible object from memory. */
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
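/*
 * Illustrative sketch (not part of the original header): draining all
 * currently eligible purgeable objects by calling the purger until it
 * reports FALSE, per the comment above.  The force_purge_below_group
 * value of 0 is only a placeholder for this example.
 *
 *	while (vm_purgeable_object_purge_one_unlocked(0) == TRUE)
 *		continue;	// keep going while objects are being purged
 */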
struct trim_list {
	uint64_t	tl_offset;
	uint64_t	tl_length;
	struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl);
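/*
 * Illustrative sketch (not part of the original header): trimming two
 * extents of a vnode by chaining trim_list entries and handing the head of
 * the list to vnode_trim_list().  The field layout follows the struct
 * reconstruction above; the offsets and lengths are placeholder values.
 *
 *	struct trim_list second = { 1048576, 65536, NULL };
 *	struct trim_list first  = { 0,       65536, &second };
 *
 *	(void) vnode_trim_list(vp, &first);
 */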
#endif	/* _VM_VM_PROTOS_H_ */

#endif	/* XNU_KERNEL_PRIVATE */