/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>

/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files, since it doesn't itself include
 * the type definitions it relies on and it works around some conflicts
 * with other header files.
 */
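/*
 * A minimal sketch of the intended include order in a consumer source file
 * (the other headers below are just examples of "all other header files"):
 *
 *        #include <mach/mach_types.h>
 *        #include <kern/kern_types.h>
 *        #include <vm/vm_map.h>
 *        #include <vm/vm_protos.h>        <- include last, as noted above
 */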
47
48 /*
49 * iokit
50 */
51 extern kern_return_t device_data_action(
52 uintptr_t device_handle,
53 ipc_port_t device_pager,
54 vm_prot_t protection,
55 vm_object_offset_t offset,
56 vm_size_t size);
57
58 extern kern_return_t device_close(
59 uintptr_t device_handle);
60
61 /*
62 * default_pager
63 */
64 extern int start_def_pager(
65 char *bs_device);
66 extern int default_pager_init_flag;
67
68 /*
69 * osfmk
70 */
71 #ifndef _IPC_IPC_PORT_H_
72 extern mach_port_name_t ipc_port_copyout_send(
73 ipc_port_t sright,
74 ipc_space_t space);
75 extern task_t port_name_to_task(
76 mach_port_name_t name);
77 #endif /* _IPC_IPC_PORT_H_ */
78
79 extern ipc_space_t get_task_ipcspace(
80 task_t t);
81
/* Some loose-ends VM stuff */

extern vm_map_t kalloc_map;
extern vm_size_t msg_ool_size_small;
extern vm_map_t zone_map;

extern void consider_machine_adjust(void);
extern pmap_t get_map_pmap(vm_map_t);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_vmmap_entries(vm_map_t);

int vm_map_page_mask(vm_map_t);

extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
        vm_map_t target_task,
        upl_t upl,
        vm_address_t *address
);

extern kern_return_t vm_upl_unmap
(
        vm_map_t target_task,
        upl_t upl
);

extern kern_return_t vm_region_object_create
(
        vm_map_t target_task,
        vm_size_t size,
        ipc_port_t *object_handle
);
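/*
 * Usage sketch for the UPL mapping calls above (illustrative only; "map"
 * and "my_upl" are assumed to have been obtained elsewhere, e.g. from a
 * prior UPL creation):
 *
 *        vm_address_t addr = 0;
 *        kern_return_t kr;
 *
 *        kr = vm_upl_map(map, my_upl, &addr);
 *        if (kr == KERN_SUCCESS) {
 *                ... access the UPL's pages through "addr" ...
 *                (void) vm_upl_unmap(map, my_upl);
 *        }
 */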

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
        vm_map_t map,
        vm_map_offset_t start,
        vm_map_offset_t end,
        struct pager_crypt_info *crypt_info);
extern void apple_protect_pager_bootstrap(void);
extern memory_object_t apple_protect_pager_setup(vm_object_t backing_object,
        struct pager_crypt_info *crypt_info);
#endif /* CONFIG_CODE_DECRYPTION */

struct vnode;
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);


/*
 * bsd
 */
struct vnode;
extern void vnode_pager_shutdown(void);
extern void *upl_get_internal_page_list(
        upl_t upl);

extern void vnode_setswapmount(struct vnode *);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
        struct vnode *, upl_t,
        upl_offset_t, vm_object_offset_t,
        upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
        struct vnode *, upl_t,
        upl_offset_t, vm_object_offset_t,
        upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
        struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
        struct vnode *);
extern uint32_t vnode_pager_isinuse(
        struct vnode *);
extern boolean_t vnode_pager_isSSD(
        struct vnode *);
extern void vnode_pager_throttle(
        void);
extern uint32_t vnode_pager_return_throttle_io_limit(
        struct vnode *,
        uint32_t *);
extern kern_return_t vnode_pager_get_name(
        struct vnode *vp,
        char *pathname,
        vm_size_t pathname_len,
        char *filename,
        vm_size_t filename_len,
        boolean_t *truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
        struct vnode *vp,
        struct timespec *mtime,
        struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
        struct vnode *vp,
        void **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
        struct vnode *devvp,
        uint64_t blkno,
        uint32_t len,
        int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/*
 * optype values used by the vnode_pager_cs_check_validation_bitmap and
 * ubc_cs_check_validation_bitmap routines.
 */
#define CS_BITMAP_SET 1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3

#endif /* CHECK_CS_VALIDATION_BITMAP */

extern void vnode_pager_bootstrap(void);
extern kern_return_t
vnode_pager_data_unlock(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_size_t size,
        vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
        memory_object_t,
        memory_object_control_t,
        memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
        memory_object_t,
        memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
        memory_object_t,
        uintptr_t *);
#endif

extern kern_return_t vnode_pager_get_isinuse(
        memory_object_t,
        uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
        memory_object_t,
        boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
        memory_object_t,
        uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
        memory_object_t mem_obj,
        char *pathname,
        vm_size_t pathname_len,
        char *filename,
        vm_size_t filename_len,
        boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
        memory_object_t mem_obj,
        struct timespec *mtime,
        struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_object_cs_blobs(
        memory_object_t mem_obj,
        void **blobs);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t ubc_cs_check_validation_bitmap(
        struct vnode *vp,
        memory_object_offset_t offset,
        int optype);
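/*
 * Sketch of a bitmap query (illustrative; assumes CHECK_CS_VALIDATION_BITMAP
 * is configured so the CS_BITMAP_* optypes above are available, and that
 * "vp" and "page_offset" come from the caller):
 *
 *        if (ubc_cs_check_validation_bitmap(vp, page_offset,
 *                CS_BITMAP_CHECK) == KERN_SUCCESS) {
 *                ... the page was previously marked as validated ...
 *        }
 */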

extern kern_return_t vnode_pager_data_request(
        memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        vm_prot_t,
        memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
        memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        memory_object_offset_t *,
        int *,
        boolean_t,
        boolean_t,
        int);
extern kern_return_t vnode_pager_data_initialize(
        memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t);
extern void vnode_pager_reference(
        memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_size_t length,
        vm_sync_t sync_flags);
extern kern_return_t vnode_pager_map(
        memory_object_t mem_obj,
        vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
        memory_object_t mem_obj);
extern void vnode_pager_deallocate(
        memory_object_t);
extern kern_return_t vnode_pager_terminate(
        memory_object_t);
extern void vnode_pager_vrele(
        struct vnode *vp);
extern void vnode_pager_release_from_cache(
        int *);
extern int ubc_map(
        struct vnode *vp,
        int flags);
extern void ubc_unmap(
        struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

extern void dp_memory_object_reference(memory_object_t);
extern void dp_memory_object_deallocate(memory_object_t);
#ifndef _memory_object_server_
extern kern_return_t dp_memory_object_init(memory_object_t,
        memory_object_control_t,
        memory_object_cluster_size_t);
extern kern_return_t dp_memory_object_terminate(memory_object_t);
extern kern_return_t dp_memory_object_data_request(memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        vm_prot_t,
        memory_object_fault_info_t);
extern kern_return_t dp_memory_object_data_return(memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        memory_object_offset_t *,
        int *,
        boolean_t,
        boolean_t,
        int);
extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t);
extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
        memory_object_offset_t,
        memory_object_size_t,
        vm_prot_t);
extern kern_return_t dp_memory_object_synchronize(memory_object_t,
        memory_object_offset_t,
        memory_object_size_t,
        vm_sync_t);
extern kern_return_t dp_memory_object_map(memory_object_t,
        vm_prot_t);
extern kern_return_t dp_memory_object_last_unmap(memory_object_t);
#endif /* _memory_object_server_ */
#ifndef _memory_object_default_server_
extern kern_return_t default_pager_memory_object_create(
        memory_object_default_t,
        vm_size_t,
        memory_object_t *);
#endif /* _memory_object_default_server_ */

#if CONFIG_FREEZE
extern unsigned int default_pager_swap_pages_free(void);
struct default_freezer_handle;
struct vm_page;
__private_extern__ void default_freezer_init(void);
__private_extern__ struct default_freezer_handle *default_freezer_handle_allocate(void);
__private_extern__ kern_return_t
default_freezer_handle_init(
        struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_handle_deallocate(
        struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_pageout(
        struct default_freezer_handle *df_handle);
__private_extern__ kern_return_t
default_freezer_pack(
        unsigned int *purgeable_count,
        unsigned int *wired_count,
        unsigned int *clean_count,
        unsigned int *dirty_count,
        unsigned int dirty_budget,
        boolean_t *shared,
        vm_object_t src_object,
        struct default_freezer_handle *df_handle);
__private_extern__ kern_return_t
default_freezer_unpack(
        struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_pack_page(
        struct vm_page *p,
        struct default_freezer_handle *df_handle);

#endif /* CONFIG_FREEZE */

extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
        memory_object_control_t,
        memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        vm_prot_t,
        memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t,
        memory_object_offset_t *,
        int *,
        boolean_t,
        boolean_t,
        int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
        memory_object_offset_t,
        memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
        memory_object_offset_t,
        memory_object_size_t,
        vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
        memory_object_offset_t,
        memory_object_size_t,
        vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
        memory_object_t device,
        memory_object_offset_t offset,
        ppnum_t page_num,
        vm_size_t size);
extern memory_object_t device_pager_setup(
        memory_object_t,
        uintptr_t,
        vm_size_t,
        int);
extern void device_pager_bootstrap(void);

extern kern_return_t pager_map_to_phys_contiguous(
        memory_object_control_t object,
        memory_object_offset_t offset,
        addr64_t base_vaddr,
        vm_size_t size);

extern kern_return_t memory_object_create_named(
        memory_object_t pager,
        memory_object_offset_t size,
        memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
        struct macx_triggers_args *args);

extern int macx_swapinfo(
        memory_object_size_t *total_p,
        memory_object_size_t *avail_p,
        vm_size_t *pagesize_p,
        boolean_t *encrypted_p);
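/*
 * Sketch of a macx_swapinfo() query (illustrative; assumes an errno-style
 * return where 0 means success):
 *
 *        memory_object_size_t total, avail;
 *        vm_size_t pagesize;
 *        boolean_t encrypted;
 *
 *        if (macx_swapinfo(&total, &avail, &pagesize, &encrypted) == 0) {
 *                ... report swap usage from "total", "avail" and "pagesize" ...
 *        }
 */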

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(vm_map_t, vm_map_offset_t, vm_map_offset_t);

struct proc;
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr);

#define CS_VALIDATE_TAINTED 0x00000001
#define CS_VALIDATE_NX 0x00000002
extern boolean_t cs_validate_page(void *blobs,
        memory_object_t pager,
        memory_object_offset_t offset,
        const void *data,
        unsigned *result);
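/*
 * Sketch of interpreting cs_validate_page() results (illustrative; "blobs",
 * "pager", "offset" and "kaddr" are placeholders for values obtained
 * elsewhere):
 *
 *        unsigned tainted = 0;
 *        boolean_t valid;
 *
 *        valid = cs_validate_page(blobs, pager, offset,
 *                (const void *)kaddr, &tainted);
 *        if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
 *                ... treat the page as a code-signing violation ...
 *        }
 */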

extern kern_return_t mach_memory_entry_purgable_control(
        ipc_port_t entry_port,
        vm_purgable_t control,
        int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
        ipc_port_t entry_port,
        unsigned int *resident_page_count,
        unsigned int *dirty_page_count);

extern kern_return_t mach_memory_entry_page_op(
        ipc_port_t entry_port,
        vm_object_offset_t offset,
        int ops,
        ppnum_t *phys_entry,
        int *flags);

extern kern_return_t mach_memory_entry_range_op(
        ipc_port_t entry_port,
        vm_object_offset_t offset_beg,
        vm_object_offset_t offset_end,
        int ops,
        int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
        struct vm_named_entry **user_entry_p,
        ipc_port_t *user_handle_p);

extern void vm_paging_map_init(void);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

#define VM_TOGGLE_CLEAR 0
#define VM_TOGGLE_SET 1
#define VM_TOGGLE_GETVALUE 999
int vm_toggle_entry_reuse(int, int *);
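/*
 * Sketch of the toggle interface above (illustrative; assumes the second
 * argument returns the previous value for VM_TOGGLE_GETVALUE):
 *
 *        int prev = 0;
 *
 *        (void) vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &prev);
 *        if (prev == VM_TOGGLE_CLEAR)
 *                (void) vm_toggle_entry_reuse(VM_TOGGLE_SET, &prev);
 */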

#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define SWAP_READ 0x00000001 /* Read buffer. */
#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
        memory_object_size_t,
        memory_object_t *);

#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc *, boolean_t);
#endif /* CONFIG_JETSAM */

/*
 * The object purger: purges the next eligible object from memory.
 * Returns TRUE if an object was purged, otherwise FALSE.
 */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
void vm_purgeable_disown(task_t task);
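/*
 * Usage sketch (illustrative; the force_purge_below_group value of 0 is a
 * placeholder): purge eligible objects until nothing further can be purged.
 *
 *        while (vm_purgeable_object_purge_one_unlocked(0) == TRUE)
 *                continue;
 */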

struct trim_list {
        uint64_t tl_offset;
        uint64_t tl_length;
        struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
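/*
 * Sketch of a single-extent trim via vnode_trim_list() (illustrative; "vp",
 * "offset", "length" and the FALSE route_only argument are placeholders):
 *
 *        struct trim_list tl = {
 *                .tl_offset = offset,
 *                .tl_length = length,
 *                .tl_next   = NULL,
 *        };
 *
 *        (void) vnode_trim_list(vp, &tl, FALSE);
 */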

#define MAX_SWAPFILENAME_LEN 1024
#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

struct vm_counters {
        unsigned int do_collapse_compressor;
        unsigned int do_collapse_compressor_pages;
        unsigned int do_collapse_terminate;
        unsigned int do_collapse_terminate_failure;
        unsigned int should_cow_but_wired;
        unsigned int create_upl_extra_cow;
        unsigned int create_upl_extra_cow_pages;
        unsigned int create_upl_lookup_failure_write;
        unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */