/* osfmk/vm/vm_protos.h — apple/xnu, release xnu-3247.10.11 */
/*
 * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29 #ifdef XNU_KERNEL_PRIVATE
30
31 #ifndef _VM_VM_PROTOS_H_
32 #define _VM_VM_PROTOS_H_
33
34 #include <mach/mach_types.h>
35 #include <kern/kern_types.h>
36
37 /*
38 * This file contains various type definitions and routine prototypes
39 * that are needed to avoid compilation warnings for VM code (in osfmk,
40 * default_pager and bsd).
41 * Most of these should eventually go into more appropriate header files.
42 *
43 * Include it after all other header files since it doesn't include any
44 * type definitions and it works around some conflicts with other header
45 * files.
46 */
47
48 /*
49 * iokit
50 */
51 extern kern_return_t device_data_action(
52 uintptr_t device_handle,
53 ipc_port_t device_pager,
54 vm_prot_t protection,
55 vm_object_offset_t offset,
56 vm_size_t size);
57
58 extern kern_return_t device_close(
59 uintptr_t device_handle);
60
61 /*
62 * default_pager
63 */
64 extern int start_def_pager(
65 char *bs_device);
66 extern int default_pager_init_flag;
67
68 /*
69 * osfmk
70 */
71 #ifndef _IPC_IPC_PORT_H_
72 extern mach_port_name_t ipc_port_copyout_send(
73 ipc_port_t sright,
74 ipc_space_t space);
75 extern task_t port_name_to_task(
76 mach_port_name_t name);
77 #endif /* _IPC_IPC_PORT_H_ */
78
79 extern ipc_space_t get_task_ipcspace(
80 task_t t);
81
82 #if CONFIG_JETSAM
83 extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */
84 #endif // CONFIG_JETSAM
85
86 /* Some loose-ends VM stuff */
87
88 extern vm_map_t kalloc_map;
89 extern vm_size_t msg_ool_size_small;
90 extern vm_map_t zone_map;
91
92 extern void consider_machine_adjust(void);
93 extern vm_map_offset_t get_map_min(vm_map_t);
94 extern vm_map_offset_t get_map_max(vm_map_t);
95 extern vm_map_size_t get_vmmap_size(vm_map_t);
96 extern int get_vmmap_entries(vm_map_t);
97
98 extern vm_map_offset_t vm_map_page_mask(vm_map_t);
99
100 extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
101
102 /*
103 * VM routines that used to be published to
104 * user space, and are now restricted to the kernel.
105 *
106 * They should eventually go away entirely -
107 * to be replaced with standard vm_map() and
108 * vm_deallocate() calls.
109 */
110
111 extern kern_return_t vm_upl_map
112 (
113 vm_map_t target_task,
114 upl_t upl,
115 vm_address_t *address
116 );
117
118 extern kern_return_t vm_upl_unmap
119 (
120 vm_map_t target_task,
121 upl_t upl
122 );
123
124 extern kern_return_t vm_region_object_create
125 (
126 vm_map_t target_task,
127 vm_size_t size,
128 ipc_port_t *object_handle
129 );
130
131 extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
132 extern mach_vm_offset_t mach_get_vm_end(vm_map_t);
133
134 #if CONFIG_CODE_DECRYPTION
135 #define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
136 struct pager_crypt_info;
137 extern kern_return_t vm_map_apple_protected(
138 vm_map_t map,
139 vm_map_offset_t start,
140 vm_map_offset_t end,
141 vm_object_offset_t crypto_backing_offset,
142 struct pager_crypt_info *crypt_info);
143 extern void apple_protect_pager_bootstrap(void);
144 extern memory_object_t apple_protect_pager_setup(
145 vm_object_t backing_object,
146 vm_object_offset_t backing_offset,
147 vm_object_offset_t crypto_backing_offset,
148 struct pager_crypt_info *crypt_info,
149 vm_object_offset_t crypto_start,
150 vm_object_offset_t crypto_end);
151 #endif /* CONFIG_CODE_DECRYPTION */
152
153 struct vnode;
154 extern void swapfile_pager_bootstrap(void);
155 extern memory_object_t swapfile_pager_setup(struct vnode *vp);
156 extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
157
158 #if __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS))
159 #define SIXTEENK_PAGE_SIZE 0x4000
160 #define SIXTEENK_PAGE_MASK 0x3FFF
161 #define SIXTEENK_PAGE_SHIFT 14
162 #endif /* __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) */
163
164
165 /*
166 * bsd
167 */
168 struct vnode;
169 extern void vnode_pager_shutdown(void);
170 extern void *upl_get_internal_page_list(
171 upl_t upl);
172
173 extern void vnode_setswapmount(struct vnode *);
174 extern int64_t vnode_getswappin_avail(struct vnode *);
175
176 typedef int pager_return_t;
177 extern pager_return_t vnode_pagein(
178 struct vnode *, upl_t,
179 upl_offset_t, vm_object_offset_t,
180 upl_size_t, int, int *);
181 extern pager_return_t vnode_pageout(
182 struct vnode *, upl_t,
183 upl_offset_t, vm_object_offset_t,
184 upl_size_t, int, int *);
185 extern uint32_t vnode_trim (struct vnode *, int64_t offset, unsigned long len);
186 extern memory_object_t vnode_pager_setup(
187 struct vnode *, memory_object_t);
188 extern vm_object_offset_t vnode_pager_get_filesize(
189 struct vnode *);
190 extern uint32_t vnode_pager_isinuse(
191 struct vnode *);
192 extern boolean_t vnode_pager_isSSD(
193 struct vnode *);
194 extern void vnode_pager_throttle(
195 void);
196 extern uint32_t vnode_pager_return_throttle_io_limit(
197 struct vnode *,
198 uint32_t *);
199 extern kern_return_t vnode_pager_get_name(
200 struct vnode *vp,
201 char *pathname,
202 vm_size_t pathname_len,
203 char *filename,
204 vm_size_t filename_len,
205 boolean_t *truncated_path_p);
206 struct timespec;
207 extern kern_return_t vnode_pager_get_mtime(
208 struct vnode *vp,
209 struct timespec *mtime,
210 struct timespec *cs_mtime);
211 extern kern_return_t vnode_pager_get_cs_blobs(
212 struct vnode *vp,
213 void **blobs);
214
215 #if CONFIG_IOSCHED
216 void vnode_pager_issue_reprioritize_io(
217 struct vnode *devvp,
218 uint64_t blkno,
219 uint32_t len,
220 int priority);
221 #endif
222
223 #if CHECK_CS_VALIDATION_BITMAP
224 /* used by the vnode_pager_cs_validation_bitmap routine*/
225 #define CS_BITMAP_SET 1
226 #define CS_BITMAP_CLEAR 2
227 #define CS_BITMAP_CHECK 3
228
229 #endif /* CHECK_CS_VALIDATION_BITMAP */
230
231 extern void vnode_pager_bootstrap(void);
232 extern kern_return_t
233 vnode_pager_data_unlock(
234 memory_object_t mem_obj,
235 memory_object_offset_t offset,
236 memory_object_size_t size,
237 vm_prot_t desired_access);
238 extern kern_return_t vnode_pager_init(
239 memory_object_t,
240 memory_object_control_t,
241 memory_object_cluster_size_t);
242 extern kern_return_t vnode_pager_get_object_size(
243 memory_object_t,
244 memory_object_offset_t *);
245
246 #if CONFIG_IOSCHED
247 extern kern_return_t vnode_pager_get_object_devvp(
248 memory_object_t,
249 uintptr_t *);
250 #endif
251
252 extern kern_return_t vnode_pager_get_isinuse(
253 memory_object_t,
254 uint32_t *);
255 extern kern_return_t vnode_pager_get_isSSD(
256 memory_object_t,
257 boolean_t *);
258 extern kern_return_t vnode_pager_get_throttle_io_limit(
259 memory_object_t,
260 uint32_t *);
261 extern kern_return_t vnode_pager_get_object_name(
262 memory_object_t mem_obj,
263 char *pathname,
264 vm_size_t pathname_len,
265 char *filename,
266 vm_size_t filename_len,
267 boolean_t *truncated_path_p);
268 extern kern_return_t vnode_pager_get_object_mtime(
269 memory_object_t mem_obj,
270 struct timespec *mtime,
271 struct timespec *cs_mtime);
272 extern kern_return_t vnode_pager_get_object_cs_blobs(
273 memory_object_t mem_obj,
274 void **blobs);
275
276 #if CHECK_CS_VALIDATION_BITMAP
277 extern kern_return_t vnode_pager_cs_check_validation_bitmap(
278 memory_object_t mem_obj,
279 memory_object_offset_t offset,
280 int optype);
281 #endif /*CHECK_CS_VALIDATION_BITMAP*/
282
283 extern kern_return_t ubc_cs_check_validation_bitmap (
284 struct vnode *vp,
285 memory_object_offset_t offset,
286 int optype);
287
288 extern kern_return_t vnode_pager_data_request(
289 memory_object_t,
290 memory_object_offset_t,
291 memory_object_cluster_size_t,
292 vm_prot_t,
293 memory_object_fault_info_t);
294 extern kern_return_t vnode_pager_data_return(
295 memory_object_t,
296 memory_object_offset_t,
297 memory_object_cluster_size_t,
298 memory_object_offset_t *,
299 int *,
300 boolean_t,
301 boolean_t,
302 int);
303 extern kern_return_t vnode_pager_data_initialize(
304 memory_object_t,
305 memory_object_offset_t,
306 memory_object_cluster_size_t);
307 extern void vnode_pager_reference(
308 memory_object_t mem_obj);
309 extern kern_return_t vnode_pager_synchronize(
310 memory_object_t mem_obj,
311 memory_object_offset_t offset,
312 memory_object_size_t length,
313 vm_sync_t sync_flags);
314 extern kern_return_t vnode_pager_map(
315 memory_object_t mem_obj,
316 vm_prot_t prot);
317 extern kern_return_t vnode_pager_last_unmap(
318 memory_object_t mem_obj);
319 extern void vnode_pager_deallocate(
320 memory_object_t);
321 extern kern_return_t vnode_pager_terminate(
322 memory_object_t);
323 extern void vnode_pager_vrele(
324 struct vnode *vp);
325 extern void vnode_pager_release_from_cache(
326 int *);
327 extern int ubc_map(
328 struct vnode *vp,
329 int flags);
330 extern void ubc_unmap(
331 struct vnode *vp);
332
333 struct vm_map_entry;
334 extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);
335
336 extern void dp_memory_object_reference(memory_object_t);
337 extern void dp_memory_object_deallocate(memory_object_t);
338 #ifndef _memory_object_server_
339 extern kern_return_t dp_memory_object_init(memory_object_t,
340 memory_object_control_t,
341 memory_object_cluster_size_t);
342 extern kern_return_t dp_memory_object_terminate(memory_object_t);
343 extern kern_return_t dp_memory_object_data_request(memory_object_t,
344 memory_object_offset_t,
345 memory_object_cluster_size_t,
346 vm_prot_t,
347 memory_object_fault_info_t);
348 extern kern_return_t dp_memory_object_data_return(memory_object_t,
349 memory_object_offset_t,
350 memory_object_cluster_size_t,
351 memory_object_offset_t *,
352 int *,
353 boolean_t,
354 boolean_t,
355 int);
356 extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
357 memory_object_offset_t,
358 memory_object_cluster_size_t);
359 extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
360 memory_object_offset_t,
361 memory_object_size_t,
362 vm_prot_t);
363 extern kern_return_t dp_memory_object_synchronize(memory_object_t,
364 memory_object_offset_t,
365 memory_object_size_t,
366 vm_sync_t);
367 extern kern_return_t dp_memory_object_map(memory_object_t,
368 vm_prot_t);
369 extern kern_return_t dp_memory_object_last_unmap(memory_object_t);
370 #endif /* _memory_object_server_ */
371 #ifndef _memory_object_default_server_
372 extern kern_return_t default_pager_memory_object_create(
373 memory_object_default_t,
374 vm_size_t,
375 memory_object_t *);
376 #endif /* _memory_object_default_server_ */
377
#if CONFIG_FREEZE
extern unsigned int default_pager_swap_pages_free(void);
struct default_freezer_handle;
struct vm_page;
__private_extern__ void	default_freezer_init(void);
__private_extern__ struct default_freezer_handle* default_freezer_handle_allocate(void);
__private_extern__ kern_return_t
default_freezer_handle_init(
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_handle_deallocate(
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_pageout(
	struct default_freezer_handle *df_handle);
/* Freeze src_object's pages; the *_count out-params report what was found. */
__private_extern__ kern_return_t
default_freezer_pack(
	unsigned int	*purgeable_count,
	unsigned int	*wired_count,
	unsigned int	*clean_count,
	unsigned int	*dirty_count,
	unsigned int	dirty_budget,
	boolean_t	*shared,
	vm_object_t	src_object,
	struct default_freezer_handle *df_handle);
__private_extern__ kern_return_t
default_freezer_unpack(
	struct default_freezer_handle *df_handle);
__private_extern__ void
default_freezer_pack_page(
	struct vm_page	*p,
	struct default_freezer_handle *df_handle);

#endif /* CONFIG_FREEZE */
412
413 extern void device_pager_reference(memory_object_t);
414 extern void device_pager_deallocate(memory_object_t);
415 extern kern_return_t device_pager_init(memory_object_t,
416 memory_object_control_t,
417 memory_object_cluster_size_t);
418 extern kern_return_t device_pager_terminate(memory_object_t);
419 extern kern_return_t device_pager_data_request(memory_object_t,
420 memory_object_offset_t,
421 memory_object_cluster_size_t,
422 vm_prot_t,
423 memory_object_fault_info_t);
424 extern kern_return_t device_pager_data_return(memory_object_t,
425 memory_object_offset_t,
426 memory_object_cluster_size_t,
427 memory_object_offset_t *,
428 int *,
429 boolean_t,
430 boolean_t,
431 int);
432 extern kern_return_t device_pager_data_initialize(memory_object_t,
433 memory_object_offset_t,
434 memory_object_cluster_size_t);
435 extern kern_return_t device_pager_data_unlock(memory_object_t,
436 memory_object_offset_t,
437 memory_object_size_t,
438 vm_prot_t);
439 extern kern_return_t device_pager_synchronize(memory_object_t,
440 memory_object_offset_t,
441 memory_object_size_t,
442 vm_sync_t);
443 extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
444 extern kern_return_t device_pager_last_unmap(memory_object_t);
445 extern kern_return_t device_pager_populate_object(
446 memory_object_t device,
447 memory_object_offset_t offset,
448 ppnum_t page_num,
449 vm_size_t size);
450 extern memory_object_t device_pager_setup(
451 memory_object_t,
452 uintptr_t,
453 vm_size_t,
454 int);
455 extern void device_pager_bootstrap(void);
456
457 extern kern_return_t pager_map_to_phys_contiguous(
458 memory_object_control_t object,
459 memory_object_offset_t offset,
460 addr64_t base_vaddr,
461 vm_size_t size);
462
463 extern kern_return_t memory_object_create_named(
464 memory_object_t pager,
465 memory_object_offset_t size,
466 memory_object_control_t *control);
467
468 struct macx_triggers_args;
469 extern int mach_macx_triggers(
470 struct macx_triggers_args *args);
471
472 extern int macx_swapinfo(
473 memory_object_size_t *total_p,
474 memory_object_size_t *avail_p,
475 vm_size_t *pagesize_p,
476 boolean_t *encrypted_p);
477
478 extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
479 extern void log_unnest_badness(vm_map_t, vm_map_offset_t, vm_map_offset_t);
480
481 struct proc;
482 extern int cs_allow_invalid(struct proc *p);
483 extern int cs_invalid_page(addr64_t vaddr);
484
485 #define CS_VALIDATE_TAINTED 0x00000001
486 #define CS_VALIDATE_NX 0x00000002
487 extern boolean_t cs_validate_page(void *blobs,
488 memory_object_t pager,
489 memory_object_offset_t offset,
490 const void *data,
491 unsigned *result);
492
493 extern kern_return_t mach_memory_entry_purgable_control(
494 ipc_port_t entry_port,
495 vm_purgable_t control,
496 int *state);
497
498 extern kern_return_t mach_memory_entry_get_page_counts(
499 ipc_port_t entry_port,
500 unsigned int *resident_page_count,
501 unsigned int *dirty_page_count);
502
503 extern kern_return_t mach_memory_entry_page_op(
504 ipc_port_t entry_port,
505 vm_object_offset_t offset,
506 int ops,
507 ppnum_t *phys_entry,
508 int *flags);
509
510 extern kern_return_t mach_memory_entry_range_op(
511 ipc_port_t entry_port,
512 vm_object_offset_t offset_beg,
513 vm_object_offset_t offset_end,
514 int ops,
515 int *range);
516
517 extern void mach_memory_entry_port_release(ipc_port_t port);
518 extern void mach_destroy_memory_entry(ipc_port_t port);
519 extern kern_return_t mach_memory_entry_allocate(
520 struct vm_named_entry **user_entry_p,
521 ipc_port_t *user_handle_p);
522
523 extern void vm_paging_map_init(void);
524
525 extern int macx_backing_store_compaction(int flags);
526 extern unsigned int mach_vm_ctl_page_free_wanted(void);
527
528 extern int no_paging_space_action(void);
529
530 #define VM_TOGGLE_CLEAR 0
531 #define VM_TOGGLE_SET 1
532 #define VM_TOGGLE_GETVALUE 999
533 int vm_toggle_entry_reuse(int, int*);
534
535 #define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
536 #define SWAP_READ 0x00000001 /* Read buffer. */
537 #define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */
538
539 extern void vm_compressor_pager_init(void);
540 extern kern_return_t compressor_memory_object_create(
541 memory_object_size_t,
542 memory_object_t *);
543
544 extern boolean_t vm_compressor_low_on_space(void);
545 extern int vm_swap_low_on_space(void);
546
547 #if CONFIG_JETSAM
548 extern int proc_get_memstat_priority(struct proc*, boolean_t);
549 #endif /* CONFIG_JETSAM */
550
551 /* the object purger. purges the next eligible object from memory. */
552 /* returns TRUE if an object was purged, otherwise FALSE. */
553 boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
554 void vm_purgeable_disown(task_t task);
555
556 struct trim_list {
557 uint64_t tl_offset;
558 uint64_t tl_length;
559 struct trim_list *tl_next;
560 };
561
562 u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
563
564 #define MAX_SWAPFILENAME_LEN 1024
565 #define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */
566
567 extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];
568
569 struct vm_counters {
570 unsigned int do_collapse_compressor;
571 unsigned int do_collapse_compressor_pages;
572 unsigned int do_collapse_terminate;
573 unsigned int do_collapse_terminate_failure;
574 unsigned int should_cow_but_wired;
575 unsigned int create_upl_extra_cow;
576 unsigned int create_upl_extra_cow_pages;
577 unsigned int create_upl_lookup_failure_write;
578 unsigned int create_upl_lookup_failure_copy;
579 };
580 extern struct vm_counters vm_counters;
581
582 #endif /* _VM_VM_PROTOS_H_ */
583
584 #endif /* XNU_KERNEL_PRIVATE */