/*
 * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef XNU_KERNEL_PRIVATE

#ifndef _VM_VM_PROTOS_H_
#define _VM_VM_PROTOS_H_

#include <mach/mach_types.h>
#include <kern/kern_types.h>
/*
 * This file contains various type definitions and routine prototypes
 * that are needed to avoid compilation warnings for VM code (in osfmk,
 * default_pager and bsd).
 * Most of these should eventually go into more appropriate header files.
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */
extern kern_return_t device_data_action(
    uintptr_t               device_handle,
    ipc_port_t              device_pager,
    vm_prot_t               protection,
    vm_object_offset_t      offset,
    vm_size_t               size);

extern kern_return_t device_close(
    uintptr_t               device_handle);

extern boolean_t vm_swap_files_pinned(void);
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
    ipc_port_t              sright,
    ipc_space_t             space);
extern task_t port_name_to_task(
    mach_port_name_t        name);
extern task_t port_name_to_task_name(
    mach_port_name_t        name);
extern void ipc_port_release_send(ipc_port_t port);
#endif /* _IPC_IPC_PORT_H_ */

extern ipc_space_t get_task_ipcspace(
    task_t                  t);
#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb;       /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */
/* Some loose-ends VM stuff */

extern vm_map_t         kalloc_map;
extern vm_size_t        msg_ool_size_small;

extern kern_return_t vm_tests(void);
extern void consider_machine_adjust(void);
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_vmmap_entries(vm_map_t);
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern kern_return_t vm_map_purgable_control(
    vm_map_t                map,
    vm_map_offset_t         address,
    vm_purgable_t           control,
    int                     *state);
#if MACH_ASSERT
extern void vm_map_pmap_check_ledgers(
    pmap_t                  pmap,
    ledger_t                ledger,
    int                     pid,
    char                    *procname);
#endif /* MACH_ASSERT */
extern kern_return_t vnode_pager_get_object_vnode(
    memory_object_t         mem_obj,
    uintptr_t               *vnodeaddr,
    uint32_t                *vid);

extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va);
/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */
extern kern_return_t vm_upl_map(
    vm_map_t                target_task,
    upl_t                   upl,
    vm_address_t            *address);

extern kern_return_t vm_upl_unmap(
    vm_map_t                target_task,
    upl_t                   upl);

extern kern_return_t vm_region_object_create(
    vm_map_t                target_task,
    vm_size_t               size,
    ipc_port_t              *object_handle);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);
#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT      MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
extern kern_return_t vm_map_apple_protected(
    vm_map_t                map,
    vm_map_offset_t         start,
    vm_map_offset_t         end,
    vm_object_offset_t      crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    uint32_t                cryptid);
extern memory_object_t apple_protect_pager_setup(
    vm_object_t             backing_object,
    vm_object_offset_t      backing_offset,
    vm_object_offset_t      crypto_backing_offset,
    struct pager_crypt_info *crypt_info,
    vm_object_offset_t      crypto_start,
    vm_object_offset_t      crypto_end);
#endif /* CONFIG_CODE_DECRYPTION */
struct vm_shared_region_slide_info;
extern kern_return_t vm_map_shared_region(
    vm_map_t                map,
    vm_map_offset_t         start,
    vm_map_offset_t         end,
    vm_object_offset_t      backing_offset,
    struct vm_shared_region_slide_info *slide_info);

extern memory_object_t shared_region_pager_setup(
    vm_object_t             backing_object,
    vm_object_offset_t      backing_offset,
    struct vm_shared_region_slide_info *slide_info,
    uint64_t                jop_key);
#if __has_feature(ptrauth_calls)
extern memory_object_t shared_region_pager_match(
    vm_object_t             backing_object,
    vm_object_offset_t      backing_offset,
    struct vm_shared_region_slide_info *slide_info,
    uint64_t                jop_key);
extern void shared_region_key_alloc(
    char                    *shared_region_id,
    bool                    inherit,
    uint64_t                inherited_key);
extern void shared_region_key_dealloc(
    char                    *shared_region_id);
extern uint64_t generate_jop_key(void);
extern void shared_region_pager_match_task_key(memory_object_t memobj, task_t task);
#endif /* __has_feature(ptrauth_calls) */

extern bool vm_shared_region_is_reslide(struct task *task);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);
#if __arm64__ || (__ARM_ARCH_7K__ >= 2)
#define SIXTEENK_PAGE_SIZE      0x4000
#define SIXTEENK_PAGE_MASK      0x3FFF
#define SIXTEENK_PAGE_SHIFT     14
#endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */
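/*
 * Worked example of the mask/shift arithmetic implied by the constants above
 * (an illustration only, not a definition used elsewhere in this header):
 * rounding an address up to a 16K boundary can be written as
 *
 *	rounded = (addr + SIXTEENK_PAGE_MASK) & ~((uint64_t)SIXTEENK_PAGE_MASK);
 *
 * and (addr >> SIXTEENK_PAGE_SHIFT) yields the 16K page index, since
 * 0x4000 == 1 << 14 and 0x3FFF == 0x4000 - 1.
 */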
#if __arm64__

#define FOURK_PAGE_SIZE         0x1000
#define FOURK_PAGE_MASK         0xFFF
#define FOURK_PAGE_SHIFT        12

extern unsigned int page_shift_user32;

#define VM_MAP_DEBUG_FOURK      MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
    memory_object_t         mem_obj,
    boolean_t               overwrite,
    int                     index,
    vm_object_t             new_backing_object,
    vm_object_offset_t      new_backing_offset,
    vm_object_t             *old_backing_object,
    vm_object_offset_t      *old_backing_offset);
#endif /* __arm64__ */
struct vnode;
extern void *upl_get_internal_page_list(
    upl_t                   upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

extern void vnode_pager_was_dirtied(
    struct vnode *,
    vm_object_offset_t,
    vm_object_offset_t);
typedef int pager_return_t;
extern pager_return_t vnode_pagein(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
    struct vnode *, upl_t,
    upl_offset_t, vm_object_offset_t,
    upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
    struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
    struct vnode *);
extern uint32_t vnode_pager_isinuse(
    struct vnode *);
extern boolean_t vnode_pager_isSSD(
    struct vnode *);
extern void vnode_pager_throttle(
    void);
extern uint32_t vnode_pager_return_throttle_io_limit(
    struct vnode *,
    uint32_t *);
extern kern_return_t vnode_pager_get_name(
    struct vnode            *vp,
    char                    *pathname,
    vm_size_t               pathname_len,
    char                    *filename,
    vm_size_t               filename_len,
    boolean_t               *truncated_path_p);
extern kern_return_t vnode_pager_get_mtime(
    struct vnode            *vp,
    struct timespec         *mtime,
    struct timespec         *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
    struct vnode            *vp,
    void                    **blobs);

void vnode_pager_issue_reprioritize_io(
    struct vnode            *devvp,
    uint64_t                blkno,
    uint32_t                len,
    int                     priority);

#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_validation_bitmap routine*/
#define CS_BITMAP_SET   1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3
#endif /* CHECK_CS_VALIDATION_BITMAP */
extern kern_return_t vnode_pager_data_unlock(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    memory_object_size_t    size,
    vm_prot_t               desired_access);
extern kern_return_t vnode_pager_init(
    memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
    memory_object_t,
    memory_object_offset_t *);
extern kern_return_t vnode_pager_get_object_devvp(
    memory_object_t,
    uintptr_t *);
extern void vnode_pager_dirtied(
    memory_object_t,
    vm_object_offset_t,
    vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
    memory_object_t,
    boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
    memory_object_t,
    uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
    memory_object_t         mem_obj,
    char                    *pathname,
    vm_size_t               pathname_len,
    char                    *filename,
    vm_size_t               filename_len,
    boolean_t               *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
    memory_object_t         mem_obj,
    struct timespec         *mtime,
    struct timespec         *cs_mtime);
#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    int                     optype);
#endif /*CHECK_CS_VALIDATION_BITMAP*/

extern kern_return_t ubc_cs_check_validation_bitmap(
    struct vnode            *vp,
    memory_object_offset_t  offset,
    int                     optype);
extern kern_return_t vnode_pager_data_request(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t vnode_pager_data_initialize(
    memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern void vnode_pager_reference(
    memory_object_t         mem_obj);
extern kern_return_t vnode_pager_synchronize(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    memory_object_size_t    length,
    vm_sync_t               sync_flags);
extern kern_return_t vnode_pager_map(
    memory_object_t         mem_obj,
    vm_prot_t               prot);
extern kern_return_t vnode_pager_last_unmap(
    memory_object_t         mem_obj);
extern void vnode_pager_deallocate(
    memory_object_t);
extern kern_return_t vnode_pager_terminate(
    memory_object_t);
extern void vnode_pager_vrele(
    struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
    memory_object_t);

extern void ubc_unmap(
    struct vnode *vp);

extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);
extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
    memory_object_t         device,
    memory_object_offset_t  offset,
    ppnum_t                 page_num,
    vm_size_t               size);
extern memory_object_t device_pager_setup(
    memory_object_t,
    uintptr_t,
    vm_size_t,
    int);

extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);
extern kern_return_t pager_map_to_phys_contiguous(
    memory_object_control_t object,
    memory_object_offset_t  offset,
    addr64_t                base_vaddr,
    vm_size_t               size);

extern kern_return_t memory_object_create_named(
    memory_object_t         pager,
    memory_object_offset_t  size,
    memory_object_control_t *control);
struct macx_triggers_args;
extern int mach_macx_triggers(
    struct macx_triggers_args *args);

extern int macx_swapinfo(
    memory_object_size_t    *total_p,
    memory_object_size_t    *avail_p,
    vm_size_t               *pagesize_p,
    boolean_t               *encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
    vm_map_t                map,
    vm_map_offset_t         start_unnest,
    vm_map_offset_t         end_unnest,
    boolean_t               is_nested_map,
    vm_map_offset_t         lowest_unnestable_addr);
struct proc *current_proc(void);
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);
#define CS_VALIDATE_TAINTED     0x00000001
#define CS_VALIDATE_NX          0x00000002

extern boolean_t cs_validate_range(struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    vm_size_t size,
    unsigned *result);
extern void cs_validate_page(
    struct vnode            *vp,
    memory_object_t         pager,
    memory_object_offset_t  offset,
    const void              *data,
    int                     *validated_p,
    int                     *tainted_p,
    int                     *nx_p);
extern kern_return_t cs_associate_blob_with_mapping(
    void                    *pmap,
    vm_map_offset_t         start,
    vm_map_size_t           size,
    vm_object_offset_t      offset,
    void                    *blobs_p);
extern kern_return_t memory_entry_purgeable_control_internal(
    ipc_port_t              entry_port,
    vm_purgable_t           control,
    int                     *state);

extern kern_return_t memory_entry_access_tracking_internal(
    ipc_port_t              entry_port,
    int                     *access_tracking,
    uint32_t                *access_tracking_reads,
    uint32_t                *access_tracking_writes);

extern kern_return_t mach_memory_entry_purgable_control(
    ipc_port_t              entry_port,
    vm_purgable_t           control,
    int                     *state);
extern kern_return_t mach_memory_entry_get_page_counts(
    ipc_port_t              entry_port,
    unsigned int            *resident_page_count,
    unsigned int            *dirty_page_count);

extern kern_return_t mach_memory_entry_phys_page_offset(
    ipc_port_t              entry_port,
    vm_object_offset_t      *offset_p);
extern kern_return_t mach_memory_entry_map_size(
    ipc_port_t              entry_port,
    vm_map_t                map,
    memory_object_offset_t  offset,
    memory_object_offset_t  size,
    mach_vm_size_t          *map_size);

extern kern_return_t vm_map_range_physical_size(
    vm_map_t                map,
    vm_map_address_t        start,
    mach_vm_size_t          size,
    mach_vm_size_t          *phys_size);

extern kern_return_t mach_memory_entry_page_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset,
    int                     ops,
    ppnum_t                 *phys_entry,
    int                     *flags);
extern kern_return_t mach_memory_entry_range_op(
    ipc_port_t              entry_port,
    vm_object_offset_t      offset_beg,
    vm_object_offset_t      offset_end,
    int                     ops,
    int                     *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern void mach_destroy_memory_entry(ipc_port_t port);
extern kern_return_t mach_memory_entry_allocate(
    struct vm_named_entry   **user_entry_p,
    ipc_port_t              *user_handle_p);
extern vm_object_t vm_named_entry_to_vm_object(
    vm_named_entry_t        named_entry);
extern kern_return_t vm_named_entry_from_vm_object(
    vm_named_entry_t        named_entry,
    vm_object_t             object,
    vm_object_offset_t      offset,
    vm_object_size_t        size,
    vm_prot_t               prot);

extern void vm_paging_map_init(void);
extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

#define VM_TOGGLE_CLEAR         0
#define VM_TOGGLE_SET           1
#define VM_TOGGLE_GETVALUE      999
int vm_toggle_entry_reuse(int, int*);
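/*
 * Illustrative sketch only, not part of the interface: one plausible way a
 * caller could drive vm_toggle_entry_reuse().  The assumed semantics (first
 * argument is the new toggle value, second returns the previous value when
 * querying with VM_TOGGLE_GETVALUE) are taken from the implementation in
 * vm_map.c and should be checked there before relying on them.
 *
 *	int old_value = 0;
 *	(void) vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &old_value);
 *	if (old_value != VM_TOGGLE_SET) {
 *		(void) vm_toggle_entry_reuse(VM_TOGGLE_SET, NULL);
 *	}
 */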
#define SWAP_WRITE      0x00000000      /* Write buffer (pseudo flag). */
#define SWAP_READ       0x00000001      /* Read buffer. */
#define SWAP_ASYNC      0x00000002      /* Start I/O, do not wait. */

extern void vm_compressor_pager_init(void);
extern kern_return_t compressor_memory_object_create(
    memory_object_size_t,
    memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc *, boolean_t);
#endif /* CONFIG_JETSAM */
/* the object purger. purges the next eligible object from memory. */
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
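/*
 * Illustrative sketch only: a caller that wants to reclaim as much purgeable
 * memory as possible could keep invoking the purger until it reports that
 * nothing was purged.  Passing 0 for force_purge_below_group is an assumption
 * made for this example; see vm_purgeable.c for the precise semantics of that
 * argument.
 *
 *	while (vm_purgeable_object_purge_one_unlocked(0) == TRUE) {
 *		continue;
 *	}
 */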
void vm_purgeable_nonvolatile_owner_update(task_t owner,
    int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
    int delta);
void vm_owned_objects_disown(task_t task);
struct trim_list {
    uint64_t                tl_offset;
    uint64_t                tl_length;
    struct trim_list        *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
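/*
 * Illustrative sketch only: building a one-element trim list on the stack and
 * handing it to vnode_trim_list().  The field meanings assumed here (byte
 * offset and length of the extent to trim), the FALSE route_only value, and
 * the "swap_vp" name (a vnode the caller already holds) are all assumptions
 * for the example, not a statement of the interface contract.
 *
 *	struct trim_list tl = {
 *		.tl_offset = 0,
 *		.tl_length = 16 * 1024,
 *		.tl_next   = NULL,
 *	};
 *	(void) vnode_trim_list(swap_vp, &tl, FALSE);
 */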
#define MAX_SWAPFILENAME_LEN    1024
#define SWAPFILENAME_INDEX_LEN  2       /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];
struct vm_counters {
    unsigned int    do_collapse_compressor;
    unsigned int    do_collapse_compressor_pages;
    unsigned int    do_collapse_terminate;
    unsigned int    do_collapse_terminate_failure;
    unsigned int    should_cow_but_wired;
    unsigned int    create_upl_extra_cow;
    unsigned int    create_upl_extra_cow_pages;
    unsigned int    create_upl_lookup_failure_write;
    unsigned int    create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;
#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data {
    int     eligible_for_secluded;
    int     grab_success_free;
    int     grab_success_other;
    int     grab_failure_locked;
    int     grab_failure_state;
    int     grab_failure_dirty;
    int     grab_for_iokit;
    int     grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
extern int secluded_for_fbdp;

extern uint64_t vm_page_secluded_drain(void);
extern void memory_object_mark_eligible_for_secluded(
    memory_object_control_t control,
    boolean_t               eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */
#define MAX_PAGE_RANGE_QUERY    (1ULL * 1024 * 1024 * 1024) /* 1 GB */
extern kern_return_t mach_make_memory_entry_internal(
    vm_map_t                        target_map,
    memory_object_size_t            *size,
    memory_object_offset_t          offset,
    vm_prot_t                       permission,
    vm_named_entry_kernel_flags_t   vmne_kflags,
    ipc_port_t                      *object_handle,
    ipc_port_t                      parent_handle);
extern kern_return_t memory_entry_check_for_adjustment(
    vm_map_t                src_map,
    ipc_port_t              port,
    vm_map_offset_t         *overmap_start,
    vm_map_offset_t         *overmap_end);
#define roundup(x, y)   ((((x) % (y)) == 0) ? \
	        (x) : ((x) + ((y) - ((x) % (y)))))
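/*
 * Worked example of the macro above: roundup(10, 4) evaluates to
 * 10 + (4 - (10 % 4)) = 10 + (4 - 2) = 12, while roundup(12, 4) is already a
 * multiple of 4 and stays 12.  Like the classic BSD roundup(), the macro
 * evaluates its arguments more than once, so avoid passing expressions with
 * side effects.
 */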
/*
 * Flags for the VM swapper/reclaimer.
 * Used by vm_swap_consider_defragment()
 * to force defrag/reclaim by the swap
 * backing store thread.
 */
#define VM_SWAP_FLAGS_NONE              0
#define VM_SWAP_FLAGS_FORCE_DEFRAG      1
#define VM_SWAP_FLAGS_FORCE_RECLAIM     2
#if __arm64__
/*
 * Flags to control the behavior of
 * the legacy footprint entitlement.
 */
#define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE             (1)
#define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT         (2)
#define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE     (3)

#endif /* __arm64__ */
#if MACH_ASSERT

extern struct proc *current_proc(void);
extern int proc_pid(struct proc *);
extern char *proc_best_name(struct proc *);
extern uint64_t thread_tid(struct thread *);
extern int debug4k_filter;
extern int debug4k_proc_filter;
extern char debug4k_proc_name[];
extern const char *debug4k_category_name[];
#define __DEBUG4K(category, fmt, ...)                                   \
	MACRO_BEGIN                                                     \
	int __category = (category);                                    \
	struct thread *__t = NULL;                                      \
	struct proc *__p = NULL;                                        \
	const char *__pname = "?";                                      \
	boolean_t __do_log = FALSE;                                     \
	                                                                \
	if ((1 << __category) & debug4k_filter) {                       \
	        __do_log = TRUE;                                        \
	} else if (((1 << __category) & debug4k_proc_filter) &&         \
	    debug4k_proc_name[0] != '\0') {                             \
	        __p = current_proc();                                   \
	        if (__p != NULL) {                                      \
	                __pname = proc_best_name(__p);                  \
	        }                                                       \
	        if (!strcmp(debug4k_proc_name, __pname)) {              \
	                __do_log = TRUE;                                \
	        }                                                       \
	}                                                               \
	if (__do_log) {                                                 \
	        if (__p == NULL) {                                      \
	                __p = current_proc();                           \
	                if (__p != NULL) {                              \
	                        __pname = proc_best_name(__p);          \
	                }                                               \
	        }                                                       \
	        __t = current_thread();                                 \
	        printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt,     \
	            debug4k_category_name[__category],                  \
	            __p ? proc_pid(__p) : 0,                            \
	            __pname, __t, thread_tid(__t),                      \
	            __FUNCTION__, __LINE__, ##__VA_ARGS__);             \
	}                                                               \
	MACRO_END
#define __DEBUG4K_ERROR         0
#define __DEBUG4K_LIFE          1
#define __DEBUG4K_LOAD          2
#define __DEBUG4K_FAULT         3
#define __DEBUG4K_COPY          4
#define __DEBUG4K_SHARE         5
#define __DEBUG4K_ADJUST        6
#define __DEBUG4K_PMAP          7
#define __DEBUG4K_MEMENTRY      8
#define __DEBUG4K_IOKIT         9
#define __DEBUG4K_UPL           10
#define __DEBUG4K_EXC           11
#define __DEBUG4K_VFS           12

#define DEBUG4K_ERROR(...)      __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__)
#define DEBUG4K_LIFE(...)       __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__)
#define DEBUG4K_LOAD(...)       __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__)
#define DEBUG4K_FAULT(...)      __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__)
#define DEBUG4K_COPY(...)       __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__)
#define DEBUG4K_SHARE(...)      __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__)
#define DEBUG4K_ADJUST(...)     __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__)
#define DEBUG4K_PMAP(...)       __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__)
#define DEBUG4K_MEMENTRY(...)   __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__)
#define DEBUG4K_IOKIT(...)      __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__)
#define DEBUG4K_UPL(...)        __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__)
#define DEBUG4K_EXC(...)        __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__)
#define DEBUG4K_VFS(...)        __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__)
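/*
 * Illustrative usage sketch: a VM routine would log through one of the
 * per-category wrappers above, e.g.
 *
 *	DEBUG4K_FAULT("map %p offset 0x%llx\n", map, (uint64_t)offset);
 *
 * The message is emitted only when bit (1 << __DEBUG4K_FAULT) is set in
 * debug4k_filter, or when debug4k_proc_filter selects the category and the
 * current process name matches debug4k_proc_name.  The "map" and "offset"
 * arguments are placeholders for whatever the call site wants to print.
 */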
#else /* MACH_ASSERT */

#define DEBUG4K_ERROR(...)
#define DEBUG4K_LIFE(...)
#define DEBUG4K_LOAD(...)
#define DEBUG4K_FAULT(...)
#define DEBUG4K_COPY(...)
#define DEBUG4K_SHARE(...)
#define DEBUG4K_ADJUST(...)
#define DEBUG4K_PMAP(...)
#define DEBUG4K_MEMENTRY(...)
#define DEBUG4K_IOKIT(...)
#define DEBUG4K_UPL(...)
#define DEBUG4K_EXC(...)
#define DEBUG4K_VFS(...)

#endif /* MACH_ASSERT */

#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */