X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..527f99514973766e9c0382a4d8550dfb00f54939:/osfmk/mach/vm_statistics.h

diff --git a/osfmk/mach/vm_statistics.h b/osfmk/mach/vm_statistics.h
index 36a79fdd1..fa0560559 100644
--- a/osfmk/mach/vm_statistics.h
+++ b/osfmk/mach/vm_statistics.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -113,15 +113,6 @@ struct vm_statistics {
 typedef struct vm_statistics	*vm_statistics_t;
 typedef struct vm_statistics	vm_statistics_data_t;
 
-#if defined(__ppc__)	/* On ppc, vm statistics are still 32-bit */
-
-typedef struct vm_statistics	*vm_statistics64_t;
-typedef struct vm_statistics	vm_statistics64_data_t;
-
-#define VM_STATISTICS_TRUNCATE_TO_32_BIT(value) value
-
-#else /* !(defined(__ppc__)) */
-
 /*
  * vm_statistics64
  *
@@ -133,6 +124,8 @@ typedef struct vm_statistics vm_statistics64_data_t;
  *	rev3 -	changed name to vm_statistics64.
  *		changed some fields in structure to 64-bit on
  *		arm, i386 and x86_64 architectures.
+ *	rev4 -	require 64-bit alignment for efficient access
+ *		in the kernel. No change to reported data.
  *
  */
 
@@ -149,12 +142,8 @@ struct vm_statistics64 {
 	uint64_t	cow_faults;		/* # of copy-on-writes */
 	uint64_t	lookups;		/* object cache lookups */
 	uint64_t	hits;			/* object cache hits */
-
-	/* added for rev1 */
 	uint64_t	purges;			/* # of pages purged */
 	natural_t	purgeable_count;	/* # of pages purgeable */
-
-	/* added for rev2 */
 	/*
 	 * NB: speculative pages are already accounted for in "free_count",
 	 * so "speculative_count" is the number of "free" pages that are
@@ -163,8 +152,17 @@ struct vm_statistics64 {
 	 */
 	natural_t	speculative_count;	/* # of pages speculative */
 
-}
-;
+	/* added for rev1 */
+	uint64_t	decompressions;		/* # of pages decompressed */
+	uint64_t	compressions;		/* # of pages compressed */
+	uint64_t	swapins;		/* # of pages swapped in (via compression segments) */
+	uint64_t	swapouts;		/* # of pages swapped out (via compression segments) */
+	natural_t	compressor_page_count;	/* # of pages used by the compressed pager to hold all the compressed data */
+	natural_t	throttled_count;	/* # of pages throttled */
+	natural_t	external_page_count;	/* # of pages that are file-backed (non-swap) */
+	natural_t	internal_page_count;	/* # of pages that are anonymous */
+	uint64_t	total_uncompressed_pages_in_compressor; /* # of pages (uncompressed) held within the compressor. */
+} __attribute__((aligned(8)));
 
 typedef struct vm_statistics64	*vm_statistics64_t;
 typedef struct vm_statistics64	vm_statistics64_data_t;
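A minimal user-space sketch (not part of the diff) of how the vm_statistics64 fields added above are typically read, via the long-standing host_statistics64()/HOST_VM_INFO64 interface; error handling is kept to the bare minimum:

#include <stdio.h>
#include <mach/mach.h>

int
main(void)
{
	vm_statistics64_data_t	vmstat;
	mach_msg_type_number_t	count = HOST_VM_INFO64_COUNT;
	kern_return_t		kr;

	kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
			       (host_info64_t)&vmstat, &count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "host_statistics64: %s\n", mach_error_string(kr));
		return 1;
	}

	/* fields introduced by the revision above */
	printf("compressor pages:        %u\n",   vmstat.compressor_page_count);
	printf("compressions:            %llu\n", vmstat.compressions);
	printf("decompressions:          %llu\n", vmstat.decompressions);
	printf("internal (anonymous):    %u\n",   vmstat.internal_page_count);
	printf("external (file-backed):  %u\n",   vmstat.external_page_count);
	return 0;
}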
@@ -177,8 +175,40 @@ typedef struct vm_statistics64 vm_statistics64_data_t;
  */
 #define VM_STATISTICS_TRUNCATE_TO_32_BIT(value) ((uint32_t)(((value) > UINT32_MAX ) ? UINT32_MAX : (value)))
 
-#endif	/* !(defined(__ppc__)) */
+/*
+ * vm_extmod_statistics
+ *
+ * Structure to record modifications to a task by an
+ * external agent.
+ *
+ * History:
+ *	rev0 - original structure.
+ */
+
+struct vm_extmod_statistics {
+	int64_t	task_for_pid_count;		/* # of times task port was looked up */
+	int64_t	task_for_pid_caller_count;	/* # of times this task called task_for_pid */
+	int64_t	thread_creation_count;		/* # of threads created in task */
+	int64_t	thread_creation_caller_count;	/* # of threads created by task */
+	int64_t	thread_set_state_count;		/* # of register state sets in task */
+	int64_t	thread_set_state_caller_count;	/* # of register state sets by task */
+} __attribute__((aligned(8)));
+
+typedef struct vm_extmod_statistics *vm_extmod_statistics_t;
+typedef struct vm_extmod_statistics vm_extmod_statistics_data_t;
+
+typedef struct vm_purgeable_stat {
+	uint64_t	count;
+	uint64_t	size;
+}vm_purgeable_stat_t;
+
+struct vm_purgeable_info {
+	vm_purgeable_stat_t fifo_data[8];
+	vm_purgeable_stat_t obsolete_data;
+	vm_purgeable_stat_t lifo_data[8];
+};
+typedef struct vm_purgeable_info *vm_purgeable_info_t;
 
 /* included for the vm_map_page_query call */
 
@@ -192,6 +222,7 @@ typedef struct vm_statistics64 vm_statistics64_data_t;
 #define VM_PAGE_QUERY_PAGE_EXTERNAL	0x80
 #define VM_PAGE_QUERY_PAGE_CS_VALIDATED	0x100
 #define VM_PAGE_QUERY_PAGE_CS_TAINTED	0x200
+#define VM_PAGE_QUERY_PAGE_CS_NX	0x400
 
 #ifdef	MACH_KERNEL_PRIVATE
 
@@ -206,10 +237,29 @@ struct pmap_statistics {
 	integer_t	resident_count;	/* # of pages mapped (total)*/
 	integer_t	resident_max;	/* # of pages mapped (peak) */
 	integer_t	wired_count;	/* # of pages wired */
+
+	integer_t	device;
+	integer_t	device_peak;
+	integer_t	internal;
+	integer_t	internal_peak;
+	integer_t	external;
+	integer_t	external_peak;
+	integer_t	reusable;
+	integer_t	reusable_peak;
+	uint64_t	compressed __attribute__((aligned(8)));
+	uint64_t	compressed_peak __attribute__((aligned(8)));
+	uint64_t	compressed_lifetime __attribute__((aligned(8)));
 };
 
 typedef struct pmap_statistics	*pmap_statistics_t;
 
+#define PMAP_STATS_PEAK(field)		\
+	MACRO_BEGIN			\
+	if (field > field##_peak) {	\
+		field##_peak = field;	\
+	}				\
+	MACRO_END
+
 #endif	/* MACH_KERNEL_PRIVATE */
 
 /*
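The PMAP_STATS_PEAK() macro added above leans on ## token pasting to pair each counter with its *_peak shadow field. A self-contained sketch of the expansion, using user-space stand-ins for the kernel's MACRO_BEGIN/MACRO_END wrappers and a toy struct (both are assumptions for illustration only, not the kernel's own definitions):

#include <assert.h>

/* user-space stand-ins for the kernel's MACRO_BEGIN/MACRO_END wrappers,
 * which bracket a do { ... } while (0) block */
#define MACRO_BEGIN	do {
#define MACRO_END	} while (0)

/* same shape as the macro added in the diff */
#define PMAP_STATS_PEAK(field)		\
	MACRO_BEGIN			\
	if (field > field##_peak) {	\
		field##_peak = field;	\
	}				\
	MACRO_END

/* toy counter pair mirroring e.g. internal / internal_peak above */
struct toy_stats {
	int	internal;
	int	internal_peak;
};

int
main(void)
{
	struct toy_stats s = { 0, 0 };

	s.internal = 5;
	PMAP_STATS_PEAK(s.internal);	/* ## pastes "internal" + "_peak" -> s.internal_peak */
	s.internal = 3;
	PMAP_STATS_PEAK(s.internal);	/* the peak is never lowered */

	assert(s.internal == 3 && s.internal_peak == 5);
	return 0;
}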
@@ -240,52 +290,101 @@ typedef struct pmap_statistics *pmap_statistics_t;
  *	queue instead of the active queue.  In other words, they are not
  *	cached so that they will be stolen first if memory runs low.
  */
+
 #define VM_FLAGS_FIXED		0x0000
 #define VM_FLAGS_ANYWHERE	0x0001
 #define VM_FLAGS_PURGABLE	0x0002
+#define VM_FLAGS_RANDOM_ADDR	0x0008
 #define VM_FLAGS_NO_CACHE	0x0010
-#ifdef KERNEL_PRIVATE
-#define VM_FLAGS_BELOW_MIN	0x0080	/* map below the map's min offset */
-#define VM_FLAGS_PERMANENT	0x0100	/* mapping can NEVER be unmapped */
-#define VM_FLAGS_GUARD_AFTER	0x0200	/* guard page after the mapping */
-#define VM_FLAGS_GUARD_BEFORE	0x0400	/* guard page before the mapping */
-#define VM_FLAGS_SUBMAP		0x0800	/* mapping a VM submap */
-#define VM_FLAGS_ALREADY	0x1000	/* OK if same mapping already exists */
-#define VM_FLAGS_BEYOND_MAX	0x2000	/* map beyond the map's max offset */
+#define VM_FLAGS_RESILIENT_CODESIGN	0x0020
+#define VM_FLAGS_RESILIENT_MEDIA	0x0040
 #define VM_FLAGS_OVERWRITE	0x4000	/* delete any existing mappings first */
-#define VM_FLAGS_NO_PMAP_CHECK	0x8000	/* do not check that pmap is empty */
-#endif /* KERNEL_PRIVATE */
-
 /*
  * VM_FLAGS_SUPERPAGE_MASK
  *	3 bits that specify whether large pages should be used instead of
  *	base pages (!=0), as well as the requested page size.
  */
 #define VM_FLAGS_SUPERPAGE_MASK	0x70000	/* bits 0x10000, 0x20000, 0x40000 */
-#define VM_FLAGS_SUPERPAGE_SHIFT 16
-
-#define SUPERPAGE_NONE			0	/* no superpages, if all bits are 0 */
-#define VM_FLAGS_SUPERPAGE_NONE		(SUPERPAGE_NONE<<VM_FLAGS_SUPERPAGE_SHIFT)
-#define SUPERPAGE_SIZE_ANY		1
-#define VM_FLAGS_SUPERPAGE_SIZE_ANY	(SUPERPAGE_SIZE_ANY<<VM_FLAGS_SUPERPAGE_SHIFT)
-#if defined(__x86_64__) || !defined(KERNEL)
-#define SUPERPAGE_SIZE_2MB		2
-#define VM_FLAGS_SUPERPAGE_SIZE_2MB	(SUPERPAGE_SIZE_2MB<<VM_FLAGS_SUPERPAGE_SHIFT)
-#endif
 
 #define VM_FLAGS_ALIAS_MASK	0xFF000000
 #define VM_GET_FLAGS_ALIAS(flags, alias)			\
 		(alias) = ((flags) & VM_FLAGS_ALIAS_MASK) >> 24
+#if !XNU_KERNEL_PRIVATE
 #define VM_SET_FLAGS_ALIAS(flags, alias)			\
 		(flags) = (((flags) & ~VM_FLAGS_ALIAS_MASK) |	\
 		(((alias) & ~VM_FLAGS_ALIAS_MASK) << 24))
+#endif /* !XNU_KERNEL_PRIVATE */
 
 /* These are the flags that we accept from user-space */
 #define VM_FLAGS_USER_ALLOCATE	(VM_FLAGS_FIXED |		\
 				 VM_FLAGS_ANYWHERE |		\
 				 VM_FLAGS_PURGABLE |		\
+				 VM_FLAGS_RANDOM_ADDR |		\
 				 VM_FLAGS_NO_CACHE |		\
+				 VM_FLAGS_OVERWRITE |		\
 				 VM_FLAGS_SUPERPAGE_MASK |	\
 				 VM_FLAGS_ALIAS_MASK)
-#define VM_FLAGS_USER_MAP	VM_FLAGS_USER_ALLOCATE
+#define VM_FLAGS_USER_MAP	(VM_FLAGS_USER_ALLOCATE |	\
+				 VM_FLAGS_RETURN_4K_DATA_ADDR |	\
+				 VM_FLAGS_RETURN_DATA_ADDR)
+#define VM_FLAGS_USER_REMAP	(VM_FLAGS_FIXED |		\
+				 VM_FLAGS_ANYWHERE |		\
+				 VM_FLAGS_RANDOM_ADDR |		\
+				 VM_FLAGS_OVERWRITE|		\
+				 VM_FLAGS_RETURN_DATA_ADDR |	\
+				 VM_FLAGS_RESILIENT_CODESIGN)
+
+#define VM_FLAGS_SUPERPAGE_SHIFT	16
+#define SUPERPAGE_NONE			0	/* no superpages, if all bits are 0 */
+#define SUPERPAGE_SIZE_ANY		1
+#define VM_FLAGS_SUPERPAGE_NONE		(SUPERPAGE_NONE << VM_FLAGS_SUPERPAGE_SHIFT)
+#define VM_FLAGS_SUPERPAGE_SIZE_ANY	(SUPERPAGE_SIZE_ANY << VM_FLAGS_SUPERPAGE_SHIFT)
+#if defined(__x86_64__) || !defined(KERNEL)
+#define SUPERPAGE_SIZE_2MB		2
+#define VM_FLAGS_SUPERPAGE_SIZE_2MB	(SUPERPAGE_SIZE_2MB << VM_FLAGS_SUPERPAGE_SHIFT)
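A user-space sketch (not part of the diff) of how the flags regrouped above are commonly combined: requesting a single 2 MB superpage with mach_vm_allocate() and falling back to base pages when the request is refused. Superpage sizes are only honored on x86_64, the size must be a multiple of 2 MB, and superpage memory comes wired, so the fallback path is expected on other configurations:

#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>

#define TWO_MB	(2ULL * 1024 * 1024)

int
main(void)
{
	mach_vm_address_t	addr = 0;
	kern_return_t		kr;

	/* try for a 2 MB superpage first */
	kr = mach_vm_allocate(mach_task_self(), &addr, TWO_MB,
			      VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (kr != KERN_SUCCESS) {
		/* fall back to ordinary base pages */
		kr = mach_vm_allocate(mach_task_self(), &addr, TWO_MB,
				      VM_FLAGS_ANYWHERE);
	}
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_vm_allocate: %s\n", mach_error_string(kr));
		return 1;
	}
	printf("allocated 2 MB at 0x%llx\n", (unsigned long long)addr);
	mach_vm_deallocate(mach_task_self(), addr, TWO_MB);
	return 0;
}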