/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
 */

/* During VM initialization,
 * use remaining physical pages
 * to allocate page frames.
 */
-extern void pmap_init(void) __attribute__((section("__TEXT, initcode")));
+extern void pmap_init(void);
/* Initialization,
* after kernel runs
* in virtual memory.
*/
extern boolean_t pmap_next_page(ppnum_t *pnum);
-#if defined(__LP64__)
-extern boolean_t pmap_next_page_k64(ppnum_t *pnum);
-#endif
+extern boolean_t pmap_next_page_hi(ppnum_t *pnum);
/* During VM initialization,
 * return the next unused
 * physical page.
 */

/*
 * Routines to manage the physical map data structure.
 */
extern pmap_t pmap_create( /* Create a pmap_t. */
+ ledger_t ledger,
vm_map_size_t size,
-#ifdef __i386__
- boolean_t is_64bit);
-#else
__unused boolean_t is_64bit);
-#endif
extern pmap_t (pmap_kernel)(void); /* Return the kernel's pmap */
extern void pmap_reference(pmap_t pmap); /* Gain a reference. */
extern void pmap_destroy(pmap_t pmap); /* Release a reference. */
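/*
 * Illustrative sketch (not part of this header): pmap_create() now takes
 * a ledger_t, so page-table and wired-memory usage can be charged to the
 * owning task's ledger.  The helper name and the ledger argument are
 * hypothetical; an in-kernel caller would have <vm/pmap.h> in scope.
 */
static pmap_t
example_create_user_pmap(ledger_t task_ledger)
{
	/* 0 => default initial size; TRUE => 64-bit address space */
	return pmap_create(task_ledger, 0, TRUE);
}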
extern kern_return_t pmap_enter( /* Enter a mapping. */
	pmap_t pmap,
	vm_map_offset_t v,
ppnum_t pn,
vm_prot_t prot,
+ vm_prot_t fault_type,
unsigned int flags,
boolean_t wired);
+extern kern_return_t pmap_enter_options(
+ pmap_t pmap,
+ vm_map_offset_t v,
+ ppnum_t pn,
+ vm_prot_t prot,
+ vm_prot_t fault_type,
+ unsigned int flags,
+ boolean_t wired,
+ unsigned int options,
+ void *arg);
+
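/*
 * Illustrative sketch: pmap_enter_options() extends pmap_enter() with an
 * option mask and an opaque arg (used by the deferred-flush machinery
 * declared below).  With PMAP_OPTIONS_NOWAIT the call is expected to
 * return KERN_RESOURCE_SHORTAGE instead of blocking when the pmap would
 * have to expand.  The helper and its argument values are hypothetical.
 */
static kern_return_t
example_try_enter(pmap_t pmap, vm_map_offset_t va, ppnum_t pn)
{
	kern_return_t kr;

	kr = pmap_enter_options(pmap, va, pn,
	    VM_PROT_READ | VM_PROT_WRITE,	/* protection */
	    VM_PROT_WRITE,			/* fault_type: faulting access */
	    0,					/* flags (WIMG) */
	    FALSE,				/* not wired */
	    PMAP_OPTIONS_NOWAIT,		/* fail rather than block */
	    NULL);
	if (kr == KERN_RESOURCE_SHORTAGE) {
		/* caller could expand the pmap and retry */
	}
	return kr;
}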
extern void pmap_remove_some_phys(
pmap_t pmap,
ppnum_t pn);
extern void pmap_page_protect( /* Restrict access to page. */
	ppnum_t phys,
vm_prot_t prot);
+extern void pmap_page_protect_options( /* Restrict access to page. */
+ ppnum_t phys,
+ vm_prot_t prot,
+ unsigned int options,
+ void *arg);
+
extern void (pmap_zero_page)(
ppnum_t pn);
extern unsigned int (pmap_disconnect)( /* disconnect mappings and return reference and change */
ppnum_t phys);
+extern unsigned int (pmap_disconnect_options)( /* disconnect mappings and return reference and change */
+ ppnum_t phys,
+ unsigned int options,
+ void *arg);
+
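/*
 * Illustrative sketch: pmap_disconnect() removes every mapping of a
 * physical page and returns its accumulated reference/modify state as
 * the VM_MEM_* bits defined later in this header, letting the caller
 * decide whether the page is dirty.  Hypothetical helper.
 */
static boolean_t
example_page_was_modified(ppnum_t pn)
{
	unsigned int refmod = pmap_disconnect(pn);

	return (refmod & VM_MEM_MODIFIED) ? TRUE : FALSE;
}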
extern kern_return_t (pmap_attribute_cache_sync)( /* Flush appropriate
* cache based on
* page number sent */
extern unsigned int (pmap_cache_attributes)(
ppnum_t pn);
+/*
+ * Set (override) cache attributes for the specified physical page
+ */
+extern void pmap_set_cache_attributes(
+ ppnum_t,
+ unsigned int);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);
/*
* Macro to be used in place of pmap_enter()
*/
-#define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
+#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
MACRO_BEGIN \
pmap_t __pmap = (pmap); \
vm_page_t __page = (page); \
+ int __options = 0; \
\
- if (__pmap != kernel_pmap) { \
- ASSERT_PAGE_DECRYPTED(__page); \
+ PMAP_ENTER_CHECK(__pmap, __page) \
+ if (__page->object->internal) { \
+ __options |= PMAP_OPTIONS_INTERNAL; \
} \
- if (__page->error) { \
- panic("VM page %p should not have an error\n", \
- __page); \
+ if (__page->reusable || __page->object->all_reusable) { \
+ __options |= PMAP_OPTIONS_REUSABLE; \
} \
- pmap_enter(__pmap, \
- (virtual_address), \
- __page->phys_page, \
- (protection), \
- (flags), \
- (wired)); \
+ (void) pmap_enter_options(__pmap, \
+ (virtual_address), \
+ __page->phys_page, \
+ (protection), \
+ (fault_type), \
+ (flags), \
+ (wired), \
+ __options, \
+ NULL); \
MACRO_END
#endif /* !PMAP_ENTER */
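/*
 * Illustrative sketch: a fault-path caller of the reworked macro now
 * supplies the faulting access type alongside the protections, which
 * lets the pmap layer distinguish "mapped writable" from "actually
 * written".  All variables are placeholders.
 */
static void
example_fault_enter(pmap_t pmap, vm_map_offset_t vaddr, vm_page_t m)
{
	PMAP_ENTER(pmap, vaddr, m,
	    VM_PROT_READ | VM_PROT_WRITE,	/* protection */
	    VM_PROT_WRITE,			/* fault_type */
	    0,					/* flags (WIMG) */
	    FALSE);				/* not wired */
}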
+#ifndef PMAP_ENTER_OPTIONS
+#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \
+ fault_type, flags, wired, options, result) \
+ MACRO_BEGIN \
+ pmap_t __pmap = (pmap); \
+ vm_page_t __page = (page); \
+ int __extra_options = 0; \
+ \
+ PMAP_ENTER_CHECK(__pmap, __page) \
+ if (__page->object->internal) { \
+ __extra_options |= PMAP_OPTIONS_INTERNAL; \
+ } \
+ if (__page->reusable || __page->object->all_reusable) { \
+ __extra_options |= PMAP_OPTIONS_REUSABLE; \
+ } \
+ result = pmap_enter_options(__pmap, \
+ (virtual_address), \
+ __page->phys_page, \
+ (protection), \
+ (fault_type), \
+ (flags), \
+ (wired), \
+ (options) | __extra_options, \
+ NULL); \
+ MACRO_END
+#endif /* !PMAP_ENTER_OPTIONS */
+
+#ifndef PMAP_SET_CACHE_ATTR
+#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \
+ MACRO_BEGIN \
+ if (!batch_pmap_op) { \
+ pmap_set_cache_attributes(mem->phys_page, cache_attr); \
+ object->set_cache_attr = TRUE; \
+ } \
+ MACRO_END
+#endif /* PMAP_SET_CACHE_ATTR */
+
+#ifndef PMAP_BATCH_SET_CACHE_ATTR
+#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \
+ cache_attr, num_pages, batch_pmap_op) \
+ MACRO_BEGIN \
+ if ((batch_pmap_op)) { \
+ unsigned int __page_idx=0; \
+ while (__page_idx < (num_pages)) { \
+ pmap_set_cache_attributes( \
+ user_page_list[__page_idx].phys_addr, \
+ (cache_attr)); \
+ __page_idx++; \
+ } \
+ (object)->set_cache_attr = TRUE; \
+ } \
+ MACRO_END
+#endif /* PMAP_BATCH_SET_CACHE_ATTR */
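/*
 * Illustrative sketch: a caller that has wired a UPL whose pages must be
 * mapped uncached could apply the attribute to the whole page list in
 * one batch.  The vm_object_t/upl_page_info_t parameters are assumed to
 * come from the surrounding (hypothetical) code; VM_MEM_NOT_CACHEABLE is
 * the WIMG bit defined later in this header.
 */
static void
example_make_upl_uncached(vm_object_t object, upl_page_info_t *pl,
    unsigned int num_pages)
{
	PMAP_BATCH_SET_CACHE_ATTR(object, pl, VM_MEM_NOT_CACHEABLE,
	    num_pages, TRUE);
}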
+
+#define PMAP_ENTER_CHECK(pmap, page) \
+{ \
+ if ((pmap) != kernel_pmap) { \
+ ASSERT_PAGE_DECRYPTED(page); \
+ } \
+ if ((page)->error) { \
+ panic("VM page %p should not have an error\n", \
+ (page)); \
+ } \
+}
+
/*
* Routines to manage reference/modify bits based on
* physical addresses, simulating them if not provided
* by the hardware.
*/
+struct pfc {
+ long pfc_cpus;
+ long pfc_invalid_global;
+};
+
+typedef struct pfc pmap_flush_context;
+
/* Clear reference bit */
extern void pmap_clear_reference(ppnum_t pn);
/* Clear the specified reference/modify bits */
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED 0x01 /* Modified bit */
#define VM_MEM_REFERENCED 0x02 /* Referenced bit */
+extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
+
+
+extern void pmap_flush_context_init(pmap_flush_context *);
+extern void pmap_flush(pmap_flush_context *);
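/*
 * Illustrative sketch of the deferred-flush pattern these declarations
 * enable: stack-allocate a pmap_flush_context, pass it with
 * PMAP_OPTIONS_NOFLUSH so each per-page operation queues its TLB work
 * instead of flushing immediately, then issue one combined flush.  The
 * page list is a placeholder.
 */
static void
example_clear_refbits(ppnum_t *pages, unsigned int count)
{
	pmap_flush_context flush_ctx;
	unsigned int i;

	pmap_flush_context_init(&flush_ctx);
	for (i = 0; i < count; i++) {
		pmap_clear_refmod_options(pages[i], VM_MEM_REFERENCED,
		    PMAP_OPTIONS_NOFLUSH, (void *)&flush_ctx);
	}
	pmap_flush(&flush_ctx);		/* one flush for the whole batch */
}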
/*
 * Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect( /* Change protections. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t prot);
+extern void pmap_protect_options( /* Change protections. */
+ pmap_t map,
+ vm_map_offset_t s,
+ vm_map_offset_t e,
+ vm_prot_t prot,
+ unsigned int options,
+ void *arg);
+
extern void (pmap_pageable)(
pmap_t pmap,
vm_map_offset_t start,
extern uint64_t pmap_nesting_size_min;
extern uint64_t pmap_nesting_size_max;
-extern kern_return_t pmap_nest(pmap_t grand,
- pmap_t subord,
- addr64_t vstart,
- addr64_t nstart,
- uint64_t size);
-extern kern_return_t pmap_unnest(pmap_t grand,
- addr64_t vaddr,
- uint64_t size);
+extern kern_return_t pmap_nest(pmap_t,
+ pmap_t,
+ addr64_t,
+ addr64_t,
+ uint64_t);
+extern kern_return_t pmap_unnest(pmap_t,
+ addr64_t,
+ uint64_t);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
#endif /* MACH_KERNEL_PRIVATE */
+extern boolean_t pmap_is_noencrypt(ppnum_t);
+extern void pmap_set_noencrypt(ppnum_t pn);
+extern void pmap_clear_noencrypt(ppnum_t pn);
+
/*
* JMM - This portion is exported to other kernel components right now,
* but will be pulled back in the future when the needed functionality
#define VM_MEM_NOT_CACHEABLE 0x4 /* (I) Cache Inhibit */
#define VM_MEM_WRITE_THROUGH 0x8 /* (W) Write-Through */
+#define VM_WIMG_USE_DEFAULT 0x80
#define VM_WIMG_MASK 0xFF
-#define VM_WIMG_USE_DEFAULT 0x80000000
#define VM_MEM_SUPERPAGE 0x100 /* map a superpage instead of a base page */
+#define VM_MEM_STACK 0x200
+
+#define PMAP_OPTIONS_NOWAIT 0x1 /* don't block, return
+ * KERN_RESOURCE_SHORTAGE
+ * instead */
+#define PMAP_OPTIONS_NOENTER 0x2 /* expand pmap if needed
+ * but don't enter mapping
+ */
+#define PMAP_OPTIONS_COMPRESSOR 0x4 /* credit the compressor for
+ * this operation */
+#define PMAP_OPTIONS_INTERNAL 0x8 /* page from internal object */
+#define PMAP_OPTIONS_REUSABLE 0x10 /* page is "reusable" */
+#define PMAP_OPTIONS_NOFLUSH 0x20 /* delay flushing of pmap */
+#define PMAP_OPTIONS_NOREFMOD 0x40 /* don't need ref/mod on disconnect */
+#define PMAP_OPTIONS_REMOVE 0x100 /* removing a mapping */
+
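/*
 * The PMAP_OPTIONS_* values form a bitmask and combine with OR.
 * Illustrative sketch: a reclaim path that already knows a page is clean
 * could skip both the ref/mod readback and the immediate TLB flush,
 * batching the flush through a pmap_flush_context.  Hypothetical helper.
 */
static void
example_reclaim_disconnect(ppnum_t pn, pmap_flush_context *fc)
{
	(void) pmap_disconnect_options(pn,
	    PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)fc);
}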
#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
vm_map_offset_t va);
extern void pmap_remove( /* Remove mappings. */
	pmap_t map,
	vm_map_offset_t s,
vm_map_offset_t e);
+extern void pmap_remove_options( /* Remove mappings. */
+ pmap_t map,
+ vm_map_offset_t s,
+ vm_map_offset_t e,
+ int options);
+
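/*
 * Illustrative sketch: tearing down a VA range while telling the pmap
 * layer this is a removal, so internal/compressor accounting can be
 * updated as the mappings disappear.  The range is a placeholder.
 */
static void
example_teardown_range(pmap_t pmap, vm_map_offset_t s, vm_map_offset_t e)
{
	pmap_remove_options(pmap, s, e, PMAP_OPTIONS_REMOVE);
}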
+extern void pmap_reusable(
+ pmap_t map,
+ vm_map_offset_t s,
+ vm_map_offset_t e,
+ boolean_t reusable);
+
extern void fillPage(ppnum_t pa, unsigned int fill);
extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
#endif
+unsigned int pmap_query_resident(pmap_t pmap,
+ vm_map_offset_t s,
+ vm_map_offset_t e);
+
#endif /* KERNEL_PRIVATE */
#endif /* _VM_PMAP_H_ */