X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d1ecb069dfe24481e4a83f44cb5217a2b06746d7..22ba694c5857e62b5a553b1505dcf2e509177f28:/osfmk/vm/pmap.h

diff --git a/osfmk/vm/pmap.h b/osfmk/vm/pmap.h
index bf7cbed6c..2e228d6d2 100644
--- a/osfmk/vm/pmap.h
+++ b/osfmk/vm/pmap.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -141,7 +141,7 @@ extern void		pmap_startup(
 						 * use remaining physical pages
 						 * to allocate page frames.
 						 */
-extern void		pmap_init(void) __attribute__((section("__TEXT, initcode")));
+extern void		pmap_init(void);
 						/* Initialization,
 						 * after kernel runs
 						 * in virtual memory.
@@ -169,9 +169,7 @@ extern void mapping_free_prime(void);	/* Primes the mapping block release list */
 
 extern boolean_t	pmap_next_page(ppnum_t *pnum);
-#if defined(__LP64__)
-extern boolean_t	pmap_next_page_k64(ppnum_t *pnum);
-#endif
+extern boolean_t	pmap_next_page_hi(ppnum_t *pnum);
 						/* During VM initialization,
 						 * return the next unused
 						 * physical page.
@@ -189,12 +187,9 @@ extern void		pmap_virtual_space(
  *	Routines to manage the physical map data structure.
  */
 extern pmap_t		pmap_create(	/* Create a pmap_t. */
+				ledger_t	ledger,
 				vm_map_size_t	size,
-#ifdef	__i386__
-				boolean_t	is_64bit);
-#else
 				__unused boolean_t	is_64bit);
-#endif
 extern pmap_t		(pmap_kernel)(void);	/* Return the kernel's pmap */
 extern void		pmap_reference(pmap_t pmap);	/* Gain a reference. */
 extern void		pmap_destroy(pmap_t pmap);	/* Release a reference. */
@@ -206,6 +201,7 @@ extern void		pmap_enter(	/* Enter a mapping */
 				vm_map_offset_t	v,
 				ppnum_t		pn,
 				vm_prot_t	prot,
+				vm_prot_t	fault_type,
 				unsigned int	flags,
 				boolean_t	wired);
 
@@ -214,9 +210,11 @@ extern kern_return_t	pmap_enter_options(
 				vm_map_offset_t	v,
 				ppnum_t		pn,
 				vm_prot_t	prot,
+				vm_prot_t	fault_type,
 				unsigned int	flags,
 				boolean_t	wired,
-				unsigned int	options);
+				unsigned int	options,
+				void		*arg);
 
 extern void		pmap_remove_some_phys(
 				pmap_t		pmap,
@@ -231,6 +229,12 @@ extern void		pmap_page_protect(	/* Restrict access to page. */
 				ppnum_t		phys,
 				vm_prot_t	prot);
 
+extern void		pmap_page_protect_options(	/* Restrict access to page. */
+				ppnum_t		phys,
+				vm_prot_t	prot,
+				unsigned int	options,
+				void		*arg);
+
 extern void		(pmap_zero_page)(
 				ppnum_t		pn);
 
@@ -265,6 +269,11 @@ extern void (pmap_copy_part_rpage)(
 extern unsigned int (pmap_disconnect)(	/* disconnect mappings and return reference and change */
 				ppnum_t		phys);
 
+extern unsigned int (pmap_disconnect_options)(	/* disconnect mappings and return reference and change */
+				ppnum_t		phys,
+				unsigned int	options,
+				void		*arg);
+
 extern kern_return_t	(pmap_attribute_cache_sync)(	/* Flush appropriate
 							 * cache based on
 							 * page number sent */
@@ -276,6 +285,12 @@ extern kern_return_t	(pmap_attribute_cache_sync)(	/* Flush appropriate
 extern	unsigned int	(pmap_cache_attributes)(
 				ppnum_t		pn);
 
+/*
+ * Set (override) cache attributes for the specified physical page
+ */
+extern	void		pmap_set_cache_attributes(
+				ppnum_t,
+				unsigned int);
 extern void pmap_sync_page_data_phys(ppnum_t pa);
 extern void pmap_sync_page_attributes_phys(ppnum_t pa);
 
@@ -370,39 +385,85 @@ extern kern_return_t	(pmap_attribute)(	/* Get/Set special memory
 /*
  *	Macro to be used in place of pmap_enter()
  */
-#define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
+#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
 	MACRO_BEGIN						\
 	pmap_t		__pmap = (pmap);			\
 	vm_page_t	__page = (page);			\
+	int		__options = 0;				\
 								\
 	PMAP_ENTER_CHECK(__pmap, __page)			\
-	pmap_enter(__pmap,					\
-		   (virtual_address),				\
-		   __page->phys_page,				\
-		   (protection),				\
-		   (flags),					\
-		   (wired));					\
+	if (__page->object->internal) {				\
+		__options |= PMAP_OPTIONS_INTERNAL;		\
+	}							\
+	if (__page->reusable || __page->object->all_reusable) {	\
+		__options |= PMAP_OPTIONS_REUSABLE;		\
+	}							\
+	(void) pmap_enter_options(__pmap,			\
+				  (virtual_address),		\
+				  __page->phys_page,		\
+				  (protection),			\
+				  (fault_type),			\
+				  (flags),			\
+				  (wired),			\
+				  __options,			\
+				  NULL);			\
 	MACRO_END
 #endif	/* !PMAP_ENTER */
 
 #ifndef	PMAP_ENTER_OPTIONS
 #define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection,	\
-			   flags, wired, options, result)		\
+			   fault_type, flags, wired, options, result)	\
 	MACRO_BEGIN						\
 	pmap_t		__pmap = (pmap);			\
 	vm_page_t	__page = (page);			\
+	int		__extra_options = 0;			\
 								\
 	PMAP_ENTER_CHECK(__pmap, __page)			\
+	if (__page->object->internal) {				\
+		__extra_options |= PMAP_OPTIONS_INTERNAL;	\
+	}							\
+	if (__page->reusable || __page->object->all_reusable) {	\
+		__extra_options |= PMAP_OPTIONS_REUSABLE;	\
+	}							\
 	result = pmap_enter_options(__pmap,			\
-				    (virtual_address),		\
-				    __page->phys_page,		\
-				    (protection),		\
-				    (flags),			\
-				    (wired),			\
-				    options);			\
+				    (virtual_address),		\
+				    __page->phys_page,		\
+				    (protection),		\
+				    (fault_type),		\
+				    (flags),			\
+				    (wired),			\
+				    (options) | __extra_options, \
+				    NULL);			\
 	MACRO_END
 #endif	/* !PMAP_ENTER_OPTIONS */
 
+#ifndef	PMAP_SET_CACHE_ATTR
+#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)	\
+	MACRO_BEGIN							\
+		if (!batch_pmap_op) {					\
+			pmap_set_cache_attributes(mem->phys_page, cache_attr); \
+			object->set_cache_attr = TRUE;			\
+		}							\
+	MACRO_END
+#endif	/* PMAP_SET_CACHE_ATTR */
+
+#ifndef PMAP_BATCH_SET_CACHE_ATTR
+#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,		\
+				  cache_attr, num_pages, batch_pmap_op)	\
+	MACRO_BEGIN							\
+	if ((batch_pmap_op)) {						\
+		unsigned int __page_idx=0;				\
+		while (__page_idx < (num_pages)) {			\
+			pmap_set_cache_attributes(			\
+				user_page_list[__page_idx].phys_addr,	\
+				(cache_attr));				\
+			__page_idx++;					\
+		}							\
+		(object)->set_cache_attr = TRUE;			\
+	}								\
+	MACRO_END
+#endif	/* PMAP_BATCH_SET_CACHE_ATTR */
+
 #define PMAP_ENTER_CHECK(pmap, page)				\
 {								\
 	if ((pmap) != kernel_pmap) {				\
@@ -419,6 +480,13 @@ extern kern_return_t	(pmap_attribute)(	/* Get/Set special memory
 		 * physical addresses, simulating them if not provided
 		 * by the hardware.
 		 */
+struct pfc {
+	long	pfc_cpus;
+	long	pfc_invalid_global;
+};
+
+typedef	struct pfc	pmap_flush_context;
+
 /* Clear reference bit */
 extern void		pmap_clear_reference(ppnum_t	pn);
 /* Return reference bit */
@@ -435,6 +503,11 @@ extern unsigned int pmap_get_refmod(ppnum_t pn);
 extern void		pmap_clear_refmod(ppnum_t pn, unsigned int mask);
 #define VM_MEM_MODIFIED		0x01	/* Modified bit */
 #define VM_MEM_REFERENCED	0x02	/* Referenced bit */
+extern void		pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
+
+
+extern void pmap_flush_context_init(pmap_flush_context *);
+extern void pmap_flush(pmap_flush_context *);
 
 /*
  *	Routines that operate on ranges of virtual addresses.
@@ -445,6 +518,14 @@ extern void		pmap_protect(	/* Change protections. */
 				vm_map_offset_t	e,
 				vm_prot_t	prot);
 
+extern void		pmap_protect_options(	/* Change protections. */
+				pmap_t		map,
+				vm_map_offset_t	s,
+				vm_map_offset_t	e,
+				vm_prot_t	prot,
+				unsigned int	options,
+				void		*arg);
+
 extern void		(pmap_pageable)(
 				pmap_t		pmap,
 				vm_map_offset_t	start,
@@ -455,17 +536,21 @@ extern void		(pmap_pageable)(
 extern uint64_t pmap_nesting_size_min;
 extern uint64_t pmap_nesting_size_max;
 
-extern kern_return_t pmap_nest(pmap_t grand,
-			       pmap_t subord,
-			       addr64_t vstart,
-			       addr64_t nstart,
-			       uint64_t size);
-extern kern_return_t pmap_unnest(pmap_t grand,
-				 addr64_t vaddr,
-				 uint64_t size);
+extern kern_return_t	pmap_nest(pmap_t,
+				  pmap_t,
+				  addr64_t,
+				  addr64_t,
+				  uint64_t);
+extern kern_return_t	pmap_unnest(pmap_t,
+				    addr64_t,
+				    uint64_t);
 extern boolean_t	pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
 #endif	/* MACH_KERNEL_PRIVATE */
 
+extern boolean_t	pmap_is_noencrypt(ppnum_t);
+extern void		pmap_set_noencrypt(ppnum_t pn);
+extern void		pmap_clear_noencrypt(ppnum_t pn);
+
 /*
  * JMM - This portion is exported to other kernel components right now,
  * but will be pulled back in the future when the needed functionality
@@ -482,14 +567,25 @@ extern pmap_t	kernel_pmap;			/* The kernel's map */
 #define VM_MEM_NOT_CACHEABLE	0x4		/* (I) Cache Inhibit */
 #define VM_MEM_WRITE_THROUGH	0x8		/* (W) Write-Through */
 
+#define VM_WIMG_USE_DEFAULT	0x80
 #define VM_WIMG_MASK		0xFF
-#define VM_WIMG_USE_DEFAULT	0x80000000
 
 #define VM_MEM_SUPERPAGE	0x100		/* map a superpage instead of a base page */
+#define VM_MEM_STACK		0x200
 
 #define PMAP_OPTIONS_NOWAIT	0x1		/* don't block, return
 						 * KERN_RESOURCE_SHORTAGE
 						 * instead */
+#define PMAP_OPTIONS_NOENTER	0x2		/* expand pmap if needed
+						 * but don't enter mapping
+						 */
+#define PMAP_OPTIONS_COMPRESSOR	0x4		/* credit the compressor for
+						 * this operation */
+#define PMAP_OPTIONS_INTERNAL	0x8		/* page from internal object */
+#define PMAP_OPTIONS_REUSABLE	0x10		/* page is "reusable" */
+#define PMAP_OPTIONS_NOFLUSH	0x20		/* delay flushing of pmap */
+#define PMAP_OPTIONS_NOREFMOD	0x40		/* don't need ref/mod on disconnect */
+#define PMAP_OPTIONS_REMOVE	0x100		/* removing a mapping */
 
 #if	!defined(__LP64__)
 extern vm_offset_t	pmap_extract(pmap_t pmap,
@@ -506,6 +602,18 @@ extern void		pmap_remove(	/* Remove mappings. */
 				vm_map_offset_t	s,
 				vm_map_offset_t	e);
 
+extern void		pmap_remove_options(	/* Remove mappings. */
+				pmap_t		map,
+				vm_map_offset_t	s,
+				vm_map_offset_t	e,
+				int		options);
+
+extern void		pmap_reusable(
+				pmap_t		map,
+				vm_map_offset_t	s,
+				vm_map_offset_t	e,
+				boolean_t	reusable);
+
 extern void		fillPage(ppnum_t pa, unsigned int fill);
 
 extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
@@ -515,6 +623,10 @@ extern void pmap_unmap_sharedpage(pmap_t pmap);
 void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
 #endif
 
+unsigned int pmap_query_resident(pmap_t pmap,
+				 vm_map_offset_t s,
+				 vm_map_offset_t e);
+
 #endif	/* KERNEL_PRIVATE */
 
 #endif	/* _VM_PMAP_H_ */
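
The hunks above add a batched TLB-flush interface: a pmap_flush_context type, pmap_flush_context_init() and pmap_flush(), a PMAP_OPTIONS_NOFLUSH flag, and a trailing "void *arg" on the new *_options routines. Below is a minimal usage sketch, not taken from the patch; it assumes (as the declarations and the "delay flushing of pmap" comment suggest, but the diff does not show) that passing PMAP_OPTIONS_NOFLUSH with arg pointing at an initialized pmap_flush_context defers each per-page flush into that context until pmap_flush() is called. The helper name and its page list are hypothetical.

#include <vm/pmap.h>

/*
 * Hypothetical caller: clear the ref/mod bits on a batch of pages while
 * deferring the TLB shootdowns, then issue one combined flush at the end.
 */
static void
clear_refmod_batched(ppnum_t *pages, unsigned int npages)
{
	pmap_flush_context	pfc;
	unsigned int		i;

	pmap_flush_context_init(&pfc);

	for (i = 0; i < npages; i++) {
		/* record the needed invalidation in pfc instead of flushing now */
		pmap_clear_refmod_options(pages[i],
					  VM_MEM_MODIFIED | VM_MEM_REFERENCED,
					  PMAP_OPTIONS_NOFLUSH,
					  (void *)&pfc);
	}

	/* one TLB flush covering everything deferred above */
	pmap_flush(&pfc);
}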