X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/de355530ae67247cbd0da700edb3a2a1dae884c2..15129b1c8dbb3650c63b70adb1cad9af601c6c17:/osfmk/vm/pmap.h

diff --git a/osfmk/vm/pmap.h b/osfmk/vm/pmap.h
index 26337c7e7..2e228d6d2 100644
--- a/osfmk/vm/pmap.h
+++ b/osfmk/vm/pmap.h
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
 /*
  * @OSF_COPYRIGHT@
@@ -61,10 +67,6 @@
 #ifndef _VM_PMAP_H_
 #define _VM_PMAP_H_
 
-#include 
-
-#ifdef __APPLE_API_PRIVATE
-
 #include 
 #include 
 #include 
@@ -72,6 +74,8 @@
 #include 
 #include 
 
+#ifdef KERNEL_PRIVATE
+
 /*
  * The following is a description of the interface to the
  * machine-dependent "physical map" data structure.  The module
@@ -84,13 +88,29 @@
  * many address spaces.
  */

-#if !defined(MACH_KERNEL_PRIVATE)
-
-typedef void *pmap_t;
-
-#else /* MACH_KERNEL_PRIVATE */
-
-typedef struct pmap *pmap_t;
+/* Copy between a physical page and a virtual address */
+/* LP64todo - switch to vm_map_offset_t when it grows */
+extern kern_return_t  copypv(
+                        addr64_t source,
+                        addr64_t sink,
+                        unsigned int size,
+                        int which);
+#define cppvPsnk        1
+#define cppvPsnkb       31
+#define cppvPsrc        2
+#define cppvPsrcb       30
+#define cppvFsnk        4
+#define cppvFsnkb       29
+#define cppvFsrc        8
+#define cppvFsrcb       28
+#define cppvNoModSnk    16
+#define cppvNoModSnkb   27
+#define cppvNoRefSrc    32
+#define cppvNoRefSrcb   26
+#define cppvKmap        64      /* Use the kernel's vm_map */
+#define cppvKmapb       25
+
+#ifdef MACH_KERNEL_PRIVATE
 
 #include 
 
@@ -99,9 +119,14 @@ typedef struct pmap *pmap_t;
  * There is traditionally also a pmap_bootstrap,
  * used very early by machine-dependent code,
  * but it is not part of the interface.
+ *
+ * LP64todo -
+ * These interfaces are tied to the size of the
+ * kernel pmap - and therefore use the "local"
+ * vm_offset_t, etc... types.
  */

-extern vm_offset_t      pmap_steal_memory(vm_size_t size);
+extern void *pmap_steal_memory(vm_size_t size);
                                         /* During VM initialization,
                                          * steal a chunk of memory.
                                          */
@@ -116,11 +141,16 @@ extern void pmap_startup(
                                          * use remaining physical pages
                                          * to allocate page frames.
                                          */
-extern void             pmap_init(void);        /* Initialization,
+extern void             pmap_init(void);
+                                        /* Initialization,
                                          * after kernel runs
                                          * in virtual memory.
                                          */
 
+extern void             mapping_adjust(void);   /* Adjust free mapping count */
+
+extern void             mapping_free_prime(void); /* Primes the mapping block release list */
+
 #ifndef MACHINE_PAGES
 /*
  * If machine/pmap.h defines MACHINE_PAGES, it must implement
@@ -138,7 +168,8 @@ extern void pmap_init(void);    /* Initialization,
  * However, for best performance pmap_free_pages should be accurate.
  */
 
-extern boolean_t        pmap_next_page(vm_offset_t *paddr);
+extern boolean_t        pmap_next_page(ppnum_t *pnum);
+extern boolean_t        pmap_next_page_hi(ppnum_t *pnum);
                                         /* During VM initialization,
                                          * return the next unused
                                          * physical page.
@@ -155,7 +186,10 @@ extern void pmap_virtual_space(
 /*
  *      Routines to manage the physical map data structure.
  */
-extern pmap_t           pmap_create(vm_size_t size);    /* Create a pmap_t. */
+extern pmap_t           pmap_create(    /* Create a pmap_t. */
+                                ledger_t        ledger,
+                                vm_map_size_t   size,
+                                __unused boolean_t      is_64bit);
 extern pmap_t           (pmap_kernel)(void);    /* Return the kernel's pmap */
 extern void             pmap_reference(pmap_t pmap);    /* Gain a reference. */
 extern void             pmap_destroy(pmap_t pmap);      /* Release a reference. */
@@ -164,15 +198,27 @@ extern void pmap_switch(pmap_t);
 
 extern void             pmap_enter(     /* Enter a mapping */
                                 pmap_t          pmap,
-                                vm_offset_t     v,
-                                vm_offset_t     pa,
+                                vm_map_offset_t v,
+                                ppnum_t         pn,
                                 vm_prot_t       prot,
+                                vm_prot_t       fault_type,
                                 unsigned int    flags,
                                 boolean_t       wired);
 
+extern kern_return_t    pmap_enter_options(
+                                pmap_t          pmap,
+                                vm_map_offset_t v,
+                                ppnum_t         pn,
+                                vm_prot_t       prot,
+                                vm_prot_t       fault_type,
+                                unsigned int    flags,
+                                boolean_t       wired,
+                                unsigned int    options,
+                                void            *arg);
+
 extern void             pmap_remove_some_phys(
                                 pmap_t          pmap,
-                                vm_offset_t     pa);
+                                ppnum_t         pn);
 
 
 /*
@@ -180,71 +226,93 @@ extern void pmap_remove_some_phys(
  */
 
 extern void             pmap_page_protect(      /* Restrict access to page. */
-                                vm_offset_t     phys,
+                                ppnum_t         phys,
                                 vm_prot_t       prot);
 
+extern void             pmap_page_protect_options(      /* Restrict access to page. */
+                                ppnum_t         phys,
+                                vm_prot_t       prot,
+                                unsigned int    options,
+                                void            *arg);
+
 extern void             (pmap_zero_page)(
-                                vm_offset_t     phys);
+                                ppnum_t         pn);
 
 extern void             (pmap_zero_part_page)(
-                                vm_offset_t     p,
+                                ppnum_t         pn,
                                 vm_offset_t     offset,
                                 vm_size_t       len);
 
 extern void             (pmap_copy_page)(
-                                vm_offset_t     src,
-                                vm_offset_t     dest);
+                                ppnum_t         src,
+                                ppnum_t         dest);
 
 extern void             (pmap_copy_part_page)(
-                                vm_offset_t     src,
+                                ppnum_t         src,
                                 vm_offset_t     src_offset,
-                                vm_offset_t     dst,
+                                ppnum_t         dst,
                                 vm_offset_t     dst_offset,
                                 vm_size_t       len);
 
 extern void             (pmap_copy_part_lpage)(
                                 vm_offset_t     src,
-                                vm_offset_t     dst,
+                                ppnum_t         dst,
                                 vm_offset_t     dst_offset,
                                 vm_size_t       len);
 
 extern void             (pmap_copy_part_rpage)(
-                                vm_offset_t     src,
+                                ppnum_t         src,
                                 vm_offset_t     src_offset,
                                 vm_offset_t     dst,
                                 vm_size_t       len);
+
+extern unsigned int     (pmap_disconnect)(      /* disconnect mappings and return reference and change */
+                                ppnum_t         phys);
+
+extern unsigned int     (pmap_disconnect_options)(      /* disconnect mappings and return reference and change */
+                                ppnum_t         phys,
+                                unsigned int    options,
+                                void            *arg);
+
+extern kern_return_t    (pmap_attribute_cache_sync)(    /* Flush appropriate
+                                                         * cache based on
+                                                         * page number sent */
+                                ppnum_t         pn,
+                                vm_size_t       size,
+                                vm_machine_attribute_t attribute,
+                                vm_machine_attribute_val_t* value);
+
+extern unsigned int     (pmap_cache_attributes)(
+                                ppnum_t         pn);
+
+/*
+ * Set (override) cache attributes for the specified physical page
+ */
+extern void             pmap_set_cache_attributes(
+                                ppnum_t,
+                                unsigned int);
+extern void pmap_sync_page_data_phys(ppnum_t pa);
+extern void pmap_sync_page_attributes_phys(ppnum_t pa);
 
 /*
  * debug/assertions. pmap_verify_free returns true iff
  * the given physical page is mapped into no pmap.
  */
-extern boolean_t        pmap_verify_free(vm_offset_t paddr);
+extern boolean_t        pmap_verify_free(ppnum_t pn);
 
 /*
  *      Statistics routines
  */
 extern int              (pmap_resident_count)(pmap_t pmap);
+extern int              (pmap_resident_max)(pmap_t pmap);
 
 /*
  *      Sundry required (internal) routines
  */
+#ifdef CURRENTLY_UNUSED_AND_UNTESTED
 extern void             pmap_collect(pmap_t pmap);/* Perform garbage
                                            * collection, if any */
-
-
-extern vm_offset_t      (pmap_phys_address)(    /* Transform address returned
-                                                 * by device driver mapping
-                                                 * function to physical address
-                                                 * known to this module. */
-                                int frame);
-
-extern int              (pmap_phys_to_frame)(   /* Inverse of pmap_phys_addess,
-                                                 * for use by device driver
-                                                 * mapping function in
-                                                 * machine-independent
-                                                 * pseudo-devices. */
-                                vm_offset_t phys);
-
+#endif
 /*
  *      Optional routines
  */
@@ -252,108 +320,236 @@ extern void (pmap_copy)(        /* Copy range of mappings,
                                          * if desired. */
                                 pmap_t          dest,
                                 pmap_t          source,
-                                vm_offset_t     dest_va,
-                                vm_size_t       size,
-                                vm_offset_t     source_va);
+                                vm_map_offset_t dest_va,
+                                vm_map_size_t   size,
+                                vm_map_offset_t source_va);
 
 extern kern_return_t    (pmap_attribute)(       /* Get/Set special memory
                                                  * attributes */
                                 pmap_t          pmap,
-                                vm_offset_t     va,
-                                vm_size_t       size,
+                                vm_map_offset_t va,
+                                vm_map_size_t   size,
                                 vm_machine_attribute_t attribute,
                                 vm_machine_attribute_val_t* value);
 
-extern kern_return_t    (pmap_attribute_cache_sync)(    /* Flush appropriate
-                                                         * cache based on
-                                                         * phys addr sent */
-                                vm_offset_t     addr,
-                                vm_size_t       size,
-                                vm_machine_attribute_t attribute,
-                                vm_machine_attribute_val_t* value);
-
 /*
  *      Routines defined as macros.
  */
 
 #ifndef PMAP_ACTIVATE_USER
-#define PMAP_ACTIVATE_USER(act, cpu) {                          \
+#ifndef PMAP_ACTIVATE
+#define PMAP_ACTIVATE_USER(thr, cpu)
+#else   /* PMAP_ACTIVATE */
+#define PMAP_ACTIVATE_USER(thr, cpu) {                          \
         pmap_t          pmap;                                   \
                                                                 \
-        pmap = (act)->map->pmap;                                \
+        pmap = (thr)->map->pmap;                                \
         if (pmap != pmap_kernel())                              \
-                PMAP_ACTIVATE(pmap, (act), (cpu));              \
+                PMAP_ACTIVATE(pmap, (thr), (cpu));              \
 }
+#endif  /* PMAP_ACTIVATE */
 #endif  /* PMAP_ACTIVATE_USER */
 
 #ifndef PMAP_DEACTIVATE_USER
-#define PMAP_DEACTIVATE_USER(act, cpu) {                        \
+#ifndef PMAP_DEACTIVATE
+#define PMAP_DEACTIVATE_USER(thr, cpu)
+#else   /* PMAP_DEACTIVATE */
+#define PMAP_DEACTIVATE_USER(thr, cpu) {                        \
         pmap_t          pmap;                                   \
                                                                 \
-        pmap = (act)->map->pmap;                                \
-        if ((pmap) != pmap_kernel())                            \
-                PMAP_DEACTIVATE(pmap, (act), (cpu));            \
+        pmap = (thr)->map->pmap;                                \
+        if ((pmap) != pmap_kernel())                            \
+                PMAP_DEACTIVATE(pmap, (thr), (cpu));            \
 }
+#endif  /* PMAP_DEACTIVATE */
 #endif  /* PMAP_DEACTIVATE_USER */
 
 #ifndef PMAP_ACTIVATE_KERNEL
+#ifndef PMAP_ACTIVATE
+#define PMAP_ACTIVATE_KERNEL(cpu)
+#else   /* PMAP_ACTIVATE */
 #define PMAP_ACTIVATE_KERNEL(cpu)                       \
-                PMAP_ACTIVATE(pmap_kernel(), THR_ACT_NULL, cpu)
+                PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
+#endif  /* PMAP_ACTIVATE */
 #endif  /* PMAP_ACTIVATE_KERNEL */
 
 #ifndef PMAP_DEACTIVATE_KERNEL
+#ifndef PMAP_DEACTIVATE
+#define PMAP_DEACTIVATE_KERNEL(cpu)
+#else   /* PMAP_DEACTIVATE */
 #define PMAP_DEACTIVATE_KERNEL(cpu)                     \
-                PMAP_DEACTIVATE(pmap_kernel(), THR_ACT_NULL, cpu)
+                PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
+#endif  /* PMAP_DEACTIVATE */
 #endif  /* PMAP_DEACTIVATE_KERNEL */
 
 #ifndef PMAP_ENTER
 /*
  *      Macro to be used in place of pmap_enter()
  */
-#define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
-                MACRO_BEGIN                                     \
-                pmap_enter(                                     \
-                        (pmap),                                 \
-                        (virtual_address),                      \
-                        (page)->phys_addr,                      \
-                        (protection) & ~(page)->page_lock,      \
-                        flags,                                  \
-                        (wired)                                 \
-                 );                                             \
-                MACRO_END
+#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
+        MACRO_BEGIN                                             \
+        pmap_t          __pmap = (pmap);                        \
+        vm_page_t       __page = (page);                        \
+        int             __options = 0;                          \
+                                                                \
+        PMAP_ENTER_CHECK(__pmap, __page)                        \
+        if (__page->object->internal) {                         \
+                __options |= PMAP_OPTIONS_INTERNAL;             \
+        }                                                       \
+        if (__page->reusable || __page->object->all_reusable) { \
+                __options |= PMAP_OPTIONS_REUSABLE;             \
+        }                                                       \
+        (void) pmap_enter_options(__pmap,                       \
+                                  (virtual_address),            \
+                                  __page->phys_page,            \
+                                  (protection),                 \
+                                  (fault_type),                 \
+                                  (flags),                      \
+                                  (wired),                      \
+                                  __options,                    \
+                                  NULL);                        \
+        MACRO_END
 #endif  /* !PMAP_ENTER */
 
+#ifndef PMAP_ENTER_OPTIONS
+#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection,    \
+                           fault_type, flags, wired, options, result)  \
+        MACRO_BEGIN                                             \
+        pmap_t          __pmap = (pmap);                        \
+        vm_page_t       __page = (page);                        \
+        int             __extra_options = 0;                    \
+                                                                \
+        PMAP_ENTER_CHECK(__pmap, __page)                        \
+        if (__page->object->internal) {                         \
+                __extra_options |= PMAP_OPTIONS_INTERNAL;       \
+        }                                                       \
+        if (__page->reusable || __page->object->all_reusable) { \
+                __extra_options |= PMAP_OPTIONS_REUSABLE;       \
+        }                                                       \
+        result = pmap_enter_options(__pmap,                     \
+                                    (virtual_address),          \
+                                    __page->phys_page,          \
+                                    (protection),               \
+                                    (fault_type),               \
+                                    (flags),                    \
+                                    (wired),                    \
+                                    (options) | __extra_options,\
+                                    NULL);                      \
+        MACRO_END
+#endif  /* !PMAP_ENTER_OPTIONS */
+
+#ifndef PMAP_SET_CACHE_ATTR
+#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)            \
+        MACRO_BEGIN                                                             \
+        if (!batch_pmap_op) {                                                   \
+                pmap_set_cache_attributes(mem->phys_page, cache_attr);          \
+                object->set_cache_attr = TRUE;                                  \
+        }                                                                       \
+        MACRO_END
+#endif  /* PMAP_SET_CACHE_ATTR */
+
+#ifndef PMAP_BATCH_SET_CACHE_ATTR
+#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
+                                  cache_attr, num_pages, batch_pmap_op) \
+        MACRO_BEGIN                                                     \
+        if ((batch_pmap_op)) {                                          \
+                unsigned int __page_idx = 0;                            \
+                while (__page_idx < (num_pages)) {                      \
+                        pmap_set_cache_attributes(                      \
+                                user_page_list[__page_idx].phys_addr,   \
+                                (cache_attr));                          \
+                        __page_idx++;                                   \
+                }                                                       \
+                (object)->set_cache_attr = TRUE;                        \
+        }                                                               \
+        MACRO_END
+#endif  /* PMAP_BATCH_SET_CACHE_ATTR */
+
+#define PMAP_ENTER_CHECK(pmap, page)                            \
+{                                                               \
+        if ((pmap) != kernel_pmap) {                            \
+                ASSERT_PAGE_DECRYPTED(page);                    \
+        }                                                       \
+        if ((page)->error) {                                    \
+                panic("VM page %p should not have an error\n",  \
+                      (page));                                  \
+        }                                                       \
+}
+
 /*
  *      Routines to manage reference/modify bits based on
  *      physical addresses, simulating them if not provided
  *      by the hardware.
  */
+struct pfc {
+        long    pfc_cpus;
+        long    pfc_invalid_global;
+};
+
+typedef struct pfc      pmap_flush_context;
+
                                 /* Clear reference bit */
-extern void             pmap_clear_reference(vm_offset_t paddr);
+extern void             pmap_clear_reference(ppnum_t pn);
                                 /* Return reference bit */
-extern boolean_t        (pmap_is_referenced)(vm_offset_t paddr);
+extern boolean_t        (pmap_is_referenced)(ppnum_t pn);
                                 /* Set modify bit */
-extern void             pmap_set_modify(vm_offset_t paddr);
+extern void             pmap_set_modify(ppnum_t pn);
                                 /* Clear modify bit */
-extern void             pmap_clear_modify(vm_offset_t paddr);
+extern void             pmap_clear_modify(ppnum_t pn);
                                 /* Return modify bit */
-extern boolean_t        pmap_is_modified(vm_offset_t paddr);
+extern boolean_t        pmap_is_modified(ppnum_t pn);
+                                /* Return modified and referenced bits */
+extern unsigned int     pmap_get_refmod(ppnum_t pn);
+                                /* Clear modified and referenced bits */
+extern void             pmap_clear_refmod(ppnum_t pn, unsigned int mask);
+#define VM_MEM_MODIFIED         0x01    /* Modified bit */
+#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
+extern void             pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
+
+
+extern void pmap_flush_context_init(pmap_flush_context *);
+extern void pmap_flush(pmap_flush_context *);
 
 /*
  *      Routines that operate on ranges of virtual addresses.
  */
 extern void             pmap_protect(   /* Change protections. */
                                 pmap_t          map,
-                                vm_offset_t     s,
-                                vm_offset_t     e,
+                                vm_map_offset_t s,
+                                vm_map_offset_t e,
                                 vm_prot_t       prot);
 
+extern void             pmap_protect_options(   /* Change protections. */
+                                pmap_t          map,
+                                vm_map_offset_t s,
+                                vm_map_offset_t e,
+                                vm_prot_t       prot,
+                                unsigned int    options,
+                                void            *arg);
+
 extern void             (pmap_pageable)(
                                 pmap_t          pmap,
-                                vm_offset_t     start,
-                                vm_offset_t     end,
+                                vm_map_offset_t start,
+                                vm_map_offset_t end,
                                 boolean_t       pageable);
 
-#endif  /* MACH_KERNEL_PRIVATE */
+
+extern uint64_t pmap_nesting_size_min;
+extern uint64_t pmap_nesting_size_max;
+
+extern kern_return_t pmap_nest(pmap_t,
+                               pmap_t,
+                               addr64_t,
+                               addr64_t,
+                               uint64_t);
+extern kern_return_t pmap_unnest(pmap_t,
+                                 addr64_t,
+                                 uint64_t);
+extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
+#endif  /* MACH_KERNEL_PRIVATE */
+
+extern boolean_t        pmap_is_noencrypt(ppnum_t);
+extern void             pmap_set_noencrypt(ppnum_t pn);
+extern void             pmap_clear_noencrypt(ppnum_t pn);
 
 /*
  * JMM - This portion is exported to other kernel components right now,
@@ -361,39 +557,76 @@ extern void (pmap_pageable)(
  * is provided in a cleaner manner.
  */
 
-#define PMAP_NULL  ((pmap_t) 0)
-
 extern pmap_t   kernel_pmap;                    /* The kernel's map */
 #define         pmap_kernel()   (kernel_pmap)
 
 /* machine independent WIMG bits */
 
-#define VM_MEM_GUARDED          0x1
-#define VM_MEM_COHERENT         0x2
-#define VM_MEM_NOT_CACHEABLE    0x4
-#define VM_MEM_WRITE_THROUGH    0x8
+#define VM_MEM_GUARDED          0x1             /* (G) Guarded Storage */
+#define VM_MEM_COHERENT         0x2             /* (M) Memory Coherency */
+#define VM_MEM_NOT_CACHEABLE    0x4             /* (I) Cache Inhibit */
+#define VM_MEM_WRITE_THROUGH    0x8             /* (W) Write-Through */
 
+#define VM_WIMG_USE_DEFAULT     0x80
 #define VM_WIMG_MASK            0xFF
-#define VM_WIMG_USE_DEFAULT     0x80000000
 
-extern void             pmap_modify_pages(      /* Set modify bit for pages */
-                                pmap_t          map,
-                                vm_offset_t     s,
-                                vm_offset_t     e);
+#define VM_MEM_SUPERPAGE        0x100           /* map a superpage instead of a base page */
+#define VM_MEM_STACK            0x200
 
+#define PMAP_OPTIONS_NOWAIT     0x1             /* don't block, return
+                                                 * KERN_RESOURCE_SHORTAGE
+                                                 * instead */
+#define PMAP_OPTIONS_NOENTER    0x2             /* expand pmap if needed
+                                                 * but don't enter mapping
+                                                 */
+#define PMAP_OPTIONS_COMPRESSOR 0x4             /* credit the compressor for
+                                                 * this operation */
+#define PMAP_OPTIONS_INTERNAL   0x8             /* page from internal object */
+#define PMAP_OPTIONS_REUSABLE   0x10            /* page is "reusable" */
+#define PMAP_OPTIONS_NOFLUSH    0x20            /* delay flushing of pmap */
+#define PMAP_OPTIONS_NOREFMOD   0x40            /* don't need ref/mod on disconnect */
+#define PMAP_OPTIONS_REMOVE     0x100           /* removing a mapping */
+
+#if !defined(__LP64__)
 extern vm_offset_t      pmap_extract(pmap_t pmap,
-                                vm_offset_t va);
-
+                                vm_map_offset_t va);
+#endif
 extern void             pmap_change_wiring(     /* Specify pageability */
                                 pmap_t          pmap,
-                                vm_offset_t     va,
+                                vm_map_offset_t va,
                                 boolean_t       wired);
 
+/* LP64todo - switch to vm_map_offset_t when it grows */
 extern void             pmap_remove(    /* Remove mappings. */
                                 pmap_t          map,
-                                vm_offset_t     s,
-                                vm_offset_t     e);
+                                vm_map_offset_t s,
+                                vm_map_offset_t e);
+
+extern void             pmap_remove_options(    /* Remove mappings. */
+                                pmap_t          map,
+                                vm_map_offset_t s,
+                                vm_map_offset_t e,
+                                int             options);
+
+extern void             pmap_reusable(
+                                pmap_t          map,
+                                vm_map_offset_t s,
+                                vm_map_offset_t e,
+                                boolean_t       reusable);
+
+extern void             fillPage(ppnum_t pa, unsigned int fill);
+
+extern void             pmap_map_sharedpage(task_t task, pmap_t pmap);
+extern void             pmap_unmap_sharedpage(pmap_t pmap);
+
+#if defined(__LP64__)
+void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
+#endif
+
+unsigned int pmap_query_resident(pmap_t pmap,
+                                 vm_map_offset_t s,
+                                 vm_map_offset_t e);
 
-#endif /* __APPLE_API_PRIVATE */
+#endif  /* KERNEL_PRIVATE */
 
 #endif  /* _VM_PMAP_H_ */
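
Notes on the new interfaces (illustrative sketches, not part of the patch).

The cppv* constants encode the copypv() `which' argument two ways: as flag
values and, via the *b forms, as big-endian bit numbers, so each flag
satisfies flag == 1 << (31 - flagb) (e.g. cppvPsrc == 1 << (31 - cppvPsrcb)).
A minimal sketch of a caller, assuming a physical source page and a
kernel-virtual sink; the helper name and call site are hypothetical:

/*
 * Hypothetical caller: copy `bytes' from a physical source page to a
 * kernel virtual address.  cppvPsrc marks the source operand as physical,
 * cppvKmap resolves the virtual sink in the kernel's vm_map, and
 * cppvNoRefSrc asks copypv() not to set the source's referenced bit.
 */
static kern_return_t
copy_phys_to_kva(addr64_t src_phys, addr64_t dst_kva, unsigned int bytes)
{
        int which = cppvPsrc | cppvNoRefSrc | cppvKmap;

        return copypv(src_phys, dst_kva, bytes, which);
}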
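
Unlike pmap_enter(), the new pmap_enter_options() returns a kern_return_t,
which is what lets PMAP_OPTIONS_NOWAIT callers see KERN_RESOURCE_SHORTAGE
instead of blocking while the pmap expands.  A sketch of such a caller; the
helper is hypothetical, and passing the protection as the fault type is an
assumption made for illustration:

/*
 * Hypothetical caller: try to enter a mapping without sleeping.  On
 * KERN_RESOURCE_SHORTAGE the caller is expected to back off and retry.
 */
static kern_return_t
pmap_enter_nowait(pmap_t pmap, vm_map_offset_t va, ppnum_t pn, vm_prot_t prot)
{
        return pmap_enter_options(pmap, va, pn,
                   prot,                 /* protection */
                   prot,                 /* fault_type (assumed = prot) */
                   VM_WIMG_USE_DEFAULT,  /* flags: default WIMG attributes */
                   FALSE,                /* not wired */
                   PMAP_OPTIONS_NOWAIT,
                   NULL);                /* arg */
}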
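
The struct pfc / pmap_flush_context additions pair with PMAP_OPTIONS_NOFLUSH:
per-page operations defer their TLB invalidations into the context, and a
single pmap_flush() issues them together.  That the void * argument of
pmap_clear_refmod_options() carries the flush context is an inference from
the declarations, not something this header states; the batch helper below
is hypothetical:

/*
 * Hypothetical batch: clear ref/mod on many pages with one shoot-down
 * at the end instead of one flush per page.
 */
static void
clear_refmod_batched(ppnum_t *pages, unsigned int count)
{
        pmap_flush_context pfc;
        unsigned int i;

        pmap_flush_context_init(&pfc);
        for (i = 0; i < count; i++) {
                pmap_clear_refmod_options(pages[i],
                        VM_MEM_MODIFIED | VM_MEM_REFERENCED,
                        PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
        }
        pmap_flush(&pfc);       /* one flush for the whole batch */
}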
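
Note also that VM_WIMG_USE_DEFAULT moves from 0x80000000 to 0x80, so a full
WIMG specification now fits under VM_WIMG_MASK (0xFF) in the low byte of the
pmap_enter() flags.  A hypothetical use of the new pmap_set_cache_attributes()
override, e.g. for a page backing device descriptors:

/*
 * Hypothetical override: make one physical page cache-inhibited and
 * guarded.  The mask keeps the value within the machine-independent
 * WIMG byte.
 */
static void
make_page_uncached(ppnum_t pn)
{
        unsigned int wimg = VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED;

        pmap_set_cache_attributes(pn, wimg & VM_WIMG_MASK);
}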