diff --git a/osfmk/vm/pmap.h b/osfmk/vm/pmap.h
index 7638a13a3a66da1cae80cd4b52ca3b045914549b..2e228d6d280858bb89ece6aa60fd62636dbea1d2 100644
--- a/osfmk/vm/pmap.h
+++ b/osfmk/vm/pmap.h
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -135,7 +141,8 @@ extern void         pmap_startup(
                                                 * use remaining physical pages
                                                 * to allocate page frames.
                                                 */
-extern void            pmap_init(void);        /* Initialization,
+extern void            pmap_init(void);
+                                               /* Initialization,
                                                 * after kernel runs
                                                 * in virtual memory.
                                                 */
@@ -162,6 +169,7 @@ extern void                 mapping_free_prime(void); /* Primes the mapping block release list
  */
 
 extern boolean_t       pmap_next_page(ppnum_t *pnum);
+extern boolean_t       pmap_next_page_hi(ppnum_t *pnum);
                                                /* During VM initialization,
                                                 * return the next unused
                                                 * physical page.
@@ -179,8 +187,9 @@ extern void         pmap_virtual_space(
  *     Routines to manage the physical map data structure.
  */
 extern pmap_t          pmap_create(    /* Create a pmap_t. */
+                               ledger_t        ledger,
                                vm_map_size_t   size,
-                               boolean_t       is_64bit);
+                               __unused boolean_t      is_64bit);
 extern pmap_t          (pmap_kernel)(void);    /* Return the kernel's pmap */
 extern void            pmap_reference(pmap_t pmap);    /* Gain a reference. */
 extern void            pmap_destroy(pmap_t pmap); /* Release a reference. */
@@ -192,9 +201,21 @@ extern void                pmap_enter(     /* Enter a mapping */
                                vm_map_offset_t v,
                                ppnum_t         pn,
                                vm_prot_t       prot,
+                               vm_prot_t       fault_type,
                                unsigned int    flags,
                                boolean_t       wired);
 
+extern kern_return_t   pmap_enter_options(
+                                          pmap_t pmap,
+                                          vm_map_offset_t v,
+                                          ppnum_t pn,
+                                          vm_prot_t prot,
+                                          vm_prot_t fault_type,
+                                          unsigned int flags,
+                                          boolean_t wired,
+                                          unsigned int options,
+                                          void *arg);
+
 extern void            pmap_remove_some_phys(
                                pmap_t          pmap,
                                ppnum_t         pn);
@@ -208,6 +229,12 @@ extern void                pmap_page_protect(      /* Restrict access to page. */
                                ppnum_t phys,
                                vm_prot_t       prot);
 
+extern void            pmap_page_protect_options(      /* Restrict access to page. */
+                               ppnum_t phys,
+                               vm_prot_t       prot,
+                               unsigned int    options,
+                               void            *arg);
+
 extern void            (pmap_zero_page)(
                                ppnum_t         pn);
 
@@ -242,6 +269,11 @@ extern void                (pmap_copy_part_rpage)(
 extern unsigned int (pmap_disconnect)( /* disconnect mappings and return reference and change */
                                ppnum_t         phys);
 
+extern unsigned int (pmap_disconnect_options)( /* disconnect mappings and return reference and change */
+                               ppnum_t         phys,
+                               unsigned int    options,
+                               void            *arg);
+
 extern kern_return_t   (pmap_attribute_cache_sync)(  /* Flush appropriate 
                                                       * cache based on
                                                       * page number sent */
@@ -253,6 +285,15 @@ extern kern_return_t       (pmap_attribute_cache_sync)(  /* Flush appropriate
 extern unsigned int    (pmap_cache_attributes)(
                                ppnum_t         pn);
 
+/*
+ * Set (override) cache attributes for the specified physical page
+ */
+extern void            pmap_set_cache_attributes(
+                               ppnum_t,
+                               unsigned int);
+extern void pmap_sync_page_data_phys(ppnum_t pa);
+extern void pmap_sync_page_attributes_phys(ppnum_t pa);
+
 /*
  * debug/assertions. pmap_verify_free returns true iff
  * the given physical page is mapped into no pmap.
@@ -263,13 +304,15 @@ extern boolean_t  pmap_verify_free(ppnum_t pn);
  *     Statistics routines
  */
 extern int             (pmap_resident_count)(pmap_t pmap);
+extern int             (pmap_resident_max)(pmap_t pmap);
 
 /*
  *     Sundry required (internal) routines
  */
+#ifdef CURRENTLY_UNUSED_AND_UNTESTED
 extern void            pmap_collect(pmap_t pmap);/* Perform garbage
                                                 * collection, if any */
-
+#endif
 /*
  *     Optional routines
  */
@@ -342,28 +385,108 @@ extern kern_return_t     (pmap_attribute)(       /* Get/Set special memory
 /*
  *     Macro to be used in place of pmap_enter()
  */
-#define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
+#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
        MACRO_BEGIN                                                     \
        pmap_t          __pmap = (pmap);                                \
        vm_page_t       __page = (page);                                \
+       int             __options = 0;                                  \
                                                                        \
-       if (__pmap != kernel_pmap) {                                    \
-               ASSERT_PAGE_DECRYPTED(__page);                          \
+       PMAP_ENTER_CHECK(__pmap, __page)                                \
+       if (__page->object->internal) {                                 \
+               __options |= PMAP_OPTIONS_INTERNAL;                     \
+       }                                                               \
+       if (__page->reusable || __page->object->all_reusable) {         \
+               __options |= PMAP_OPTIONS_REUSABLE;                     \
        }                                                               \
-       pmap_enter(__pmap,                                              \
-                  (virtual_address),                                   \
-                  __page->phys_page,                                   \
-                  (protection) & ~__page->page_lock,                   \
-                  (flags),                                             \
-                  (wired));                                            \
+       (void) pmap_enter_options(__pmap,                               \
+                                 (virtual_address),                    \
+                                 __page->phys_page,                    \
+                                 (protection),                         \
+                                 (fault_type),                         \
+                                 (flags),                              \
+                                 (wired),                              \
+                                 __options,                            \
+                                 NULL);                                \
        MACRO_END
 #endif /* !PMAP_ENTER */
 
+#ifndef        PMAP_ENTER_OPTIONS
+#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection,    \
+                          fault_type, flags, wired, options, result)   \
+       MACRO_BEGIN                                                     \
+       pmap_t          __pmap = (pmap);                                \
+       vm_page_t       __page = (page);                                \
+       int             __extra_options = 0;                            \
+                                                                       \
+       PMAP_ENTER_CHECK(__pmap, __page)                                \
+       if (__page->object->internal) {                                 \
+               __extra_options |= PMAP_OPTIONS_INTERNAL;               \
+       }                                                               \
+       if (__page->reusable || __page->object->all_reusable) {         \
+               __extra_options |= PMAP_OPTIONS_REUSABLE;               \
+       }                                                               \
+       result = pmap_enter_options(__pmap,                             \
+                                   (virtual_address),                  \
+                                   __page->phys_page,                  \
+                                   (protection),                       \
+                                   (fault_type),                       \
+                                   (flags),                            \
+                                   (wired),                            \
+                                   (options) | __extra_options,        \
+                                   NULL);                              \
+       MACRO_END
+#endif /* !PMAP_ENTER_OPTIONS */
+
+#ifndef PMAP_SET_CACHE_ATTR
+#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)            \
+       MACRO_BEGIN                                                             \
+               if (!batch_pmap_op) {                                           \
+                       pmap_set_cache_attributes(mem->phys_page, cache_attr);  \
+                       object->set_cache_attr = TRUE;                          \
+               }                                                               \
+       MACRO_END                                                       
+#endif /* PMAP_SET_CACHE_ATTR */
+
+#ifndef PMAP_BATCH_SET_CACHE_ATTR
+#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                      \
+                                       cache_attr, num_pages, batch_pmap_op)   \
+       MACRO_BEGIN                                                             \
+               if ((batch_pmap_op)) {                                          \
+                       unsigned int __page_idx=0;                              \
+                       while (__page_idx < (num_pages)) {                      \
+                               pmap_set_cache_attributes(                      \
+                                       user_page_list[__page_idx].phys_addr,   \
+                                       (cache_attr));                          \
+                               __page_idx++;                                   \
+                       }                                                       \
+                       (object)->set_cache_attr = TRUE;                        \
+               }                                                               \
+       MACRO_END
+#endif /* PMAP_BATCH_SET_CACHE_ATTR */
+
+#define PMAP_ENTER_CHECK(pmap, page)                                   \
+{                                                                      \
+       if ((pmap) != kernel_pmap) {                                    \
+               ASSERT_PAGE_DECRYPTED(page);                            \
+       }                                                               \
+       if ((page)->error) {                                            \
+               panic("VM page %p should not have an error\n",          \
+                       (page));                                        \
+       }                                                               \
+}
+
 /*
  *     Routines to manage reference/modify bits based on
  *     physical addresses, simulating them if not provided
  *     by the hardware.
  */
+struct pfc {
+       long    pfc_cpus;
+       long    pfc_invalid_global;
+};
+
+typedef        struct pfc      pmap_flush_context;
+
                                /* Clear reference bit */
 extern void            pmap_clear_reference(ppnum_t     pn);
                                /* Return reference bit */
@@ -380,6 +503,11 @@ extern unsigned int pmap_get_refmod(ppnum_t pn);
 extern void                    pmap_clear_refmod(ppnum_t pn, unsigned int mask);
 #define VM_MEM_MODIFIED                0x01    /* Modified bit */
 #define VM_MEM_REFERENCED      0x02    /* Referenced bit */
+extern void                    pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
+
+
+extern void pmap_flush_context_init(pmap_flush_context *);
+extern void pmap_flush(pmap_flush_context *);
 
 /*
  *     Routines that operate on ranges of virtual addresses.
@@ -390,14 +518,39 @@ extern void               pmap_protect(   /* Change protections. */
                                vm_map_offset_t e,
                                vm_prot_t       prot);
 
+extern void            pmap_protect_options(   /* Change protections. */
+                               pmap_t          map,
+                               vm_map_offset_t s,
+                               vm_map_offset_t e,
+                               vm_prot_t       prot,
+                               unsigned int    options,
+                               void            *arg);
+
 extern void            (pmap_pageable)(
                                pmap_t          pmap,
                                vm_map_offset_t start,
                                vm_map_offset_t end,
                                boolean_t       pageable);
 
+
+extern uint64_t pmap_nesting_size_min;
+extern uint64_t pmap_nesting_size_max;
+
+extern kern_return_t pmap_nest(pmap_t,
+                              pmap_t,
+                              addr64_t,
+                              addr64_t,
+                              uint64_t);
+extern kern_return_t pmap_unnest(pmap_t,
+                                addr64_t,
+                                uint64_t);
+extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
 #endif /* MACH_KERNEL_PRIVATE */
 
+extern boolean_t       pmap_is_noencrypt(ppnum_t);
+extern void            pmap_set_noencrypt(ppnum_t pn);
+extern void            pmap_clear_noencrypt(ppnum_t pn);
+
 /*
  * JMM - This portion is exported to other kernel components right now,
  * but will be pulled back in the future when the needed functionality
@@ -414,17 +567,30 @@ extern pmap_t     kernel_pmap;                    /* The kernel's map */
 #define VM_MEM_NOT_CACHEABLE   0x4             /* (I) Cache Inhibit */
 #define VM_MEM_WRITE_THROUGH   0x8             /* (W) Write-Through */
 
+#define VM_WIMG_USE_DEFAULT    0x80
 #define VM_WIMG_MASK           0xFF
-#define VM_WIMG_USE_DEFAULT    0x80000000
 
-extern void            pmap_modify_pages(      /* Set modify bit for pages */
-                               pmap_t          map,
-                               vm_map_offset_t s,
-                               vm_map_offset_t e);
+#define VM_MEM_SUPERPAGE       0x100           /* map a superpage instead of a base page */
+#define VM_MEM_STACK           0x200
 
+#define PMAP_OPTIONS_NOWAIT    0x1             /* don't block, return 
+                                                * KERN_RESOURCE_SHORTAGE 
+                                                * instead */
+#define PMAP_OPTIONS_NOENTER   0x2             /* expand pmap if needed
+                                                * but don't enter mapping
+                                                */
+#define PMAP_OPTIONS_COMPRESSOR 0x4            /* credit the compressor for
+                                                * this operation */
+#define PMAP_OPTIONS_INTERNAL  0x8             /* page from internal object */
+#define PMAP_OPTIONS_REUSABLE  0x10            /* page is "reusable" */
+#define PMAP_OPTIONS_NOFLUSH   0x20            /* delay flushing of pmap */
+#define PMAP_OPTIONS_NOREFMOD  0x40            /* don't need ref/mod on disconnect */
+#define PMAP_OPTIONS_REMOVE    0x100           /* removing a mapping */
+
+#if    !defined(__LP64__)
 extern vm_offset_t     pmap_extract(pmap_t pmap,
                                vm_map_offset_t va);
-
+#endif
 extern void            pmap_change_wiring(     /* Specify pageability */
                                pmap_t          pmap,
                                vm_map_offset_t va,
@@ -433,9 +599,33 @@ extern void                pmap_change_wiring(     /* Specify pageability */
 /* LP64todo - switch to vm_map_offset_t when it grows */
 extern void            pmap_remove(    /* Remove mappings. */
                                pmap_t          map,
-                               addr64_t        s,
-                               addr64_t        e);
+                               vm_map_offset_t s,
+                               vm_map_offset_t e);
+
+extern void            pmap_remove_options(    /* Remove mappings. */
+                               pmap_t          map,
+                               vm_map_offset_t s,
+                               vm_map_offset_t e,
+                               int             options);
+
+extern void            pmap_reusable(
+                               pmap_t          map,
+                               vm_map_offset_t s,
+                               vm_map_offset_t e,
+                               boolean_t       reusable);
+
+extern void            fillPage(ppnum_t pa, unsigned int fill);
+
+extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
+extern void pmap_unmap_sharedpage(pmap_t pmap);
+
+#if defined(__LP64__)
+void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
+#endif
 
+unsigned int pmap_query_resident(pmap_t pmap,
+                                vm_map_offset_t s,
+                                vm_map_offset_t e);
 
 #endif  /* KERNEL_PRIVATE */
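
A minimal usage sketch of the deferred-flush interface added above: a caller passes PMAP_OPTIONS_NOFLUSH so that individual ref/mod clears do not each trigger a TLB flush, then issues a single pmap_flush() for the whole batch. The page list, its length, and the use of the void * argument to carry the pmap_flush_context are assumptions for illustration, not part of this change.

/* Hypothetical kernel-side sketch: clear ref/mod bits on a batch of pages
 * with one deferred TLB flush.  "pages" and "npages" are illustrative; the
 * void * argument is assumed to carry the flush context when
 * PMAP_OPTIONS_NOFLUSH ("delay flushing of pmap") is passed. */
#include <vm/pmap.h>

static void
clear_refmod_deferred(ppnum_t *pages, unsigned int npages)
{
	pmap_flush_context	flush_context;
	unsigned int		i;

	pmap_flush_context_init(&flush_context);

	for (i = 0; i < npages; i++) {
		pmap_clear_refmod_options(pages[i],
					  VM_MEM_MODIFIED | VM_MEM_REFERENCED,
					  PMAP_OPTIONS_NOFLUSH,
					  (void *) &flush_context);
	}

	/* One shootdown covering every CPU recorded in the context. */
	pmap_flush(&flush_context);
}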
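
Similarly, pmap_enter_options() lets a caller that cannot block request KERN_RESOURCE_SHORTAGE instead of waiting when the pmap would need to expand, per the PMAP_OPTIONS_NOWAIT comment above. The sketch below is hypothetical; the function name and its parameters (map, vaddr, pn) are assumed for illustration.

/* Hypothetical sketch: enter a writable mapping without blocking on pmap
 * expansion.  With PMAP_OPTIONS_NOWAIT the call returns
 * KERN_RESOURCE_SHORTAGE rather than waiting for memory. */
#include <vm/pmap.h>

static kern_return_t
enter_mapping_nowait(pmap_t map, vm_map_offset_t vaddr, ppnum_t pn)
{
	kern_return_t	kr;

	kr = pmap_enter_options(map, vaddr, pn,
				VM_PROT_READ | VM_PROT_WRITE,	/* prot */
				VM_PROT_WRITE,			/* fault_type */
				0,				/* flags: no special WIMG/superpage bits */
				FALSE,				/* wired */
				PMAP_OPTIONS_NOWAIT,
				NULL);
	if (kr == KERN_RESOURCE_SHORTAGE) {
		/* Expansion would have blocked; caller should retry later. */
	}
	return kr;
}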