diff --git a/osfmk/vm/pmap.h b/osfmk/vm/pmap.h
index 4dda9444571b88232c6dbb8c1bad50546544d8c5..d30299778fee450467eec6a5066869fe4e0a23d1 100644
--- a/osfmk/vm/pmap.h
+++ b/osfmk/vm/pmap.h
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -68,6 +74,8 @@
 #include <mach/boolean.h>
 #include <mach/vm_prot.h>
 
+#ifdef KERNEL_PRIVATE
+
 /*
  *     The following is a description of the interface to the
  *     machine-dependent "physical map" data structure.  The module
  *     many address spaces.
  */
 
-#ifndef MACH_KERNEL_PRIVATE
-
-typedef void *pmap_t;
-
-#else /* MACH_KERNEL_PRIVATE */
-
-typedef struct pmap *pmap_t;
+/* Copy between a physical page and a virtual address */
+/* LP64todo - switch to vm_map_offset_t when it grows */
+extern kern_return_t   copypv(
+                               addr64_t source, 
+                               addr64_t sink, 
+                               unsigned int size, 
+                               int which);     
+#define cppvPsnk        1
+#define cppvPsnkb      31
+#define cppvPsrc        2
+#define cppvPsrcb      30
+#define cppvFsnk        4
+#define cppvFsnkb      29
+#define cppvFsrc        8
+#define cppvFsrcb      28
+#define cppvNoModSnk   16
+#define cppvNoModSnkb  27
+#define cppvNoRefSrc   32
+#define cppvNoRefSrcb  26
+#define cppvKmap       64      /* Use the kernel's vm_map */
+#define cppvKmapb      25
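The cppv* constants above are bit flags for copypv()'s "which" argument; each *b variant names the corresponding big-endian bit number (value == 1 << (31 - bit)). A minimal sketch of how a kernel-side caller might combine them, assuming the osfmk build environment and that, as the names suggest, cppvPsrc marks the source address as physical while cppvKmap resolves the virtual side through the kernel's vm_map; the helper itself is hypothetical:

/*
 * Sketch only (not part of the header): copy bytes from a physical
 * source page into a kernel virtual buffer.  cppvPsrc says the source
 * address is physical, cppvKmap says virtual addresses belong to the
 * kernel's vm_map, and cppvNoRefSrc leaves the source page's
 * reference bit untouched.
 */
#include <mach/kern_return.h>
#include <vm/pmap.h>

static kern_return_t
copy_phys_to_kernel_va(addr64_t src_phys, addr64_t dst_kva, unsigned int bytes)
{
	return copypv(src_phys, dst_kva, bytes,
	    cppvPsrc | cppvKmap | cppvNoRefSrc);
}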
+
+#ifdef MACH_KERNEL_PRIVATE
 
 #include <machine/pmap.h>
 
@@ -95,9 +119,14 @@ typedef struct pmap *pmap_t;
  *     There is traditionally also a pmap_bootstrap,
  *     used very early by machine-dependent code,
  *     but it is not part of the interface.
+ *
+ *     LP64todo -
+ *     These interfaces are tied to the size of the
+ *     kernel pmap - and therefore use the "local"
+ *     vm_offset_t, etc... types.
  */
 
-extern vm_offset_t     pmap_steal_memory(vm_size_t size);
+extern void            *pmap_steal_memory(vm_size_t size);
                                                /* During VM initialization,
                                                 * steal a chunk of memory.
                                                 */
@@ -112,11 +141,16 @@ extern void               pmap_startup(
                                                 * use remaining physical pages
                                                 * to allocate page frames.
                                                 */
-extern void            pmap_init(void);        /* Initialization,
+extern void            pmap_init(void) __attribute__((section("__TEXT, initcode")));
+                                               /* Initialization,
                                                 * after kernel runs
                                                 * in virtual memory.
                                                 */
 
+extern void            mapping_adjust(void);   /* Adjust free mapping count */
+
+extern void            mapping_free_prime(void); /* Primes the mapping block release list */
+
 #ifndef        MACHINE_PAGES
 /*
  *     If machine/pmap.h defines MACHINE_PAGES, it must implement
@@ -134,7 +168,7 @@ extern void         pmap_init(void);        /* Initialization,
  *     However, for best performance pmap_free_pages should be accurate.
  */
 
-extern boolean_t       pmap_next_page(vm_offset_t *paddr);
+extern boolean_t       pmap_next_page(ppnum_t *pnum);
                                                /* During VM initialization,
                                                 * return the next unused
                                                 * physical page.
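pmap_next_page() now hands back a physical page number (ppnum_t) rather than a byte address. A hedged sketch of how early bootstrap code might drain it, assuming the osfmk environment; grab_boot_pages() is a hypothetical helper:

/*
 * Sketch only: collect up to 'count' free physical page numbers during
 * early VM initialization, stopping when the machine-dependent layer
 * reports that no unused physical pages remain.
 */
#include <vm/pmap.h>

static unsigned int
grab_boot_pages(ppnum_t *frames, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (!pmap_next_page(&frames[i]))
			break;		/* no unused physical pages left */
	}
	return i;			/* page frames actually obtained */
}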
@@ -151,7 +185,13 @@ extern void                pmap_virtual_space(
 /*
  *     Routines to manage the physical map data structure.
  */
-extern pmap_t          pmap_create(vm_size_t size);    /* Create a pmap_t. */
+extern pmap_t          pmap_create(    /* Create a pmap_t. */
+                               vm_map_size_t   size,
+#ifdef __i386__
+                               boolean_t       is_64bit);
+#else
+                               __unused boolean_t      is_64bit);
+#endif
 extern pmap_t          (pmap_kernel)(void);    /* Return the kernel's pmap */
 extern void            pmap_reference(pmap_t pmap);    /* Gain a reference. */
 extern void            pmap_destroy(pmap_t pmap); /* Release a reference. */
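pmap_create() now takes a vm_map_size_t plus an is_64bit flag (only meaningful on __i386__, per the prototype above). A minimal lifecycle sketch, assuming the osfmk environment; the helper name and the use of 0 for a default-sized pmap are illustrative assumptions:

/*
 * Sketch only: create a physical map for a new address space, take and
 * drop a reference, then release it.  pmap_destroy() frees the pmap
 * when the last reference goes away.
 */
#include <vm/pmap.h>

static void
pmap_lifecycle_example(boolean_t task_is_64bit)
{
	pmap_t map;

	map = pmap_create(0, task_is_64bit);	/* 0: default-sized pmap */
	if (map == (pmap_t) 0)
		return;				/* allocation failed */

	pmap_reference(map);			/* take an extra reference... */
	pmap_destroy(map);			/* ...and drop it again */
	pmap_destroy(map);			/* drop the creation reference */
}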
@@ -160,81 +200,92 @@ extern void               pmap_switch(pmap_t);
 
 extern void            pmap_enter(     /* Enter a mapping */
                                pmap_t          pmap,
-                               vm_offset_t     v,
-                               vm_offset_t     pa,
+                               vm_map_offset_t v,
+                               ppnum_t         pn,
                                vm_prot_t       prot,
+                               unsigned int    flags,
                                boolean_t       wired);
 
+extern void            pmap_remove_some_phys(
+                               pmap_t          pmap,
+                               ppnum_t         pn);
+
 
 /*
  *     Routines that operate on physical addresses.
  */
+
 extern void            pmap_page_protect(      /* Restrict access to page. */
-                               vm_offset_t     phys,
+                               ppnum_t phys,
                                vm_prot_t       prot);
 
 extern void            (pmap_zero_page)(
-                               vm_offset_t     phys);
+                               ppnum_t         pn);
 
 extern void            (pmap_zero_part_page)(
-                               vm_offset_t     p,
+                               ppnum_t         pn,
                                vm_offset_t     offset,
                                vm_size_t       len);
 
 extern void            (pmap_copy_page)(
-                               vm_offset_t     src,
-                               vm_offset_t     dest);
+                               ppnum_t         src,
+                               ppnum_t         dest);
 
 extern void            (pmap_copy_part_page)(
-                               vm_offset_t     src,
+                               ppnum_t         src,
                                vm_offset_t     src_offset,
-                               vm_offset_t     dst,
+                               ppnum_t         dst,
                                vm_offset_t     dst_offset,
                                vm_size_t       len);
 
 extern void            (pmap_copy_part_lpage)(
                                vm_offset_t     src,
-                               vm_offset_t     dst,
+                               ppnum_t         dst,
                                vm_offset_t     dst_offset,
                                vm_size_t       len);
 
 extern void            (pmap_copy_part_rpage)(
-                               vm_offset_t     src,
+                               ppnum_t         src,
                                vm_offset_t     src_offset,
                                vm_offset_t     dst,
                                vm_size_t       len);
+                               
+extern unsigned int (pmap_disconnect)( /* disconnect mappings and return reference and change */
+                               ppnum_t         phys);
+
+extern kern_return_t   (pmap_attribute_cache_sync)(  /* Flush appropriate 
+                                                      * cache based on
+                                                      * page number sent */
+                               ppnum_t         pn, 
+                               vm_size_t       size, 
+                               vm_machine_attribute_t attribute, 
+                               vm_machine_attribute_val_t* value);
+
+extern unsigned int    (pmap_cache_attributes)(
+                               ppnum_t         pn);
+
+extern void pmap_sync_page_data_phys(ppnum_t pa);
+extern void pmap_sync_page_attributes_phys(ppnum_t pa);
 
 /*
  * debug/assertions. pmap_verify_free returns true iff
  * the given physical page is mapped into no pmap.
  */
-extern boolean_t       pmap_verify_free(vm_offset_t paddr);
+extern boolean_t       pmap_verify_free(ppnum_t pn);
 
 /*
  *     Statistics routines
  */
 extern int             (pmap_resident_count)(pmap_t pmap);
+extern int             (pmap_resident_max)(pmap_t pmap);
 
 /*
  *     Sundry required (internal) routines
  */
+#ifdef CURRENTLY_UNUSED_AND_UNTESTED
 extern void            pmap_collect(pmap_t pmap);/* Perform garbage
                                                 * collection, if any */
-
-
-extern vm_offset_t     (pmap_phys_address)(    /* Transform address returned
-                                                * by device driver mapping
-                                                * function to physical address
-                                                * known to this module.  */
-                               int             frame);
-
-extern int             (pmap_phys_to_frame)(   /* Inverse of pmap_phys_addess,
-                                                * for use by device driver
-                                                * mapping function in
-                                                * machine-independent
-                                                * pseudo-devices.  */
-                               vm_offset_t     phys);
-
+#endif
 /*
  *     Optional routines
  */
@@ -242,15 +293,15 @@ extern void               (pmap_copy)(            /* Copy range of mappings,
                                                 * if desired. */
                                pmap_t          dest,
                                pmap_t          source,
-                               vm_offset_t     dest_va,
-                               vm_size_t       size,
-                               vm_offset_t     source_va);
+                               vm_map_offset_t dest_va,
+                               vm_map_size_t   size,
+                               vm_map_offset_t source_va);
 
 extern kern_return_t   (pmap_attribute)(       /* Get/Set special memory
                                                 * attributes */
                                pmap_t          pmap,
-                               vm_offset_t     va,
-                               vm_size_t       size,
+                               vm_map_offset_t va,
+                               vm_map_size_t   size,
                                vm_machine_attribute_t  attribute,
                                vm_machine_attribute_val_t* value);
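A hedged sketch of a pmap_attribute() call, assuming the MATTR_CACHE / MATTR_VAL_CACHE_FLUSH constants from <mach/vm_attributes.h>; whether a given attribute is honored remains machine-dependent, and the helper is hypothetical:

/*
 * Sketch only: ask the machine-dependent layer to flush caches for a
 * range of virtual addresses in 'pmap'.
 */
#include <mach/vm_attributes.h>
#include <vm/pmap.h>

static kern_return_t
flush_cache_range(pmap_t pmap, vm_map_offset_t va, vm_map_size_t size)
{
	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

	return pmap_attribute(pmap, va, size, MATTR_CACHE, &value);
}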
 
@@ -258,110 +309,166 @@ extern kern_return_t    (pmap_attribute)(       /* Get/Set special memory
  * Routines defined as macros.
  */
 #ifndef PMAP_ACTIVATE_USER
-#define PMAP_ACTIVATE_USER(act, cpu) {                         \
+#ifndef        PMAP_ACTIVATE
+#define PMAP_ACTIVATE_USER(thr, cpu)
+#else  /* PMAP_ACTIVATE */
+#define PMAP_ACTIVATE_USER(thr, cpu) {                 \
        pmap_t  pmap;                                           \
                                                                \
-       pmap = (act)->map->pmap;                                \
+       pmap = (thr)->map->pmap;                                \
        if (pmap != pmap_kernel())                              \
-               PMAP_ACTIVATE(pmap, (act), (cpu));              \
+               PMAP_ACTIVATE(pmap, (thr), (cpu));              \
 }
+#endif  /* PMAP_ACTIVATE */
 #endif  /* PMAP_ACTIVATE_USER */
 
 #ifndef PMAP_DEACTIVATE_USER
-#define PMAP_DEACTIVATE_USER(act, cpu) {                       \
+#ifndef PMAP_DEACTIVATE
+#define PMAP_DEACTIVATE_USER(thr, cpu)
+#else  /* PMAP_DEACTIVATE */
+#define PMAP_DEACTIVATE_USER(thr, cpu) {                       \
        pmap_t  pmap;                                           \
                                                                \
-       pmap = (act)->map->pmap;                                \
-       if ((pmap) != pmap_kernel())                            \
-               PMAP_DEACTIVATE(pmap, (act), (cpu));            \
+       pmap = (thr)->map->pmap;                                \
+       if ((pmap) != pmap_kernel())                    \
+               PMAP_DEACTIVATE(pmap, (thr), (cpu));    \
 }
+#endif /* PMAP_DEACTIVATE */
 #endif  /* PMAP_DEACTIVATE_USER */
 
 #ifndef        PMAP_ACTIVATE_KERNEL
+#ifndef PMAP_ACTIVATE
+#define        PMAP_ACTIVATE_KERNEL(cpu)
+#else  /* PMAP_ACTIVATE */
 #define        PMAP_ACTIVATE_KERNEL(cpu)                       \
-               PMAP_ACTIVATE(pmap_kernel(), THR_ACT_NULL, cpu)
+               PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
+#endif /* PMAP_ACTIVATE */
 #endif /* PMAP_ACTIVATE_KERNEL */
 
 #ifndef        PMAP_DEACTIVATE_KERNEL
+#ifndef PMAP_DEACTIVATE
+#define        PMAP_DEACTIVATE_KERNEL(cpu)
+#else  /* PMAP_DEACTIVATE */
 #define        PMAP_DEACTIVATE_KERNEL(cpu)                     \
-               PMAP_DEACTIVATE(pmap_kernel(), THR_ACT_NULL, cpu)
+               PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
+#endif /* PMAP_DEACTIVATE */
 #endif /* PMAP_DEACTIVATE_KERNEL */
 
 #ifndef        PMAP_ENTER
 /*
  *     Macro to be used in place of pmap_enter()
  */
-#define PMAP_ENTER(pmap, virtual_address, page, protection, wired) \
-               MACRO_BEGIN                                     \
-               pmap_enter(                                     \
-                       (pmap),                                 \
-                       (virtual_address),                      \
-                       (page)->phys_addr,                      \
-                       (protection) & ~(page)->page_lock,      \
-                       (wired)                                 \
-                );                                             \
-               MACRO_END
+#define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
+       MACRO_BEGIN                                                     \
+       pmap_t          __pmap = (pmap);                                \
+       vm_page_t       __page = (page);                                \
+                                                                       \
+       if (__pmap != kernel_pmap) {                                    \
+               ASSERT_PAGE_DECRYPTED(__page);                          \
+       }                                                               \
+       if (__page->error) {                                            \
+               panic("VM page %p should not have an error\n",          \
+                       __page);                                        \
+       }                                                               \
+       pmap_enter(__pmap,                                              \
+                  (virtual_address),                                   \
+                  __page->phys_page,                                   \
+                  (protection),                                        \
+                  (flags),                                             \
+                  (wired));                                            \
+       MACRO_END
 #endif /* !PMAP_ENTER */
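The widened pmap_enter() interface takes a physical page number plus a flags word in addition to protection and wiring; the PMAP_ENTER macro above is the usual way the VM layer reaches it. A sketch of a direct call, assuming the flags word carries the machine-independent WIMG cache bits defined later in this header (VM_WIMG_USE_DEFAULT etc.); the helper is hypothetical:

/*
 * Sketch only: establish one read/write, unwired mapping with default
 * cacheability.  Real callers are the VM fault and wiring paths, which
 * normally go through PMAP_ENTER().
 */
#include <mach/vm_prot.h>
#include <vm/pmap.h>

static void
enter_one_mapping(pmap_t pmap, vm_map_offset_t va, ppnum_t pn)
{
	pmap_enter(pmap, va, pn,
	    VM_PROT_READ | VM_PROT_WRITE,	/* prot */
	    VM_WIMG_USE_DEFAULT,		/* flags: default cache attributes */
	    FALSE);				/* not wired */
}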
 
-#endif /* MACH_KERNEL_PRIVATE */
-
-/*
- * JMM - This portion is exported to other kernel components right now,
- * but will be pulled back in the future when the needed functionality
- * is provided in a cleaner manner.
- */
-
-#define PMAP_NULL  ((pmap_t) 0)
-
-extern pmap_t  kernel_pmap;                    /* The kernel's map */
-#define                pmap_kernel()   (kernel_pmap)
-
 /*
  *     Routines to manage reference/modify bits based on
  *     physical addresses, simulating them if not provided
  *     by the hardware.
  */
                                /* Clear reference bit */
-extern void            pmap_clear_reference(vm_offset_t paddr);
+extern void            pmap_clear_reference(ppnum_t     pn);
                                /* Return reference bit */
-extern boolean_t       (pmap_is_referenced)(vm_offset_t paddr);
+extern boolean_t       (pmap_is_referenced)(ppnum_t     pn);
                                /* Set modify bit */
-extern void             pmap_set_modify(vm_offset_t paddr);
+extern void             pmap_set_modify(ppnum_t         pn);
                                /* Clear modify bit */
-extern void            pmap_clear_modify(vm_offset_t paddr);
+extern void            pmap_clear_modify(ppnum_t pn);
                                /* Return modify bit */
-extern boolean_t       pmap_is_modified(vm_offset_t paddr);
+extern boolean_t       pmap_is_modified(ppnum_t pn);
+                               /* Return modified and referenced bits */
+extern unsigned int pmap_get_refmod(ppnum_t pn);
+                               /* Clear modified and referenced bits */
+extern void                    pmap_clear_refmod(ppnum_t pn, unsigned int mask);
+#define VM_MEM_MODIFIED                0x01    /* Modified bit */
+#define VM_MEM_REFERENCED      0x02    /* Referenced bit */
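pmap_get_refmod() folds the two per-page queries into one mask, and pmap_clear_refmod() clears the requested bits in a single call. A small sketch, assuming the osfmk environment; the helper name is illustrative:

/*
 * Sketch only: report whether a physical page has been written since
 * the last check, and reset both bits so the next query reflects
 * fresh activity.
 */
#include <vm/pmap.h>

static boolean_t
page_was_dirtied(ppnum_t pn)
{
	unsigned int refmod = pmap_get_refmod(pn);

	pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);

	return (refmod & VM_MEM_MODIFIED) ? TRUE : FALSE;
}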
 
 /*
  *     Routines that operate on ranges of virtual addresses.
  */
-extern void            pmap_remove(    /* Remove mappings. */
-                               pmap_t          map,
-                               vm_offset_t     s,
-                               vm_offset_t     e);
-
 extern void            pmap_protect(   /* Change protections. */
                                pmap_t          map,
-                               vm_offset_t     s,
-                               vm_offset_t     e,
+                               vm_map_offset_t s,
+                               vm_map_offset_t e,
                                vm_prot_t       prot);
 
 extern void            (pmap_pageable)(
                                pmap_t          pmap,
-                               vm_offset_t     start,
-                               vm_offset_t     end,
+                               vm_map_offset_t start,
+                               vm_map_offset_t end,
                                boolean_t       pageable);
 
-extern void            pmap_modify_pages(      /* Set modify bit for pages */
-                               pmap_t          map,
-                               vm_offset_t     s,
-                               vm_offset_t     e);
+#ifndef NO_NESTED_PMAP
+extern uint64_t pmap_nesting_size_min;
+extern uint64_t pmap_nesting_size_max;
+extern kern_return_t pmap_nest(pmap_t grand,
+                              pmap_t subord,
+                              addr64_t vstart,
+                              addr64_t nstart,
+                              uint64_t size);
+extern kern_return_t pmap_unnest(pmap_t grand,
+                                addr64_t vaddr,
+                                uint64_t size);
+#endif /* NO_NESTED_PMAP */
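pmap_nest() grafts a subordinate pmap's translations into a "grand" pmap over a given range, and pmap_unnest() undoes it. A hedged sketch, assuming both pmaps already exist and that vstart and nstart may be the same address for a straightforward 1:1 nesting; the helper is hypothetical:

/*
 * Sketch only: nest 'shared' into 'task_pmap' so that the range
 * [va, va + size) in the task resolves through the shared pmap's
 * mappings for the same range, then tear the nesting down again.
 */
#include <mach/kern_return.h>
#include <vm/pmap.h>

static kern_return_t
nest_shared_pmap(pmap_t task_pmap, pmap_t shared, addr64_t va, uint64_t size)
{
	kern_return_t kr;

	kr = pmap_nest(task_pmap, shared, va, va, size);
	if (kr != KERN_SUCCESS)
		return kr;

	return pmap_unnest(task_pmap, va, size);
}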
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+/*
+ * JMM - This portion is exported to other kernel components right now,
+ * but will be pulled back in the future when the needed functionality
+ * is provided in a cleaner manner.
+ */
+
+extern pmap_t  kernel_pmap;                    /* The kernel's map */
+#define                pmap_kernel()   (kernel_pmap)
+
+/* machine independent WIMG bits */
+
+#define VM_MEM_GUARDED                 0x1             /* (G) Guarded Storage */
+#define VM_MEM_COHERENT                0x2             /* (M) Memory Coherency */
+#define VM_MEM_NOT_CACHEABLE   0x4             /* (I) Cache Inhibit */
+#define VM_MEM_WRITE_THROUGH   0x8             /* (W) Write-Through */
+
+#define VM_WIMG_MASK           0xFF
+#define VM_WIMG_USE_DEFAULT    0x80000000
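These machine-independent WIMG bits compose into the pmap_enter() flags word (masked by VM_WIMG_MASK), with VM_WIMG_USE_DEFAULT meaning the page's default attributes should be used. A tiny, purely illustrative sketch of composing attributes for an uncached, guarded (device-style) mapping:

/*
 * Sketch only: cache attributes a driver-style mapping might request.
 * VM_MEM_NOT_CACHEABLE inhibits caching; VM_MEM_GUARDED prevents
 * speculative access to the range.
 */
static unsigned int
device_wimg_bits(void)
{
	return (VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED) & VM_WIMG_MASK;
}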
 
 extern vm_offset_t     pmap_extract(pmap_t pmap,
-                               vm_offset_t va);
+                               vm_map_offset_t va);
 
 extern void            pmap_change_wiring(     /* Specify pageability */
                                pmap_t          pmap,
-                               vm_offset_t     va,
+                               vm_map_offset_t va,
                                boolean_t       wired);
+
+/* LP64todo - switch to vm_map_offset_t when it grows */
+extern void            pmap_remove(    /* Remove mappings. */
+                               pmap_t          map,
+                               vm_map_offset_t s,
+                               vm_map_offset_t e);
+
+extern void            fillPage(ppnum_t pa, unsigned int fill);
+
+extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
+extern void pmap_unmap_sharedpage(pmap_t pmap);
+
+#endif  /* KERNEL_PRIVATE */
+
 #endif /* _VM_PMAP_H_ */