/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990 The University of Utah and
 * the Center for Software Science at the University of Utah (CSS).
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the Center
 * for Software Science at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSS requests users of this software to return to css-dist@cs.utah.edu any
 * improvements that they make and grant CSS redistribution rights.
 *
 * Utah $Hdr: pmap.h 1.13 91/09/25$
 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 9/90
 */
52 #include <mach/vm_types.h>
53 #include <mach/machine/vm_types.h>
54 #include <mach/vm_prot.h>
55 #include <mach/vm_statistics.h>
56 #include <kern/queue.h>
58 #include <ppc/mappings.h>
#define maxPPage32 0x000FFFFF			/* Maximum page number in 32-bit machines */

/* Shared/exclusive lock word used for serializing mapping changes
   (see the pmapSXlk field of struct pmap). */
typedef uint32_t shexlock;
#pragma pack(4)							/* Make sure the structure stays as we defined it */

/*
 * Segment cache entry: one cached segment value, split into its
 * ESID (effective segment ID) and VSID (virtual segment ID) halves.
 */
struct sgc {
	uint64_t	sgcESID;			/* ESID portion of segment cache */
#define sgcESmsk	0xFFFFFFFFF0000000ULL		/* ESID portion of segment register cache */
	uint64_t	sgcVSID;			/* VSID portion of segment cache */
#define sgcVSmsk	0xFFFFFFFFFFFFF000ULL		/* VSID mask */
#define sgcVSKeys	0x0000000000000C00ULL		/* Protection keys */
#define sgcVSKeyUsr	53				/* User protection key */
#define sgcVSNoEx	0x0000000000000200ULL		/* No execute */
};
typedef struct sgc sgc;
#pragma pack()						/* Reset to default alignment */
#pragma pack(4)							/* Make sure the structure stays as we defined it */

/*
 * Statistics gathered for the VMM guest-shadow-assist code, grouped by
 * operation: fault, remove, remove-all, remove-local, resume, add,
 * suspend, and test-ref&chg.
 */
struct pmap_vmm_stats {
	unsigned int	vxsGpf;				/* Guest faults */
	unsigned int	vxsGpfMiss;			/* Faults that miss in hash table */

	unsigned int	vxsGrm;				/* Guest mapping remove requests */
	unsigned int	vxsGrmMiss;			/* Remove misses in hash table */
	unsigned int	vxsGrmActive;			/* Remove hits that are active */

	unsigned int	vxsGra;				/* Guest remove all mappings requests */
	unsigned int	vxsGraHits;			/* Remove hits in hash table */
	unsigned int	vxsGraActive;			/* Remove hits that are active */

	unsigned int	vxsGrl;				/* Guest remove local mappings requests */
	unsigned int	vxsGrlActive;			/* Active mappings removed */

	unsigned int	vxsGrs;				/* Guest mapping resumes */
	unsigned int	vxsGrsHitAct;			/* Resume hits active entry */
	unsigned int	vxsGrsHitSusp;			/* Resume hits suspended entry */
	unsigned int	vxsGrsMissGV;			/* Resume misses on guest virtual */
	unsigned int	vxsGrsHitPE;			/* Resume hits on host virtual */
	unsigned int	vxsGrsMissPE;			/* Resume misses on host virtual */

	unsigned int	vxsGad;				/* Guest mapping adds */
	unsigned int	vxsGadHit;			/* Add hits entry (active or dormant) */
	unsigned int	vxsGadFree;			/* Add takes free entry in group */
	unsigned int	vxsGadDormant;			/* Add steals dormant entry in group */
	unsigned int	vxsGadSteal;			/* Add steals active entry in group */

	unsigned int	vxsGsu;				/* Guest mapping suspends */
	unsigned int	vxsGsuHit;			/* Suspend hits entry (active only) */
	unsigned int	vxsGsuMiss;			/* Suspend misses entry */

	unsigned int	vxsGtd;				/* Guest test ref&chg */
	unsigned int	vxsGtdHit;			/* Test r&c hits entry (active only) */
	unsigned int	vxsGtdMiss;			/* Test r&c misses entry */
};
typedef struct pmap_vmm_stats pmap_vmm_stats;
#pragma pack()						/* Reset to default alignment */
/* Not wanting to tax all of our customers for the sins of those that use virtual operating
   systems, we've built the hash table from its own primitive virtual memory. We first
   allocate a pmap_vmm_ext with sufficient space following to accommodate the hash table
   index (one 64-bit physical address per 4k-byte page of hash table). The allocation
   must not cross a 4k-byte page boundary (we'll be accessing the block with relocation
   off), so we'll try a couple of times, then just burn a whole page. We stuff the effective
   address of the cache-aligned index into hIdxBase; the physical-mode code locates the index
   by adding the size of a pmap_vmm_extension to its translated physical address, then rounding
   up to the next 32-byte boundary. Now we grab enough virtual pages to contain the hash table,
   and fill in the index with the page's physical addresses. For the final touch that's sure
   to please, we initialize the hash table. Mmmmm, golden brown perfection.
 */
133 struct pmap_vmm_ext
{
134 addr64_t vmxSalt
; /* This block's virt<->real conversion salt */
135 addr64_t vmxHostPmapPhys
; /* Host pmap physical address */
136 struct pmap
*vmxHostPmap
; /* Host pmap effective address */
137 addr64_t
*vmxHashPgIdx
; /* Hash table physical index base address */
138 vm_offset_t
*vmxHashPgList
; /* List of virtual pages comprising the hash table */
139 unsigned int *vmxActiveBitmap
; /* Bitmap of active mappings in hash table */
140 pmap_vmm_stats vmxStats
; /* Stats for VMM assists */
141 #define VMX_HPIDX_OFFSET ((sizeof(pmap_vmm_ext) + 127) & ~127)
142 /* The hash table physical index begins at the first
143 128-byte boundary after the pmap_vmm_ext struct */
144 #define VMX_HPLIST_OFFSET (VMX_HPIDX_OFFSET + (GV_HPAGES * sizeof(addr64_t)))
145 #define VMX_ACTMAP_OFFSET (VMX_HPLIST_OFFSET + (GV_HPAGES * sizeof(vm_offset_t)))
148 typedef struct pmap_vmm_ext pmap_vmm_ext
;
150 #pragma pack(4) /* Make sure the structure stays as we defined it */
152 queue_head_t pmap_link
; /* MUST BE FIRST */
153 addr64_t pmapvr
; /* Virtual to real conversion mask */
154 shexlock pmapSXlk
; /* Shared/Exclusive lock for mapping changes */
155 unsigned int space
; /* space for this pmap */
156 #define invalSpace 0x00000001 /* Predefined always invalid space */
157 int ref_count
; /* reference count */
158 unsigned int pmapFlags
; /* Flags */
159 #define pmapKeys 0x00000007 /* Keys and no execute bit to use with this pmap */
160 #define pmapKeyDef 0x00000006 /* Default keys - Sup = 1, user = 1, no ex = 0 */
161 #define pmapVMhost 0x00000010 /* pmap with Virtual Machines attached to it */
162 #define pmapVMgsaa 0x00000020 /* Guest shadow assist active */
163 unsigned int spaceNum
; /* Space number */
164 unsigned int pmapCCtl
; /* Cache control */
165 #define pmapCCtlVal 0xFFFF0000 /* Valid entries */
166 #define pmapCCtlLck 0x00008000 /* Lock bit */
167 #define pmapCCtlLckb 16 /* Lock bit */
168 #define pmapCCtlGen 0x00007FFF /* Generation number */
170 #define pmapSegCacheCnt 16 /* Maximum number of cache entries */
171 #define pmapSegCacheUse 16 /* Number of cache entries to use */
173 struct pmap
*freepmap
; /* Free pmaps */
174 pmap_vmm_ext
*pmapVmmExt
; /* VMM extension block, for VMM host and guest pmaps */
175 addr64_t pmapVmmExtPhys
; /* VMM extension block physical address */
177 uint64_t pmapSCSubTag
; /* Segment cache sub-tags. This is a 16 entry 4 bit array */
179 sgc pmapSegCache
[pmapSegCacheCnt
]; /* SLD values cached for quick load */
182 /* if fanout is 4, then shift is 1, if fanout is 8 shift is 2, etc */
183 #define kSkipListFanoutShift 1
184 /* with n lists, we can handle (fanout**n) pages optimally */
185 #define kSkipListMaxLists 12
186 unsigned char pmapCurLists
; /* 0x140 - max #lists any mapping in this pmap currently has */
187 unsigned char pmapRsv2
[3];
188 uint32_t pmapRandNum
; /* 0x144 - used by mapSetLists() as a random number generator */
189 addr64_t pmapSkipLists
[kSkipListMaxLists
]; /* 0x148 - the list headers */
190 /* following statistics conditionally gathered */
191 uint64_t pmapSearchVisits
; /* 0x1A8 - nodes visited searching pmaps */
192 uint32_t pmapSearchCnt
; /* 0x1B0 - number of calls to mapSearch or mapSearchFull */
194 unsigned int pmapRsv3
[3];
198 struct pmap_statistics stats
; /* statistics */
200 /* Need to pad out to a power of 2 - right now it is 512 bytes */
206 struct pmapTransTab
{
207 addr64_t pmapPAddr
; /* Physcial address of pmap */
208 unsigned int pmapVAddr
; /* Virtual address of pmap */
210 #pragma pack() /* Make sure the structure stays as we defined it */
212 typedef struct pmapTransTab pmapTransTab
;
215 * Address Chunk IDentified Table
219 unsigned int acidVAddr
; /* Virtual address of pmap or pointer to next free entry */
220 unsigned int acidGas
; /* reserved */
221 addr64_t acidPAddr
; /* Physcial address of pmap */
224 typedef struct acidTabEnt acidTabEnt
;
226 extern acidTabEnt
*acidTab
; /* Pointer to acid table */
227 extern acidTabEnt
*acidFree
; /* List of free acid entries */
229 #define PMAP_NULL ((pmap_t) 0)
231 extern pmap_t cursor_pmap
; /* The pmap to start allocations with */
232 extern pmap_t sharedPmap
;
233 extern unsigned int sharedPage
;
234 extern int ppc_max_adrsp
; /* Maximum number of concurrent address spaces allowed. */
235 extern addr64_t vm_max_address
; /* Maximum effective address supported */
236 extern addr64_t vm_max_physical
; /* Maximum physical address supported */
237 extern pmapTransTab
*pmapTrans
; /* Space to pmap translate table */
#define PMAP_SWITCH_USER(th, map, my_cpu) th->map = map;

#define PMAP_CONTEXT(pmap, th)

/* True iff VA lies in the kernel's address range */
#define pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= vm_last_addr))

#define PPC_SID_KERNEL 0	/* Must change KERNEL_SEG_REG0_VALUE if !0 */

#define maxAdrSp 16384

#define USER_MEM_WINDOW_VADDR	0x00000000E0000000ULL
#define PHYS_MEM_WINDOW_VADDR	0x0000000100000000ULL
#define IO_MEM_WINDOW_VADDR	0x0000000080000000ULL
#define IO_MEM_WINDOW_SIZE	0x0000000080000000ULL
#define pmapSmallBlock 65536

#define pmap_kernel()			(kernel_pmap)
#define pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define pmap_remove_attributes(pmap, start, end)
#define pmap_copy(dpmap, spmap, da, len, sa)
#define pmap_update()

#define PMAP_DEFAULT_CACHE	0
#define PMAP_INHIBIT_CACHE	1
#define PMAP_GUARDED_CACHE	2
#define PMAP_ACTIVATE_CACHE	4
#define PMAP_NO_GUARD_CACHE	8

/* corresponds to cached, coherent, not writethru, not guarded */
#define VM_WIMG_DEFAULT		(VM_MEM_COHERENT)
#define VM_WIMG_COPYBACK	(VM_MEM_COHERENT)
#define VM_WIMG_IO		(VM_MEM_COHERENT | \
				 VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
279 extern vm_offset_t
phystokv(vm_offset_t pa
); /* Get kernel virtual address from physical */
280 extern vm_offset_t
kvtophys(vm_offset_t va
); /* Get physical address from kernel virtual */
281 extern vm_map_offset_t
kvtophys64(vm_map_offset_t va
); /* Get 64-bit physical address from kernel virtual */
282 extern vm_offset_t
pmap_map(vm_offset_t va
,
286 extern kern_return_t
pmap_add_physical_memory(vm_offset_t spa
,
290 extern void pmap_bootstrap(uint64_t msize
,
291 vm_offset_t
*first_avail
,
292 unsigned int kmapsize
);
294 extern vm_offset_t
pmap_boot_map(vm_size_t size
);
296 extern void sync_cache64(addr64_t pa
, unsigned length
);
297 extern void sync_ppage(ppnum_t pa
);
298 extern void sync_cache_virtual(vm_offset_t va
, unsigned length
);
299 extern void flush_dcache(vm_offset_t va
, unsigned length
, boolean_t phys
);
300 extern void flush_dcache64(addr64_t va
, unsigned length
, boolean_t phys
);
301 extern void invalidate_dcache(vm_offset_t va
, unsigned length
, boolean_t phys
);
302 extern void invalidate_dcache64(addr64_t va
, unsigned length
, boolean_t phys
);
303 extern void invalidate_icache(vm_offset_t va
, unsigned length
, boolean_t phys
);
304 extern void invalidate_icache64(addr64_t va
, unsigned length
, boolean_t phys
);
305 extern void pmap_sync_page_data_phys(ppnum_t pa
);
306 extern void pmap_sync_page_attributes_phys(ppnum_t pa
);
307 extern void pmap_map_block(pmap_t pmap
, addr64_t va
, ppnum_t pa
, uint32_t size
, vm_prot_t prot
, int attr
, unsigned int flags
);
308 extern int pmap_map_block_rc(pmap_t pmap
, addr64_t va
, ppnum_t pa
, uint32_t size
, vm_prot_t prot
, int attr
, unsigned int flags
);
310 extern kern_return_t
pmap_nest(pmap_t grand
, pmap_t subord
, addr64_t vstart
, addr64_t nstart
, uint64_t size
);
311 extern kern_return_t
pmap_unnest(pmap_t grand
, addr64_t vaddr
);
312 extern ppnum_t
pmap_find_phys(pmap_t pmap
, addr64_t va
);
313 extern void MapUserMemoryWindowInit(void);
314 extern addr64_t
MapUserMemoryWindow(vm_map_t map
, addr64_t va
);
315 extern boolean_t
pmap_eligible_for_execute(ppnum_t pa
);
316 extern int pmap_list_resident_pages(
320 extern void pmap_init_sharedpage(vm_offset_t cpg
);
321 extern void pmap_map_sharedpage(task_t task
, pmap_t pmap
);
322 extern void pmap_unmap_sharedpage(pmap_t pmap
);
326 #endif /* _PPC_PMAP_H_ */