]> git.saurik.com Git - apple/xnu.git/blame - osfmk/ppc/pmap.h
xnu-1228.12.14.tar.gz
[apple/xnu.git] / osfmk / ppc / pmap.h
CommitLineData
1c79356b 1/*
2d21ac55 2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Copyright (c) 1990 The University of Utah and
33 * the Center for Software Science at the University of Utah (CSS).
34 * All rights reserved.
35 *
36 * Permission to use, copy, modify and distribute this software is hereby
37 * granted provided that (1) source code retains these copyright, permission,
38 * and disclaimer notices, and (2) redistributions including binaries
39 * reproduce the notices in supporting documentation, and (3) all advertising
40 * materials mentioning features or use of this software display the following
41 * acknowledgement: ``This product includes software developed by the Center
42 * for Software Science at the University of Utah.''
43 *
44 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
45 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
46 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * CSS requests users of this software to return to css-dist@cs.utah.edu any
49 * improvements that they make and grant CSS redistribution rights.
50 *
51 * Utah $Hdr: pmap.h 1.13 91/09/25$
52 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 9/90
53 */
54
55#ifndef _PPC_PMAP_H_
56#define _PPC_PMAP_H_
57
58#include <mach/vm_types.h>
59#include <mach/machine/vm_types.h>
60#include <mach/vm_prot.h>
61#include <mach/vm_statistics.h>
62#include <kern/queue.h>
63#include <vm/pmap.h>
91447636 64#include <ppc/mappings.h>
1c79356b 65
55e303ae
A
#define maxPPage32 0x000FFFFF			/* Maximum page number in 32-bit machines */

/* Shared/exclusive lock word (used for pmapSXlk in struct pmap below) */
typedef uint32_t shexlock;

#pragma pack(4)							/* Make sure the structure stays as we defined it */

/*
 * sgc - one entry of a pmap's segment cache (see pmapSegCache in struct pmap).
 * Holds a cached ESID/VSID pair; the masks below pick apart the fields.
 * Layout is fixed by pack(4) -- do not reorder or resize fields.
 */
struct sgc {
	uint64_t	sgcESID;					/* ESID portion of segment cache */
#define sgcESmsk	0xFFFFFFFFF0000000ULL	/* ESID portion of segment register cache */
	uint64_t	sgcVSID;					/* VSID portion of segment cache */
#define sgcVSmsk	0xFFFFFFFFFFFFF000ULL	/* VSID mask */
#define sgcVSKeys	0x0000000000000C00ULL	/* Protection keys */
#define sgcVSKeyUsr	53						/* User protection key */
#define sgcVSNoEx	0x0000000000000200ULL	/* No execute */
};
#pragma pack()

typedef struct sgc sgc;
84
91447636
A
#pragma pack(4)							/* Make sure the structure stays as we defined it */
/*
 * pmap_vmm_stats - counters for the guest-VM shadow-assist paths.
 * Embedded in pmap_vmm_ext (vmxStats below).  Counter name prefixes:
 * vxsGpf = guest page fault, vxsGrm/Gra/Grl = guest mapping removes,
 * vxsGrs = resumes, vxsGad = adds, vxsGsu = suspends, vxsGtd = test ref&chg.
 */
struct pmap_vmm_stats {
	unsigned int	vxsGpf;				/* Guest faults */
	unsigned int	vxsGpfMiss;			/* Faults that miss in hash table */

	unsigned int	vxsGrm;				/* Guest mapping remove requests */
	unsigned int	vxsGrmMiss;			/* Remove misses in hash table */
	unsigned int	vxsGrmActive;		/* Remove hits that are active */

	unsigned int	vxsGra;				/* Guest remove all mappings requests */
	unsigned int	vxsGraHits;			/* Remove hits in hash table */
	unsigned int	vxsGraActive;		/* Remove hits that are active */

	unsigned int	vxsGrl;				/* Guest remove local mappings requests */
	unsigned int	vxsGrlActive;		/* Active mappings removed */

	unsigned int	vxsGrs;				/* Guest mapping resumes */
	unsigned int	vxsGrsHitAct;		/* Resume hits active entry */
	unsigned int	vxsGrsHitSusp;		/* Resume hits suspended entry */
	unsigned int	vxsGrsMissGV;		/* Resume misses on guest virtual */
	unsigned int	vxsGrsHitPE;		/* Resume hits on host virtual */
	unsigned int	vxsGrsMissPE;		/* Resume misses on host virtual */

	unsigned int	vxsGad;				/* Guest mapping adds */
	unsigned int	vxsGadHit;			/* Add hits entry (active or dormant) */
	unsigned int	vxsGadFree;			/* Add takes free entry in group */
	unsigned int	vxsGadDormant;		/* Add steals dormant entry in group */
	unsigned int	vxsGadSteal;		/* Add steals active entry in group */

	unsigned int	vxsGsu;				/* Guest mapping suspends */
	unsigned int	vxsGsuHit;			/* Suspend hits entry (active only) */
	unsigned int	vxsGsuMiss;			/* Suspend misses entry */

	unsigned int	vxsGtd;				/* Guest test ref&chg */
	unsigned int	vxsGtdHit;			/* Test r&c hits entry (active only) */
	unsigned int	vxsGtdMiss;			/* Test r&c misses entry */
};
#pragma pack()
typedef struct pmap_vmm_stats pmap_vmm_stats;
124
/* Not wanting to tax all of our customers for the sins of those that use virtual operating
   systems, we've built the hash table from its own primitive virtual memory. We first
   allocate a pmap_vmm_ext with sufficient space following to accommodate the hash table
   index (one 64-bit physical address per 4k-byte page of hash table). The allocation
   must not cross a 4k-byte page boundary (we'll be accessing the block with relocation
   off), so we'll try a couple of times, then just burn a whole page. We stuff the effective
   address of the cache-aligned index into hIdxBase; the physical-mode code locates the index
   by adding the size of a pmap_vmm_extension to its translated physical address, then rounding
   up to the next 32-byte boundary. Now we grab enough virtual pages to contain the hash table,
   and fill in the index with the pages' physical addresses. For the final touch that's sure
   to please, we initialize the hash table. Mmmmm, golden brown perfection.
 */
137
#pragma pack(4)
/*
 * pmap_vmm_ext - VMM (virtual machine) extension block, attached to a pmap
 * via pmapVmmExt/pmapVmmExtPhys.  The guest hash-table page index, page list,
 * and active-mapping bitmap live in the same allocation at the offsets
 * computed below (see the allocation description in the comment above).
 */
struct pmap_vmm_ext {
	addr64_t		vmxSalt;			/* This block's virt<->real conversion salt */
	addr64_t		vmxHostPmapPhys;	/* Host pmap physical address */
	struct pmap		*vmxHostPmap;		/* Host pmap effective address */
	addr64_t		*vmxHashPgIdx;		/* Hash table physical index base address */
	vm_offset_t		*vmxHashPgList;		/* List of virtual pages comprising the hash table */
	unsigned int	*vmxActiveBitmap;	/* Bitmap of active mappings in hash table */
	pmap_vmm_stats	vmxStats;			/* Stats for VMM assists */
#define VMX_HPIDX_OFFSET ((sizeof(pmap_vmm_ext) + 127) & ~127)
										/* The hash table physical index begins at the first
										   128-byte boundary after the pmap_vmm_ext struct */
#define VMX_HPLIST_OFFSET (VMX_HPIDX_OFFSET + (GV_HPAGES * sizeof(addr64_t)))
#define VMX_ACTMAP_OFFSET (VMX_HPLIST_OFFSET + (GV_HPAGES * sizeof(vm_offset_t)))
};
#pragma pack()
typedef struct pmap_vmm_ext pmap_vmm_ext;
155
55e303ae 156#pragma pack(4) /* Make sure the structure stays as we defined it */
1c79356b 157struct pmap {
55e303ae
A
158 queue_head_t pmap_link; /* MUST BE FIRST */
159 addr64_t pmapvr; /* Virtual to real conversion mask */
160 shexlock pmapSXlk; /* Shared/Exclusive lock for mapping changes */
161 unsigned int space; /* space for this pmap */
162#define invalSpace 0x00000001 /* Predefined always invalid space */
2d21ac55 163 uint32_t ref_count; /* reference count */
55e303ae
A
164 unsigned int pmapFlags; /* Flags */
165#define pmapKeys 0x00000007 /* Keys and no execute bit to use with this pmap */
166#define pmapKeyDef 0x00000006 /* Default keys - Sup = 1, user = 1, no ex = 0 */
167#define pmapVMhost 0x00000010 /* pmap with Virtual Machines attached to it */
91447636 168#define pmapVMgsaa 0x00000020 /* Guest shadow assist active */
0c530ab8 169#define pmapNXdisabled 0x00000040 /* no-execute disabled for this pmap */
55e303ae
A
170 unsigned int spaceNum; /* Space number */
171 unsigned int pmapCCtl; /* Cache control */
172#define pmapCCtlVal 0xFFFF0000 /* Valid entries */
173#define pmapCCtlLck 0x00008000 /* Lock bit */
174#define pmapCCtlLckb 16 /* Lock bit */
175#define pmapCCtlGen 0x00007FFF /* Generation number */
176
177#define pmapSegCacheCnt 16 /* Maximum number of cache entries */
178#define pmapSegCacheUse 16 /* Number of cache entries to use */
179
180 struct pmap *freepmap; /* Free pmaps */
91447636
A
181 pmap_vmm_ext *pmapVmmExt; /* VMM extension block, for VMM host and guest pmaps */
182 addr64_t pmapVmmExtPhys; /* VMM extension block physical address */
55e303ae
A
183/* 0x038 */
184 uint64_t pmapSCSubTag; /* Segment cache sub-tags. This is a 16 entry 4 bit array */
185/* 0x040 */
186 sgc pmapSegCache[pmapSegCacheCnt]; /* SLD values cached for quick load */
187
188/* 0x140 */
189/* if fanout is 4, then shift is 1, if fanout is 8 shift is 2, etc */
190#define kSkipListFanoutShift 1
191/* with n lists, we can handle (fanout**n) pages optimally */
192#define kSkipListMaxLists 12
193 unsigned char pmapCurLists; /* 0x140 - max #lists any mapping in this pmap currently has */
194 unsigned char pmapRsv2[3];
195 uint32_t pmapRandNum; /* 0x144 - used by mapSetLists() as a random number generator */
196 addr64_t pmapSkipLists[kSkipListMaxLists]; /* 0x148 - the list headers */
197/* following statistics conditionally gathered */
198 uint64_t pmapSearchVisits; /* 0x1A8 - nodes visited searching pmaps */
199 uint32_t pmapSearchCnt; /* 0x1B0 - number of calls to mapSearch or mapSearchFull */
200
201 unsigned int pmapRsv3[3];
202
203/* 0x1C0 */
204
205 struct pmap_statistics stats; /* statistics */
1c79356b
A
206
207/* Need to pad out to a power of 2 - right now it is 512 bytes */
208#define pmapSize 512
209};
55e303ae
A
210#pragma pack()
211
#pragma pack(4)
/*
 * pmapTransTab - one entry of the space-number-to-pmap translation table
 * (pmapTrans, declared below): both addresses of the corresponding pmap.
 */
struct pmapTransTab {
	addr64_t		pmapPAddr;			/* Physical address of pmap */
	unsigned int	pmapVAddr;			/* Virtual address of pmap */
};
#pragma pack()							/* Make sure the structure stays as we defined it */

typedef struct pmapTransTab pmapTransTab;
1c79356b 220
91447636
A
/*
 *	Address Chunk IDentified Table
 */

struct acidTabEnt {
	unsigned int	acidVAddr;			/* Virtual address of pmap or pointer to next free entry */
	unsigned int	acidGas;			/* reserved */
	addr64_t		acidPAddr;			/* Physical address of pmap */
};

typedef struct acidTabEnt acidTabEnt;

extern acidTabEnt *acidTab;				/* Pointer to acid table */
extern acidTabEnt *acidFree;			/* List of free acid entries */
235
1c79356b
A
#define PMAP_NULL	((pmap_t) 0)

extern pmap_t	cursor_pmap;			/* The pmap to start allocations with */
extern pmap_t	sharedPmap;
extern unsigned int	sharedPage;
extern int ppc_max_adrsp;				/* Maximum number of concurrent address spaces allowed. */
extern addr64_t vm_max_address;			/* Maximum effective address supported */
extern addr64_t vm_max_physical;		/* Maximum physical address supported */
extern pmapTransTab *pmapTrans;			/* Space to pmap translate table */

/* User context switch just swaps the thread's map pointer on PPC */
#define	PMAP_SWITCH_USER(th, map, my_cpu) th->map = map;

/* No per-thread pmap context work needed on PPC (expands to nothing) */
#define PMAP_CONTEXT(pmap,th)

/* True iff VA falls within the kernel's virtual range */
#define pmap_kernel_va(VA)	\
	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= vm_last_addr))

#define	PPC_SID_KERNEL	0				/* Must change KERNEL_SEG_REG0_VALUE if !0 */

#define maxAdrSp	16384				/* Maximum address spaces (== 2^maxAdrSpb) */
#define maxAdrSpb	14

/* Fixed effective addresses/sizes of the kernel's mapping windows */
#define USER_MEM_WINDOW_VADDR	0x00000000E0000000ULL
#define PHYS_MEM_WINDOW_VADDR	0x0000000100000000ULL
#define IO_MEM_WINDOW_VADDR		0x0000000080000000ULL
#define IO_MEM_WINDOW_SIZE		0x0000000080000000ULL
#define pmapSmallBlock	65536

#define	pmap_kernel()				(kernel_pmap)
#define	pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define	pmap_resident_max(pmap)		((pmap)->stats.resident_max)
/* The following are no-ops on PPC (expand to nothing) */
#define	pmap_remove_attributes(pmap,start,end)
#define	pmap_copy(dpmap,spmap,da,len,sa)
#define	pmap_update()

/* Cache control options -- presumably passed as the 'attr'/'flags'
   arguments of the mapping calls below; TODO confirm against callers */
#define	PMAP_DEFAULT_CACHE	0
#define	PMAP_INHIBIT_CACHE	1
#define	PMAP_GUARDED_CACHE	2
#define	PMAP_ACTIVATE_CACHE	4
#define	PMAP_NO_GUARD_CACHE	8

/* corresponds to cached, coherent, not writethru, not guarded */
#define VM_WIMG_DEFAULT		(VM_MEM_COHERENT)
#define	VM_WIMG_COPYBACK	(VM_MEM_COHERENT)
#define VM_WIMG_IO			(VM_MEM_COHERENT | 	\
							VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
#define VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
/* write combining mode, aka store gather */
#define VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
9bccf70c 283
1c79356b
A
/*
 *	prototypes.
 */
extern addr64_t kvtophys(vm_offset_t va);				/* Get physical address from kernel virtual */
extern vm_map_offset_t kvtophys64(vm_map_offset_t va);	/* Get 64-bit physical address from kernel virtual */

/* Map the physical range [spa, epa) at va with the given protection/cache flags */
extern vm_offset_t pmap_map(vm_offset_t va,
				vm_offset_t spa,
				vm_offset_t epa,
				vm_prot_t prot,
				unsigned int flags);
extern kern_return_t pmap_add_physical_memory(vm_offset_t spa,
				vm_offset_t epa,
				boolean_t available,
				unsigned int attr);
extern void pmap_bootstrap(uint64_t msize,
				vm_offset_t *first_avail,
				unsigned int kmapsize);

extern vm_offset_t pmap_boot_map(vm_size_t size);

/* Cache synchronization/invalidation; *64 variants take 64-bit addresses,
   'phys' selects physical vs. virtual addressing */
extern void sync_cache64(addr64_t pa, unsigned length);
extern void sync_ppage(ppnum_t pa);
extern void sync_cache_virtual(vm_offset_t va, unsigned length);
extern void flush_dcache(vm_offset_t va, unsigned length, boolean_t phys);
extern void flush_dcache64(addr64_t va, unsigned length, boolean_t phys);
extern void invalidate_dcache(vm_offset_t va, unsigned length, boolean_t phys);
extern void invalidate_dcache64(addr64_t va, unsigned length, boolean_t phys);
extern void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys);
extern void invalidate_icache64(addr64_t va, unsigned length, boolean_t phys);

/* Block mappings; the _rc variant returns a result code instead of panicking
   -- presumably, TODO confirm against implementation */
extern void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);

/* Translate va in pmap to a physical page number */
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

extern void MapUserMemoryWindowInit(void);
extern addr64_t MapUserMemoryWindow(vm_map_t map, addr64_t va);
extern boolean_t pmap_eligible_for_execute(ppnum_t pa);
extern int pmap_list_resident_pages(
	struct pmap	*pmap,
	vm_offset_t	*listp,
	int		space);
extern void pmap_init_sharedpage(vm_offset_t cpg);
extern void pmap_disable_NX(pmap_t pmap);

/* Not required for ppc: 4GB-pagezero handling is a no-op here */
static inline void pmap_set_4GB_pagezero(__unused pmap_t pmap) {}
static inline void pmap_clear_4GB_pagezero(__unused pmap_t pmap) {}
91447636 329
1c79356b
A
330#endif /* _PPC_PMAP_H_ */
331