/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Header files for the hardware virtual memory mapping stuff
 */
91447636 A |
25 | #ifdef XNU_KERNEL_PRIVATE |
26 | ||
1c79356b A |
27 | #ifndef _PPC_MAPPINGS_H_ |
28 | #define _PPC_MAPPINGS_H_ | |
29 | ||
55e303ae A |
30 | #include <mach/mach_types.h> |
31 | #include <mach/vm_types.h> | |
32 | #include <mach/machine/vm_types.h> | |
33 | #include <mach/vm_prot.h> | |
34 | #include <mach/vm_statistics.h> | |
35 | #include <kern/assert.h> | |
36 | #include <kern/cpu_number.h> | |
37 | #include <kern/lock.h> | |
38 | #include <kern/queue.h> | |
39 | #include <ppc/proc_reg.h> | |
40 | ||
41 | /* | |
42 | * Don't change these structures unless you change the assembly code | |
43 | */ | |
44 | ||
45 | /* | |
46 | * This control block serves as anchor for all virtual mappings of the same physical | |
47 | * page, i.e., aliases. There is a table for each bank (mem_region). All tables | |
48 | * must reside in V=R storage and within the first 2GB of memory. Also, the | |
49 | * mappings to which it points must be on at least a 64-byte boundary. These | |
50 | * requirements allow a total of 2 bits for status and flags, and allow all address | |
51 | * calculations to be 32-bit. | |
52 | */ | |
53 | ||
54 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
55 | typedef struct phys_entry { | |
56 | addr64_t ppLink; /* Physical pointer to aliased mappings and flags */ | |
57 | #define ppLock 0x8000000000000000LL /* Lock for alias chain */ | |
91447636 A |
58 | #define ppFlags 0x700000000000000FLL /* Status and flags */ |
59 | #define ppI 0x2000000000000000LL /* Cache inhibited */ | |
60 | #define ppIb 2 /* Cache inhibited */ | |
61 | #define ppG 0x1000000000000000LL /* Guarded */ | |
62 | #define ppGb 3 /* Guarded */ | |
55e303ae A |
63 | #define ppR 0x0000000000000008LL /* Referenced */ |
64 | #define ppRb 60 /* Referenced */ | |
65 | #define ppC 0x0000000000000004LL /* Changed */ | |
66 | #define ppCb 61 /* Changed */ | |
91447636 A |
67 | |
68 | /* The lock, attribute, and flag bits are arranged so that their positions may be | |
69 | * described by a contiguous mask of one bits wrapping from bit postion 63 to 0. | |
70 | * In assembly language, we can then rapidly produce this mask with: | |
71 | * li r0,ppLFAmask ; r0 <- 0x00000000000000FF | |
72 | * rotrdi r0,r0,ppLFArrot ; r0 <- 0xF00000000000000F | |
73 | */ | |
74 | #define ppLFAmask 0x00FF /* One bit for each lock, attr, or flag bit */ | |
75 | #define ppLFArrot 4 /* Right-rotate count to obtain 64-bit mask */ | |
76 | } phys_entry_t; | |
55e303ae | 77 | #pragma pack() |
91447636 | 78 | #define physEntrySize sizeof(phys_entry_t) |
55e303ae A |
79 | |
80 | /* Memory may be non-contiguous. This data structure contains info | |
81 | * for mapping this non-contiguous space into the contiguous | |
82 | * physical->virtual mapping tables. An array of this type is | |
83 | * provided to the pmap system at bootstrap by ppc_vm_init. | |
84 | * | |
85 | */ | |
86 | ||
87 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
88 | typedef struct mem_region { | |
91447636 | 89 | phys_entry_t *mrPhysTab; /* Base of region table */ |
55e303ae A |
90 | ppnum_t mrStart; /* Start of region */ |
91 | ppnum_t mrEnd; /* Last page in region */ | |
92 | ppnum_t mrAStart; /* Next page in region to allocate */ | |
93 | ppnum_t mrAEnd; /* Last page in region to allocate */ | |
94 | } mem_region_t; | |
95 | #pragma pack() | |
96 | ||
97 | #define mrSize sizeof(mem_region_t) | |
91447636 | 98 | #define PMAP_MEM_REGION_MAX 11 |
55e303ae A |
99 | |
100 | extern mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX + 1]; | |
101 | extern int pmap_mem_regions_count; | |

/* Prototypes */


#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct PCA {					/* PTEG Control Area */
	union flgs {
		unsigned int	PCAallo;		/* Allocation controls */
		struct PCAalflgs {				/* Keep these in order!!! */
			unsigned char	PCAfree;	/* Indicates the slot is free */
			unsigned char	PCAsteal;	/* Steal scan start position */
			unsigned char	PCAauto;	/* Indicates that the PTE was autogenned */
			unsigned char	PCAmisc;	/* Misc. flags */
#define PCAlock 1						/* This locks up the associated PTEG */
#define PCAlockb 31
		} PCAalflgs;
	} flgs;
} PCA_t;
#pragma pack()
/* The hash table is composed of mappings organized into G groups of S slots
 * each. In the macros below, by GV_GROUPS_LG2, GV_SLOT_SZ_LG2, and GV_SLOTS_LG2, the number
 * of groups, the size (in bytes) of a slot, and the number of slots in a group are given.
 * Since these values are given as log2, they're restricted to powers of two. Fast operation
 * and all that.
 *
 * This patch of macros define all of the hash table's metrics and handy masks. It's a
 * build-time thing because it's faster that way. Only the first group of values may
 * be adjusted.
 */
#define GV_GROUPS_LG2	10	/* 1024 groups per hash table (log2(max) is 14, viz. 16K groups) */
#define GV_SLOTS_LG2	3	/* 8 slots per group (log2(max) is 8, viz. 256 slots) */

#define GV_SLOT_SZ_LG2	5	/* 32 bytes per slot (mapping size) */
#define GV_PGIDX_SZ_LG2	3	/* 64-bit Hash-table-page physical-addrress index entry size */
#define GV_PAGE_SZ_LG2	12	/* 4k-byte hash-table-page size */

#define GV_GROUPS		(1 << GV_GROUPS_LG2)
#define GV_SLOT_SZ		(1 << GV_SLOT_SZ_LG2)
#define GV_SLOTS		(1 << GV_SLOTS_LG2)
#define GV_PAGE_SZ		(1 << GV_PAGE_SZ_LG2)
#define GV_GRP_MASK		(GV_GROUPS - 1)
#define GV_SLOT_MASK	(GV_SLOTS - 1)
#define GV_PAGE_MASK	(GV_PAGE_SZ - 1)
#define GV_HPAGES		(1 << (GV_GROUPS_LG2 + GV_SLOT_SZ_LG2 + GV_SLOTS_LG2 - GV_PAGE_SZ_LG2))
#define GV_GRPS_PPG_LG2	(GV_PAGE_SZ_LG2 - (GV_SLOT_SZ_LG2 + GV_SLOTS_LG2))
#define GV_GRPS_PPG		(1 << GV_GRPS_PPG_LG2)
#define GV_SLTS_PPG_LG2	(GV_PAGE_SZ_LG2 - GV_SLOT_SZ_LG2)
#define GV_SLTS_PPG		(1 << GV_SLTS_PPG_LG2)

#define GV_HPAGE_SHIFT	(GV_PGIDX_SZ_LG2 - GV_GRPS_PPG_LG2)
#define GV_HPAGE_MASK	((GV_HPAGES - 1) << GV_PGIDX_SZ_LG2)
#define GV_HGRP_SHIFT	(GV_SLOT_SZ_LG2 + GV_SLOTS_LG2)
#define GV_HGRP_MASK	((GV_GRPS_PPG - 1) << GV_HGRP_SHIFT)

#define GV_MAPWD_BITS_LG2	5	/* 32-bit active map word size */
#define GV_MAPWD_SZ_LG2		(GV_MAPWD_BITS_LG2 - 3)
#define GV_BAND_SHIFT		(GV_MAPWD_BITS_LG2 + GV_SLOT_SZ_LG2)
#define GV_BAND_SZ_LG2		(GV_PAGE_SZ_LG2 - GV_SLOT_SZ_LG2 - GV_MAPWD_BITS_LG2)
#define GV_BAND_MASK		(((1 << GV_BAND_SZ_LG2) - 1) << GV_BAND_SHIFT)
#define GV_MAP_WORDS		(1 << (GV_GROUPS_LG2 + GV_SLOTS_LG2 - GV_MAPWD_BITS_LG2))
#define GV_MAP_MASK			((GV_MAP_WORDS - 1) << GV_MAPWD_SZ_LG2)
#define GV_MAP_SHIFT		(GV_PGIDX_SZ_LG2 - GV_BAND_SZ_LG2)

166 | ||
55e303ae A |
167 | /* Mappings currently come in two sizes: 64 and 128 bytes. The only difference is the |
168 | * number of skiplists (ie, mpLists): 64-byte mappings have 1-4 lists and 128-byte mappings | |
169 | * have from 5-12. Only 1 in 256 mappings is large, so an average mapping is 64.25 bytes. | |
170 | * All mappings are 64-byte aligned. | |
de355530 | 171 | * |
55e303ae A |
172 | * Special note on mpFIP and mpRIP: |
173 | * These flags are manipulated under various locks. RIP is always set under an | |
174 | * exclusive lock while FIP is shared. The only worry is that there is a possibility that | |
175 | * FIP could be attempted by more than 1 processor at a time. Obviously, one will win. | |
176 | * The other(s) bail all the way to user state and may refault (or not). There are only | |
91447636 | 177 | * a few things in mpFlags that are not static, mpFIP, mpRIP, and mpBusy. |
55e303ae | 178 | * |
91447636 A |
179 | * We organize these so that mpFIP is in a byte with static data and mpRIP is in another. |
180 | * That means that we can use a store byte to update the guys without worrying about load | |
181 | * and reserve. Note that mpFIP must be set atomically because it is under a share lock; | |
182 | * but, it may be cleared with a simple store byte. Because mpRip is set once and then never | |
183 | * cleared, we can get away with setting it by means of a simple store byte. | |
55e303ae A |
184 | * |
185 | */ | |
186 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
187 | typedef struct mapping { | |
188 | unsigned int mpFlags; /* 0x000 - Various flags, lock bit. These are static except for lock */ | |
189 | #define mpBusy 0xFF000000 /* Busy count */ | |
91447636 A |
190 | #define mpPrevious 0x00800000 /* A previous mapping exists in a composite */ |
191 | #define mpNext 0x00400000 /* A next mapping exist in a composite */ | |
192 | #define mpPIndex 0x003F0000 /* Index into physical table (in words) */ | |
193 | #define mpType 0x0000F000 /* Mapping type: */ | |
194 | #define mpNormal 0x00000000 /* Normal logical page - backed by RAM, RC maintained, logical page size == physical page size */ | |
195 | /* DO NOT CHANGE THIS CODE */ | |
196 | #define mpBlock 0x00001000 /* Block mapping - used for I/O memory or non-RC maintained RAM, logical page size is independent from physical */ | |
197 | #define mpMinSpecial 0x00002000 /* Any mapping with this type or above has extra special handling */ | |
198 | #define mpNest 0x00002000 /* Forces transtion to an alternate address space after applying relocation */ | |
199 | #define mpLinkage 0x00003000 /* Transition to current user address space with relocation - used for copyin/out/pv */ | |
200 | #define mpACID 0x00004000 /* Address Chunk ID - provides the address space ID for VSID calculation. Normally mapped at chunk size - 2KB */ | |
201 | #define mpGuest 0x00005000 /* Guest->physical shadow mapping */ | |
202 | /* 0x00006000 - 0x0000F000 Reserved */ | |
203 | #define mpFIP 0x00000800 /* Fault in progress */ | |
204 | #define mpFIPb 20 /* Fault in progress */ | |
205 | #define mpPcfg 0x00000700 /* Physical Page configuration */ | |
206 | #define mpPcfgb 23 /* Physical Page configuration index bit */ | |
55e303ae A |
207 | #define mpRIP 0x00000080 /* Remove in progress - DO NOT MOVE */ |
208 | #define mpRIPb 24 /* Remove in progress */ | |
91447636 A |
209 | #define mpPerm 0x00000040 /* Mapping is permanent - DO NOT MOVE */ |
210 | #define mpPermb 25 /* Mapping is permanent */ | |
211 | #define mpBSu 0x00000020 /* Basic Size unit - 0 = 4KB, 1 = 32MB */ | |
212 | #define mpBSub 26 /* Basic Size unit - 0 = 4KB, 1 = 32MB */ | |
55e303ae A |
213 | #define mpLists 0x0000001F /* Number of skip lists mapping is on, max of 27 */ |
214 | #define mpListsb 27 /* Number of skip lists mapping is on, max of 27 */ | |
91447636 A |
215 | #define mpgFlags 0x0000001F /* Shadow cache mappings re-use mpLists for flags: */ |
216 | #define mpgGlobal 0x00000004 /* Mapping is global (1) or local (0) */ | |
217 | #define mpgFree 0x00000002 /* Mapping is free */ | |
218 | #define mpgDormant 0x00000001 /* Mapping is dormant */ | |
1c79356b | 219 | |
55e303ae | 220 | unsigned short mpSpace; /* 0x004 - Address space hash */ |
91447636 A |
221 | union { |
222 | unsigned short mpBSize; /* 0x006 - Block size - 1 in pages - max block size 256MB */ | |
223 | unsigned char mpgCursor; /* 0x006 - Shadow-cache group allocation cursor (first mapping in the group) */ | |
224 | } u; | |
225 | ||
55e303ae A |
226 | unsigned int mpPte; /* 0x008 - Offset to PTEG in hash table. Offset to exact PTE if mpHValid set - NOTE: this MUST be 0 for block mappings */ |
227 | #define mpHValid 0x00000001 /* PTE is entered in hash table */ | |
228 | #define mpHValidb 31 /* PTE is entered in hash table */ | |
229 | ppnum_t mpPAddr; /* 0x00C - Physical page number */ | |
230 | addr64_t mpVAddr; /* 0x010 - Starting virtual address */ | |
231 | #define mpHWFlags 0x0000000000000FFFULL /* Reference/Change, WIMG, AC, N, protection flags from PTE */ | |
91447636 A |
232 | #define mpHWFlagsb 52 |
233 | #define mpN 0x0000000000000004ULL /* Page-level no-execute (PowerAS machines) */ | |
234 | #define mpNb 61 | |
235 | #define mpPP 0x0000000000000003ULL /* Protection flags */ | |
236 | #define mpPPb 62 | |
237 | #define mpPPe 63 | |
55e303ae A |
238 | #define mpKKN 0x0000000000000007ULL /* Segment key and no execute flag (nested pmap) */ |
239 | #define mpKKNb 61 | |
240 | #define mpWIMG 0x0000000000000078ULL /* Attribute bits */ | |
241 | #define mpWIMGb 57 | |
242 | #define mpW 0x0000000000000040ULL | |
243 | #define mpWb 57 | |
244 | #define mpI 0x0000000000000020ULL | |
245 | #define mpIb 58 | |
246 | #define mpM 0x0000000000000010ULL | |
247 | #define mpMb 59 | |
248 | #define mpG 0x0000000000000008ULL | |
249 | #define mpGb 60 | |
250 | #define mpWIMGe 60 | |
251 | #define mpC 0x0000000000000080ULL /* Change bit */ | |
252 | #define mpCb 56 | |
253 | #define mpR 0x0000000000000100ULL /* Reference bit */ | |
254 | #define mpRb 55 | |
255 | addr64_t mpAlias; /* 0x018 - Pointer to alias mappings of physical page */ | |
256 | #define mpNestReloc mpAlias /* 0x018 - Redefines mpAlias relocation value of vaddr to nested pmap value */ | |
257 | #define mpBlkRemCur mpAlias /* 0x018 - Next offset in block map to remove (this is 4 bytes) */ | |
258 | addr64_t mpList0; /* 0x020 - Forward chain of mappings. This one is always used */ | |
259 | addr64_t mpList[3]; /* 0x028 - Forward chain of mappings. Next higher order */ | |
260 | /* 0x040 - End of basic mapping */ | |
261 | #define mpBasicSize 64 | |
262 | #define mpBasicLists 4 | |
263 | /* note the dependence on kSkipListMaxLists, which must be <= #lists in a 256-byte mapping (ie, <=28) */ | |
264 | /* addr64_t mpList4[8]; 0x040 - First extended list entries */ | |
265 | /* 0x080 - End of first extended mapping */ | |
266 | /* addr64_t mpList12[8]; 0x080 - Second extended list entries */ | |
267 | /* 0x0C0 - End of second extended mapping */ | |
268 | /* addr64_t mpList20[8]; 0x0C0 - Third extended list entries */ | |
269 | /* 0x100 - End of third extended mapping */ | |
1c79356b | 270 | |
91447636 | 271 | } mapping_t; |
55e303ae | 272 | #pragma pack() |
1c79356b A |
273 | |
274 | #define MAPPING_NULL ((struct mapping *) 0) | |
275 | ||
#define mapDirect	0x08
#define mapRWNA		0x00000000
#define mapRWRO		0x00000001
#define mapRWRW		0x00000002
#define mapRORO		0x00000003

55e303ae A |
282 | /* All counts are in units of basic 64-byte mappings. A 128-byte mapping is |
283 | * just two adjacent 64-byte entries. | |
284 | */ | |
285 | #pragma pack(4) /* Make sure the structure stays as we defined it */ | |
9bccf70c A |
286 | |
287 | typedef struct mappingflush { | |
55e303ae A |
288 | addr64_t addr; /* Start address to search mapping */ |
289 | unsigned int spacenum; /* Last space num to search pmap */ | |
290 | unsigned int mapfgas[1]; /* Pad to 64 bytes */ | |
91447636 | 291 | } mappingflush_t; |
9bccf70c | 292 | |
1c79356b A |
293 | typedef struct mappingctl { |
294 | unsigned int mapclock; /* Mapping allocation lock */ | |
295 | unsigned int mapcrecurse; /* Mapping allocation recursion control */ | |
296 | struct mappingblok *mapcnext; /* First mapping block with free entries */ | |
297 | struct mappingblok *mapclast; /* Last mapping block with free entries */ | |
298 | struct mappingblok *mapcrel; /* List of deferred block releases */ | |
299 | unsigned int mapcfree; /* Total free entries on list */ | |
300 | unsigned int mapcinuse; /* Total entries in use */ | |
301 | unsigned int mapcreln; /* Total blocks on pending release list */ | |
302 | int mapcholdoff; /* Hold off clearing release list */ | |
303 | unsigned int mapcfreec; /* Total calls to mapping free */ | |
304 | unsigned int mapcallocc; /* Total calls to mapping alloc */ | |
55e303ae A |
305 | unsigned int mapcbig; /* Count times a big mapping was requested of mapping_alloc */ |
306 | unsigned int mapcbigfails; /* Times caller asked for a big one but we gave 'em a small one */ | |
1c79356b A |
307 | unsigned int mapcmin; /* Minimum free mappings to keep */ |
308 | unsigned int mapcmaxalloc; /* Maximum number of mappings allocated at one time */ | |
de355530 | 309 | unsigned int mapcgas[1]; /* Pad to 64 bytes */ |
55e303ae | 310 | struct mappingflush mapcflush; |
91447636 | 311 | } mappingctl_t; |
55e303ae | 312 | #pragma pack() |
1c79356b | 313 | |
55e303ae A |
314 | /* MAPPERBLOK is the number of basic 64-byte mappings per block (ie, per page.) */ |
315 | #define MAPPERBLOK 63 | |
1c79356b A |
316 | #define MAPALTHRSH (4*MAPPERBLOK) |
317 | #define MAPFRTHRSH (2 * ((MAPALTHRSH + MAPPERBLOK - 1) / MAPPERBLOK)) | |
318 | typedef struct mappingblok { | |
55e303ae A |
319 | unsigned int mapblokfree[2]; /* Bit map of free mapping entrys */ |
320 | addr64_t mapblokvrswap; /* Virtual address XORed with physical address */ | |
1c79356b A |
321 | unsigned int mapblokflags; /* Various flags */ |
322 | #define mbPerm 0x80000000 /* Block is permanent */ | |
323 | struct mappingblok *nextblok; /* Pointer to the next mapping block */ | |
91447636 | 324 | } mappingblok_t; |
1c79356b | 325 | |
#define mapRemChunk 128

#define mapRetCode	0xF
#define mapRtOK		0
#define mapRtBadLk	1
#define mapRtPerm	2
#define mapRtNotFnd	3
#define mapRtBlock	4
#define mapRtNest	5
#define mapRtRemove	6
#define mapRtMapDup	7
#define mapRtGuest	8
#define mapRtEmpty	9
#define mapRtSmash	0xA					/* Mapping already exists and doesn't match new mapping */

/*
 *	This struct describes available physical page configurations
 *	Note:
 *		Index 0 is required and is the primary page configuration (4K, non-large)
 *		Index 1 is the primary large page config if supported by hw (16M, large page)
 */

typedef struct pcfg {
	uint8_t		pcfFlags;				/* Flags */
#define pcfValid	0x80				/* Configuration is valid */
#define pcfLarge	0x40				/* Large page */
#define pcfDedSeg	0x20				/* Requires dedicated segment */
	uint8_t		pcfEncode;				/* Implementation specific PTE encoding */
	uint8_t		pcfPSize;				/* Page size in powers of 2 */
	uint8_t		pcfShift;				/* Shift for PTE construction */
} pcfg;

#define pcfDefPcfg		0				/* Primary page configuration */
#define pcfLargePcfg	1				/* Primary large page configuration */
360 | ||
361 | extern pcfg pPcfg[8]; /* Supported page configurations */ | |
362 | ||
363 | extern mappingctl_t mapCtl; /* Mapping allocation control */ | |
364 | ||
365 | extern unsigned char ppc_prot[]; /* Mach -> PPC protection translation table */ | |
366 | ||
367 | #define getProtPPC(__key) (ppc_prot[(__key) & 0xF]) | |
368 | /* Safe Mach -> PPC protection key conversion */ | |
1c79356b | 369 | |
55e303ae | 370 | extern addr64_t mapping_remove(pmap_t pmap, addr64_t va); /* Remove a single mapping for this VADDR */ |
91447636 | 371 | extern mapping_t *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full); /* Finds a mapping */ |
1c79356b | 372 | extern void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked); /* Sets start and end of a block of mappings */ |
1c79356b A |
373 | extern void mapping_prealloc(unsigned int); /* Preallocate mappings for large use */ |
374 | extern void mapping_relpre(void); /* Releases preallocate request */ | |
375 | extern void mapping_init(void); /* Do initial stuff */ | |
91447636 | 376 | extern mapping_t *mapping_alloc(int lists); /* Obtain a mapping */ |
1c79356b | 377 | extern void mapping_free(struct mapping *mp); /* Release a mapping */ |
55e303ae A |
378 | extern boolean_t mapping_tst_ref(ppnum_t pa); /* Tests the reference bit of a physical page */ |
379 | extern boolean_t mapping_tst_mod(ppnum_t pa); /* Tests the change bit of a physical page */ | |
380 | extern void mapping_set_ref(ppnum_t pa); /* Sets the reference bit of a physical page */ | |
381 | extern void mapping_clr_ref(ppnum_t pa); /* Clears the reference bit of a physical page */ | |
382 | extern void mapping_set_mod(ppnum_t pa); /* Sets the change bit of a physical page */ | |
383 | extern void mapping_clr_mod(ppnum_t pa); /* Clears the change bit of a physical page */ | |
91447636 A |
384 | extern unsigned int mapping_tst_refmod(ppnum_t pa); /* Tests the reference and change bits of a physical page */ |
385 | extern void mapping_clr_refmod(ppnum_t pa, unsigned int mask); /* Clears the reference and change bits of a physical page */ | |
55e303ae | 386 | extern void mapping_protect_phys(ppnum_t pa, vm_prot_t prot); /* Change protection of all mappings to page */ |
91447636 | 387 | extern void mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva); /* Change protection of a single mapping to page */ |
55e303ae A |
388 | extern addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot); /* Make a mapping */ |
389 | /* Flags for mapping_make */ | |
390 | #define mmFlgBlock 0x80000000 /* This is a block map, use size for number of pages covered */ | |
391 | #define mmFlgUseAttr 0x40000000 /* Use specified attributes */ | |
392 | #define mmFlgPerm 0x20000000 /* Mapping is permanant */ | |
91447636 | 393 | #define mmFlgPcfg 0x07000000 /* Physical page configuration index */ |
55e303ae A |
394 | #define mmFlgCInhib 0x00000002 /* Cahching inhibited - use if mapFlgUseAttr set or block */ |
395 | #define mmFlgGuarded 0x00000001 /* Access guarded - use if mapFlgUseAttr set or block */ | |
396 | extern void mapping_purge(ppnum_t pa); /* Remove all mappings for this physent */ | |
397 | extern addr64_t mapping_p2v(pmap_t pmap, ppnum_t pa); /* Finds first virtual mapping of a physical page in a space */ | |
398 | extern void mapping_drop_busy(struct mapping *mapping); /* Drops busy count on mapping */ | |
91447636 | 399 | extern phys_entry_t *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex); /* Finds the physical entry for the page */ |
55e303ae A |
400 | extern int mapalc1(struct mappingblok *mb); /* Finds and allcates a 1-bit mapping entry */ |
401 | extern int mapalc2(struct mappingblok *mb); /* Finds and allcates a 2-bit mapping entry */ | |
1c79356b A |
402 | extern void ignore_zero_fault(boolean_t type); /* Sets up to ignore or honor any fault on page 0 access for the current thread */ |
403 | ||
91447636 A |
404 | extern void mapping_fake_zone_info( /* return mapping usage stats as a fake zone info */ |
405 | int *count, | |
406 | vm_size_t *cur_size, | |
407 | vm_size_t *max_size, | |
408 | vm_size_t *elem_size, | |
409 | vm_size_t *alloc_size, | |
410 | int *collectable, | |
411 | int *exhaustable); | |
412 | ||
413 | extern mapping_t *hw_rem_map(pmap_t pmap, addr64_t va, addr64_t *next); /* Remove a mapping from the system */ | |
414 | extern mapping_t *hw_purge_map(pmap_t pmap, addr64_t va, addr64_t *next); /* Remove a regular mapping from the system */ | |
415 | extern mapping_t *hw_purge_space(struct phys_entry *pp, pmap_t pmap); /* Remove the first mapping for a specific pmap from physentry */ | |
416 | extern mapping_t *hw_purge_phys(struct phys_entry *pp); /* Remove the first mapping for a physentry */ | |
417 | extern mapping_t *hw_scrub_guest(struct phys_entry *pp, pmap_t pmap); /* Scrub first guest mapping belonging to this host */ | |
418 | extern mapping_t *hw_find_map(pmap_t pmap, addr64_t va, addr64_t *nextva); /* Finds a mapping */ | |
419 | extern mapping_t *hw_find_space(struct phys_entry *pp, unsigned int space); /* Given a phys_entry, find its first mapping in the specified space */ | |
55e303ae | 420 | extern addr64_t hw_add_map(pmap_t pmap, struct mapping *mp); /* Add a mapping to a pmap */ |
91447636 | 421 | extern unsigned int hw_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva); /* Change the protection of a virtual page */ |
55e303ae A |
422 | extern unsigned int hw_test_rc(pmap_t pmap, addr64_t va, boolean_t reset); /* Test and optionally reset the RC bit of specific mapping */ |
423 | ||
91447636 A |
424 | extern unsigned int hw_clear_maps(void); |
425 | ||
426 | extern unsigned int hw_walk_phys(struct phys_entry *pp, unsigned int preop, unsigned int op, /* Perform function on all mappings on a physical page */ | |
427 | unsigned int postop, unsigned int parm, unsigned int opmod); | |
428 | /* Opcodes for hw_walk_phys */ | |
429 | #define hwpNoop 0 /* No operation */ | |
430 | #define hwpSPrtPhy 1 /* Sets protection in physent (obsolete) */ | |
431 | #define hwpSPrtMap 2 /* Sets protection in mapping */ | |
432 | #define hwpSAtrPhy 3 /* Sets attributes in physent */ | |
433 | #define hwpSAtrMap 4 /* Sets attributes in mapping */ | |
434 | #define hwpCRefPhy 5 /* Clears reference in physent */ | |
435 | #define hwpCRefMap 6 /* Clears reference in mapping */ | |
436 | #define hwpCCngPhy 7 /* Clears change in physent */ | |
437 | #define hwpCCngMap 8 /* Clears change in mapping */ | |
438 | #define hwpSRefPhy 9 /* Sets reference in physent */ | |
439 | #define hwpSRefMap 10 /* Sets reference in mapping */ | |
440 | #define hwpSCngPhy 11 /* Sets change in physent */ | |
441 | #define hwpSCngMap 12 /* Sets change in mapping */ | |
442 | #define hwpTRefPhy 13 /* Tests reference in physent */ | |
443 | #define hwpTRefMap 14 /* Tests reference in mapping */ | |
444 | #define hwpTCngPhy 15 /* Tests change in physent */ | |
445 | #define hwpTCngMap 16 /* Tests change in mapping */ | |
446 | #define hwpTRefCngPhy 17 /* Tests reference and change in physent */ | |
447 | #define hwpTRefCngMap 18 /* Tests reference and change in mapping */ | |
448 | #define hwpCRefCngPhy 19 /* Clears reference and change in physent */ | |
449 | #define hwpCRefCngMap 20 /* Clears reference and change in mapping */ | |
450 | /* Operation modifiers for connected PTE visits for hw_walk_phys */ | |
451 | #define hwpPurgePTE 0 /* Invalidate/purge PTE and merge RC bits for each connected mapping */ | |
452 | #define hwpMergePTE 1 /* Merge RC bits for each connected mapping */ | |
453 | #define hwpNoopPTE 2 /* Take no additional action for each connected mapping */ | |
1c79356b | 454 | |
1c79356b A |
455 | extern void hw_set_user_space(pmap_t pmap); /* Indicate we need a space switch */ |
456 | extern void hw_set_user_space_dis(pmap_t pmap); /* Indicate we need a space switch (already disabled) */ | |
55e303ae A |
457 | extern void hw_setup_trans(void); /* Setup hardware for translation */ |
458 | extern void hw_start_trans(void); /* Start translation for the first time */ | |
459 | extern void hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va); /* Validate a segment */ | |
460 | extern void hw_blow_seg(addr64_t seg); /* Invalidate a segment */ | |
461 | extern void invalidateSegs(pmap_t pmap); /* Invalidate the segment cache */ | |
462 | extern struct phys_entry *pmap_find_physentry(ppnum_t pa); | |
463 | extern void mapLog(unsigned int laddr, unsigned int type, addr64_t va); | |
464 | extern unsigned int mapSkipListVerifyC(pmap_t pmap, unsigned long long *dumpa); | |
465 | extern void fillPage(ppnum_t pa, unsigned int fill); | |
91447636 A |
466 | extern kern_return_t hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which); |
467 | ||
468 | extern void hw_rem_all_gv(pmap_t pmap); /* Remove all of a guest's mappings */ | |
469 | extern void hw_rem_local_gv(pmap_t gpmap); /* Remove guest local mappings */ | |
470 | extern unsigned int hw_res_map_gv(pmap_t hpmap, pmap_t gpmap, addr64_t hva, addr64_t gva, vm_prot_t prot); | |
471 | /* Resume a guest mapping */ | |
472 | extern void hw_add_map_gv(pmap_t hpmap, pmap_t gpmap, addr64_t gva, unsigned int mflags, ppnum_t pa); | |
473 | /* Add a guest mapping */ | |
474 | extern void hw_susp_map_gv(pmap_t hpmap, pmap_t gpmap, addr64_t gva); | |
475 | /* Suspend a guest mapping */ | |
476 | extern unsigned int hw_test_rc_gv(pmap_t hpmap, pmap_t gpmap, addr64_t gva, unsigned int reset); | |
477 | /* Test/reset mapping ref and chg */ | |
478 | extern unsigned int hw_protect_gv(pmap_t gpmap, addr64_t va, vm_prot_t prot); | |
479 | /* Change the protection of a guest page */ | |
480 | extern addr64_t hw_gva_to_hva(pmap_t gpmap, addr64_t gva); /* Convert guest to host virtual address */ | |
481 | extern unsigned int hw_find_map_gv(pmap_t gpmap, addr64_t gva, void *mpbuf); | |
482 | /* Find and copy guest mapping into buffer */ | |
1c79356b A |
483 | |
484 | extern unsigned int mappingdeb0; /* (TEST/DEBUG) */ | |
485 | extern unsigned int incrVSID; /* VSID increment value */ | |
486 | ||
91447636 A |
487 | extern int mapSetLists(pmap_t); |
488 | extern void consider_mapping_adjust(void); | |
489 | ||
1c79356b A |
490 | #endif /* _PPC_MAPPINGS_H_ */ |
491 | ||
91447636 | 492 | #endif /* XNU_KERNEL_PRIVATE */ |