/*
 * (extraction note) This is osfmk/ppc/mappings.h from the Apple XNU kernel,
 * release xnu-792.13.8. The original was retrieved from a git-blame view;
 * blame line-number prefixes are extraction artifacts, not source text.
 */
1/*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30/*
31 * Header files for the hardware virtual memory mapping stuff
32 */
33#ifdef XNU_KERNEL_PRIVATE
34
35#ifndef _PPC_MAPPINGS_H_
36#define _PPC_MAPPINGS_H_
37
38#include <mach/mach_types.h>
39#include <mach/vm_types.h>
40#include <mach/machine/vm_types.h>
41#include <mach/vm_prot.h>
42#include <mach/vm_statistics.h>
43#include <kern/assert.h>
44#include <kern/cpu_number.h>
45#include <kern/lock.h>
46#include <kern/queue.h>
47#include <ppc/proc_reg.h>
48
49/*
50 * Don't change these structures unless you change the assembly code
51 */
52
53/*
54 * This control block serves as anchor for all virtual mappings of the same physical
55 * page, i.e., aliases. There is a table for each bank (mem_region). All tables
56 * must reside in V=R storage and within the first 2GB of memory. Also, the
57 * mappings to which it points must be on at least a 64-byte boundary. These
58 * requirements allow a total of 2 bits for status and flags, and allow all address
59 * calculations to be 32-bit.
60 */
61
#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct phys_entry {
	addr64_t	ppLink;					/* Physical pointer to aliased mappings and flags */
#define		ppLock		0x8000000000000000LL	/* Lock for alias chain */
#define		ppFlags		0x700000000000000FLL	/* Status and flags */
#define		ppI			0x2000000000000000LL	/* Cache inhibited */
#define		ppIb		2						/* Cache inhibited - bit position */
#define		ppG			0x1000000000000000LL	/* Guarded */
#define		ppGb		3						/* Guarded - bit position */
#define		ppR			0x0000000000000008LL	/* Referenced */
#define		ppRb		60						/* Referenced - bit position */
#define		ppC			0x0000000000000004LL	/* Changed */
#define		ppCb		61						/* Changed - bit position */

/* The lock, attribute, and flag bits are arranged so that their positions may be
 * described by a contiguous mask of one bits wrapping from bit position 63 to 0.
 * In assembly language, we can then rapidly produce this mask with:
 *		li		r0,ppLFAmask		; r0 <- 0x00000000000000FF
 *		rotrdi	r0,r0,ppLFArrot		; r0 <- 0xF00000000000000F
 */
#define	ppLFAmask	0x00FF				/* One bit for each lock, attr, or flag bit */
#define	ppLFArrot	4					/* Right-rotate count to obtain 64-bit mask */
} phys_entry_t;
#pragma pack()
#define physEntrySize sizeof(phys_entry_t)
87
88/* Memory may be non-contiguous. This data structure contains info
89 * for mapping this non-contiguous space into the contiguous
90 * physical->virtual mapping tables. An array of this type is
91 * provided to the pmap system at bootstrap by ppc_vm_init.
92 *
93 */
94
#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct mem_region {
	phys_entry_t *mrPhysTab;			/* Base of region table (one phys_entry per page in the bank) */
	ppnum_t		mrStart;				/* Start of region */
	ppnum_t		mrEnd;					/* Last page in region */
	ppnum_t		mrAStart;				/* Next page in region to allocate */
	ppnum_t		mrAEnd;					/* Last page in region to allocate */
} mem_region_t;
#pragma pack()

#define mrSize	sizeof(mem_region_t)
#define PMAP_MEM_REGION_MAX 11			/* Maximum number of memory banks (array below has one spare slot) */

extern mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX + 1];
extern int          pmap_mem_regions_count;	/* Number of entries of pmap_mem_regions actually in use */
110
111/* Prototypes */
112
113
#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct PCA {					/* PTEG Control Area - one per PTE group in the hash table */
	union flgs {
		unsigned int	PCAallo;		/* Allocation controls (all four bytes as one word) */
		struct PCAalflgs {				/* Keep these in order!!! (assembly depends on byte layout) */
			unsigned char	PCAfree;	/* Indicates the slot is free */
			unsigned char	PCAsteal;	/* Steal scan start position */
			unsigned char	PCAauto;	/* Indicates that the PTE was autogenned */
			unsigned char	PCAmisc;	/* Misc. flags */
#define PCAlock 1						/* This locks up the associated PTEG */
#define PCAlockb 31						/* Lock bit position within PCAallo */
		} PCAalflgs;
	} flgs;
} PCA_t;
#pragma pack()
129
130/* The hash table is composed of mappings organized into G groups of S slots
131 * each. In the macros below, by GV_GROUPS_LG2, GV_SLOT_SZ_LG2, and GV_SLOTS_LG2, the number
132 * of groups, the size (in bytes) of a slot, and the number of slots in a group are given.
133 * Since these values are given as log2, they're restricted to powers of two. Fast operation
134 * and all that.
135 *
136 * This patch of macros define all of the hash table's metrics and handy masks. It's a
137 * build-time thing because it's faster that way. Only the first group of values may
138 * be adjusted.
139 */
/* Guest-shadow hash table metrics. Only the first group of values (the two
 * log2 knobs below) may be adjusted; everything else is derived at build time.
 */
#define GV_GROUPS_LG2	10	/* 1024 groups per hash table (log2(max) is 14, viz. 16K groups) */
#define GV_SLOTS_LG2	3	/* 8 slots per group (log2(max) is 8, viz. 256 slots) */

#define GV_SLOT_SZ_LG2	5	/* 32 bytes per slot (mapping size) */
#define GV_PGIDX_SZ_LG2	3	/* 64-bit hash-table-page physical-address index entry size */
#define GV_PAGE_SZ_LG2	12	/* 4k-byte hash-table-page size */

#define GV_GROUPS		(1 << GV_GROUPS_LG2)	/* Number of groups in the table */
#define GV_SLOT_SZ		(1 << GV_SLOT_SZ_LG2)	/* Bytes per slot */
#define GV_SLOTS		(1 << GV_SLOTS_LG2)		/* Slots per group */
#define GV_PAGE_SZ		(1 << GV_PAGE_SZ_LG2)	/* Bytes per hash-table page */
#define GV_GRP_MASK		(GV_GROUPS - 1)			/* Mask to extract group index from a hash */
#define GV_SLOT_MASK	(GV_SLOTS - 1)			/* Mask to extract slot index */
#define GV_PAGE_MASK	(GV_PAGE_SZ - 1)		/* Byte offset within a hash-table page */
#define GV_HPAGES		(1 << (GV_GROUPS_LG2 + GV_SLOT_SZ_LG2 + GV_SLOTS_LG2 - GV_PAGE_SZ_LG2))
							/* Number of pages making up the hash table */
#define GV_GRPS_PPG_LG2	(GV_PAGE_SZ_LG2 - (GV_SLOT_SZ_LG2 + GV_SLOTS_LG2))
#define GV_GRPS_PPG		(1 << GV_GRPS_PPG_LG2)	/* Groups per hash-table page */
#define GV_SLTS_PPG_LG2	(GV_PAGE_SZ_LG2 - GV_SLOT_SZ_LG2)
#define GV_SLTS_PPG		(1 << GV_SLTS_PPG_LG2)	/* Slots per hash-table page */

#define GV_HPAGE_SHIFT	(GV_PGIDX_SZ_LG2 - GV_GRPS_PPG_LG2)	/* Converts group index to page-index byte offset */
#define GV_HPAGE_MASK	((GV_HPAGES - 1) << GV_PGIDX_SZ_LG2)
#define GV_HGRP_SHIFT	(GV_SLOT_SZ_LG2 + GV_SLOTS_LG2)		/* Converts group-within-page to byte offset */
#define GV_HGRP_MASK	((GV_GRPS_PPG - 1) << GV_HGRP_SHIFT)

#define GV_MAPWD_BITS_LG2	5	/* 32-bit active-map word size */
#define GV_MAPWD_SZ_LG2		(GV_MAPWD_BITS_LG2 - 3)	/* Active-map word size in bytes (log2) */
#define GV_BAND_SHIFT		(GV_MAPWD_BITS_LG2 + GV_SLOT_SZ_LG2)
#define GV_BAND_SZ_LG2		(GV_PAGE_SZ_LG2 - GV_SLOT_SZ_LG2 - GV_MAPWD_BITS_LG2)
#define GV_BAND_MASK		(((1 << GV_BAND_SZ_LG2) - 1) << GV_BAND_SHIFT)
#define GV_MAP_WORDS		(1 << (GV_GROUPS_LG2 + GV_SLOTS_LG2 - GV_MAPWD_BITS_LG2))
							/* Words in the active-slot bitmap */
#define GV_MAP_MASK			((GV_MAP_WORDS - 1) << GV_MAPWD_SZ_LG2)
#define GV_MAP_SHIFT		(GV_PGIDX_SZ_LG2 - GV_BAND_SZ_LG2)
173
174
175/* Mappings currently come in two sizes: 64 and 128 bytes. The only difference is the
176 * number of skiplists (ie, mpLists): 64-byte mappings have 1-4 lists and 128-byte mappings
177 * have from 5-12. Only 1 in 256 mappings is large, so an average mapping is 64.25 bytes.
178 * All mappings are 64-byte aligned.
179 *
180 * Special note on mpFIP and mpRIP:
181 * These flags are manipulated under various locks. RIP is always set under an
182 * exclusive lock while FIP is shared. The only worry is that there is a possibility that
183 * FIP could be attempted by more than 1 processor at a time. Obviously, one will win.
184 * The other(s) bail all the way to user state and may refault (or not). There are only
185 * a few things in mpFlags that are not static, mpFIP, mpRIP, and mpBusy.
186 *
187 * We organize these so that mpFIP is in a byte with static data and mpRIP is in another.
188 * That means that we can use a store byte to update the guys without worrying about load
189 * and reserve. Note that mpFIP must be set atomically because it is under a share lock;
190 * but, it may be cleared with a simple store byte. Because mpRip is set once and then never
191 * cleared, we can get away with setting it by means of a simple store byte.
192 *
193 */
#pragma pack(4)							/* Make sure the structure stays as we defined it */
typedef struct mapping {
	unsigned int	mpFlags;			/* 0x000 - Various flags, lock bit. These are static except for lock */
#define	mpBusy		0xFF000000			/* Busy count */
#define	mpPrevious	0x00800000			/* A previous mapping exists in a composite */
#define	mpNext		0x00400000			/* A next mapping exists in a composite */
#define	mpPIndex	0x003F0000			/* Index into physical table (in words) */
#define	mpType		0x0000F000			/* Mapping type: */
#define	mpNormal	  0x00000000		/* Normal logical page - backed by RAM, RC maintained, logical page size == physical page size */
										/* DO NOT CHANGE THIS CODE */
#define	mpBlock		  0x00001000		/* Block mapping - used for I/O memory or non-RC maintained RAM, logical page size is independent from physical */
#define	mpMinSpecial  0x00002000		/* Any mapping with this type or above has extra special handling */
#define	mpNest		  0x00002000		/* Forces transition to an alternate address space after applying relocation */
#define	mpLinkage	  0x00003000		/* Transition to current user address space with relocation - used for copyin/out/pv */
#define	mpACID		  0x00004000		/* Address Chunk ID - provides the address space ID for VSID calculation. Normally mapped at chunk size - 2KB */
#define	mpGuest		  0x00005000		/* Guest->physical shadow mapping */
/*				0x00006000 - 0x0000F000	Reserved */
#define	mpFIP		0x00000800			/* Fault in progress */
#define	mpFIPb		20					/* Fault in progress - bit position */
#define	mpPcfg		0x00000700			/* Physical Page configuration */
#define	mpPcfgb		23					/* Physical Page configuration index bit */
#define	mpRIP		0x00000080			/* Remove in progress - DO NOT MOVE */
#define	mpRIPb		24					/* Remove in progress - bit position */
#define	mpPerm		0x00000040			/* Mapping is permanent - DO NOT MOVE */
#define	mpPermb		25					/* Mapping is permanent - bit position */
#define	mpBSu		0x00000020			/* Basic Size unit - 0 = 4KB, 1 = 32MB */
#define	mpBSub		26					/* Basic Size unit - bit position */
#define	mpLists		0x0000001F			/* Number of skip lists mapping is on, max of 27 */
#define	mpListsb	27					/* Number of skip lists - field start bit position */
#define	mpgFlags	0x0000001F			/* Shadow cache mappings re-use mpLists for flags: */
#define	mpgGlobal	0x00000004			/* Mapping is global (1) or local (0) */
#define	mpgFree		0x00000002			/* Mapping is free */
#define	mpgDormant	0x00000001			/* Mapping is dormant */

	unsigned short	mpSpace;			/* 0x004 - Address space hash */
	union {
		unsigned short	mpBSize;		/* 0x006 - Block size - 1 in pages - max block size 256MB */
		unsigned char	mpgCursor;		/* 0x006 - Shadow-cache group allocation cursor (first mapping in the group) */
	} u;

	unsigned int	mpPte;				/* 0x008 - Offset to PTEG in hash table. Offset to exact PTE if mpHValid set - NOTE: this MUST be 0 for block mappings */
#define mpHValid	0x00000001			/* PTE is entered in hash table */
#define mpHValidb	31					/* PTE is entered in hash table - bit position */
	ppnum_t			mpPAddr;			/* 0x00C - Physical page number */
	addr64_t		mpVAddr;			/* 0x010 - Starting virtual address (low 12 bits double as HW flags, below) */
#define mpHWFlags	0x0000000000000FFFULL	/* Reference/Change, WIMG, AC, N, protection flags from PTE */
#define mpHWFlagsb	52
#define mpN			0x0000000000000004ULL	/* Page-level no-execute (PowerAS machines) */
#define mpNb		61
#define mpPP		0x0000000000000003ULL	/* Protection flags */
#define mpPPb		62
#define mpPPe		63
#define mpKKN		0x0000000000000007ULL	/* Segment key and no-execute flag (nested pmap) */
#define mpKKNb		61
#define mpWIMG		0x0000000000000078ULL	/* Attribute bits (Write-through, cache Inhibit, Memory coherence, Guarded) */
#define mpWIMGb		57
#define mpW			0x0000000000000040ULL
#define mpWb		57
#define mpI			0x0000000000000020ULL
#define mpIb		58
#define mpM			0x0000000000000010ULL
#define mpMb		59
#define mpG			0x0000000000000008ULL
#define mpGb		60
#define mpWIMGe		60
#define mpC			0x0000000000000080ULL	/* Change bit */
#define mpCb		56
#define mpR			0x0000000000000100ULL	/* Reference bit */
#define mpRb		55
	addr64_t		mpAlias;			/* 0x018 - Pointer to alias mappings of physical page */
#define mpNestReloc	mpAlias				/* 0x018 - Redefines mpAlias as relocation value of vaddr to nested pmap value */
#define mpBlkRemCur	mpAlias				/* 0x018 - Next offset in block map to remove (this is 4 bytes) */
	addr64_t		mpList0;			/* 0x020 - Forward chain of mappings. This one is always used */
	addr64_t		mpList[3];			/* 0x028 - Forward chain of mappings. Next higher order */
/*										   0x040 - End of basic mapping */
#define	mpBasicSize	64
#define	mpBasicLists 4
/* note the dependence on kSkipListMaxLists, which must be <= #lists in a 256-byte mapping (ie, <=28) */
/*	addr64_t	mpList4[8];				   0x040 - First extended list entries */
/*										   0x080 - End of first extended mapping */
/*	addr64_t	mpList12[8];			   0x080 - Second extended list entries */
/*										   0x0C0 - End of second extended mapping */
/*	addr64_t	mpList20[8];			   0x0C0 - Third extended list entries */
/*										   0x100 - End of third extended mapping */

} mapping_t;
#pragma pack()
281
#define MAPPING_NULL	((struct mapping *) 0)

#define mapDirect 0x08					/* Mapping is V=R (direct) */
/* Protection codes (PowerPC PP-field style: kernel/user access pairs) */
#define mapRWNA   0x00000000			/* Kernel RW, user no access */
#define mapRWRO   0x00000001			/* Kernel RW, user read-only */
#define mapRWRW   0x00000002			/* Kernel RW, user RW */
#define mapRORO   0x00000003			/* Kernel RO, user RO */
289
290/* All counts are in units of basic 64-byte mappings. A 128-byte mapping is
291 * just two adjacent 64-byte entries.
292 */
#pragma pack(4)							/* Make sure the structure stays as we defined it */

typedef struct mappingflush {
	addr64_t		addr;				/* Start address to search mapping */
	unsigned int	spacenum;			/* Last space num to search pmap */
	unsigned int	mapfgas[1];			/* Pad to 64 bytes */
} mappingflush_t;

typedef struct mappingctl {
	unsigned int	mapclock;			/* Mapping allocation lock */
	unsigned int	mapcrecurse;		/* Mapping allocation recursion control */
	struct mappingblok	*mapcnext;		/* First mapping block with free entries */
	struct mappingblok	*mapclast;		/* Last mapping block with free entries */
	struct mappingblok	*mapcrel;		/* List of deferred block releases */
	unsigned int	mapcfree;			/* Total free entries on list */
	unsigned int	mapcinuse;			/* Total entries in use */
	unsigned int	mapcreln;			/* Total blocks on pending release list */
	int				mapcholdoff;		/* Hold off clearing release list */
	unsigned int	mapcfreec;			/* Total calls to mapping free */
	unsigned int	mapcallocc;			/* Total calls to mapping alloc */
	unsigned int	mapcbig;			/* Count times a big mapping was requested of mapping_alloc */
	unsigned int	mapcbigfails;		/* Times caller asked for a big one but we gave 'em a small one */
	unsigned int	mapcmin;			/* Minimum free mappings to keep */
	unsigned int	mapcmaxalloc;		/* Maximum number of mappings allocated at one time */
	unsigned int	mapcgas[1];			/* Pad to 64 bytes */
	struct mappingflush	mapcflush;
} mappingctl_t;
#pragma pack()
321
/* MAPPERBLOK is the number of basic 64-byte mappings per block (ie, per page.) */
#define MAPPERBLOK 63
#define MAPALTHRSH (4*MAPPERBLOK)		/* Allocation threshold before replenishing */
#define MAPFRTHRSH (2 * ((MAPALTHRSH + MAPPERBLOK - 1) / MAPPERBLOK))
typedef struct mappingblok {
	unsigned int	mapblokfree[2];		/* Bit map of free mapping entries */
	addr64_t		mapblokvrswap;		/* Virtual address XORed with physical address */
	unsigned int	mapblokflags;		/* Various flags */
#define mbPerm 0x80000000				/* Block is permanent */
	struct mappingblok	*nextblok;		/* Pointer to the next mapping block */
} mappingblok_t;
333
#define mapRemChunk 128					/* Mappings processed per block-removal chunk */

/* Return codes from the low-level mapping primitives (masked by mapRetCode) */
#define mapRetCode	0xF
#define mapRtOK		0					/* Success */
#define mapRtBadLk	1					/* Lock acquisition failure */
#define mapRtPerm	2					/* Mapping is permanent */
#define mapRtNotFnd	3					/* Mapping not found */
#define mapRtBlock	4					/* Mapping is a block mapping */
#define mapRtNest	5					/* Mapping is a nested-pmap mapping */
#define mapRtRemove	6					/* Mapping has a removal in progress */
#define mapRtMapDup	7					/* Mapping already exists */
#define mapRtGuest	8					/* Mapping is a guest-shadow mapping */
#define	mapRtEmpty	9					/* No mappings to process */
#define mapRtSmash	10					/* Mapping already exists and doesn't match new mapping */
#define mapRtBadSz	11					/* Requested size too big or more than 256MB and not mult of 32MB */
349
350/*
351 * This struct describes available physical page configurations
352 * Note:
353 * Index 0 is required and is the primary page configuration (4K, non-large)
354 * Index 1 is the primary large page config if supported by hw (16M, large page)
355 */
356
typedef struct pcfg {
	uint8_t	pcfFlags;					/* Flags */
#define pcfValid	0x80				/* Configuration is valid */
#define pcfLarge	0x40				/* Large page */
#define pcfDedSeg	0x20				/* Requires dedicated segment */
	uint8_t	pcfEncode;					/* Implementation specific PTE encoding */
	uint8_t	pcfPSize;					/* Page size in powers of 2 */
	uint8_t	pcfShift;					/* Shift for PTE construction */
} pcfg;

#define pcfDefPcfg		0				/* Primary page configuration */
#define pcfLargePcfg	1				/* Primary large page configuration */

extern pcfg pPcfg[8];					/* Supported page configurations */
371
extern mappingctl_t	mapCtl;				/* Mapping allocation control */

extern unsigned char ppc_prot[];		/* Mach -> PPC protection translation table */

vm_prot_t getProtPPC(int, boolean_t);	/* Safe Mach -> PPC protection key conversion */

/* Mapping allocation, lookup, and per-page reference/change maintenance */
extern addr64_t 	mapping_remove(pmap_t pmap, addr64_t va);	/* Remove a single mapping for this VADDR */
extern mapping_t 	*mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full);	/* Finds a mapping */
extern void 		mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked);	/* Sets start and end of a block of mappings */
extern void 		mapping_prealloc(unsigned int);				/* Preallocate mappings for large use */
extern void 		mapping_relpre(void);						/* Releases preallocate request */
extern void 		mapping_init(void);							/* Do initial stuff */
extern mapping_t 	*mapping_alloc(int lists);					/* Obtain a mapping */
extern void 		mapping_free(struct mapping *mp);			/* Release a mapping */
extern boolean_t 	mapping_tst_ref(ppnum_t pa);				/* Tests the reference bit of a physical page */
extern boolean_t 	mapping_tst_mod(ppnum_t pa);				/* Tests the change bit of a physical page */
extern void 		mapping_set_ref(ppnum_t pa);				/* Sets the reference bit of a physical page */
extern void 		mapping_clr_ref(ppnum_t pa);				/* Clears the reference bit of a physical page */
extern void 		mapping_set_mod(ppnum_t pa);				/* Sets the change bit of a physical page */
extern void 		mapping_clr_mod(ppnum_t pa);				/* Clears the change bit of a physical page */
extern unsigned int	mapping_tst_refmod(ppnum_t pa);				/* Tests the reference and change bits of a physical page */
extern void			mapping_clr_refmod(ppnum_t pa, unsigned int mask);	/* Clears the reference and change bits of a physical page */
extern void 		mapping_protect_phys(ppnum_t pa, vm_prot_t prot);	/* Change protection of all mappings to page */
extern void			mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva);	/* Change protection of a single mapping to page */
extern addr64_t		mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot);	/* Make a mapping */
/* Flags for mapping_make */
#define mmFlgBlock		0x80000000		/* This is a block map, use size for number of pages covered */
#define mmFlgUseAttr	0x40000000		/* Use specified attributes */
#define mmFlgPerm		0x20000000		/* Mapping is permanent */
#define mmFlgPcfg		0x07000000		/* Physical page configuration index */
#define mmFlgCInhib		0x00000002		/* Caching inhibited - use if mmFlgUseAttr set or block */
#define mmFlgGuarded	0x00000001		/* Access guarded - use if mmFlgUseAttr set or block */
extern void 		mapping_purge(ppnum_t pa);		/* Remove all mappings for this physent */
extern addr64_t		mapping_p2v(pmap_t pmap, ppnum_t pa);	/* Finds first virtual mapping of a physical page in a space */
extern void			mapping_drop_busy(struct mapping *mapping);	/* Drops busy count on mapping */
extern phys_entry_t *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex);	/* Finds the physical entry for the page */
extern int			mapalc1(struct mappingblok *mb);	/* Finds and allocates a 1-bit mapping entry */
extern int			mapalc2(struct mappingblok *mb);	/* Finds and allocates a 2-bit mapping entry */
extern void			ignore_zero_fault(boolean_t type);	/* Sets up to ignore or honor any fault on page 0 access for the current thread */
extern void			mapping_hibernate_flush(void);

extern void mapping_fake_zone_info(		/* Return mapping usage stats as a fake zone info */
	int *count,
	vm_size_t *cur_size,
	vm_size_t *max_size,
	vm_size_t *elem_size,
	vm_size_t *alloc_size,
	int *collectable,
	int *exhaustable);
422
/* Hardware-level (hash-table) mapping primitives */
extern mapping_t *hw_rem_map(pmap_t pmap, addr64_t va, addr64_t *next);	/* Remove a mapping from the system */
extern mapping_t *hw_purge_map(pmap_t pmap, addr64_t va, addr64_t *next);	/* Remove a regular mapping from the system */
extern mapping_t *hw_purge_space(struct phys_entry *pp, pmap_t pmap);	/* Remove the first mapping for a specific pmap from physentry */
extern mapping_t *hw_purge_phys(struct phys_entry *pp);					/* Remove the first mapping for a physentry */
extern mapping_t *hw_scrub_guest(struct phys_entry *pp, pmap_t pmap);	/* Scrub first guest mapping belonging to this host */
extern mapping_t *hw_find_map(pmap_t pmap, addr64_t va, addr64_t *nextva);	/* Finds a mapping */
extern mapping_t *hw_find_space(struct phys_entry *pp, unsigned int space);	/* Given a phys_entry, find its first mapping in the specified space */
extern addr64_t   hw_add_map(pmap_t pmap, struct mapping *mp);			/* Add a mapping to a pmap */
extern unsigned int hw_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva);	/* Change the protection of a virtual page */
extern unsigned int hw_test_rc(pmap_t pmap, addr64_t va, boolean_t reset);	/* Test and optionally reset the RC bit of specific mapping */

extern unsigned int hw_clear_maps(void);

extern unsigned int hw_walk_phys(struct phys_entry *pp, unsigned int preop, unsigned int op,	/* Perform function on all mappings on a physical page */
	unsigned int postop, unsigned int parm, unsigned int opmod);
/* Opcodes for hw_walk_phys */
#define hwpNoop			0				/* No operation */
#define hwpSPrtPhy		1				/* Sets protection in physent (obsolete) */
#define hwpSPrtMap		2				/* Sets protection in mapping */
#define hwpSAtrPhy		3				/* Sets attributes in physent */
#define hwpSAtrMap		4				/* Sets attributes in mapping */
#define hwpCRefPhy		5				/* Clears reference in physent */
#define hwpCRefMap		6				/* Clears reference in mapping */
#define hwpCCngPhy		7				/* Clears change in physent */
#define hwpCCngMap		8				/* Clears change in mapping */
#define hwpSRefPhy		9				/* Sets reference in physent */
#define hwpSRefMap		10				/* Sets reference in mapping */
#define hwpSCngPhy		11				/* Sets change in physent */
#define hwpSCngMap		12				/* Sets change in mapping */
#define hwpTRefPhy		13				/* Tests reference in physent */
#define hwpTRefMap		14				/* Tests reference in mapping */
#define hwpTCngPhy		15				/* Tests change in physent */
#define hwpTCngMap		16				/* Tests change in mapping */
#define hwpTRefCngPhy	17				/* Tests reference and change in physent */
#define hwpTRefCngMap	18				/* Tests reference and change in mapping */
#define hwpCRefCngPhy	19				/* Clears reference and change in physent */
#define hwpCRefCngMap	20				/* Clears reference and change in mapping */
/* Operation modifiers for connected PTE visits for hw_walk_phys */
#define hwpPurgePTE		0				/* Invalidate/purge PTE and merge RC bits for each connected mapping */
#define hwpMergePTE		1				/* Merge RC bits for each connected mapping */
#define hwpNoopPTE		2				/* Take no additional action for each connected mapping */
464
/* Address-space switching, segment management, and debug helpers */
extern void hw_set_user_space(pmap_t pmap);			/* Indicate we need a space switch */
extern void hw_set_user_space_dis(pmap_t pmap);		/* Indicate we need a space switch (already disabled) */
extern void hw_setup_trans(void);					/* Setup hardware for translation */
extern void hw_start_trans(void);					/* Start translation for the first time */
extern void hw_map_seg(pmap_t pmap, addr64_t seg, addr64_t va);	/* Validate a segment */
extern void hw_blow_seg(addr64_t seg);				/* Invalidate a segment */
extern void invalidateSegs(pmap_t pmap);			/* Invalidate the segment cache */
extern struct phys_entry *pmap_find_physentry(ppnum_t pa);
extern void mapLog(unsigned int laddr, unsigned int type, addr64_t va);
extern unsigned int mapSkipListVerifyC(pmap_t pmap, unsigned long long *dumpa);
extern void fillPage(ppnum_t pa, unsigned int fill);
extern kern_return_t hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which);

/* Guest (virtual-machine shadow) mapping management */
extern void hw_rem_all_gv(pmap_t pmap);				/* Remove all of a guest's mappings */
extern void hw_rem_local_gv(pmap_t gpmap);			/* Remove guest local mappings */
extern unsigned int hw_res_map_gv(pmap_t hpmap, pmap_t gpmap, addr64_t hva, addr64_t gva, vm_prot_t prot);
													/* Resume a guest mapping */
extern void hw_add_map_gv(pmap_t hpmap, pmap_t gpmap, addr64_t gva, unsigned int mflags, ppnum_t pa);
													/* Add a guest mapping */
extern void hw_susp_map_gv(pmap_t hpmap, pmap_t gpmap, addr64_t gva);
													/* Suspend a guest mapping */
extern unsigned int hw_test_rc_gv(pmap_t hpmap, pmap_t gpmap, addr64_t gva, unsigned int reset);
													/* Test/reset mapping ref and chg */
extern unsigned int hw_protect_gv(pmap_t gpmap, addr64_t va, vm_prot_t prot);
													/* Change the protection of a guest page */
extern addr64_t hw_gva_to_hva(pmap_t gpmap, addr64_t gva);	/* Convert guest to host virtual address */
extern unsigned int hw_find_map_gv(pmap_t gpmap, addr64_t gva, void *mpbuf);
													/* Find and copy guest mapping into buffer */

extern unsigned int	mappingdeb0;					/* (TEST/DEBUG) */
extern unsigned int	incrVSID;						/* VSID increment value */

extern int mapSetLists(pmap_t);
extern void consider_mapping_adjust(void);
499
500#endif /* _PPC_MAPPINGS_H_ */
501
502#endif /* XNU_KERNEL_PRIVATE */