X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/cf03f5cdc65293b4cb5eba3ed23fed26dad903c9..de355530ae67247cbd0da700edb3a2a1dae884c2:/osfmk/ppc/pmap.h

diff --git a/osfmk/ppc/pmap.h b/osfmk/ppc/pmap.h
index 1958a3fbd..44bac7918 100644
--- a/osfmk/ppc/pmap.h
+++ b/osfmk/ppc/pmap.h
@@ -3,22 +3,19 @@
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
  * 
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- * 
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -59,99 +56,46 @@
 #include
 #include
 
-#define maxPPage32 0x000FFFFF		/* Maximum page number in 32-bit machines */
-
-typedef uint32_t shexlock;
-
-#pragma pack(4)				/* Make sure the structure stays as we defined it */
-
-struct sgc {
-	uint64_t	sgcESID;	/* ESID portion of segment cache */
-#define sgcESmsk	0xFFFFFFFFF0000000ULL	/* ESID portion of segment register cache */
-	uint64_t	sgcVSID;	/* VSID portion of segment cache */
-#define sgcVSmsk	0xFFFFFFFFFFFFF000ULL	/* VSID mask */
-#define sgcVSKeys	0x0000000000000C00ULL	/* Protection keys */
-#define sgcVSKeyUsr	53		/* User protection key */
-#define sgcVSNoEx	0x0000000000000200ULL	/* No execute */
-};
-#pragma pack()
-
-typedef struct sgc sgc;
-
-#pragma pack(4)				/* Make sure the structure stays as we defined it */
 struct pmap {
-	queue_head_t	pmap_link;	/* MUST BE FIRST */
-	addr64_t	pmapvr;		/* Virtual to real conversion mask */
-	shexlock	pmapSXlk;	/* Shared/Exclusive lock for mapping changes */
-	unsigned int	space;		/* space for this pmap */
-#define invalSpace 0x00000001		/* Predefined always invalid space */
-	int		ref_count;	/* reference count */
-	unsigned int	pmapFlags;	/* Flags */
-#define pmapKeys	0x00000007	/* Keys and no execute bit to use with this pmap */
-#define pmapKeyDef	0x00000006	/* Default keys - Sup = 1, user = 1, no ex = 0 */
-#define pmapVMhost	0x00000010	/* pmap with Virtual Machines attached to it */
-	unsigned int	spaceNum;	/* Space number */
-	unsigned int	pmapCCtl;	/* Cache control */
-#define pmapCCtlVal	0xFFFF0000	/* Valid entries */
-#define pmapCCtlLck	0x00008000	/* Lock bit */
-#define pmapCCtlLckb	16		/* Lock bit */
-#define pmapCCtlGen	0x00007FFF	/* Generation number */
-
-#define pmapSegCacheCnt	16		/* Maximum number of cache entries */
-#define pmapSegCacheUse	16		/* Number of cache entries to use */
-
-	struct pmap	*freepmap;	/* Free pmaps */
-
-	unsigned int	pmapRsv1[3];
-/*					0x038 */
-	uint64_t	pmapSCSubTag;	/* Segment cache sub-tags. This is a 16 entry 4 bit array */
-/*					0x040 */
-	sgc		pmapSegCache[pmapSegCacheCnt];	/* SLD values cached for quick load */
-
-/*					0x140 */
-/* if fanout is 4, then shift is 1, if fanout is 8 shift is 2, etc */
-#define kSkipListFanoutShift	1
-/* with n lists, we can handle (fanout**n) pages optimally */
-#define kSkipListMaxLists	12
-	unsigned char	pmapCurLists;	/* 0x140 - max #lists any mapping in this pmap currently has */
-	unsigned char	pmapRsv2[3];
-	uint32_t	pmapRandNum;	/* 0x144 - used by mapSetLists() as a random number generator */
-	addr64_t	pmapSkipLists[kSkipListMaxLists];	/* 0x148 - the list headers */
-/* following statistics conditionally gathered */
-	uint64_t	pmapSearchVisits;	/* 0x1A8 - nodes visited searching pmaps */
-	uint32_t	pmapSearchCnt;	/* 0x1B0 - number of calls to mapSearch or mapSearchFull */
-
-	unsigned int	pmapRsv3[3];
-
-/*					0x1C0 */
-
-	struct pmap_statistics	stats;	/* statistics */
-	decl_simple_lock_data(,lock)	/* lock on map */
+	queue_head_t	pmap_link;	/* MUST BE FIRST */
+	unsigned int	pmapvr;		/* Virtual to real conversion mask */
+	space_t		space;		/* space for this pmap */
+#define BMAPLOCK 0x00000001
+	struct blokmap	*bmaps;		/* Physical pointer to odd-size page maps */
+	int		ref_count;	/* reference count */
+	unsigned int	vflags;		/* Alternate map validity flags */
+#define pmapBatVal  0xFF000000
+#define pmapBatDVal 0xF0000000
+#define pmapBatIVal 0x0F000000
+#define pmapFlags   0x00FF0000
+#define pmapSubord  0x00800000
+#define pmapVMhost  0x00400000
+#define pmapAltSeg  0x0000FFFF
+	unsigned int	spaceNum;	/* Space number */
+/* PPC line boundary here - 020 */
+	unsigned int	pmapSegs[16];	/* Contents of segment register if different than base space */
+/* PPC line boundary here - 060 */
+	struct pmap	*pmapPmaps[16];	/* Pointer to next lower level of pmaps */
+/* PPC line boundary here - 0A0 */
+/* Note: this must start on a word boundary */
+	unsigned short	pmapUsage[128];	/* Count of pages mapped into 32mb (8192 page) slots */
+#define pmapUsageShft 25
+#define pmapUsageMask 0x0000007F
+#define pmapUsageSize (32*1024*1024)
+
+/* PPC line boundary here - 1A0 */
+	struct pmap_statistics	stats;	/* statistics */
+	decl_simple_lock_data(,lock)	/* lock on map */
 
 /* Need to pad out to a power of 2 - right now it is 512 bytes */
 #define pmapSize 512
 };
-#pragma pack()
-
-#pragma pack(4)
-struct pmapTransTab {
-	addr64_t	pmapPAddr;	/* Physcial address of pmap */
-	unsigned int	pmapVAddr;	/* Virtual address of pmap */
-};
-#pragma pack()				/* Make sure the structure stays as we defined it */
-
-typedef struct pmapTransTab pmapTransTab;
 
 #define PMAP_NULL  ((pmap_t) 0)
 
 extern pmap_t	kernel_pmap;		/* The kernel's map */
 extern pmap_t	cursor_pmap;		/* The pmap to start allocations with */
-extern pmap_t	sharedPmap;
-extern unsigned int sharedPage;
-extern int ppc_max_adrsp;		/* Maximum number of concurrent address spaces allowed. */
-extern addr64_t vm_max_address;		/* Maximum effective address supported */
-extern addr64_t vm_max_physical;	/* Maximum physical address supported */
-extern pmapTransTab *pmapTrans;		/* Space to pmap translate table */
+
 
 #define PMAP_SWITCH_USER(th, map, my_cpu) th->map = map;
 
 #define PMAP_ACTIVATE(pmap, th, cpu)
@@ -159,13 +103,10 @@ extern pmapTransTab *pmapTrans;		/* Space to pmap translate table */
 #define PMAP_CONTEXT(pmap,th)
 
 #define pmap_kernel_va(VA)	\
-	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= vm_last_addr))
+	(((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))
 
 #define	PPC_SID_KERNEL	0	/* Must change KERNEL_SEG_REG0_VALUE if !0 */
-
-#define maxAdrSp 16384
-#define maxAdrSpb 14
-#define copyIOaddr 0x00000000E0000000ULL
+#define SID_MAX	((1<<20) - 1)	/* Space ID=20 bits, segment_id=SID + 4 bits */
 
 #define pmap_kernel()			(kernel_pmap)
 #define pmap_resident_count(pmap)	((pmap)->stats.resident_count)
@@ -173,6 +114,9 @@ extern pmapTransTab *pmapTrans;		/* Space to pmap translate table */
 #define pmap_copy(dpmap,spmap,da,len,sa)
 #define pmap_update()
 
+#define pmap_phys_address(x)	((x) << PPC_PGSHIFT)
+#define pmap_phys_to_frame(x)	((x) >> PPC_PGSHIFT)
+
 #define	PMAP_DEFAULT_CACHE	0
 #define	PMAP_INHIBIT_CACHE	1
 #define	PMAP_GUARDED_CACHE	2
@@ -180,17 +124,14 @@ extern pmapTransTab *pmapTrans;		/* Space to pmap translate table */
 #define	PMAP_NO_GUARD_CACHE	8
 
 /* corresponds to cached, coherent, not writethru, not guarded */
-#define VM_WIMG_DEFAULT		(VM_MEM_COHERENT)
-#define	VM_WIMG_COPYBACK	(VM_MEM_COHERENT)
-#define VM_WIMG_IO		(VM_MEM_COHERENT | \
-				VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED)
-#define VM_WIMG_WTHRU		(VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED)
-/* write combining mode, aka store gather */
-#define VM_WIMG_WCOMB		(VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT)
+#define VM_WIMG_DEFAULT	VM_MEM_COHERENT
+#define VM_WIMG_IO	VM_MEM_COHERENT | \
+			VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED
 
 /*
  *	prototypes.
  */
+extern void ppc_protection_init(void);
 extern vm_offset_t phystokv(vm_offset_t pa);	/* Get kernel virtual address from physical */
 extern vm_offset_t kvtophys(vm_offset_t va);	/* Get physical address from kernel virtual */
 extern vm_offset_t pmap_map(vm_offset_t va,
@@ -201,9 +142,18 @@ extern kern_return_t pmap_add_physical_memory(vm_offset_t spa,
 					vm_offset_t epa,
 					boolean_t available,
 					unsigned int attr);
-extern void pmap_bootstrap(uint64_t msize,
+extern vm_offset_t pmap_map_bd(vm_offset_t va,
+				vm_offset_t spa,
+				vm_offset_t epa,
+				vm_prot_t prot);
+extern void pmap_bootstrap(unsigned int mem_size,
 				vm_offset_t *first_avail,
-				unsigned int kmapsize);
+				vm_offset_t *first_phys_avail, unsigned int kmapsize);
+extern void pmap_block_map(vm_offset_t pa,
+				vm_size_t size,
+				vm_prot_t prot,
+				int entry,
+				int dtlb);
 extern void pmap_switch(pmap_t);
 
 extern vm_offset_t pmap_extract(pmap_t pmap,
@@ -211,29 +161,23 @@ extern vm_offset_t pmap_extract(pmap_t pmap,
 
 extern void pmap_remove_all(vm_offset_t pa);
 
-extern boolean_t pmap_verify_free(ppnum_t pa);
+extern boolean_t pmap_verify_free(vm_offset_t pa);
 extern void sync_cache(vm_offset_t pa, unsigned length);
-extern void sync_cache64(addr64_t pa, unsigned length);
-extern void sync_ppage(ppnum_t pa);
-extern void sync_cache_virtual(vm_offset_t va, unsigned length);
 extern void flush_dcache(vm_offset_t va, unsigned length, boolean_t phys);
-extern void flush_dcache64(addr64_t va, unsigned length, boolean_t phys);
 extern void invalidate_dcache(vm_offset_t va, unsigned length, boolean_t phys);
-extern void invalidate_dcache64(addr64_t va, unsigned length, boolean_t phys);
 extern void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys);
-extern void invalidate_icache64(addr64_t va, unsigned length, boolean_t phys);
-extern void pmap_sync_caches_phys(ppnum_t pa);
-extern void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags);
-extern int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags);
-
-extern kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size);
-extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
-extern addr64_t MapUserAddressSpace(vm_map_t map, addr64_t va, unsigned int size);
-extern void ReleaseUserAddressSpace(addr64_t kva);
-extern kern_return_t pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size,
-				vm_machine_attribute_t attribute,
-				vm_machine_attribute_val_t* value);
-extern int pmap_canExecute(ppnum_t pa);
+extern void pmap_sync_caches_phys(vm_offset_t pa);
+extern void invalidate_cache_for_io(vm_offset_t va, unsigned length, boolean_t phys);
+extern void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size,
+	vm_prot_t prot, int attr, unsigned int flags);	/* Map a block */
+extern kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va,
+	vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr);	/* Map a block allocating an optimal virtual address */
+extern kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa,
+	vm_size_t size, vm_prot_t prot);
+
+extern kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size);
+
+extern void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
 
 #endif /* _PPC_PMAP_H_ */
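
Not part of the diff: a minimal user-space sketch of the arithmetic behind two macro groups on the "+" (older, APSL 1.1) side above -- the 32 MB pmapUsage slots and the pmap_phys_address()/pmap_phys_to_frame() page-frame conversions. It assumes PPC_PGSHIFT is 12 (4 KB PowerPC pages); the names main, va, slot, frame and the local macros phys_address/phys_to_frame are invented for this illustration, while the pmapUsage* constants are copied from the diff.

/* Illustrative sketch only; constants copied from the diff, PPC_PGSHIFT assumed. */
#include <stdint.h>
#include <stdio.h>

#define pmapUsageShft   25              /* from the diff: log2(32 MB) */
#define pmapUsageMask   0x0000007F      /* from the diff: 128 usage slots */
#define pmapUsageSize   (32*1024*1024)  /* from the diff: bytes per slot */
#define PPC_PGSHIFT     12              /* assumed: 4 KB pages, so 32 MB = 8192 pages,
                                           matching the "(8192 page) slots" comment */

/* Same shape as the pmap_phys_address()/pmap_phys_to_frame() macros above */
#define phys_address(frame) ((uint32_t)(frame) << PPC_PGSHIFT)
#define phys_to_frame(pa)   ((uint32_t)(pa) >> PPC_PGSHIFT)

int main(void)
{
    uint32_t va = 0x30123000;           /* arbitrary 32-bit virtual address */

    /* Each pmapUsage[] entry counts pages mapped into one 32 MB window, so the
     * slot index is the VA divided by 32 MB, masked down to the range 0..127. */
    unsigned slot = (unsigned)((va >> pmapUsageShft) & pmapUsageMask);
    printf("va 0x%08x falls in usage slot %u (window base 0x%08x)\n",
           (unsigned)va, slot, (unsigned)(slot * (uint32_t)pmapUsageSize));

    /* Round-trip a physical address through the frame-number conversions */
    uint32_t frame = phys_to_frame(0x01234000);
    printf("pa 0x01234000 -> frame 0x%05x -> pa 0x%08x\n",
           (unsigned)frame, (unsigned)phys_address(frame));
    return 0;
}

For the example address the slot index is (0x30123000 >> 25) & 0x7F = 24, i.e. the window starting at 0x30000000, which is the counter the older pmap would bump when a page in that range is entered.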