vm_offset_t va,
vm_offset_t spa,
vm_offset_t epa,
- vm_prot_t prot)
+ vm_prot_t prot,
+ unsigned int flags)
{
+ unsigned int mflags;
addr64_t colladr;
+
+ mflags = 0; /* Make sure this is initialized to nothing special */
+ if(!(flags & VM_WIMG_USE_DEFAULT)) { /* Are they supplying the attributes? */
+ mflags = mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
+ }
+
assert(epa > spa);
- colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12), (mmFlgBlock | mmFlgPerm), (epa - spa) >> 12, prot & VM_PROT_ALL);
+ colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12),
+ (mmFlgBlock | mmFlgPerm), (epa - spa) >> 12, (prot & VM_PROT_ALL) );
if(colladr) { /* Was something already mapped in the range? */
panic("pmap_map: attempt to map previously mapped range - va = %08X, pa = %08X, epa = %08X, collision = %016llX\n",
pmap_map_physical()
{
unsigned region;
+ uint64_t msize, size;
+ addr64_t paddr, vaddr, colladdr;
/* Iterate over physical memory regions, block mapping each into the kernel's address map */
for (region = 0; region < (unsigned)pmap_mem_regions_count; region++) {
- addr64_t paddr = ((addr64_t)pmap_mem_regions[region].mrStart << 12);
- addr64_t size = (((addr64_t)pmap_mem_regions[region].mrEnd + 1) << 12) - paddr;
+ paddr = ((addr64_t)pmap_mem_regions[region].mrStart << 12); /* Get starting physical address */
+ size = (((addr64_t)pmap_mem_regions[region].mrEnd + 1) << 12) - paddr;
+
+ vaddr = paddr + lowGlo.lgPMWvaddr; /* Get starting virtual address */
+
while (size > 0) {
- /* Block mappings are limited to 256M, so we map in blocks of up to 256M */
- addr64_t vaddr = paddr + lowGlo.lgPMWvaddr;
- unsigned msize = ((size > 0x10000000)? 0x10000000 : size);
- addr64_t colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
- (mmFlgBlock | mmFlgPerm), (msize >> 12),
- (VM_PROT_READ | VM_PROT_WRITE));
+
+ msize = ((size > 0x0000020000000000ULL) ? 0x0000020000000000ULL : size); /* Get size, but no more than 2TBs */
+
+ colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
+ (mmFlgBlock | mmFlgPerm), (msize >> 12),
+ (VM_PROT_READ | VM_PROT_WRITE));
if (colladdr) {
- panic ("pmap_map_physical: collision with previously mapped range - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
+ panic ("pmap_map_physical: mapping failure - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
vaddr, (paddr >> 12), (msize >> 12), colladdr);
}
- paddr += msize;
+
+ vaddr = vaddr + (uint64_t)msize; /* Point to the next virtual addr */
+ paddr = paddr + (uint64_t)msize; /* Point to the next physical addr */
size -= msize;
}
}
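/*
 * A minimal sketch, in plain user-space C, of the chunking arithmetic that
 * pmap_map_physical() above and pmap_map_iohole() below both use: a region
 * is walked in blocks of at most 0x0000020000000000 bytes (2^41, i.e. 2TB),
 * and each block's physical base and length are converted to 4KB page units
 * with a shift of 12 before being handed to mapping_make().  map_one_block()
 * is a placeholder for that call.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define MAX_BLOCK_BYTES 0x0000020000000000ULL   /* 2TB block-mapping limit */

static void map_one_block(uint64_t vaddr, uint64_t ppnum, uint64_t pages) {
	printf("map va=%016" PRIx64 " ppnum=%08" PRIx64 " pages=%08" PRIx64 "\n",
	    vaddr, ppnum, pages);
}

static void map_region(uint64_t vaddr, uint64_t paddr, uint64_t size) {
	while (size > 0) {
		uint64_t msize = (size > MAX_BLOCK_BYTES) ? MAX_BLOCK_BYTES : size; /* Clamp to 2TB */
		map_one_block(vaddr, paddr >> 12, msize >> 12);   /* Page-number and page-count units */
		vaddr += msize;                                   /* Advance the virtual address */
		paddr += msize;                                   /* Advance the physical address */
		size  -= msize;                                   /* Account for what was mapped */
	}
}

int main(void) {
	map_region(0, 0, 5ULL << 40);   /* A hypothetical 5TB region: 2TB + 2TB + 1TB blocks */
	return 0;
}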
void
pmap_map_iohole(addr64_t paddr, addr64_t size)
{
+
+ addr64_t vaddr, colladdr, msize;
+ uint32_t psize;
+
+ vaddr = paddr + lowGlo.lgPMWvaddr; /* Get starting virtual address */
+
while (size > 0) {
- addr64_t vaddr = paddr + lowGlo.lgPMWvaddr;
- unsigned msize = ((size > 0x10000000)? 0x10000000 : size);
- addr64_t colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
- (mmFlgBlock | mmFlgPerm | mmFlgGuarded | mmFlgCInhib), (msize >> 12),
- (VM_PROT_READ | VM_PROT_WRITE));
+
+ msize = ((size > 0x0000020000000000ULL) ? 0x0000020000000000ULL : size); /* Get size, but no more than 2TBs */
+
+ colladdr = mapping_make(kernel_pmap, vaddr, (paddr >> 12),
+ (mmFlgBlock | mmFlgPerm | mmFlgGuarded | mmFlgCInhib), (msize >> 12),
+ (VM_PROT_READ | VM_PROT_WRITE));
if (colladdr) {
- panic ("pmap_map_iohole: collision with previously mapped range - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
- vaddr, (paddr >> 12), (msize >> 12), colladdr);
+ panic ("pmap_map_iohole: mapping failed - va = %016llX, pa = %08X, size = %08X, collision = %016llX\n",
+ vaddr, (paddr >> 12), (msize >> 12), colladdr);
}
- paddr += msize;
+
+ vaddr = vaddr + (uint64_t)msize; /* Point to the next virtual addr */
+ paddr = paddr + (uint64_t)msize; /* Point to the next physical addr */
size -= msize;
- }
+ }
}
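/*
 * Note on the attribute choice in pmap_map_iohole(): I/O holes are mapped
 * with mmFlgGuarded and mmFlgCInhib, i.e. the PowerPC "guarded" and
 * "cache-inhibited" WIMG attributes, so accesses to device space are neither
 * cached nor issued speculatively; ordinary physical memory in
 * pmap_map_physical() is mapped without either bit.
 */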
/*
kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap; /* Set up anchor reverse */
kernel_pmap->ref_count = 1;
kernel_pmap->pmapFlags = pmapKeyDef; /* Set the default keys */
+ kernel_pmap->pmapFlags |= pmapNXdisabled;
kernel_pmap->pmapCCtl = pmapCCtlVal; /* Initialize cache control */
kernel_pmap->space = PPC_SID_KERNEL;
kernel_pmap->pmapvr = 0; /* Virtual = Real */
/* Map V=R the page tables */
pmap_map(first_used_addr, first_used_addr,
- round_page(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE);
+ round_page(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE, VM_WIMG_USE_DEFAULT);
*first_avail = round_page(first_used_addr + size); /* Set next available page */
first_free_virt = *first_avail; /* Ditto */
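/*
 * Callers of pmap_map() now pass a WIMG flags argument; VM_WIMG_USE_DEFAULT,
 * as used above for the V=R page-table mapping, asks for the default
 * (cacheable, unguarded) attributes instead of supplying explicit bits.
 */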
* only, and is bounded by that size.
*/
pmap_t
-pmap_create(vm_map_size_t size)
+pmap_create(vm_map_size_t size, __unused boolean_t is_64bit)
{
pmap_t pmap, ckpmap, fore;
int s;
mapping_t *mp;
- switch (prot) {
+ switch (prot & VM_PROT_ALL) {
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
remove = FALSE;
* physical page.
*/
- mapping_protect_phys(pa, prot & VM_PROT_ALL); /* Change protection of all mappings to page. */
+ mapping_protect_phys(pa, (prot & VM_PROT_ALL) ); /* Change protection of all mappings to page. */
}
endva = eva & -4096LL; /* Round end down to a page */
while(1) { /* Go until we finish the range */
- mapping_protect(pmap, va, prot & VM_PROT_ALL, &va); /* Change the protection and see what's next */
+ mapping_protect(pmap, va, (prot & VM_PROT_ALL), &va); /* Change the protection and see what's next */
if((va == 0) || (va >= endva)) break; /* End loop if we finish range or run off the end */
}
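/*
 * Note on the rounding above: -4096LL is the 64-bit value 0xFFFFFFFFFFFFF000,
 * so "eva & -4096LL" simply clears the low 12 bits, truncating the end
 * address down to a 4KB page boundary.
 */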
while(1) { /* Keep trying the enter until it goes in */
- colva = mapping_make(pmap, va, pa, mflags, 1, prot & VM_PROT_ALL); /* Enter the mapping into the pmap */
+ colva = mapping_make(pmap, va, pa, mflags, 1, (prot & VM_PROT_ALL) ); /* Enter the mapping into the pmap */
if(!colva) break; /* If there were no collisions, we are done... */
* not be changed. The block must be unmapped and then remapped with the new stuff.
* We also do not keep track of reference or change flags.
*
+ * Any block that is larger than 256MB must be a multiple of 32MB. We panic if it is not.
+ *
* Note that pmap_map_block_rc is the same but doesn't panic if collision.
*
*/
-void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
+void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
unsigned int mflags;
addr64_t colva;
// kprintf("pmap_map_block: (%08X) va = %016llX, pa = %08X, size = %08X, prot = %08X, attr = %08X, flags = %08X\n", /* (BRINGUP) */
// current_thread(), va, pa, size, prot, attr, flags); /* (BRINGUP) */
-
mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
if(flags) mflags |= mmFlgPerm; /* Mark permanent if requested */
- colva = mapping_make(pmap, va, pa, mflags, (size >> 12), prot); /* Enter the mapping into the pmap */
+ colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
if(colva) { /* If there was a collision, panic */
- panic("pmap_map_block: collision at %016llX, pmap = %08X\n", colva, pmap);
+ panic("pmap_map_block: mapping error %d, pmap = %08X, va = %016llX\n", (uint32_t)(colva & mapRetCode), pmap, va);
}
return; /* Return */
}
-int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
+int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
unsigned int mflags;
addr64_t colva;
mflags = mmFlgBlock | mmFlgUseAttr | (attr & VM_MEM_GUARDED) | ((attr & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
if(flags) mflags |= mmFlgPerm; /* Mark permanent if requested */
-
- colva = mapping_make(pmap, va, pa, mflags, (size >> 12), prot); /* Enter the mapping into the pmap */
+
+ colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
if(colva) return 0; /* If there was a collision, fail */

return 1; /* Return success if the mapping went in */
}
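/*
 * A minimal sketch, assuming 4KB pages, of the unit change above: the size
 * argument of pmap_map_block()/pmap_map_block_rc() is now a count of 4KB
 * pages handed straight to mapping_make() instead of a byte count shifted
 * down by 12.  The helper below is illustrative only; it restates the
 * "larger than 256MB must be a multiple of 32MB" rule from the block comment
 * in page units (256MB = 0x10000 pages, 32MB = 0x2000 pages).
 */
#include <stdint.h>
#include <stdio.h>

#define PAGES_256MB 0x10000u   /* 256MB / 4KB */
#define PAGES_32MB  0x2000u    /* 32MB / 4KB */

static uint32_t bytes_to_block_pages(uint64_t bytes) {
	uint32_t pages = (uint32_t)(bytes >> 12);               /* Byte count -> 4KB pages */
	if (pages > PAGES_256MB && (pages % PAGES_32MB) != 0)   /* Large blocks must be 32MB multiples */
		return 0;                                           /* Signal an invalid block size */
	return pages;
}

int main(void) {
	printf("%#x\n", bytes_to_block_pages(512ULL << 20));    /* 512MB -> 0x20000 pages */
	printf("%#x\n", bytes_to_block_pages(260ULL << 20));    /* 260MB -> 0 (not a 32MB multiple) */
	return 0;
}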
+
+
+unsigned int pmap_cache_attributes(ppnum_t pgn) {
+
+ unsigned int flags;
+ struct phys_entry * pp;
+
+ // Find physical address
+ if ((pp = pmap_find_physentry(pgn))) {
+ // Use physical attributes as default
+ // NOTE: DEVICE_PAGER_FLAGS are made to line up
+ flags = VM_MEM_COHERENT; /* We only support coherent memory */
+ if (pp->ppLink & ppG) flags |= VM_MEM_GUARDED; /* Add in guarded if it is */
+ if (pp->ppLink & ppI) flags |= VM_MEM_NOT_CACHEABLE; /* Add in cache inhibited if so */
+ } else
+ // If no physical, just hard code attributes
+ flags = VM_WIMG_IO;
+
+ return (flags);
+}
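/*
 * Summary of the new accessor above: for a managed page (one that has a
 * phys_entry) it reports VM_MEM_COHERENT plus whatever guarded and
 * cache-inhibited bits are recorded in the physical entry; for a page number
 * with no phys_entry, typically device space, it falls back to VM_WIMG_IO.
 */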
+
+
+
/*
* pmap_attribute_cache_sync(vm_offset_t pa)
*
* subord = the pmap that goes into the grand
* vstart = start of range in pmap to be inserted
* nstart = start of range in pmap nested pmap
- * size = Size of nest area (up to 16TB)
+ * size = Size of nest area (up to 2TB)
*
* Inserts a pmap into another. This is used to implement shared segments.
* On the current PPC processors, this is limited to segment (256MB) aligned
* We actually kinda allow recursive nests. The gating factor is that we do not allow
* nesting on top of something that is already mapped, i.e., the range must be empty.
*
- *
- *
* Note that we depend upon higher level VM locks to insure that things don't change while
* we are doing this. For example, VM should not be doing any pmap enters while it is nesting
* or do 2 nests at once.
int nlists;
mapping_t *mp;
-
if(size & 0x0FFFFFFFULL) return KERN_INVALID_VALUE; /* We can only do this for multiples of 256MB */
- if((size >> 28) > 65536) return KERN_INVALID_VALUE; /* Max size we can nest is 16TB */
+ if((size >> 25) > 65536) return KERN_INVALID_VALUE; /* Max size we can nest is 2TB */
if(vstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
if(nstart & 0x0FFFFFFFULL) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
panic("pmap_nest: size is invalid - %016llX\n", size);
}
- msize = (size >> 28) - 1; /* Change size to blocks of 256MB */
+ msize = (size >> 25) - 1; /* Change size to blocks of 32MB */
nlists = mapSetLists(grand); /* Set number of lists this will be on */
mp = mapping_alloc(nlists); /* Get a spare mapping block */
- mp->mpFlags = 0x01000000 | mpNest | mpPerm | nlists;
+ mp->mpFlags = 0x01000000 | mpNest | mpPerm | mpBSu | nlists; /* Make this a permanent nested pmap with a 32MB basic size unit */
/* Set the flags. Make sure busy count is 1 */
mp->mpSpace = subord->space; /* Set the address space/pmap lookup ID */
mp->u.mpBSize = msize; /* Set the size */
mp = mapping_alloc(nlists); /* Get a spare mapping block */
- mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | nlists;
+ mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | mpBSu | nlists; /* Make this a permanent nested pmap with a 32MB basic size unit */
/* Set the flags. Make sure busy count is 1 */
mp->mpSpace = kernel_pmap->space; /* Set the address space/pmap lookup ID */
- mp->u.mpBSize = 1; /* Set the size to 2 segments */
+ mp->u.mpBSize = 15; /* Set the size to 2 segments in 32MB chunks - 1 */
mp->mpPte = 0; /* Means nothing */
mp->mpPAddr = 0; /* Means nothing */
mp->mpVAddr = lowGlo.lgUMWvaddr; /* Set the address range we cover */
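/*
 * A worked sketch of the new 32MB mapping-size units used above: size >> 25
 * counts 32MB blocks, so the 65536-block limit corresponds to 2TB, and
 * mpBSize stores the block count minus one - e.g. the two-segment (2 x
 * 256MB) linkage area encodes as 15.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void) {
	uint64_t two_tb  = 65536ULL << 25;                         /* 65536 blocks of 32MB */
	uint64_t linkage = 2ULL * (256ULL << 20);                  /* Two 256MB segments */
	printf("max nest = %" PRIu64 " bytes\n", two_tb);          /* 2199023255552 = 2TB */
	printf("mpBSize  = %" PRIu64 "\n", (linkage >> 25) - 1);   /* 15 */
	return 0;
}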
addr64_t cva, cpoff;
ppnum_t cpphys;
- sharedPmap = pmap_create(0); /* Get a pmap to hold the common segment */
+ sharedPmap = pmap_create(0, FALSE); /* Get a pmap to hold the common segment */
if(!sharedPmap) { /* Check for errors */
panic("pmap_init_sharedpage: couldn't make sharedPmap\n");
}
}
cva = mapping_make(sharedPmap, (addr64_t)((uint32_t)_COMM_PAGE_BASE_ADDRESS) + cpoff,
- cpphys, mmFlgPerm, 1, VM_PROT_READ); /* Map the page read only */
+ cpphys, mmFlgPerm, 1, VM_PROT_READ | VM_PROT_EXECUTE); /* Map the page read/execute only */
if(cva) { /* Check for errors */
panic("pmap_init_sharedpage: couldn't map commpage page - cva = %016llX\n", cva);
}
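/*
 * The commpage mapping above is now made with VM_PROT_READ | VM_PROT_EXECUTE
 * rather than read-only: the commpage holds user-executed code, so with
 * no-execute support in place (see pmapNXdisabled and pmap_disable_NX below)
 * execute permission has to be granted explicitly.
 */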
return TRUE;
}
+
/*
-;;; Local Variables: ***
-;;; tab-width:4 ***
-;;; End: ***
-*/
+ * disable no-execute capability on
+ * the specified pmap
+ */
+void pmap_disable_NX(pmap_t pmap) {
+
+ pmap->pmapFlags |= pmapNXdisabled;
+}
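/*
 * pmap_disable_NX() sets the same pmapNXdisabled flag that the kernel pmap
 * is now given during bootstrap (above), providing a way to turn off
 * no-execute enforcement for an individual pmap.
 */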