X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8ad349bb6ed4a0be06e34c92be0d98b92e078db4..5d5c5d0d5b79ade9a973d55186ffda2638ba2b6e:/osfmk/ppc/pmap.c

diff --git a/osfmk/ppc/pmap.c b/osfmk/ppc/pmap.c
index 66ce7d039..4b8671905 100644
--- a/osfmk/ppc/pmap.c
+++ b/osfmk/ppc/pmap.c
@@ -239,8 +239,14 @@ pmap_map(
 	vm_offset_t va,
 	vm_offset_t spa,
 	vm_offset_t epa,
-	vm_prot_t prot)
+	vm_prot_t prot,
+	unsigned int flags)
 {
+	unsigned int mflags;
+	mflags = 0;										/* Make sure this is initialized to nothing special */
+	if(!(flags & VM_WIMG_USE_DEFAULT)) {			/* Are they supplying the attributes? */
+		mflags = mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1);	/* Convert to our mapping_make flags */
+	}
 	addr64_t colladr;
 
@@ -248,7 +254,8 @@ pmap_map(
 
 	assert(epa > spa);
 
-	colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12), (mmFlgBlock | mmFlgPerm), (epa - spa) >> 12, prot & VM_PROT_ALL);
+	colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12),
+			(mmFlgBlock | mmFlgPerm), (epa - spa) >> 12, (prot & VM_PROT_ALL) );
 
 	if(colladr) {									/* Was something already mapped in the range? */
 		panic("pmap_map: attempt to map previously mapped range - va = %08X, pa = %08X, epa = %08X, collision = %016llX\n",
@@ -365,6 +372,7 @@ pmap_bootstrap(uint64_t msize, vm_offset_t *first_avail, unsigned int kmapsize)
 	kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap;	/* Set up anchor reverse */
 	kernel_pmap->ref_count = 1;
 	kernel_pmap->pmapFlags = pmapKeyDef;			/* Set the default keys */
+	kernel_pmap->pmapFlags |= pmapNXdisabled;
 	kernel_pmap->pmapCCtl = pmapCCtlVal;			/* Initialize cache control */
 	kernel_pmap->space = PPC_SID_KERNEL;
 	kernel_pmap->pmapvr = 0;						/* Virtual = Real */
@@ -533,7 +541,7 @@ pmap_bootstrap(uint64_t msize, vm_offset_t *first_avail, unsigned int kmapsize)
 
 	/* Map V=R the page tables */
 	pmap_map(first_used_addr, first_used_addr,
-		 round_page(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE);
+		 round_page(first_used_addr + size), VM_PROT_READ | VM_PROT_WRITE, VM_WIMG_USE_DEFAULT);
 
 	*first_avail = round_page(first_used_addr + size);	/* Set next available page */
 	first_free_virt = *first_avail;					/* Ditto */
@@ -656,7 +664,7 @@ void pmap_virtual_space(
  *	only, and is bounded by that size.
  */
 pmap_t
-pmap_create(vm_map_size_t size)
+pmap_create(vm_map_size_t size, __unused boolean_t is_64bit)
 {
 	pmap_t pmap, ckpmap, fore;
 	int s;
@@ -944,7 +952,7 @@ pmap_page_protect(
 	mapping_t *mp;
 
 
-	switch (prot) {
+	switch (prot & VM_PROT_ALL) {
 		case VM_PROT_READ:
 		case VM_PROT_READ|VM_PROT_EXECUTE:
 			remove = FALSE;
@@ -991,7 +999,7 @@
 	 *	physical page.
 	 */
 
-	mapping_protect_phys(pa, prot & VM_PROT_ALL);	/* Change protection of all mappings to page. */
+	mapping_protect_phys(pa, (prot & VM_PROT_ALL) );	/* Change protection of all mappings to page. */
 
 }
@@ -1069,7 +1077,7 @@ void pmap_protect(
 	endva = eva & -4096LL;						/* Round end down to a page */
 
 	while(1) {									/* Go until we finish the range */
-		mapping_protect(pmap, va, prot & VM_PROT_ALL, &va);	/* Change the protection and see what's next */
+		mapping_protect(pmap, va, (prot & VM_PROT_ALL), &va);	/* Change the protection and see what's next */
 		if((va == 0) || (va >= endva)) break;	/* End loop if we finish range or run off the end */
 	}
@@ -1110,7 +1118,7 @@ pmap_enter(pmap_t pmap, vm_map_offset_t va, ppnum_t pa, vm_prot_t prot,
 
 	while(1) {									/* Keep trying the enter until it goes in */
 
-		colva = mapping_make(pmap, va, pa, mflags, 1, prot & VM_PROT_ALL);	/* Enter the mapping into the pmap */
+		colva = mapping_make(pmap, va, pa, mflags, 1, (prot & VM_PROT_ALL) );	/* Enter the mapping into the pmap */
 
 		if(!colva) break;						/* If there were no collisions, we are done... */
@@ -1295,6 +1303,29 @@ pmap_attribute(
 
 }
 
+
+
+unsigned int pmap_cache_attributes(ppnum_t pgn) {
+
+	unsigned int flags;
+	struct phys_entry * pp;
+
+	// Find physical address
+	if ((pp = pmap_find_physentry(pgn))) {
+		// Use physical attributes as default
+		// NOTE: DEVICE_PAGER_FLAGS are made to line up
+		flags = VM_MEM_COHERENT;				/* We only support coherent memory */
+		if (pp->ppLink & ppG) flags |= VM_MEM_GUARDED;	/* Add in guarded if it is */
+		if (pp->ppLink & ppI) flags |= VM_MEM_NOT_CACHEABLE;	/* Add in cache inhibited if so */
+	} else
+		// If no physical, just hard code attributes
+		flags = VM_WIMG_IO;
+
+	return (flags);
+}
+
+
+
 /*
  * pmap_attribute_cache_sync(vm_offset_t pa)
  *
@@ -1964,7 +1995,7 @@ void pmap_init_sharedpage(vm_offset_t cpg){
 	addr64_t cva, cpoff;
 	ppnum_t cpphys;
 
-	sharedPmap = pmap_create(0);				/* Get a pmap to hold the common segment */
+	sharedPmap = pmap_create(0, FALSE);			/* Get a pmap to hold the common segment */
 	if(!sharedPmap) {							/* Check for errors */
 		panic("pmap_init_sharedpage: couldn't make sharedPmap\n");
 	}
@@ -1977,7 +2008,7 @@ void pmap_init_sharedpage(vm_offset_t cpg){
 		}
 
 		cva = mapping_make(sharedPmap, (addr64_t)((uint32_t)_COMM_PAGE_BASE_ADDRESS) + cpoff,
-			cpphys, mmFlgPerm, 1, VM_PROT_READ);	/* Map the page read only */
+			cpphys, mmFlgPerm, 1, VM_PROT_READ | VM_PROT_EXECUTE);	/* Map the page read/execute only */
 		if(cva) {								/* Check for errors */
 			panic("pmap_init_sharedpage: couldn't map commpage page - cva = %016llX\n", cva);
 		}
@@ -2064,3 +2095,13 @@ coredumpok(
 {
 	return TRUE;
 }
+
+
+/*
+ * disable no-execute capability on
+ * the specified pmap
+ */
+void pmap_disable_NX(pmap_t pmap) {
+
+	pmap->pmapFlags |= pmapNXdisabled;
+}
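
Illustrative note (not part of the commit above): the core of this change is the WIMG-attribute plumbing. pmap_map() now takes the caller's cache-attribute flags and, unless VM_WIMG_USE_DEFAULT is passed, folds VM_MEM_GUARDED and VM_MEM_NOT_CACHEABLE into the mmFlg* bits that mapping_make() consumes. The standalone C sketch below mirrors that conversion so it can be compiled and tested in isolation; the constant values are assumptions copied from same-era mach/vm_statistics.h and osfmk/ppc/mappings.h and should be verified against an actual tree.

/*
 * Standalone sketch of the WIMG -> mmFlg conversion added to pmap_map().
 * All #define values here are ASSUMPTIONS (taken from same-era xnu
 * headers), not authoritative definitions.
 */
#include <stdio.h>

#define VM_MEM_GUARDED        0x1        /* assumed: mach/vm_statistics.h */
#define VM_MEM_COHERENT       0x2        /* assumed */
#define VM_MEM_NOT_CACHEABLE  0x4        /* assumed */
#define VM_WIMG_USE_DEFAULT   0x80       /* assumed */

#define mmFlgUseAttr          0x40000000 /* assumed: osfmk/ppc/mappings.h */
#define mmFlgGuarded          0x00000001 /* assumed */
#define mmFlgCInhib           0x00000002 /* assumed */

/* Mirrors the conversion block the diff adds at the top of pmap_map() */
static unsigned int wimg_to_mflags(unsigned int flags) {
	unsigned int mflags = 0;                 /* Nothing special unless the caller asks */
	if (!(flags & VM_WIMG_USE_DEFAULT)) {    /* Caller supplied explicit attributes? */
		/* VM_MEM_GUARDED (0x1) lines up bit-for-bit with mmFlgGuarded (0x1);
		 * VM_MEM_NOT_CACHEABLE (0x4) >> 1 lands on mmFlgCInhib (0x2). */
		mflags = mmFlgUseAttr | (flags & VM_MEM_GUARDED) |
		         ((flags & VM_MEM_NOT_CACHEABLE) >> 1);
	}
	return mflags;
}

int main(void) {
	/* A guarded, cache-inhibited (I/O-style) mapping sets both low bits */
	printf("0x%08x\n", wimg_to_mflags(VM_MEM_GUARDED | VM_MEM_NOT_CACHEABLE)); /* 0x40000003 */
	/* Default attributes request nothing: no mmFlgUseAttr, no attribute bits */
	printf("0x%08x\n", wimg_to_mflags(VM_WIMG_USE_DEFAULT));                   /* 0x00000000 */
	return 0;
}

The apparent design choice (assuming the header values above) is that the VM_MEM_* bits were laid out so the guarded bit maps across unchanged and the cache-inhibit bit needs only a single shift, letting the conversion stay branch-free apart from the one VM_WIMG_USE_DEFAULT test.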