/*
* Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
* Note, this will onl
*/
vm_offset_t
-io_map(phys_addr, size)
- vm_offset_t phys_addr;
- vm_size_t size;
+io_map(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags)
{
vm_offset_t start;
- int i;
- unsigned int j;
+ vm_size_t i;
+ unsigned int mflags;
vm_page_t m;
+ mflags = mmFlgBlock | mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
#if DEBUG
assert (kernel_map != VM_MAP_NULL); /* VM must be initialised */
(void) kmem_alloc_pageable(kernel_map, &start, size); /* Get some virtual addresses to use */
(void)mapping_make(kernel_pmap, (addr64_t)start, (ppnum_t)(phys_addr >> 12),
- (mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded), /* Map as I/O page */
+ mflags, /* Map with requested cache mode */
(size >> 12), VM_PROT_READ|VM_PROT_WRITE);
- return (start + (phys_addr & PAGE_MASK)); /* Pass back the physical address */
+ return (start + (phys_addr & PAGE_MASK)); /* Pass back the virtual address */
} else {
(void)mapping_make(kernel_pmap,
(addr64_t)(start + i), m->phys_page,
- (mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded), /* Map as I/O page */
+ mflags, /* Map with requested cache mode */
1, VM_PROT_READ|VM_PROT_WRITE);
}
* Allocate and map memory for devices before the VM system comes alive.
*/
-vm_offset_t io_map_spec(vm_offset_t phys_addr, vm_size_t size)
+vm_offset_t io_map_spec(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags)
{
vm_offset_t start;
+ unsigned int mflags;
if(kernel_map != VM_MAP_NULL) { /* If VM system is up, redirect to normal routine */
- return io_map(phys_addr, size); /* Map the address */
+ return io_map(phys_addr, size, flags); /* Map the address */
}
+
+ mflags = mmFlgBlock | mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1); /* Convert to our mapping_make flags */
size = round_page(size + (phys_addr - (phys_addr & -PAGE_SIZE))); /* Extend the length to include it all */
start = pmap_boot_map(size); /* Get me some virtual address */
(void)mapping_make(kernel_pmap, (addr64_t)start, (ppnum_t)(phys_addr >> 12),
- (mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded), /* Map as I/O page */
+ mflags, /* Map with requested cache mode */
(size >> 12), VM_PROT_READ|VM_PROT_WRITE);
return (start + (phys_addr & PAGE_MASK));