/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/io_map_entries.h>
#include <ppc/Firmware.h>
#include <ppc/mappings.h>
#include <ppc/proc_reg.h>
extern vm_offset_t	virtual_avail;
/*
 * Allocate and map memory for devices that may need to be mapped
 * outside the usual physical memory.  If phys_addr is NULL then
 * steal the appropriate number of physical pages from the vm
 * system and map them.
 */
vm_offset_t
io_map(phys_addr, size)
	vm_offset_t	phys_addr;
	vm_size_t	size;
{
	vm_offset_t	start;
	unsigned int	i;
	vm_page_t	m;
	assert (kernel_map != VM_MAP_NULL);			/* VM must be initialised */
	if (phys_addr != 0) {					/* If they supplied a physical address, use it */

		size = round_page(size + (phys_addr & PAGE_MASK));	/* Make sure we map all of it */
		(void) kmem_alloc_pageable(kernel_map, &start, size);	/* Get some virtual addresses to use */
		(void)mapping_make(kernel_pmap, (addr64_t)start, (ppnum_t)(phys_addr >> 12),
			(mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded),	/* Map as I/O page */
			(size >> 12), VM_PROT_READ|VM_PROT_WRITE);
		return (start + (phys_addr & PAGE_MASK));	/* Pass back the virtual address, including the page offset */

	} else {
		(void) kmem_alloc_pageable(kernel_map, &start, size);	/* Get some virtual addresses */
		mapping_prealloc(size);				/* Make sure there are enough free mappings */
		for (i = 0; i < size; i += PAGE_SIZE) {
			while ((m = vm_page_grab()) == VM_PAGE_NULL) {	/* Get a physical page */
				VM_PAGE_WAIT();			/* Wait if we didn't have one */
			}
			vm_page_gobble(m);			/* Take the grabbed page out of free-page accounting */
			(void)mapping_make(kernel_pmap,
				(addr64_t)(start + i), m->phys_page,
				(mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded),	/* Map as I/O page */
				1, VM_PROT_READ|VM_PROT_WRITE);
		}
		mapping_relpre();				/* Allow mapping release */

		return start;					/* Pass back the virtual address of the stolen pages */
	}
}
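/*
 * Illustrative sketch (not part of the original file): one way a driver
 * might use io_map() to get a cache-inhibited, guarded kernel virtual
 * mapping of a device register block.  The physical address 0xF3000000,
 * the size, and the function name are hypothetical placeholders.
 */
#if 0	/* example only, not compiled */
static volatile unsigned int *
example_map_device_regs(void)
{
	vm_offset_t	va;

	/* io_map() rounds the size up to whole pages and carries the
	   sub-page offset of phys_addr into the returned virtual address. */
	va = io_map((vm_offset_t)0xF3000000, PAGE_SIZE);

	return (volatile unsigned int *)va;
}
#endif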
/*
 * Allocate and map memory for devices before the VM system comes alive.
 */
vm_offset_t io_map_spec(vm_offset_t phys_addr, vm_size_t size)
{
	vm_offset_t	start;
	if(kernel_map != VM_MAP_NULL) {				/* If VM system is up, redirect to normal routine */

		return io_map(phys_addr, size);			/* Map the address */
	}
	size = round_page(size + (phys_addr - (phys_addr & -PAGE_SIZE)));	/* Extend the length to include it all */
	start = pmap_boot_map(size);				/* Get me some virtual address */
	(void)mapping_make(kernel_pmap, (addr64_t)start, (ppnum_t)(phys_addr >> 12),
		(mmFlgBlock | mmFlgUseAttr | mmFlgCInhib | mmFlgGuarded),	/* Map as I/O page */
		(size >> 12), VM_PROT_READ|VM_PROT_WRITE);
	return (start + (phys_addr & PAGE_MASK));
}
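/*
 * Illustrative sketch (not part of the original file): an early-boot caller
 * using io_map_spec() to map a device's registers before kernel_map exists.
 * The physical address 0x80013000 and the function name are hypothetical
 * placeholders; once the VM system is up, io_map_spec() simply forwards to
 * io_map().
 */
#if 0	/* example only, not compiled */
static vm_offset_t
example_early_device_map(void)
{
	/* Before the VM system is initialised this goes through
	   pmap_boot_map(); afterwards it behaves exactly like io_map(). */
	return io_map_spec((vm_offset_t)0x80013000, 0x1000);
}
#endif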