/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
31 #include <mach/vm_param.h>
32 #include <vm/vm_kern.h>
33 #include <vm/vm_map.h>
34 #include <vm/vm_page.h>
36 #include <ppc/io_map_entries.h>
37 #include <ppc/Firmware.h>
38 #include <ppc/mappings.h>
39 #include <ppc/proc_reg.h>
41 extern vm_offset_t virtual_avail
;
44 * Allocate and map memory for devices that may need to be mapped
45 * outside the usual physical memory. If phys_addr is NULL then
46 * steal the appropriate number of physical pages from the vm
47 * system and map them.
52 io_map(phys_addr
, size
)
53 vm_offset_t phys_addr
;
63 assert (kernel_map
!= VM_MAP_NULL
); /* VM must be initialised */
66 if (phys_addr
!= 0) { /* If they supplied a physical address, use it */
68 size
= round_page_32(size
+ (phys_addr
& PAGE_MASK
)); /* Make sure we map all of it */
70 (void) kmem_alloc_pageable(kernel_map
, &start
, size
); /* Get some virtual addresses to use */
72 (void)mapping_make(kernel_pmap
, (addr64_t
)start
, (ppnum_t
)(phys_addr
>> 12),
73 (mmFlgBlock
| mmFlgUseAttr
| mmFlgCInhib
| mmFlgGuarded
), /* Map as I/O page */
74 size
>> 12, VM_PROT_READ
|VM_PROT_WRITE
);
76 return (start
+ (phys_addr
& PAGE_MASK
)); /* Pass back the physical address */
80 (void) kmem_alloc_pageable(kernel_map
, &start
, size
); /* Get some virtual addresses */
82 mapping_prealloc(size
); /* Make sure there are enough free mappings */
84 for (i
= 0; i
< size
; i
+= PAGE_SIZE
) {
86 while ((m
= vm_page_grab()) == VM_PAGE_NULL
) { /* Get a physical page */
87 VM_PAGE_WAIT(); /* Wait if we didn't have one */
91 (void)mapping_make(kernel_pmap
,
92 (addr64_t
)(start
+ i
), m
->phys_page
,
93 (mmFlgBlock
| mmFlgUseAttr
| mmFlgCInhib
| mmFlgGuarded
), /* Map as I/O page */
94 1, VM_PROT_READ
|VM_PROT_WRITE
);
98 mapping_relpre(); /* Allow mapping release */
105 * Allocate and map memory for devices before the VM system comes alive.
108 vm_offset_t
io_map_spec(vm_offset_t phys_addr
, vm_size_t size
)
116 if(kernel_map
!= VM_MAP_NULL
) { /* If VM system is up, redirect to normal routine */
118 return io_map(phys_addr
, size
); /* Map the address */
122 size
= round_page_32(size
+ (phys_addr
- (phys_addr
& -PAGE_SIZE
))); /* Extend the length to include it all */
123 start
= pmap_boot_map(size
); /* Get me some virtual address */
125 (void)mapping_make(kernel_pmap
, (addr64_t
)start
, (ppnum_t
)(phys_addr
>> 12),
126 (mmFlgBlock
| mmFlgUseAttr
| mmFlgCInhib
| mmFlgGuarded
), /* Map as I/O page */
127 size
>> 12, VM_PROT_READ
|VM_PROT_WRITE
);
129 return (start
+ (phys_addr
& PAGE_MASK
));