/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
34 #include <mach/vm_param.h>
35 #include <vm/vm_kern.h>
36 #include <vm/vm_map.h>
37 #include <vm/vm_page.h>
39 #include <ppc/io_map_entries.h>
40 #include <ppc/Firmware.h>
41 #include <ppc/mappings.h>
42 #include <ppc/proc_reg.h>
44 extern vm_offset_t virtual_avail
;
/*
 * Allocate and map memory for devices that may need to be mapped
 * outside the usual physical memory. If phys_addr is 0, then
 * steal the appropriate number of physical pages from the VM
 * system and map them.
 */
55 io_map(vm_offset_t phys_addr
, vm_size_t size
, unsigned int flags
)
62 mflags
= mmFlgBlock
| mmFlgUseAttr
| (flags
& VM_MEM_GUARDED
) | ((flags
& VM_MEM_NOT_CACHEABLE
) >> 1); /* Convert to our mapping_make flags */
65 assert (kernel_map
!= VM_MAP_NULL
); /* VM must be initialised */
68 if (phys_addr
!= 0) { /* If they supplied a physical address, use it */
70 size
= round_page(size
+ (phys_addr
& PAGE_MASK
)); /* Make sure we map all of it */
72 (void) kmem_alloc_pageable(kernel_map
, &start
, size
); /* Get some virtual addresses to use */
74 (void)mapping_make(kernel_pmap
, (addr64_t
)start
, (ppnum_t
)(phys_addr
>> 12),
75 mflags
, /* Map with requested cache mode */
76 (size
>> 12), VM_PROT_READ
|VM_PROT_WRITE
);
78 return (start
+ (phys_addr
& PAGE_MASK
)); /* Pass back the physical address */
82 (void) kmem_alloc_pageable(kernel_map
, &start
, size
); /* Get some virtual addresses */
84 mapping_prealloc(size
); /* Make sure there are enough free mappings */
86 for (i
= 0; i
< size
; i
+= PAGE_SIZE
) {
88 while ((m
= vm_page_grab()) == VM_PAGE_NULL
) { /* Get a physical page */
89 VM_PAGE_WAIT(); /* Wait if we didn't have one */
93 (void)mapping_make(kernel_pmap
,
94 (addr64_t
)(start
+ i
), m
->phys_page
,
95 mflags
, /* Map with requested cache mode */
96 1, VM_PROT_READ
|VM_PROT_WRITE
);
100 mapping_relpre(); /* Allow mapping release */
/*
 * Allocate and map memory for devices before the VM system comes alive.
 */
110 vm_offset_t
io_map_spec(vm_offset_t phys_addr
, vm_size_t size
, unsigned int flags
)
115 if(kernel_map
!= VM_MAP_NULL
) { /* If VM system is up, redirect to normal routine */
117 return io_map(phys_addr
, size
, flags
); /* Map the address */
121 mflags
= mmFlgBlock
| mmFlgUseAttr
| (flags
& VM_MEM_GUARDED
) | ((flags
& VM_MEM_NOT_CACHEABLE
) >> 1); /* Convert to our mapping_make flags */
123 size
= round_page(size
+ (phys_addr
- (phys_addr
& -PAGE_SIZE
))); /* Extend the length to include it all */
124 start
= pmap_boot_map(size
); /* Get me some virtual address */
126 (void)mapping_make(kernel_pmap
, (addr64_t
)start
, (ppnum_t
)(phys_addr
>> 12),
127 mflags
, /* Map with requested cache mode */
128 (size
>> 12), VM_PROT_READ
|VM_PROT_WRITE
);
130 return (start
+ (phys_addr
& PAGE_MASK
));