/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#include <debug.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/io_map_entries.h>
#include <ppc/Firmware.h>
#include <ppc/mappings.h>
#include <ppc/proc_reg.h>

extern vm_offset_t virtual_avail;

/*
 * Allocate and map memory for devices that may need to be mapped
 * outside the usual physical memory.  If phys_addr is 0, steal the
 * appropriate number of physical pages from the VM system and map
 * them.
 *
 * Note, this will onl
 */
vm_offset_t
io_map(vm_offset_t phys_addr, vm_size_t size, unsigned int flags)
{
        vm_offset_t     start;
        int             i;
        unsigned int    j, mflags;
        vm_page_t       m;

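        /*
         * Build the attribute word handed to mapping_make(): the block and
         * use-attribute flags are always set, the guarded bit passes through
         * unchanged, and the cache-inhibit bit is shifted down one position
         * into the slot the mapping layer expects.  (This reading follows the
         * expression below; the actual bit values come from the VM_MEM_* and
         * mmFlg* definitions.)
         */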
        mflags = mmFlgBlock | mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1);	/* Convert to our mapping_make flags */

#if DEBUG
        assert (kernel_map != VM_MAP_NULL);		/* VM must be initialised */
#endif

        if (phys_addr != 0) {				/* If they supplied a physical address, use it */

                size = round_page(size + (phys_addr & PAGE_MASK));	/* Make sure we map all of it */

                (void) kmem_alloc_pageable(kernel_map, &start, size);	/* Get some virtual addresses to use */

                (void)mapping_make(kernel_pmap, (addr64_t)start, (ppnum_t)(phys_addr >> 12),
                        mflags,				/* Map with requested cache mode */
                        (size >> 12), VM_PROT_READ|VM_PROT_WRITE);

                return (start + (phys_addr & PAGE_MASK));	/* Pass back the virtual address of the requested physical address */

        } else {

                (void) kmem_alloc_pageable(kernel_map, &start, size);	/* Get some virtual addresses */

                mapping_prealloc(size);			/* Make sure there are enough free mappings */

                for (i = 0; i < size; i += PAGE_SIZE) {
                        m = VM_PAGE_NULL;
                        while ((m = vm_page_grab()) == VM_PAGE_NULL) {	/* Get a physical page */
                                VM_PAGE_WAIT();		/* Wait if we didn't have one */
                        }
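                        /*
                         * Mark the page as consumed ("gobbled") by the kernel
                         * so the VM system no longer treats it as an ordinary
                         * reclaimable page; see vm_page_gobble() for details.
                         */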
                        vm_page_gobble(m);

                        (void)mapping_make(kernel_pmap,
                                (addr64_t)(start + i), m->phys_page,
                                mflags,			/* Map with requested cache mode */
                                1, VM_PROT_READ|VM_PROT_WRITE);

                }

                mapping_relpre();			/* Allow mapping release */
                return start;
        }
}
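
/*
 * Illustrative usage sketch: a driver that knows the physical address of a
 * device register block might map it uncached and guarded like this.  The
 * physical address and size below are hypothetical.
 *
 *	vm_offset_t regs;
 *
 *	regs = io_map((vm_offset_t)0xF3000000, PAGE_SIZE,
 *	              VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED);
 *
 * Passing a phys_addr of 0 instead makes io_map() grab fresh physical pages
 * from the VM system and map those.
 */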


/*
 * Allocate and map memory for devices before the VM system comes alive.
 */

vm_offset_t io_map_spec(vm_offset_t phys_addr, vm_size_t size, unsigned int flags)
{
        vm_offset_t     start;
        unsigned int    mflags;

        if (kernel_map != VM_MAP_NULL) {		/* If VM system is up, redirect to normal routine */

                return io_map(phys_addr, size, flags);	/* Map the address */

        }

        mflags = mmFlgBlock | mmFlgUseAttr | (flags & VM_MEM_GUARDED) | ((flags & VM_MEM_NOT_CACHEABLE) >> 1);	/* Convert to our mapping_make flags */

        size = round_page(size + (phys_addr - (phys_addr & -PAGE_SIZE)));	/* Extend the length to include it all */
        start = pmap_boot_map(size);			/* Get me some virtual address */

        (void)mapping_make(kernel_pmap, (addr64_t)start, (ppnum_t)(phys_addr >> 12),
                mflags,					/* Map with requested cache mode */
                (size >> 12), VM_PROT_READ|VM_PROT_WRITE);

        return (start + (phys_addr & PAGE_MASK));
}
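
/*
 * Illustrative usage sketch: early boot code can call io_map_spec() before
 * kernel_map exists, for example to reach a console device; once the VM
 * system is up the call simply falls through to io_map().  The physical
 * address here is hypothetical.
 *
 *	vm_offset_t uart;
 *
 *	uart = io_map_spec((vm_offset_t)0xF3013000, PAGE_SIZE,
 *	                   VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED);
 */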