]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/ppc_vm_init.c
xnu-344.32.tar.gz
[apple/xnu.git] / osfmk / ppc / ppc_vm_init.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * @APPLE_FREE_COPYRIGHT@
27 */
28
29 #include <mach_debug.h>
30 #include <mach_kdb.h>
31 #include <mach_kdp.h>
32 #include <debug.h>
33 #include <cpus.h>
34
35 #include <mach/vm_types.h>
36 #include <mach/vm_param.h>
37 #include <mach/thread_status.h>
38 #include <kern/misc_protos.h>
39 #include <kern/assert.h>
40 #include <kern/cpu_number.h>
41
42 #include <ppc/proc_reg.h>
43 #include <ppc/Firmware.h>
44 #include <ppc/boot.h>
45 #include <ppc/misc_protos.h>
46 #include <ppc/pmap.h>
47 #include <ppc/pmap_internals.h>
48 #include <ppc/mem.h>
49 #include <ppc/mappings.h>
50 #include <ppc/exception.h>
51 #include <ppc/mp.h>
52
53 #ifdef __MACHO__
54 #include <mach-o/mach_header.h>
55 #endif
56
extern unsigned int intstack[];			/* declared in start.s */
extern unsigned int intstack_top_ss;		/* declared in start.s */

vm_offset_t mem_size;	/* Size of actual physical memory present
			   minus any performance buffer and possibly limited
			   by mem_limit in bytes */
vm_offset_t mem_actual;	/* The "One True" physical memory size
			   actually, it's the highest physical address + 1 */


/* Table of physical memory banks, filled in by ppc_vm_init() from the
 * boot_args DRAM bank list after contiguous banks have been coalesced. */
mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX];
int	 pmap_mem_regions_count = 0;	/* No non-contiguous memory regions */

/* Free-memory ranges published for later allocators; populated elsewhere
 * (presumably by pmap_bootstrap()) -- only printed for DEBUG here. */
mem_region_t free_regions[FREE_REGION_MAX];
int	     free_regions_count;

#ifndef __MACHO__
extern unsigned long etext;	/* end-of-text symbol supplied by the linker */
#endif

unsigned int avail_remaining = 0;	/* page count outside the boot region,
					   tallied by ppc_vm_init() */
vm_offset_t first_avail;		/* first virtual address available
					   after the pmap structures */
vm_offset_t static_memory_end;		/* end of statically-mapped boot memory;
					   set before ppc_vm_init() runs --
					   TODO confirm where (likely start.s
					   or machine startup) */
extern vm_offset_t avail_next;

#ifdef __MACHO__
/* Mach-O kernel image layout: base address and size of each segment,
 * filled in from the kernel's own mach header at the top of ppc_vm_init(). */
extern struct mach_header _mh_execute_header;
vm_offset_t sectTEXTB;
int sectSizeTEXT;
vm_offset_t sectDATAB;
int sectSizeDATA;
vm_offset_t sectLINKB;
int sectSizeLINK;
vm_offset_t sectKLDB;
int sectSizeKLD;

/* Derived image boundaries (computed in ppc_vm_init()). */
vm_offset_t end, etext, edata;
#endif

/* Bounds of the low-memory exception vector code (defined in assembly). */
extern unsigned long exception_entry;
extern unsigned long exception_end;
99
/*
 * ppc_vm_init: early boot-time VM initialization for PowerPC.
 *
 * Responsibilities, in order:
 *   1. Locate the kernel's Mach-O segments and compute etext/edata/end.
 *   2. Coalesce adjacent physical DRAM banks reported in boot_args and
 *      compute mem_actual (highest physical address + 1).
 *   3. Build pmap_mem_regions[] from the bank list, honoring an optional
 *      mem_limit cap, and tally mem_size / avail_remaining.
 *   4. Bootstrap the pmap layer and map the kernel's segments.
 *   5. Load the real BAT registers from the shadow copies.
 *
 * mem_limit: artificial cap on usable memory in bytes (0 = no cap;
 *            set via the -m boot argument).
 * args:      firmware/booter-supplied boot_args, including the DRAM
 *            bank list.  NOTE: the bank list is modified in place
 *            (banks are joined and zeroed) by this function.
 */
void ppc_vm_init(unsigned int mem_limit, boot_args *args)
{
	unsigned int htabmask;
	unsigned int i, j, batsize, kmapsize;
	vm_offset_t addr;
	int boot_task_end_offset;
	const char *cpus;
	mapping *mp;
	vm_offset_t first_phys_avail;
	vm_offset_t sizeadj, oldstart;

	/* Now retrieve addresses for end, edata, and etext
	 * from MACH-O headers.
	 */
	sectTEXTB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__TEXT", &sectSizeTEXT);
	sectDATAB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__DATA", &sectSizeDATA);
	sectLINKB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
	sectKLDB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__KLD", &sectSizeKLD);

	etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
	edata = (vm_offset_t) sectDATAB + sectSizeDATA;
	end = round_page(getlastaddr());	/* Force end to next page */
#if DEBUG
	kprintf("sectTEXT: %x, size: %x\n", sectTEXTB, sectSizeTEXT);
	kprintf("sectDATA: %x, size: %x\n", sectDATAB, sectSizeDATA);
	kprintf("sectLINK: %x, size: %x\n", sectLINKB, sectSizeLINK);
	kprintf("sectKLD:  %x, size: %x\n", sectKLDB, sectSizeKLD);
	kprintf("end:      %x\n", end);
#endif

	/* Stitch valid memory regions together - they may be contiguous
	 * even though they're not already glued together
	 */
	mem_actual = args->PhysicalDRAM[0].base + args->PhysicalDRAM[0].size;	/* Initialize to the first region size */
	addr = 0;	/* temp use as pointer to previous memory region... */
	/* NOTE: bank 0 is assumed non-empty and is the initial "previous"
	 * region; `addr` holds the INDEX of the last non-empty bank seen. */
	for (i = 1; i < kMaxDRAMBanks; i++) {

		if (args->PhysicalDRAM[i].size == 0) continue;	/* If region is empty, skip it */

		if((args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size) > mem_actual) {		/* New high? */
			mem_actual = args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size;	/* Take the high bid */
		}

		if (args->PhysicalDRAM[i].base ==		/* Does the end of the last hit the start of the next? */
		  args->PhysicalDRAM[addr].base +
		  args->PhysicalDRAM[addr].size) {
			kprintf("region 0x%08x size 0x%08x joining region 0x%08x size 0x%08x\n",
			  args->PhysicalDRAM[addr].base, args->PhysicalDRAM[addr].size,
			  args->PhysicalDRAM[i].base, args->PhysicalDRAM[i].size);

			/* Merge bank i into the previous bank and mark it
			 * empty so the copy loop below skips it. */
			args->PhysicalDRAM[addr].size += args->PhysicalDRAM[i].size;	/* Join them */
			args->PhysicalDRAM[i].size = 0;
			continue;
		}
		/* This is now last non-zero region to compare against */
		addr = i;
	}

	/* Go through the list of memory regions passed in via the args
	 * and copy valid entries into the pmap_mem_regions table, adding
	 * further calculated entries.
	 */

	pmap_mem_regions_count = 0;
	mem_size = 0;	/* Will use to total memory found so far */

	for (i = 0; i < kMaxDRAMBanks; i++) {
		if (args->PhysicalDRAM[i].size == 0)
			continue;

		/* The following should only happen if memory size has
		   been artificially reduced with -m */
		if (mem_limit > 0 &&
		    mem_size + args->PhysicalDRAM[i].size > mem_limit)
			args->PhysicalDRAM[i].size = mem_limit - mem_size;
		/* NOTE(review): a bank starting at or beyond mem_limit would
		 * get a huge unsigned size here; presumably the booter never
		 * reports such a bank -- confirm against booter behavior. */

		/* We've found a region, tally memory */

		pmap_mem_regions[pmap_mem_regions_count].start =
			args->PhysicalDRAM[i].base;
		pmap_mem_regions[pmap_mem_regions_count].end =
			args->PhysicalDRAM[i].base +
			args->PhysicalDRAM[i].size;

		/* Regions must be provided in ascending order */
		assert ((pmap_mem_regions_count == 0) ||
			pmap_mem_regions[pmap_mem_regions_count].start >
			pmap_mem_regions[pmap_mem_regions_count-1].start);

		if (pmap_mem_regions_count > 0) {
			/* we add on any pages not in the first memory
			 * region to the avail_remaining count. The first
			 * memory region is used for mapping everything for
			 * bootup and is taken care of specially.
			 */
			avail_remaining +=
				args->PhysicalDRAM[i].size / PPC_PGBYTES;
		}

		/* Keep track of how much memory we've found */

		mem_size += args->PhysicalDRAM[i].size;

		/* increment number of regions found */
		pmap_mem_regions_count++;
	}

	kprintf("mem_size: %d M\n",mem_size / (1024 * 1024));

	/*
	 * Initialize the pmap system, using space above `first_avail'
	 * for the necessary data structures.
	 * NOTE : assume that we'll have enough space mapped in already
	 */

	first_phys_avail = static_memory_end;
	first_avail = adjust_bat_limit(first_phys_avail, 0, FALSE, FALSE);

	/* Total number of bytes we will later map page-by-page / by segment,
	 * so pmap_bootstrap can reserve enough mapping structures up front. */
	kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) +	/* Get size we will map later */
		(round_page(sectTEXTB+sectSizeTEXT) - trunc_page(sectTEXTB)) +
		(round_page(sectDATAB+sectSizeDATA) - trunc_page(sectDATAB)) +
		(round_page(sectLINKB+sectSizeLINK) - trunc_page(sectLINKB)) +
		(round_page(sectKLDB+sectSizeKLD) - trunc_page(sectKLDB)) +
		(round_page(static_memory_end) - trunc_page(end));

	pmap_bootstrap(mem_size,&first_avail,&first_phys_avail, kmapsize);

#ifdef __MACHO__
#if DEBUG
	kprintf("Mapping memory:\n");
	kprintf("   exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry),
		trunc_page(exception_entry), round_page(exception_end));
	kprintf("          sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB),
		trunc_page(sectTEXTB), round_page(sectTEXTB+sectSizeTEXT));
	kprintf("          sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB),
		trunc_page(sectDATAB), round_page(sectDATAB+sectSizeDATA));
	kprintf("          sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB),
		trunc_page(sectLINKB), round_page(sectLINKB+sectSizeLINK));
	kprintf("           sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB),
		trunc_page(sectKLDB), round_page(sectKLDB+sectSizeKLD));
	kprintf("                end: %08X, %08X - %08X\n", trunc_page(end),
		trunc_page(end), static_memory_end);
#endif /* DEBUG */
	/* Identity-map (VA == PA) the fixed kernel pieces: vectors and TEXT
	 * are execute-only w.r.t. writes; DATA is read/write. */
	pmap_map(trunc_page(exception_entry), trunc_page(exception_entry),
		 round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE);
	pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB),
		 round_page(sectTEXTB+sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE);
	pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB),
		 round_page(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE);


	/* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
	 * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have
	 * to map both segments page-by-page.
	 */
	for (addr = trunc_page(sectKLDB);
	     addr < round_page(sectKLDB+sectSizeKLD);
	     addr += PAGE_SIZE) {

		pmap_enter(kernel_pmap, addr, addr,
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_WIMG_USE_DEFAULT, TRUE);
	}

	for (addr = trunc_page(sectLINKB);
	     addr < round_page(sectLINKB+sectSizeLINK);
	     addr += PAGE_SIZE) {

		pmap_enter(kernel_pmap, addr, addr,
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_WIMG_USE_DEFAULT, TRUE);
	}

	/*
	 * We need to map the remainder page-by-page because some of this will
	 * be released later, but not all.  Ergo, no block mapping here
	 */
	for(addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) {
		pmap_enter(kernel_pmap, addr, addr,
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_WIMG_USE_DEFAULT, TRUE);
	}
#endif /* __MACHO__ */

#if DEBUG
	for (i=0 ; i < free_regions_count; i++) {
		kprintf("Free region start 0x%08x end 0x%08x\n",
			free_regions[i].start,free_regions[i].end);
	}
#endif

	/*
	 * Note: the shadow BAT registers were already loaded in ppc_init.c
	 */


	LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);	/* Load up real IBATs from shadows */
	LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);	/* Load up real DBATs from shadows */

#if DEBUG
	for(i=0; i<4; i++) kprintf("DBAT%1d: %08X %08X\n",
		i, shadow_BAT.DBATs[i].upper, shadow_BAT.DBATs[i].lower);
	for(i=0; i<4; i++) kprintf("IBAT%1d: %08X %08X\n",
		i, shadow_BAT.IBATs[i].upper, shadow_BAT.IBATs[i].lower);
#endif
}
310
/*
 * ppc_vm_cpu_init: per-CPU VM setup, run on each processor as it comes up.
 *
 * Points the CPU at the (already-allocated) shared page-table hash, loads
 * its real instruction/data BAT registers from the global shadow copies,
 * then issues sync/isync so the translation changes take effect before
 * any further instructions execute.
 *
 * proc_info: this CPU's per_proc_info; currently unused here -- presumably
 *            kept for interface symmetry with other per-CPU init routines.
 */
void ppc_vm_cpu_init(
	struct per_proc_info *proc_info)
{
	hash_table_init(hash_table_base, hash_table_size);

	LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);
	LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);

	/* Memory + instruction barriers: required after touching BATs. */
	sync();isync();
}