/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */

#include <mach_debug.h>
#include <mach_kdb.h>
#include <mach_kdp.h>
#include <debug.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>

#include <ppc/proc_reg.h>
#include <ppc/Firmware.h>
#include <ppc/boot.h>
#include <ppc/misc_protos.h>
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/mappings.h>
#include <ppc/exception.h>
#include <ppc/lowglobals.h>

#include <mach-o/mach_header.h>

extern const char version[];
extern const char version_variant[];

addr64_t     hash_table_base;       /* Hash table base */
unsigned int hash_table_size;       /* Hash table size */
int          hash_table_shift;      /* "ht_shift" boot arg, used to scale hash_table_size */
vm_offset_t  taproot_addr;          /* (BRINGUP) */
unsigned int taproot_size;          /* (BRINGUP) */
unsigned int serialmode;            /* Serial mode keyboard and console control */
extern int   disableConsoleOutput;

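/*
 * In-memory (shadow) copy of the block address translation (BAT) registers,
 * kept so software can track what has been programmed into the hardware BATs.
 */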
struct shadowBAT shadow_BAT;


/*
 * NOTE: mem_size is bogus on large memory machines.  We will pin it to 0x80000000 if there is more than 2 GB.
 * This is left only for compatibility and max_mem should be used.
 */
vm_offset_t mem_size;       /* Size of actual physical memory present,
                               minus any performance buffer and possibly
                               limited by mem_limit, in bytes */
uint64_t    mem_actual;     /* The "One True" physical memory size;
                               actually, it's the highest physical address + 1 */
uint64_t    max_mem;        /* Size of physical memory (bytes), adjusted by maxmem */
uint64_t    sane_size;      /* Memory size to use for defaults calculations */


mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX + 1];
int          pmap_mem_regions_count = 0;    /* Assume no non-contiguous memory regions */

unsigned int avail_remaining = 0;
vm_offset_t  first_avail;
vm_offset_t  static_memory_end;
addr64_t     vm_last_addr = VM_MAX_KERNEL_ADDRESS;  /* Highest kernel virtual address known to the VM system */

extern struct mach_header _mh_execute_header;
vm_offset_t sectTEXTB;
int         sectSizeTEXT;
vm_offset_t sectDATAB;
int         sectSizeDATA;
vm_offset_t sectLINKB;
int         sectSizeLINK;
vm_offset_t sectKLDB;
int         sectSizeKLD;
vm_offset_t sectPRELINKB;
int         sectSizePRELINK;
vm_offset_t sectHIBB;
int         sectSizeHIB;

vm_offset_t end, etext, edata;

extern unsigned long exception_entry;
extern unsigned long exception_end;


void ppc_vm_init(uint64_t mem_limit, boot_args *args)
{
    unsigned int i, kmapsize, pvr;
    vm_offset_t  addr;
    unsigned int *xtaproot, bank_shift;
    uint64_t     cbsize, xhid0;


    /*
     * Invalidate all shadow BATs
     */

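    /*
     * Classic 32-bit PowerPC provides four instruction (IBAT) and four data
     * (DBAT) block address translation register pairs, which map large
     * contiguous regions without going through the page table.  Marking every
     * shadow entry invalid records that no block translations are assumed to
     * be in effect yet.
     */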
    /* Initialize shadow IBATs */
    shadow_BAT.IBATs[0].upper = BAT_INVALID;
    shadow_BAT.IBATs[0].lower = BAT_INVALID;
    shadow_BAT.IBATs[1].upper = BAT_INVALID;
    shadow_BAT.IBATs[1].lower = BAT_INVALID;
    shadow_BAT.IBATs[2].upper = BAT_INVALID;
    shadow_BAT.IBATs[2].lower = BAT_INVALID;
    shadow_BAT.IBATs[3].upper = BAT_INVALID;
    shadow_BAT.IBATs[3].lower = BAT_INVALID;

    /* Initialize shadow DBATs */
    shadow_BAT.DBATs[0].upper = BAT_INVALID;
    shadow_BAT.DBATs[0].lower = BAT_INVALID;
    shadow_BAT.DBATs[1].upper = BAT_INVALID;
    shadow_BAT.DBATs[1].lower = BAT_INVALID;
    shadow_BAT.DBATs[2].upper = BAT_INVALID;
    shadow_BAT.DBATs[2].lower = BAT_INVALID;
    shadow_BAT.DBATs[3].upper = BAT_INVALID;
    shadow_BAT.DBATs[3].lower = BAT_INVALID;


    /*
     * Go through the list of memory regions passed in via the boot_args
     * and copy valid entries into the pmap_mem_regions table, adding
     * further calculated entries.
     *
     * boot_args version 1 has addresses instead of page numbers
     * in the PhysicalDRAM banks; set bank_shift accordingly.
     */

    bank_shift = 0;
    if (args->Version == kBootArgsVersion1) bank_shift = 12;
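    /*
     * Net effect of bank_shift: cbsize below always ends up in bytes and
     * mrStart/mrEnd always end up as 4 KB page numbers, regardless of whether
     * the boot_args banks carried byte addresses or page numbers.
     */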

    pmap_mem_regions_count = 0;
    max_mem = 0;        /* Will use to total memory found so far */
    mem_actual = 0;     /* Actual size of memory */

    if (mem_limit == 0) mem_limit = 0xFFFFFFFFFFFFFFFFULL;  /* If there is no set limit, use all */

    for (i = 0; i < kMaxDRAMBanks; i++) {       /* Look at all of the banks */

        cbsize = (uint64_t)args->PhysicalDRAM[i].size << (12 - bank_shift); /* Remember current size */

        if (!cbsize) continue;                  /* Skip if the bank is empty */

        mem_actual = mem_actual + cbsize;       /* Get true memory size */

        if (mem_limit == 0) continue;           /* If we hit restriction, just keep counting */

        if (cbsize > mem_limit) cbsize = mem_limit; /* Trim to max allowed */
        max_mem += cbsize;                      /* Total up what we have so far */
        mem_limit = mem_limit - cbsize;         /* Calculate amount left to do */

        pmap_mem_regions[pmap_mem_regions_count].mrStart = args->PhysicalDRAM[i].base >> bank_shift;    /* Set the start of the bank */
        pmap_mem_regions[pmap_mem_regions_count].mrAStart = pmap_mem_regions[pmap_mem_regions_count].mrStart;   /* Set the start of allocatable area */
        pmap_mem_regions[pmap_mem_regions_count].mrEnd = ((uint64_t)args->PhysicalDRAM[i].base >> bank_shift) + (cbsize >> 12) - 1; /* Set the end address of bank */
        pmap_mem_regions[pmap_mem_regions_count].mrAEnd = pmap_mem_regions[pmap_mem_regions_count].mrEnd;   /* Set the end address of allocatable area */

        /* Regions must be provided in ascending order */
        assert((pmap_mem_regions_count == 0) ||
               pmap_mem_regions[pmap_mem_regions_count].mrStart >
               pmap_mem_regions[pmap_mem_regions_count - 1].mrStart);

        pmap_mem_regions_count++;               /* Count this region */
    }

    mem_size = (unsigned int)max_mem;           /* Get size of memory */
    if (max_mem > 0x0000000080000000ULL) mem_size = 0x80000000; /* Pin at 2 GB */

    sane_size = max_mem;                        /* Calculate a sane value to use for init */
    if (sane_size > (addr64_t)(VM_MAX_KERNEL_ADDRESS + 1))
        sane_size = (addr64_t)(VM_MAX_KERNEL_ADDRESS + 1); /* If memory extends past the kernel-addressable range, use only the addressable portion */


    /*
     * Initialize the pmap system, using space above `first_avail'
     * for the necessary data structures.
     * NOTE: assume that we'll have enough space mapped in already.
     */

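    /*
     * static_memory_end was established earlier in boot; it marks the end of
     * the memory already claimed by the kernel image and statically allocated
     * boot data, so the pmap's structures are presumably carved out just above it.
     */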
    first_avail = static_memory_end;

    /*
     * Now retrieve addresses for end, edata, and etext
     * from the Mach-O headers of the currently running 32-bit kernel.
     */
    sectTEXTB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__TEXT", &sectSizeTEXT);
    sectDATAB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__DATA", &sectSizeDATA);
    sectLINKB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
    sectKLDB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__KLD", &sectSizeKLD);
    sectHIBB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__HIB", &sectSizeHIB);
    sectPRELINKB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__PRELINK", &sectSizePRELINK);

    etext = (vm_offset_t)sectTEXTB + sectSizeTEXT;
    edata = (vm_offset_t)sectDATAB + sectSizeDATA;
    end = round_page(getlastaddr());            /* Force end to next page */

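    /*
     * kmapsize totals the bytes of every region mapped below (exception
     * vectors, the kernel segments, and the static data between `end' and
     * static_memory_end); it is handed to pmap_bootstrap() so the pmap layer
     * can presumably reserve enough mapping structures up front.
     */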
    kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) +  /* Get size we will map later */
        (round_page(sectTEXTB + sectSizeTEXT) - trunc_page(sectTEXTB)) +
        (round_page(sectDATAB + sectSizeDATA) - trunc_page(sectDATAB)) +
        (round_page(sectLINKB + sectSizeLINK) - trunc_page(sectLINKB)) +
        (round_page(sectKLDB + sectSizeKLD) - trunc_page(sectKLDB)) +
        (round_page_32(sectHIBB + sectSizeHIB) - trunc_page_32(sectHIBB)) +
        (round_page(sectPRELINKB + sectSizePRELINK) - trunc_page(sectPRELINKB)) +
        (round_page(static_memory_end) - trunc_page(end));

    pmap_bootstrap(max_mem, &first_avail, kmapsize);

    pmap_map(trunc_page(exception_entry), trunc_page(exception_entry),
             round_page(exception_end), VM_PROT_READ | VM_PROT_EXECUTE);

    pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB),
             round_page(sectTEXTB + sectSizeTEXT), VM_PROT_READ | VM_PROT_EXECUTE);

    pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB),
             round_page(sectDATAB + sectSizeDATA), VM_PROT_READ | VM_PROT_WRITE);

    /* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
     * but via ml_static_mfree(), through IODTFreeLoaderInfo().  Hence, we have
     * to map both segments page-by-page.
     */

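    /*
     * Each loop below maps its segment virtual == physical; since the 4 KB
     * page size applies, the physical page number passed to pmap_enter()
     * is simply the address shifted right by 12.
     */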
    for (addr = trunc_page(sectPRELINKB);
         addr < round_page(sectPRELINKB + sectSizePRELINK);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_offset_t)addr, (ppnum_t)(addr >> 12),
                   VM_PROT_READ | VM_PROT_WRITE,
                   VM_WIMG_USE_DEFAULT, TRUE);

    }

    for (addr = trunc_page(sectKLDB);
         addr < round_page(sectKLDB + sectSizeKLD);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_offset_t)addr, (ppnum_t)(addr >> 12),
                   VM_PROT_READ | VM_PROT_WRITE,
                   VM_WIMG_USE_DEFAULT, TRUE);

    }

    for (addr = trunc_page(sectLINKB);
         addr < round_page(sectLINKB + sectSizeLINK);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_offset_t)addr,
                   (ppnum_t)(addr >> 12),
                   VM_PROT_READ | VM_PROT_WRITE,
                   VM_WIMG_USE_DEFAULT, TRUE);

    }

    for (addr = trunc_page_32(sectHIBB);
         addr < round_page_32(sectHIBB + sectSizeHIB);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_offset_t)addr, (ppnum_t)(addr >> 12),
                   VM_PROT_READ | VM_PROT_WRITE,
                   VM_WIMG_USE_DEFAULT, TRUE);

    }

    pmap_enter(kernel_pmap, (vm_map_offset_t)&sharedPage,
               (ppnum_t)&sharedPage >> 12,          /* Make sure the sharedPage is mapped */
               VM_PROT_READ | VM_PROT_WRITE,
               VM_WIMG_USE_DEFAULT, TRUE);

    pmap_enter(kernel_pmap, (vm_map_offset_t)&lowGlo.lgVerCode,
               (ppnum_t)&lowGlo.lgVerCode >> 12,    /* Make sure the low memory globals are mapped */
               VM_PROT_READ | VM_PROT_WRITE,
               VM_WIMG_USE_DEFAULT, TRUE);

    /*
     * We need to map the remainder page-by-page because some of this will
     * be released later, but not all.  Ergo, no block mapping here.
     */

    for (addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_address_t)addr, (ppnum_t)addr >> 12,
                   VM_PROT_READ | VM_PROT_WRITE,
                   VM_WIMG_USE_DEFAULT, TRUE);

    }

    /*
     * Here we map a window into the kernel address space that will be used to
     * access a slice of a user address space.  Clients for this service include
     * copyin/out and copypv.
     */

    lowGlo.lgUMWvaddr = USER_MEM_WINDOW_VADDR;  /* Initialize user memory window base address */
    MapUserMemoryWindowInit();                  /* Go initialize user memory window */

    /*
     * At this point, there is enough mapped memory and all hw mapping structures are
     * allocated and initialized.  Here is where we turn on translation for the
     * VERY first time....
     *
     * NOTE: Here is where our very first interruption will happen.
     *
     */

    hw_start_trans();                   /* Start translating */
    PE_init_platform(TRUE, args);       /* Initialize this right off the bat */


#if 0
    GratefulDebInit((bootBumbleC *)&(args->Video)); /* Initialize the GratefulDeb debugger */
#endif


    printf_init();                      /* Init this in case we need debugger */
    panic_init();                       /* Init this in case we need debugger */
    PE_init_kprintf(TRUE);              /* Note on PPC we only call this after VM is set up */

    kprintf("kprintf initialized\n");

    serialmode = 0;                     /* Assume normal keyboard and console */
    if (PE_parse_boot_arg("serial", &serialmode)) { /* Do we want a serial keyboard and/or console? */
        kprintf("Serial mode specified: %08X\n", serialmode);
    }
    if (serialmode & 1) {               /* Start serial if requested */
        (void)switch_to_serial_console();   /* Switch into serial mode */
        disableConsoleOutput = FALSE;   /* Allow printfs to happen */
    }

    kprintf("max_mem: %ld M\n", (unsigned long)(max_mem >> 20));
    kprintf("version_variant = %s\n", version_variant);
    kprintf("version = %s\n\n", version);
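    /* mfpvr reads the Processor Version Register: the upper halfword identifies
       the processor model and the lower halfword its revision level. */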
    __asm__ ("mfpvr %0" : "=r" (pvr));
    kprintf("proc version = %08x\n", pvr);
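    /* HID0 is an implementation-dependent control register; on the 64-bit parts
       the bit tested below indicates whether the time base is clocked externally
       rather than internally. */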
    if (getPerProc()->pf.Available & pf64Bit) {    /* 64-bit processor? */
        xhid0 = hid0get64();                       /* Get the hid0 */
        if (xhid0 & (1ULL << (63 - 19))) kprintf("Time base is externally clocked\n");
        else kprintf("Time base is internally clocked\n");
    }


    taproot_size = PE_init_taproot(&taproot_addr); /* (BRINGUP) See if there is a taproot */
    if (taproot_size) {                            /* (BRINGUP) */
        kprintf("TapRoot card configured to use vaddr = %08X, size = %08X\n", taproot_addr, taproot_size);
        bcopy_nc((void *)version, (void *)(taproot_addr + 16), strlen(version));    /* (BRINGUP) Pass it our kernel version */
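        /* eieio ("Enforce In-order Execution of I/O") is the PowerPC ordering
           barrier; it keeps the version string stores above ahead of the flag
           store below. */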
        __asm__ volatile("eieio");                 /* (BRINGUP) */
        xtaproot = (unsigned int *)taproot_addr;   /* (BRINGUP) */
        xtaproot[0] = 1;                           /* (BRINGUP) */
        __asm__ volatile("eieio");                 /* (BRINGUP) */
    }

    PE_create_console();        /* create the console for verbose or pretty mode */

    /* setup console output */
    PE_init_printf(FALSE);

#if DEBUG
    printf("\n\n\nThis program was compiled using gcc %d.%d for powerpc\n",
           __GNUC__, __GNUC_MINOR__);


    /* Processor version information */
    {
        unsigned int pvr;
        __asm__ ("mfpvr %0" : "=r" (pvr));
        printf("processor version register : %08X\n", pvr);
    }

    kprintf("Args at %08X\n", args);
    for (i = 0; i < pmap_mem_regions_count; i++) {
        printf("DRAM at %08X size %08X\n",
               args->PhysicalDRAM[i].base,
               args->PhysicalDRAM[i].size);
    }
#endif /* DEBUG */

#if DEBUG
    kprintf("Mapped memory:\n");
    kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry),
            trunc_page(exception_entry), round_page(exception_end));
    kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB),
            trunc_page(sectTEXTB), round_page(sectTEXTB + sectSizeTEXT));
    kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB),
            trunc_page(sectDATAB), round_page(sectDATAB + sectSizeDATA));
    kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB),
            trunc_page(sectLINKB), round_page(sectLINKB + sectSizeLINK));
    kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB),
            trunc_page(sectKLDB), round_page(sectKLDB + sectSizeKLD));
    kprintf(" end: %08X, %08X - %08X\n", trunc_page(end),
            trunc_page(end), static_memory_end);

#endif

    return;
}