/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
#include <mach_debug.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <console/serial_protos.h>

#include <ppc/proc_reg.h>
#include <ppc/Firmware.h>
#include <ppc/misc_protos.h>
#include <ppc/mappings.h>
#include <ppc/exception.h>
#include <ppc/lowglobals.h>
#include <ppc/serial_io.h>

#include <libkern/kernel_mach_header.h>
extern const char version[];
extern const char version_variant[];

addr64_t     hash_table_base;       /* Hash table base */
unsigned int hash_table_size;       /* Hash table size */
int          hash_table_shift;      /* "ht_shift" boot arg, used to scale hash_table_size */
vm_offset_t  taproot_addr;          /* (BRINGUP) */
unsigned int taproot_size;          /* (BRINGUP) */
extern int   disableConsoleOutput;

struct shadowBAT shadow_BAT;
/*
 * NOTE: mem_size is bogus on large memory machines.  We will pin it to 0x80000000 if there is more than 2 GB.
 * This is left only for compatibility and max_mem should be used.
 */
vm_offset_t mem_size;       /* Size of actual physical memory present
                               minus any performance buffer and possibly limited
                               by mem_limit in bytes */
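/* For example, on a machine with 4 GB installed (and no mem_limit), max_mem
   ends up as 0x100000000 while mem_size is pinned to 0x80000000 by the
   2 GB cap applied in ppc_vm_init() below. */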
uint64_t mem_actual;        /* The "One True" physical memory size
                               (actually, it's the highest physical address + 1) */
uint64_t max_mem;           /* Size of physical memory (bytes), adjusted by maxmem */
uint64_t sane_size;         /* Memory size to use for defaults calculations */

mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX + 1];
unsigned int pmap_mem_regions_count;    /* Assume no non-contiguous memory regions */

unsigned int avail_remaining = 0;
vm_offset_t  first_avail;
vm_offset_t  static_memory_end;
addr64_t     vm_last_addr = VM_MAX_KERNEL_ADDRESS;  /* Highest kernel virtual address known to the VM system */

vm_offset_t   sectTEXTB;
unsigned long sectSizeTEXT;
vm_offset_t   sectDATAB;
unsigned long sectSizeDATA;
vm_offset_t   sectLINKB;
unsigned long sectSizeLINK;
vm_offset_t   sectKLDB;
unsigned long sectSizeKLD;
vm_offset_t   sectPRELINKB;
unsigned long sectSizePRELINK;
vm_offset_t   sectHIBB;
unsigned long sectSizeHIB;

vm_offset_t end, etext, edata;

extern unsigned long exception_entry;
extern unsigned long exception_end;

void ppc_vm_init(uint64_t mem_limit, boot_args *args)
{
    unsigned int  i, kmapsize, pvr;
    vm_offset_t   addr;
    unsigned int  *xtaproot, bank_shift;
    uint64_t      cbsize, xhid0;

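    /*
     * shadow_BAT is a software copy ("shadow") of the four instruction and
     * four data BAT register pairs; every upper/lower word is set to
     * BAT_INVALID so that no stale block translations are assumed when
     * translation is turned on later.
     */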
    /*
     * Invalidate all shadow BATs
     */

    /* Initialize shadow IBATs */
    shadow_BAT.IBATs[0].upper = BAT_INVALID;
    shadow_BAT.IBATs[0].lower = BAT_INVALID;
    shadow_BAT.IBATs[1].upper = BAT_INVALID;
    shadow_BAT.IBATs[1].lower = BAT_INVALID;
    shadow_BAT.IBATs[2].upper = BAT_INVALID;
    shadow_BAT.IBATs[2].lower = BAT_INVALID;
    shadow_BAT.IBATs[3].upper = BAT_INVALID;
    shadow_BAT.IBATs[3].lower = BAT_INVALID;

    /* Initialize shadow DBATs */
    shadow_BAT.DBATs[0].upper = BAT_INVALID;
    shadow_BAT.DBATs[0].lower = BAT_INVALID;
    shadow_BAT.DBATs[1].upper = BAT_INVALID;
    shadow_BAT.DBATs[1].lower = BAT_INVALID;
    shadow_BAT.DBATs[2].upper = BAT_INVALID;
    shadow_BAT.DBATs[2].lower = BAT_INVALID;
    shadow_BAT.DBATs[3].upper = BAT_INVALID;
    shadow_BAT.DBATs[3].lower = BAT_INVALID;

    /*
     * Go through the list of memory regions passed in via the boot_args
     * and copy valid entries into the pmap_mem_regions table, adding
     * further calculated entries.
     *
     * boot_args version 1 has addresses instead of page numbers
     * in the PhysicalDRAM banks, so set bank_shift accordingly.
     */

    bank_shift = 0;
    if (args->Version == kBootArgsVersion1) bank_shift = 12;
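    /*
     * With a version-1 boot_args the PhysicalDRAM base/size fields hold byte
     * values, so bank_shift = 12 makes "base >> bank_shift" a 4 KB page number
     * and "size << (12 - bank_shift)" leaves the size in bytes; with newer
     * boot_args the fields are already page numbers and bank_shift stays 0.
     */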

    pmap_mem_regions_count = 0;
    max_mem = 0;        /* Will use to total memory found so far */
    mem_actual = 0;     /* Actual size of memory */

    if (mem_limit == 0) mem_limit = 0xFFFFFFFFFFFFFFFFULL;  /* If there is no set limit, use all */

    for (i = 0; i < kMaxDRAMBanks; i++) {   /* Look at all of the banks */

        cbsize = (uint64_t)args->PhysicalDRAM[i].size << (12 - bank_shift);  /* Remember current size */

        if (!cbsize) continue;              /* Skip if the bank is empty */

        mem_actual = mem_actual + cbsize;   /* Get true memory size */

        if (mem_limit == 0) continue;       /* If we hit restriction, just keep counting */

        if (cbsize > mem_limit) cbsize = mem_limit;  /* Trim to max allowed */
        max_mem += cbsize;                  /* Total up what we have so far */
        mem_limit = mem_limit - cbsize;     /* Calculate amount left to do */

        pmap_mem_regions[pmap_mem_regions_count].mrStart  = args->PhysicalDRAM[i].base >> bank_shift;  /* Set the start of the bank */
        pmap_mem_regions[pmap_mem_regions_count].mrAStart = pmap_mem_regions[pmap_mem_regions_count].mrStart;  /* Set the start of allocatable area */
        pmap_mem_regions[pmap_mem_regions_count].mrEnd    = ((uint64_t)args->PhysicalDRAM[i].base >> bank_shift) + (cbsize >> 12) - 1;  /* Set the end address of bank */
        pmap_mem_regions[pmap_mem_regions_count].mrAEnd   = pmap_mem_regions[pmap_mem_regions_count].mrEnd;  /* Set the end address of allocatable area */

        /* Regions must be provided in ascending order */
        assert((pmap_mem_regions_count == 0) ||
               pmap_mem_regions[pmap_mem_regions_count].mrStart >
               pmap_mem_regions[pmap_mem_regions_count - 1].mrStart);

        pmap_mem_regions_count++;           /* Count this region */
    }

    mem_size = (unsigned int)max_mem;                             /* Get size of memory */
    if (max_mem > 0x0000000080000000ULL) mem_size = 0x80000000;   /* Pin at 2 GB */

    sane_size = max_mem;                                          /* Calculate a sane value to use for init */
    if (sane_size > (addr64_t)(VM_MAX_KERNEL_ADDRESS + 1))
        sane_size = (addr64_t)(VM_MAX_KERNEL_ADDRESS + 1);        /* If flush with RAM, use the addressable portion */

    /*
     * Initialize the pmap system, using space above `first_avail'
     * for the necessary data structures.
     * NOTE: assume that we'll have enough space mapped in already.
     */

    first_avail = static_memory_end;

    /*
     * Now retrieve addresses for end, edata, and etext
     * from Mach-O headers for the currently running 32-bit kernel.
     */
    /* XXX fix double casts for 64 bit kernel */
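    /*
     * getsegdatafromheader() (declared in libkern/kernel_mach_header.h,
     * included above) returns the named segment's start address in the running
     * kernel image and stores the segment's size through its last parameter.
     */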
    sectTEXTB = (vm_offset_t)(uint32_t *)getsegdatafromheader(
        &_mh_execute_header, "__TEXT", &sectSizeTEXT);
    sectDATAB = (vm_offset_t)(uint32_t *)getsegdatafromheader(
        &_mh_execute_header, "__DATA", &sectSizeDATA);
    sectLINKB = (vm_offset_t)(uint32_t *)getsegdatafromheader(
        &_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
    sectKLDB = (vm_offset_t)(uint32_t *)getsegdatafromheader(
        &_mh_execute_header, "__KLD", &sectSizeKLD);
    sectHIBB = (vm_offset_t)(uint32_t *)getsegdatafromheader(
        &_mh_execute_header, "__HIB", &sectSizeHIB);
    sectPRELINKB = (vm_offset_t)(uint32_t *)getsegdatafromheader(
        &_mh_execute_header, "__PRELINK_TEXT", &sectSizePRELINK);

    etext = (vm_offset_t)sectTEXTB + sectSizeTEXT;
    edata = (vm_offset_t)sectDATAB + sectSizeDATA;
    end = round_page(getlastaddr());    /* Force end to next page */

    kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) +   /* Get size we will map later */
        (round_page(sectTEXTB + sectSizeTEXT) - trunc_page(sectTEXTB)) +
        (round_page(sectDATAB + sectSizeDATA) - trunc_page(sectDATAB)) +
        (round_page(sectLINKB + sectSizeLINK) - trunc_page(sectLINKB)) +
        (round_page(sectKLDB + sectSizeKLD) - trunc_page(sectKLDB)) +
        (round_page_32(sectHIBB + sectSizeHIB) - trunc_page_32(sectHIBB)) +
        (round_page(sectPRELINKB + sectSizePRELINK) - trunc_page(sectPRELINKB)) +
        (round_page(static_memory_end) - trunc_page(end));
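    /*
     * kmapsize totals the bytes of everything mapped below (exception vectors,
     * the kernel segments, and the static data up to static_memory_end); it is
     * handed to pmap_bootstrap, presumably so the pmap layer can reserve room
     * for those mappings up front.
     */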
    pmap_bootstrap(max_mem, &first_avail, kmapsize);

    pmap_map(trunc_page(exception_entry), trunc_page(exception_entry),
             round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE, VM_WIMG_USE_DEFAULT);

    pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB),
             round_page(sectTEXTB + sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE, VM_WIMG_USE_DEFAULT);

    pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB),
             round_page(sectDATAB + sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE, VM_WIMG_USE_DEFAULT);

    /*
     * The KLD and LINKEDIT segments are unloaded in toto after boot completes,
     * but via ml_static_mfree(), through IODTFreeLoaderInfo().  Hence, we have
     * to map both segments page-by-page.
     */
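    /*
     * Each pmap_enter() below maps a single PAGE_SIZE (4 KB) page; the
     * "addr >> 12" converts the byte address into the physical page number
     * (ppnum_t) that pmap_enter expects.
     */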

    for (addr = trunc_page(sectPRELINKB);
         addr < round_page(sectPRELINKB + sectSizePRELINK);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_offset_t)addr, (ppnum_t)(addr >> 12),
                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
                   VM_WIMG_USE_DEFAULT, TRUE);
    }

    for (addr = trunc_page(sectKLDB);
         addr < round_page(sectKLDB + sectSizeKLD);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_offset_t)addr, (ppnum_t)(addr >> 12),
                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
                   VM_WIMG_USE_DEFAULT, TRUE);
    }

    for (addr = trunc_page(sectLINKB);
         addr < round_page(sectLINKB + sectSizeLINK);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_offset_t)addr, (ppnum_t)(addr >> 12),
                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
                   VM_WIMG_USE_DEFAULT, TRUE);
    }

    for (addr = trunc_page_32(sectHIBB);
         addr < round_page_32(sectHIBB + sectSizeHIB);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_offset_t)addr, (ppnum_t)(addr >> 12),
                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
                   VM_WIMG_USE_DEFAULT, TRUE);
    }

    pmap_enter(kernel_pmap, (vm_map_offset_t)(uintptr_t)&sharedPage,
               (ppnum_t)&sharedPage >> 12,        /* Make sure the sharedPage is mapped */
               VM_PROT_READ|VM_PROT_WRITE,
               VM_WIMG_USE_DEFAULT, TRUE);

    pmap_enter(kernel_pmap, (vm_map_offset_t)(uintptr_t)&lowGlo.lgVerCode,
               (ppnum_t)&lowGlo.lgVerCode >> 12,  /* Make sure the low memory globals are mapped */
               VM_PROT_READ|VM_PROT_WRITE,
               VM_WIMG_USE_DEFAULT, TRUE);

    /*
     * We need to map the remainder page-by-page because some of this will
     * be released later, but not all.  Ergo, no block mapping here.
     */

    for (addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, (vm_map_address_t)addr, (ppnum_t)addr >> 12,
                   VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
                   VM_WIMG_USE_DEFAULT, TRUE);
    }

    /*
     * Here we map a window into the kernel address space that will be used to
     * access a slice of a user address space. Clients for this service include
     * copyin/out and copypv.
     */

    lowGlo.lgUMWvaddr = USER_MEM_WINDOW_VADDR;  /* Initialize user memory window base address */
    MapUserMemoryWindowInit();                  /* Go initialize user memory window */

    /*
     * At this point, there is enough mapped memory and all hw mapping structures are
     * allocated and initialized.  Here is where we turn on translation for the
     * VERY first time....
     *
     * NOTE: Here is where our very first interruption will happen.
     */

    hw_start_trans();              /* Start translating */
    PE_init_platform(TRUE, args);  /* Initialize this right off the bat */

    GratefulDebInit((bootBumbleC *)&(args->Video));  /* Initialize the GratefulDeb debugger */

    printf_init();          /* Init this in case we need debugger */
    panic_init();           /* Init this in case we need debugger */
    PE_init_kprintf(TRUE);  /* Note on PPC we only call this after VM is set up */

    kprintf("kprintf initialized\n");

    serialmode = 0;  /* Assume normal keyboard and console */
    if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) {  /* Do we want a serial keyboard and/or console? */
        kprintf("Serial mode specified: %08X\n", serialmode);
    }
    if (serialmode & 1) {  /* Start serial if requested */
        (void)switch_to_serial_console();  /* Switch into serial mode */
        disableConsoleOutput = FALSE;      /* Allow printfs to happen */
    }
364 kprintf("max_mem: %ld M\n", (unsigned long)(max_mem
>> 20));
365 kprintf("version_variant = %s\n", version_variant
);
366 kprintf("version = %s\n\n", version
);
367 __asm__ ("mfpvr %0" : "=r" (pvr
));
368 kprintf("proc version = %08x\n", pvr
);
    if (getPerProc()->pf.Available & pf64Bit) {  /* 64-bit processor? */
        xhid0 = hid0get64();  /* Get the hid0 */
        if (xhid0 & (1ULL << (63 - 19))) kprintf("Time base is externally clocked\n");
        else kprintf("Time base is internally clocked\n");
    }
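    /*
     * The mask 1ULL << (63 - 19) tests HID0 bit 19 in IBM (MSB = bit 0)
     * numbering; when it is set, the time base is driven by an external
     * clock, as the kprintf above reports.
     */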

    taproot_size = PE_init_taproot(&taproot_addr);  /* (BRINGUP) See if there is a taproot */
    if (taproot_size) {  /* (BRINGUP) */
        kprintf("TapRoot card configured to use vaddr = %08X, size = %08X\n", taproot_addr, taproot_size);
        bcopy_nc(version, (void *)(taproot_addr + 16), strlen(version));  /* (BRINGUP) Pass it our kernel version */
        __asm__ volatile("eieio");  /* (BRINGUP) */
        xtaproot = (unsigned int *)taproot_addr;  /* (BRINGUP) */
        xtaproot[0] = 1;  /* (BRINGUP) */
        __asm__ volatile("eieio");  /* (BRINGUP) */
    }

    PE_create_console();  /* create the console for verbose or pretty mode */

    /* setup console output */
    PE_init_printf(FALSE);

    printf("\n\n\nThis program was compiled using gcc %d.%d for powerpc\n",
           __GNUC__, __GNUC_MINOR__);

    /* Processor version information */
    __asm__ ("mfpvr %0" : "=r" (pvr));
    printf("processor version register : %08X\n", pvr);

400 kprintf("Args at %p\n", args
);
401 for (i
= 0; i
< pmap_mem_regions_count
; i
++) {
402 printf("DRAM at %08lX size %08lX\n",
403 args
->PhysicalDRAM
[i
].base
,
404 args
->PhysicalDRAM
[i
].size
);
409 kprintf("Mapped memory:\n");
410 kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry
),
411 trunc_page(exception_entry
), round_page(exception_end
));
412 kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB
),
413 trunc_page(sectTEXTB
), round_page(sectTEXTB
+sectSizeTEXT
));
414 kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB
),
415 trunc_page(sectDATAB
), round_page(sectDATAB
+sectSizeDATA
));
416 kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB
),
417 trunc_page(sectLINKB
), round_page(sectLINKB
+sectSizeLINK
));
418 kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB
),
419 trunc_page(sectKLDB
), round_page(sectKLDB
+sectSizeKLD
));
420 kprintf(" end: %08X, %08X - %08X\n", trunc_page(end
),
421 trunc_page(end
), static_memory_end
);