/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
#include <mach_debug.h>
#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>

#include <ppc/proc_reg.h>
#include <ppc/Firmware.h>
#include <ppc/misc_protos.h>
#include <ppc/pmap_internals.h>
#include <ppc/mappings.h>
#include <ppc/exception.h>

#ifdef __MACHO__
#include <mach-o/mach_header.h>
#endif
extern unsigned int intstack[];			/* declared in start.s */
extern unsigned int intstack_top_ss;	/* declared in start.s */

vm_offset_t mem_size;	/* Size of actual physical memory present
						   minus any performance buffer and possibly limited
						   by mem_limit in bytes */
vm_offset_t mem_actual;	/* The "One True" physical memory size;
						   actually, it's the highest physical address + 1 */

mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX];
int          pmap_mem_regions_count = 0;	/* No non-contiguous memory regions */

mem_region_t free_regions[FREE_REGION_MAX];
int          free_regions_count;
#ifndef __MACHO__
extern unsigned long etext;
#endif

unsigned int avail_remaining = 0;
vm_offset_t first_avail;
vm_offset_t static_memory_end;
extern vm_offset_t avail_next;

#ifdef __MACHO__
extern struct mach_header _mh_execute_header;
vm_offset_t sectTEXTB;
int         sectSizeTEXT;
vm_offset_t sectDATAB;
int         sectSizeDATA;
vm_offset_t sectOBJCB;
int         sectSizeOBJC;
vm_offset_t sectLINKB;
int         sectSizeLINK;
vm_offset_t sectKLDB;
int         sectSizeKLD;

vm_offset_t end, etext, edata;
#endif /* __MACHO__ */

extern unsigned long exception_entry;
extern unsigned long exception_end;
void ppc_vm_init(unsigned int mem_limit, boot_args *args)
{
	unsigned int htabmask;
	unsigned int i, j, batsize, kmapsize;
	vm_offset_t  addr;
	int boot_task_end_offset;
	vm_offset_t first_phys_avail;
	vm_offset_t sizeadj, oldstart;
	/* Now retrieve addresses for end, edata, and etext
	 * from MACH-O headers.
	 */
#ifdef __MACHO__
	sectTEXTB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__TEXT", &sectSizeTEXT);
	sectDATAB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__DATA", &sectSizeDATA);
	sectOBJCB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__OBJC", &sectSizeOBJC);
	sectLINKB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
	sectKLDB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__KLD", &sectSizeKLD);

	etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
	edata = (vm_offset_t) sectDATAB + sectSizeDATA;
	end = round_page(getlastaddr());	/* Force end to next page */

	kprintf("sectTEXT: %x, size: %x\n", sectTEXTB, sectSizeTEXT);
	kprintf("sectDATA: %x, size: %x\n", sectDATAB, sectSizeDATA);
	kprintf("sectOBJC: %x, size: %x\n", sectOBJCB, sectSizeOBJC);
	kprintf("sectLINK: %x, size: %x\n", sectLINKB, sectSizeLINK);
	kprintf("sectKLD: %x, size: %x\n", sectKLDB, sectSizeKLD);
	kprintf("end: %x\n", end);
#endif /* __MACHO__ */
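	/*
	 * The getsegdatafromheader() calls above walk the load commands of the
	 * kernel's own Mach-O header and return the address of the named
	 * segment's data, storing its size through the third argument. If a
	 * segment is absent, the call returns 0 and the size comes back 0, so
	 * the corresponding round_page/trunc_page spans below collapse to zero.
	 */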
	/* Stitch valid memory regions together - they may be contiguous
	 * even though they're not already glued together
	 */
	mem_actual = args->PhysicalDRAM[0].base + args->PhysicalDRAM[0].size;	/* Initialize to the first region size */
	addr = 0;	/* temp use as pointer to previous memory region... */
	for (i = 1; i < kMaxDRAMBanks; i++) {

		if (args->PhysicalDRAM[i].size == 0) continue;	/* If region is empty, skip it */

		if ((args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size) > mem_actual) {	/* New high? */
			mem_actual = args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size;	/* Take the high bid */
		}

		if (args->PhysicalDRAM[i].base ==	/* Does the end of the last hit the start of the next? */
		    args->PhysicalDRAM[addr].base +
		    args->PhysicalDRAM[addr].size) {
			kprintf("region 0x%08x size 0x%08x joining region 0x%08x size 0x%08x\n",
				args->PhysicalDRAM[addr].base, args->PhysicalDRAM[addr].size,
				args->PhysicalDRAM[i].base, args->PhysicalDRAM[i].size);

			args->PhysicalDRAM[addr].size += args->PhysicalDRAM[i].size;	/* Join them */
			args->PhysicalDRAM[i].size = 0;
			continue;
		}

		addr = i;	/* This is now last non-zero region to compare against */
	}
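	/*
	 * Illustrative (hypothetical) walk-through of the loop above: given
	 * PhysicalDRAM[0] = {base 0x00000000, size 0x04000000} and
	 * PhysicalDRAM[1] = {base 0x04000000, size 0x04000000}, bank 1 starts
	 * exactly where bank 0 ends, so bank 0 grows to size 0x08000000 and
	 * bank 1 is zeroed; mem_actual ends up as 0x08000000, the highest
	 * physical address + 1.
	 */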
	/* Go through the list of memory regions passed in via the args
	 * and copy valid entries into the pmap_mem_regions table, adding
	 * further calculated entries.
	 */
	pmap_mem_regions_count = 0;
	mem_size = 0;	/* Will use to total memory found so far */

	for (i = 0; i < kMaxDRAMBanks; i++) {
		if (args->PhysicalDRAM[i].size == 0)
			continue;

		/* The following should only happen if memory size has
		   been artificially reduced with -m */
		if (mem_limit > 0 &&
		    mem_size + args->PhysicalDRAM[i].size > mem_limit)
			args->PhysicalDRAM[i].size = mem_limit - mem_size;

		/* We've found a region, tally memory */

		pmap_mem_regions[pmap_mem_regions_count].start =
			args->PhysicalDRAM[i].base;
		pmap_mem_regions[pmap_mem_regions_count].end =
			args->PhysicalDRAM[i].base +
			args->PhysicalDRAM[i].size;

		/* Regions must be provided in ascending order */
		assert ((pmap_mem_regions_count == 0) ||
			pmap_mem_regions[pmap_mem_regions_count].start >
			pmap_mem_regions[pmap_mem_regions_count-1].start);

		if (pmap_mem_regions_count > 0) {
			/* we add on any pages not in the first memory
			 * region to the avail_remaining count. The first
			 * memory region is used for mapping everything for
			 * bootup and is taken care of specially.
			 */
			avail_remaining +=
				args->PhysicalDRAM[i].size / PPC_PGBYTES;
		}

		/* Keep track of how much memory we've found */

		mem_size += args->PhysicalDRAM[i].size;

		/* increment number of regions found */
		pmap_mem_regions_count++;
	}

	kprintf("mem_size: %d M\n", mem_size / (1024 * 1024));
	/*
	 * Initialize the pmap system, using space above `first_avail'
	 * for the necessary data structures.
	 * NOTE : assume that we'll have enough space mapped in already
	 */

	first_phys_avail = static_memory_end;
	first_avail = adjust_bat_limit(first_phys_avail, 0, FALSE, FALSE);

	kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) +	/* Get size we will map later */
		(round_page(sectTEXTB + sectSizeTEXT) - trunc_page(sectTEXTB)) +
		(round_page(sectDATAB + sectSizeDATA) - trunc_page(sectDATAB)) +
		(round_page(sectOBJCB + sectSizeOBJC) - trunc_page(sectOBJCB)) +
		(round_page(sectLINKB + sectSizeLINK) - trunc_page(sectLINKB)) +
		(round_page(sectKLDB + sectSizeKLD) - trunc_page(sectKLDB)) +
		(round_page(static_memory_end) - trunc_page(end));
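	/*
	 * Each (round_page(base + size) - trunc_page(base)) term counts the
	 * whole pages a region touches rather than its raw byte size. A
	 * hypothetical example with 4KB pages: a segment at 0x00013800 of size
	 * 0x2400 runs from trunc_page = 0x00013000 to round_page(0x00015C00) =
	 * 0x00016000, so it needs 0x3000 bytes (3 pages) of mappings.
	 */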
	pmap_bootstrap(mem_size, &first_avail, &first_phys_avail, kmapsize);
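	/*
	 * A reading of the call above (not verified against the pmap sources):
	 * because first_avail and first_phys_avail are passed by reference,
	 * pmap_bootstrap() can carve its initial data structures, sized in
	 * part from kmapsize, out of memory at those addresses and advance
	 * them past whatever it consumes.
	 */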
#ifdef __MACHO__
	kprintf("Mapping memory:\n");
	kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry),
		trunc_page(exception_entry), round_page(exception_end));
	kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB),
		trunc_page(sectTEXTB), round_page(sectTEXTB + sectSizeTEXT));
	kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB),
		trunc_page(sectDATAB), round_page(sectDATAB + sectSizeDATA));
	kprintf(" sectOBJCB: %08X, %08X - %08X\n", trunc_page(sectOBJCB),
		trunc_page(sectOBJCB), round_page(sectOBJCB + sectSizeOBJC));
	kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB),
		trunc_page(sectLINKB), round_page(sectLINKB + sectSizeLINK));
	kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB),
		trunc_page(sectKLDB), round_page(sectKLDB + sectSizeKLD));
	kprintf(" end: %08X, %08X - %08X\n", trunc_page(end),
		trunc_page(end), static_memory_end);

	pmap_map(trunc_page(exception_entry), trunc_page(exception_entry),
		round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE);
	pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB),
		round_page(sectTEXTB + sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE);
	pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB),
		round_page(sectDATAB + sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE);
	pmap_map(trunc_page(sectOBJCB), trunc_page(sectOBJCB),
		round_page(sectOBJCB + sectSizeOBJC), VM_PROT_READ|VM_PROT_WRITE);
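	/*
	 * The protections above follow the segment semantics: code (the
	 * exception vectors and __TEXT) is mapped read/execute, while __DATA
	 * and __OBJC are mapped read/write.
	 */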
	/* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
	 * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have
	 * to map both segments page-by-page.
	 */
	for (addr = trunc_page(sectKLDB);
	     addr < round_page(sectKLDB + sectSizeKLD);
	     addr += PAGE_SIZE) {
		pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
	}

	for (addr = trunc_page(sectLINKB);
	     addr < round_page(sectLINKB + sectSizeLINK);
	     addr += PAGE_SIZE) {
		pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
	}

	/*
	 * We need to map the remainder page-by-page because some of this will
	 * be released later, but not all. Ergo, no block mapping here
	 */
	for (addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) {
		pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
	}
#endif /* __MACHO__ */
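	/*
	 * Note that every pmap_map()/pmap_enter() call above maps each page to
	 * itself (virtual == physical), so the kernel's static image and boot
	 * data keep their physical addresses once translation is enabled.
	 */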
	for (i = 0; i < free_regions_count; i++) {
		kprintf("Free region start 0x%08x end 0x%08x\n",
			free_regions[i].start, free_regions[i].end);
	}
	/* Initialize shadow IBATs */
	shadow_BAT.IBATs[0].upper = BAT_INVALID;
	shadow_BAT.IBATs[0].lower = BAT_INVALID;
	shadow_BAT.IBATs[1].upper = BAT_INVALID;
	shadow_BAT.IBATs[1].lower = BAT_INVALID;
	shadow_BAT.IBATs[2].upper = BAT_INVALID;
	shadow_BAT.IBATs[2].lower = BAT_INVALID;
	shadow_BAT.IBATs[3].upper = BAT_INVALID;
	shadow_BAT.IBATs[3].lower = BAT_INVALID;

	LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);	/* Load up real IBATs from shadows */

	/* Initialize shadow DBATs */
	shadow_BAT.DBATs[0].upper = BAT_INVALID;
	shadow_BAT.DBATs[0].lower = BAT_INVALID;
	shadow_BAT.DBATs[1].upper = BAT_INVALID;
	shadow_BAT.DBATs[1].lower = BAT_INVALID;
	mfdbatu(shadow_BAT.DBATs[2].upper, 2);
	mfdbatl(shadow_BAT.DBATs[2].lower, 2);
	mfdbatu(shadow_BAT.DBATs[3].upper, 3);
	mfdbatl(shadow_BAT.DBATs[3].lower, 3);

	LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);	/* Load up real DBATs from shadows */
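	/*
	 * The BATs (Block Address Translation registers) translate large
	 * contiguous blocks without going through the page tables; the shadow
	 * copy is kept in memory so every processor can load identical values
	 * (see ppc_vm_cpu_init below). DBAT2 and DBAT3 are read back with
	 * mfdbatu/mfdbatl instead of being invalidated, which appears intended
	 * to preserve mappings set up earlier in boot.
	 */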
	for(i=0; i<4; i++) kprintf("DBAT%1d: %08X %08X\n",
		i, shadow_BAT.DBATs[i].upper, shadow_BAT.DBATs[i].lower);
	for(i=0; i<4; i++) kprintf("IBAT%1d: %08X %08X\n",
		i, shadow_BAT.IBATs[i].upper, shadow_BAT.IBATs[i].lower);
}
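/*
 * Per-processor VM initialization, presumably run as each processor is
 * brought up: it sets up the processor's hash table state and replays the
 * same BAT values the boot processor saved in shadow_BAT above.
 */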
void ppc_vm_cpu_init(
	struct per_proc_info *proc_info)
{
	hash_table_init(hash_table_base, hash_table_size);

	LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);
	LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);
}