2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
29 * @APPLE_FREE_COPYRIGHT@
32 #include <mach_debug.h>
38 #include <mach/vm_types.h>
39 #include <mach/vm_param.h>
40 #include <mach/thread_status.h>
41 #include <kern/misc_protos.h>
42 #include <kern/assert.h>
43 #include <kern/cpu_number.h>
45 #include <ppc/proc_reg.h>
46 #include <ppc/Firmware.h>
48 #include <ppc/misc_protos.h>
50 #include <ppc/pmap_internals.h>
52 #include <ppc/mappings.h>
53 #include <ppc/exception.h>
57 #include <mach-o/mach_header.h>
60 extern unsigned int intstack
[]; /* declared in start.s */
61 extern unsigned int intstack_top_ss
; /* declared in start.s */
63 vm_offset_t mem_size
; /* Size of actual physical memory present
64 minus any performance buffer and possibly limited
65 by mem_limit in bytes */
66 vm_offset_t mem_actual
; /* The "One True" physical memory size
67 actually, it's the highest physical address + 1 */
68 uint64_t max_mem
; /* Size of physical memory (bytes), adjusted by maxmem */
70 mem_region_t pmap_mem_regions
[PMAP_MEM_REGION_MAX
];
71 int pmap_mem_regions_count
= 0; /* No non-contiguous memory regions */
73 mem_region_t free_regions
[FREE_REGION_MAX
];
74 int free_regions_count
;
77 extern unsigned long etext
;
80 unsigned int avail_remaining
= 0;
81 vm_offset_t first_avail
;
82 vm_offset_t static_memory_end
;
83 extern vm_offset_t avail_next
;
86 extern struct mach_header _mh_execute_header
;
87 vm_offset_t sectTEXTB
;
89 vm_offset_t sectDATAB
;
91 vm_offset_t sectLINKB
;
96 vm_offset_t end
, etext
, edata
;
99 extern unsigned long exception_entry
;
100 extern unsigned long exception_end
;
/*
 * ppc_vm_init: early bootstrap of the PowerPC VM system.
 *
 * Resolves the kernel's Mach-O segment addresses, coalesces and records
 * the physical DRAM banks handed in via boot_args into pmap_mem_regions,
 * bootstraps the pmap, re-maps the kernel's static memory (exception
 * vectors, __TEXT, __DATA, and the page-by-page __KLD/__LINKEDIT and
 * tail regions), and reloads the real BAT registers from shadow_BAT.
 *
 * mem_limit - cap applied to total reported memory in the bank scan
 *             below (see the "-m" comment there)
 * args      - boot argument block carrying the PhysicalDRAM bank list
 *
 * NOTE(review): this copy of the file is damaged.  Numeric prefixes
 * from a line-numbered listing are fused into the code, statements are
 * split across lines, several original lines (braces, a few statements,
 * comment delimiters) are missing, and "&sect..." identifiers were
 * mis-decoded into the section-sign character ("§SizeTEXT" should read
 * "&sectSizeTEXT", etc.).  The code is left untouched below; restore it
 * against a pristine copy before attempting to compile.
 */
103 void ppc_vm_init(unsigned int mem_limit
, boot_args
*args
)
105 unsigned int htabmask
;
106 unsigned int i
, j
, batsize
, kmapsize
;
108 int boot_task_end_offset
;
111 vm_offset_t first_phys_avail
;
112 vm_offset_t sizeadj
, oldstart
;
114 /* Now retrieve addresses for end, edata, and etext
115 * from MACH-O headers.
117 sectTEXTB
= (vm_offset_t
)getsegdatafromheader(
118 &_mh_execute_header
, "__TEXT", §SizeTEXT
);
119 sectDATAB
= (vm_offset_t
)getsegdatafromheader(
120 &_mh_execute_header
, "__DATA", §SizeDATA
);
121 sectLINKB
= (vm_offset_t
)getsegdatafromheader(
122 &_mh_execute_header
, "__LINKEDIT", §SizeLINK
);
123 sectKLDB
= (vm_offset_t
)getsegdatafromheader(
124 &_mh_execute_header
, "__KLD", §SizeKLD
);
125 etext
= (vm_offset_t
) sectTEXTB
+ sectSizeTEXT
;
127 edata
= (vm_offset_t
) sectDATAB
+ sectSizeDATA
;
128 end
= round_page(getlastaddr()); /* Force end to next page */
130 kprintf("sectTEXT: %x, size: %x\n", sectTEXTB
, sectSizeTEXT
);
131 kprintf("sectDATA: %x, size: %x\n", sectDATAB
, sectSizeDATA
);
132 kprintf("sectLINK: %x, size: %x\n", sectLINKB
, sectSizeLINK
);
133 kprintf("sectKLD: %x, size: %x\n", sectKLDB
, sectSizeKLD
);
134 kprintf("end: %x\n", end
);
137 /* Stitch valid memory regions together - they may be contiguous
138 * even though they're not already glued together
140 mem_actual
= args
->PhysicalDRAM
[0].base
+ args
->PhysicalDRAM
[0].size
; /* Initialize to the first region size */
141 addr
= 0; /* temp use as pointer to previous memory region... */
142 for (i
= 1; i
< kMaxDRAMBanks
; i
++) {
144 if (args
->PhysicalDRAM
[i
].size
== 0) continue; /* If region is empty, skip it */
146 if((args
->PhysicalDRAM
[i
].base
+ args
->PhysicalDRAM
[i
].size
) > mem_actual
) { /* New high? */
147 mem_actual
= args
->PhysicalDRAM
[i
].base
+ args
->PhysicalDRAM
[i
].size
; /* Take the high bid */
150 if (args
->PhysicalDRAM
[i
].base
== /* Does the end of the last hit the start of the next? */
151 args
->PhysicalDRAM
[addr
].base
+
152 args
->PhysicalDRAM
[addr
].size
) {
153 kprintf("region 0x%08x size 0x%08x joining region 0x%08x size 0x%08x\n",
154 args
->PhysicalDRAM
[addr
].base
, args
->PhysicalDRAM
[addr
].size
,
155 args
->PhysicalDRAM
[i
].base
, args
->PhysicalDRAM
[i
].size
);
157 args
->PhysicalDRAM
[addr
].size
+= args
->PhysicalDRAM
[i
].size
; /* Join them */
158 args
->PhysicalDRAM
[i
].size
= 0;
/* NOTE(review): the statement that records this bank as the new
   comparison point (presumably "addr = i;") is missing in this copy */
161 /* This is now last non-zero region to compare against */
165 /* Go through the list of memory regions passed in via the args
166 * and copy valid entries into the pmap_mem_regions table, adding
167 * further calculated entries.
170 pmap_mem_regions_count
= 0;
171 mem_size
= 0; /* Will use to total memory found so far */
173 for (i
= 0; i
< kMaxDRAMBanks
; i
++) {
174 if (args
->PhysicalDRAM
[i
].size
== 0)
/* NOTE(review): the body of the empty-bank test above (presumably
   "continue;") is missing in this copy */
177 /* The following should only happen if memory size has
178 been artificially reduced with -m */
180 mem_size
+ args
->PhysicalDRAM
[i
].size
> mem_limit
)
181 args
->PhysicalDRAM
[i
].size
= mem_limit
- mem_size
;
183 /* We've found a region, tally memory */
185 pmap_mem_regions
[pmap_mem_regions_count
].start
=
186 args
->PhysicalDRAM
[i
].base
;
187 pmap_mem_regions
[pmap_mem_regions_count
].end
=
188 args
->PhysicalDRAM
[i
].base
+
189 args
->PhysicalDRAM
[i
].size
;
191 /* Regions must be provided in ascending order */
192 assert ((pmap_mem_regions_count
== 0) ||
193 pmap_mem_regions
[pmap_mem_regions_count
].start
>
194 pmap_mem_regions
[pmap_mem_regions_count
-1].start
);
196 if (pmap_mem_regions_count
> 0) {
197 /* we add on any pages not in the first memory
198 * region to the avail_remaining count. The first
199 * memory region is used for mapping everything for
200 * bootup and is taken care of specially.
/* NOTE(review): the "avail_remaining +=" left-hand side of the
   expression below is missing in this copy */
203 args
->PhysicalDRAM
[i
].size
/ PPC_PGBYTES
;
206 /* Keep track of how much memory we've found */
208 mem_size
+= args
->PhysicalDRAM
[i
].size
;
210 /* incremement number of regions found */
211 pmap_mem_regions_count
++;
216 kprintf("mem_size: %d M\n",mem_size
/ (1024 * 1024));
219 * Initialize the pmap system, using space above `first_avail'
220 * for the necessary data structures.
221 * NOTE : assume that we'll have enough space mapped in already
224 first_phys_avail
= static_memory_end
;
225 first_avail
= adjust_bat_limit(first_phys_avail
, 0, FALSE
, FALSE
);
/* Sum of every static range that will be re-entered into the pmap
   below, so pmap_bootstrap can reserve space for the mappings */
227 kmapsize
= (round_page(exception_end
) - trunc_page(exception_entry
)) + /* Get size we will map later */
228 (round_page(sectTEXTB
+sectSizeTEXT
) - trunc_page(sectTEXTB
)) +
229 (round_page(sectDATAB
+sectSizeDATA
) - trunc_page(sectDATAB
)) +
230 (round_page(sectLINKB
+sectSizeLINK
) - trunc_page(sectLINKB
)) +
231 (round_page(sectKLDB
+sectSizeKLD
) - trunc_page(sectKLDB
)) +
232 (round_page(static_memory_end
) - trunc_page(end
));
234 pmap_bootstrap(mem_size
,&first_avail
,&first_phys_avail
, kmapsize
);
238 kprintf("Mapping memory:\n");
239 kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry
),
240 trunc_page(exception_entry
), round_page(exception_end
));
241 kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB
),
242 trunc_page(sectTEXTB
), round_page(sectTEXTB
+sectSizeTEXT
));
243 kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB
),
244 trunc_page(sectDATAB
), round_page(sectDATAB
+sectSizeDATA
));
245 kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB
),
246 trunc_page(sectLINKB
), round_page(sectLINKB
+sectSizeLINK
));
247 kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB
),
248 trunc_page(sectKLDB
), round_page(sectKLDB
+sectSizeKLD
));
249 kprintf(" end: %08X, %08X - %08X\n", trunc_page(end
),
250 trunc_page(end
), static_memory_end
);
/* Re-enter the kernel's static ranges: vectors and text are R/X,
   data is R/W */
252 pmap_map(trunc_page(exception_entry
), trunc_page(exception_entry
),
253 round_page(exception_end
), VM_PROT_READ
|VM_PROT_EXECUTE
);
254 pmap_map(trunc_page(sectTEXTB
), trunc_page(sectTEXTB
),
255 round_page(sectTEXTB
+sectSizeTEXT
), VM_PROT_READ
|VM_PROT_EXECUTE
);
256 pmap_map(trunc_page(sectDATAB
), trunc_page(sectDATAB
),
257 round_page(sectDATAB
+sectSizeDATA
), VM_PROT_READ
|VM_PROT_WRITE
);
260 /* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
261 * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have
262 * to map both segments page-by-page.
/* NOTE(review): the two page-by-page loops below are missing their
   third clause (presumably "addr += PAGE_SIZE) {") in this copy */
264 for (addr
= trunc_page(sectKLDB
);
265 addr
< round_page(sectKLDB
+sectSizeKLD
);
268 pmap_enter(kernel_pmap
, addr
, addr
,
269 VM_PROT_READ
|VM_PROT_WRITE
,
270 VM_WIMG_USE_DEFAULT
, TRUE
);
273 for (addr
= trunc_page(sectLINKB
);
274 addr
< round_page(sectLINKB
+sectSizeLINK
);
277 pmap_enter(kernel_pmap
, addr
, addr
,
278 VM_PROT_READ
|VM_PROT_WRITE
,
279 VM_WIMG_USE_DEFAULT
, TRUE
);
283 * We need to map the remainder page-by-page because some of this will
284 * be released later, but not all. Ergo, no block mapping here
286 for(addr
= trunc_page(end
); addr
< round_page(static_memory_end
); addr
+= PAGE_SIZE
) {
287 pmap_enter(kernel_pmap
, addr
, addr
,
288 VM_PROT_READ
|VM_PROT_WRITE
,
289 VM_WIMG_USE_DEFAULT
, TRUE
);
291 #endif /* __MACHO__ */
/* Dump the free_regions table (populated elsewhere) for debugging */
294 for (i
=0 ; i
< free_regions_count
; i
++) {
295 kprintf("Free region start 0x%08x end 0x%08x\n",
296 free_regions
[i
].start
,free_regions
[i
].end
);
301 * Note: the shadow BAT registers were already loaded in ppc_init.c
305 LoadIBATs((unsigned int *)&shadow_BAT
.IBATs
[0]); /* Load up real IBATs from shadows */
306 LoadDBATs((unsigned int *)&shadow_BAT
.DBATs
[0]); /* Load up real DBATs from shadows */
/* Debug dump of the four DBAT/IBAT pairs just loaded */
309 for(i
=0; i
<4; i
++) kprintf("DBAT%1d: %08X %08X\n",
310 i
, shadow_BAT
.DBATs
[i
].upper
, shadow_BAT
.DBATs
[i
].lower
);
311 for(i
=0; i
<4; i
++) kprintf("IBAT%1d: %08X %08X\n",
312 i
, shadow_BAT
.IBATs
[i
].upper
, shadow_BAT
.IBATs
[i
].lower
);
/*
 * ppc_vm_cpu_init: per-processor VM setup.
 *
 * proc_info - this CPU's per_proc_info block (not referenced by the
 *             statements visible in this copy)
 *
 * NOTE(review): the opening brace and the tail of this function
 * (anything after the LoadDBATs call) are missing from this copy of
 * the file; restore against a pristine source.
 */
316 void ppc_vm_cpu_init(
317 struct per_proc_info
*proc_info
)
/* Presumably points this CPU at the hashed page table; hash_table_base
   and hash_table_size are defined elsewhere - confirm against the pmap
   code. */
319 hash_table_init(hash_table_base
, hash_table_size
);
/* Reload the real BAT registers from the in-memory shadow copies, as
   ppc_vm_init does above */
321 LoadIBATs((unsigned int *)&shadow_BAT
.IBATs
[0]);
322 LoadDBATs((unsigned int *)&shadow_BAT
.DBATs
[0]);