]> git.saurik.com Git - apple/boot.git/blob - i386/boot2/old/scanmemory.c
152040b24bf9febdad4b069afded04abf4cff142
[apple/boot.git] / i386 / boot2 / old / scanmemory.c
1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
7 * Reserved. This file contains Original Code and/or Modifications of
8 * Original Code as defined in and that are subject to the Apple Public
9 * Source License Version 1.1 (the "License"). You may not use this file
10 * except in compliance with the License. Please obtain a copy of the
11 * License at http://www.apple.com/publicsource and read it before using
12 * this file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE OR NON- INFRINGEMENT. Please see the
19 * License for the specific language governing rights and limitations
20 * under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24 #import <mach/boolean.h>
25
26 #import <mach/i386/vm_types.h>
27 #import <architecture/i386/cpu.h>
28
/*
 * Primitives for manipulating the
 * cpu cache(s).
 */
33
34 static __inline__
35 void
36 enable_cache(void)
37 {
38 cr0_t cr0;
39
40 asm volatile(
41 "mov %%cr0,%0"
42 : "=r" (cr0));
43
44 cr0.cd = cr0.nw = 0;
45
46 asm volatile(
47 "wbinvd; mov %0,%%cr0"
48 :
49 : "r" (cr0));
50 }
51
52 static __inline__
53 void
54 flush_cache(void)
55 {
56 asm volatile("wbinvd");
57 }
58
59 /*
60 * Memory sizing code.
61 *
62 * Tunable Parameters:
63 * SCAN_INCR Size of memory scanning segment.
64 * SCAN_LEN Length to actually test, per segment,
65 * starting at the front of the segment.
66 * SCAN_LIM Highest address to test for existence of memory.
67 *
68 * Assumptions:
69 * Primary assumption is that SCAN_INCR and SCAN_LEN are chosen
70 * and the stack is positioned in its segment such that the tested
71 * portion of the segment does not overlap with any portion of the
 * stack's location in that segment. The best way to accomplish this
73 * is to position the stack at the high end of a segment, and to make
74 * the segment size large enough to prevent it from overlapping the
75 * test area. The stack needs to be large enough to contain the low
76 * memory save area as well as the code for the scan function.
77 */
78 #define KB(x) (1024*(x))
79 #define MB(x) (1024*KB(x))
80
81 #define SCAN_PAT0 0x76543210
82 #define SCAN_PAT1 0x89abcdef
83
84 struct test_datum {
85 unsigned int word0;
86 unsigned int word1;
87 };
88
89 vm_offset_t
90 scan_memory(
91 vm_offset_t end_of_memory,
92 vm_offset_t end_of_cnvmem,
93 unsigned int SCAN_INCR,
94 unsigned int SCAN_LEN,
95 unsigned int SCAN_LIM
96 )
97 {
98 struct test_datum zero_pat = { 0, 0 };
99 vm_offset_t memory;
100
101 /*
102 * Make sure that the cache(s) are flushed
103 * and enabled.
104 */
105 enable_cache();
106
107 /*
108 * Round the starting address to the next
109 * segment boundary. This is where we will
110 * begin testing.
111 */
112 end_of_memory = (end_of_memory + (SCAN_INCR - 1) & ~(SCAN_INCR - 1));
113
114 /*
115 * Zero out the test area of each segent
116 * which is located in extended memory.
117 */
118 memory = KB(1024);
119
120 while (memory < end_of_memory) {
121 struct test_datum *memory_ptr;
122
123 (vm_offset_t)memory_ptr = memory;
124
125 while ((vm_offset_t)memory_ptr < memory + SCAN_LEN)
126 *memory_ptr++ = zero_pat;
127
128 memory += SCAN_INCR;
129 }
130
131 {
132 /*
133 * Code for segment scanning function.
134 */
135 extern unsigned int Scan_segment_code[],
136 Scan_segment_code_end[];
137 /*
138 * Location on the stack to where this
139 * function is copied and then executed
140 * from!! N.B. This code must be position
141 * independent. (duh)
142 */
143 unsigned int scan_func[
144 Scan_segment_code_end -
145 Scan_segment_code];
146
147 /*
148 * Copy the scan function onto the stack.
149 */
150 memcpy(scan_func, Scan_segment_code, sizeof (scan_func));
151
152 while (end_of_memory < SCAN_LIM) {
153 display_kbytes(end_of_memory);
154 if (!((vm_offset_t (*)())scan_func)(
155 end_of_memory,
156 end_of_cnvmem,
157 SCAN_INCR,
158 SCAN_LEN))
159 break;
160
161 end_of_memory += SCAN_INCR;
162 }
163 }
164
165 display_kbytes(end_of_memory);
166
167 return (end_of_memory);
168 }
169
170 static
171 void
172 display_kbytes(
173 vm_offset_t address
174 )
175 {
176 unsigned int quant, dig, done = 0, mag = 1000000000;
177 int places = 1;
178
179 quant = address / 1024;
180
181 while (mag > 0) {
182 done *= 10;
183 dig = (quant / mag) - done;
184 done += dig;
185 if (done > 0 || mag == 1) {
186 putc(dig + '0');
187 places++;
188 }
189 mag /= 10;
190 }
191
192 putc('K');
193
194 while (places-- > 0)
195 putc('\b');
196 }
197
/*
 * Memory scan function, which tests one segment of memory.
 * This code is copied onto the stack and executed there to
 * avoid problems when memory aliasing occurs.  If it detects
 * problems due to aliasing to low memory, it restores the
 * low memory segments before returning.
 *
 * Parameters:
 *	start_of_segment	Address of the segment to test
 *				(already rounded to a segment
 *				boundary by the caller).
 *	end_of_cnvmem		Address where conventional
 *				memory ends.
 *	SCAN_INCR		Size of each segment.
 *	SCAN_LEN		Size of per segment test area,
 *				located at the front of the segment.
 */
217 static
218 boolean_t
219 scan_segment(
220 vm_offset_t start_of_segment,
221 vm_offset_t end_of_cnvmem,
222 unsigned int SCAN_INCR,
223 unsigned int SCAN_LEN
224 )
225 {
226 /*
227 * Location on the stack where the test
228 * area of each segment of low memory is
229 * saved, appended together. The copy is
230 * used to detect memory aliasing and to
231 * restore memory on that occasion.
232 */
233 unsigned int copy_area[
234 ((KB(640) / SCAN_INCR) * SCAN_LEN)
235 / sizeof (unsigned int)];
236 struct test_datum *test_ptr,
237 test_pat = { SCAN_PAT0, SCAN_PAT1 },
238 zero_pat = { 0, 0 };
239 vm_offset_t memory, copy;
240
241 /*
242 * Copy the test area of each low memory
243 * segment to the save area. Low memory
244 * begins at zero, and runs to the end of
245 * conventional memory.
246 */
247 copy = (vm_offset_t)copy_area;
248 memory = 0;
249
250 while (memory < KB(640)) {
251 unsigned int *memory_ptr, *copy_ptr;
252
253 if (memory <= (end_of_cnvmem - SCAN_LEN)) {
254 (vm_offset_t)memory_ptr = memory;
255 (vm_offset_t)copy_ptr = copy;
256
257 while ((vm_offset_t)memory_ptr < memory + SCAN_LEN)
258 *copy_ptr++ = *memory_ptr++;
259 }
260
261 memory += SCAN_INCR; copy += SCAN_LEN;
262 }
263
264 /*
265 * Write the test pattern in the test
266 * area of the current segment.
267 */
268 (vm_offset_t)test_ptr = start_of_segment;
269
270 while ((vm_offset_t)test_ptr < start_of_segment + SCAN_LEN)
271 *test_ptr++ = test_pat;
272
273 /*
274 * Flush the data cache to insure that the
275 * data actually gets written to main memory.
276 * This will provoke aliasing to occur if
277 * it is in fact present.
278 */
279 flush_cache();
280
281 /*
282 * Compare low memory against the save
283 * area, breaking out immediately if
284 * an inconsistency is observed.
285 */
286 copy = (vm_offset_t)copy_area;
287 memory = 0;
288
289 while (memory < KB(640)) {
290 struct test_datum *memory_ptr, *copy_ptr;
291
292 if (memory <= (end_of_cnvmem - SCAN_LEN)) {
293 (vm_offset_t)memory_ptr = memory;
294 (vm_offset_t)copy_ptr = copy;
295
296 while ((vm_offset_t)memory_ptr < memory + SCAN_LEN) {
297 if ( memory_ptr->word0 != copy_ptr->word0 ||
298 memory_ptr->word1 != copy_ptr->word1 )
299 break;
300
301 memory_ptr++; copy_ptr++;
302 }
303
304 if ((vm_offset_t)memory_ptr < memory + SCAN_LEN)
305 break;
306 }
307
308 memory += SCAN_INCR; copy += SCAN_LEN;
309 }
310
311 /*
312 * If an inconsistency was found in low
313 * memory, restore the entire region from
314 * the save area and return a failure.
315 */
316 if (memory < KB(640)) {
317 copy = (vm_offset_t)copy_area;
318 memory = 0;
319
320 while (memory < KB(640)) {
321 unsigned int *memory_ptr, *copy_ptr;
322
323 if (memory <= (end_of_cnvmem - SCAN_LEN)) {
324 (vm_offset_t)memory_ptr = memory;
325 (vm_offset_t)copy_ptr = copy;
326
327 while ((vm_offset_t)memory_ptr < memory + SCAN_LEN)
328 *memory_ptr++ = *copy_ptr++;
329 }
330
331 memory += SCAN_INCR; copy += SCAN_LEN;
332 }
333
334 return (FALSE);
335 }
336
337 /*
338 * Check the memory we have already scanned
339 * to see whether aliasing occurred there.
340 * The test area of each segment should contain
341 * zeros.
342 */
343 memory = KB(1024);
344
345 while (memory < start_of_segment) {
346 struct test_datum *memory_ptr;
347
348 (vm_offset_t)memory_ptr = memory;
349
350 while ((vm_offset_t)memory_ptr < memory + SCAN_LEN) {
351 if ( memory_ptr->word0 != zero_pat.word0 ||
352 memory_ptr->word1 != zero_pat.word1 )
353 break;
354
355 memory_ptr++;
356 }
357
358 if ((vm_offset_t)memory_ptr < memory + SCAN_LEN)
359 break;
360
361 memory += SCAN_INCR;
362 }
363
364 if (memory < start_of_segment)
365 return (FALSE);
366
367 /*
368 * Now check the current segment to see
369 * whether the test patten was correctly
370 * written out.
371 */
372 (vm_offset_t)test_ptr = start_of_segment;
373
374 while ((vm_offset_t)test_ptr < start_of_segment + SCAN_LEN) {
375 if ( test_ptr->word0 != test_pat.word0 ||
376 test_ptr->word1 != test_pat.word1 )
377 break;
378
379 test_ptr++;
380 }
381
382 if ((vm_offset_t)test_ptr < start_of_segment + SCAN_LEN)
383 return (FALSE);
384
385 /*
386 * Zero the current segment, which has now
387 * passed the test!!
388 */
389 (vm_offset_t)test_ptr = start_of_segment;
390
391 while ((vm_offset_t)test_ptr < start_of_segment + SCAN_LEN)
392 *test_ptr++ = zero_pat;
393
394 return (TRUE);
395 }