[apple/xnu.git] / iokit / Kernel / IOHibernateRestoreKernel.c
/*
 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <IOKit/IOHibernatePrivate.h>
#include <IOKit/IOLib.h>
#include <pexpert/boot.h>
#include <crypto/aes.h>
#include <libkern/libkern.h>

#include "WKdm.h"
#include "IOHibernateInternal.h"

/*
 * This code is linked into the kernel but is part of the "__HIB" section, which means
 * it is used by code running in the special context of restoring the kernel text and data
 * from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything
 * it calls or references must be careful to touch only memory that is also in the "__HIB" section.
 */

uint32_t gIOHibernateState;

uint32_t gIOHibernateDebugFlags;

static IOHibernateImageHeader _hibernateHeader;
IOHibernateImageHeader * gIOHibernateCurrentHeader = &_hibernateHeader;

static hibernate_graphics_t _hibernateGraphics;
hibernate_graphics_t * gIOHibernateGraphicsInfo = &_hibernateGraphics;

static hibernate_cryptwakevars_t _cryptWakeVars;
hibernate_cryptwakevars_t * gIOHibernateCryptWakeVars = &_cryptWakeVars;

vm_offset_t gIOHibernateWakeMap;     // ppnum
vm_size_t   gIOHibernateWakeMapSize;


#if CONFIG_SLEEP
#if defined(__i386__) || defined(__x86_64__)
extern void acpi_wake_prot_entry(void);
#endif
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if defined(__i386__) || defined(__x86_64__)

#define DBGLOG 1

#include <architecture/i386/pio.h>

/* standard port addresses */
enum {
    COM1_PORT_ADDR = 0x3f8,
    COM2_PORT_ADDR = 0x2f8
};

/* UART register offsets */
enum {
    UART_RBR = 0,  /* receive buffer register (R) */
    UART_THR = 0,  /* transmit holding register (W) */
    UART_DLL = 0,  /* DLAB = 1, divisor latch (LSB) */
    UART_IER = 1,  /* interrupt enable register */
    UART_DLM = 1,  /* DLAB = 1, divisor latch (MSB) */
    UART_IIR = 2,  /* interrupt ident register (R) */
    UART_FCR = 2,  /* fifo control register (W) */
    UART_LCR = 3,  /* line control register */
    UART_MCR = 4,  /* modem control register */
    UART_LSR = 5,  /* line status register */
    UART_MSR = 6,  /* modem status register */
    UART_SCR = 7   /* scratch register */
};

enum {
    UART_LCR_8BITS = 0x03,
    UART_LCR_DLAB  = 0x80
};

enum {
    UART_MCR_DTR  = 0x01,
    UART_MCR_RTS  = 0x02,
    UART_MCR_OUT1 = 0x04,
    UART_MCR_OUT2 = 0x08,
    UART_MCR_LOOP = 0x10
};

enum {
    UART_LSR_DR   = 0x01,
    UART_LSR_OE   = 0x02,
    UART_LSR_PE   = 0x04,
    UART_LSR_FE   = 0x08,
    UART_LSR_THRE = 0x20
};

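/* Busy-wait until the COM1 transmit holding register is empty, then emit one byte. */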
static void uart_putc(char c)
{
    while (!(inb(COM1_PORT_ADDR + UART_LSR) & UART_LSR_THRE))
    {}
    outb(COM1_PORT_ADDR + UART_THR, c);
}

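/* Probe for a working COM1 UART by writing and reading back the scratch register;
   the caller drops serial restore logging if the probe fails. */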
static int debug_probe( void )
{
    /* Verify that the Scratch Register is accessible */
    outb(COM1_PORT_ADDR + UART_SCR, 0x5a);
    if (inb(COM1_PORT_ADDR + UART_SCR) != 0x5a) return false;
    outb(COM1_PORT_ADDR + UART_SCR, 0xa5);
    if (inb(COM1_PORT_ADDR + UART_SCR) != 0xa5) return false;
    uart_putc('\n');
    return true;
}

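/* Print a 64-bit value in hex, suppressing leading zero digits. */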
static void uart_puthex(uint64_t num)
{
    int bit;
    char c;
    bool leading = true;

    for (bit = 60; bit >= 0; bit -= 4)
    {
        c = 0xf & (num >> bit);
        if (c)
            leading = false;
        else if (leading && bit)   // skip leading zeros, but still print a single '0' when num is zero
            continue;
        if (c <= 9)
            c += '0';
        else
            c += 'a' - 10;
        uart_putc(c);
    }
}

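/* Emit a four-character ASCII code tag and a hex value over the serial port,
   only when kIOHibernateDebugRestoreLogs is set in gIOHibernateDebugFlags. */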
static void debug_code(uint32_t code, uint64_t value)
{
    int bit;
    char c;

    if (!(kIOHibernateDebugRestoreLogs & gIOHibernateDebugFlags))
        return;

    for (bit = 24; bit >= 0; bit -= 8)
    {
        c = 0xFF & (code >> bit);
        if (c)
            uart_putc(c);
    }
    uart_putc('=');
    uart_puthex(value);
    uart_putc('\n');
    uart_putc('\r');
}

#endif /* defined(__i386__) || defined(__x86_64__) */

#if !defined(DBGLOG)
#define debug_probe()     (false)
#define debug_code(c, v)  {}
#endif

enum
{
    kIOHibernateRestoreCodeImageStart       = 'imgS',
    kIOHibernateRestoreCodeImageEnd         = 'imgE',
    kIOHibernateRestoreCodePageIndexStart   = 'pgiS',
    kIOHibernateRestoreCodePageIndexEnd     = 'pgiE',
    kIOHibernateRestoreCodeMapStart         = 'mapS',
    kIOHibernateRestoreCodeMapEnd           = 'mapE',
    kIOHibernateRestoreCodeWakeMapSize      = 'wkms',
    kIOHibernateRestoreCodeConflictPage     = 'cfpg',
    kIOHibernateRestoreCodeConflictSource   = 'cfsr',
    kIOHibernateRestoreCodeNoMemory         = 'nomm'
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */


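/* Unrecoverable error during restore: on x86 request a system reset through the
   0xcf9 reset control port, otherwise spin forever. */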
static void fatal(void)
{
#if defined(__i386__) || defined(__x86_64__)
    outb(0xcf9, 6);
#else
    while (true) {}
#endif
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define BASE 65521L /* largest prime smaller than 65536 */
#define NMAX 5000
// NMAX (was 5521) the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1

#define DO1(buf,i)  {s1 += buf[i]; s2 += s1;}
#define DO2(buf,i)  DO1(buf,i); DO1(buf,i+1);
#define DO4(buf,i)  DO2(buf,i); DO2(buf,i+2);
#define DO8(buf,i)  DO4(buf,i); DO4(buf,i+4);
#define DO16(buf)   DO8(buf,0); DO8(buf,8);

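/* Adler-32 checksum (zlib's adler32 scheme) over len bytes, used to checksum each
   restored page; the sums are accumulated into the image header for later verification. */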
uint32_t
hibernate_sum(uint8_t *buf, int32_t len)
{
    unsigned long s1 = 1; // adler & 0xffff;
    unsigned long s2 = 0; // (adler >> 16) & 0xffff;
    int k;

    while (len > 0) {
        k = len < NMAX ? len : NMAX;
        len -= k;
        while (k >= 16) {
            DO16(buf);
            buf += 16;
            k -= 16;
        }
        if (k != 0) do {
            s1 += *buf++;
            s2 += s1;
        } while (--k);
        s1 %= BASE;
        s2 %= BASE;
    }
    return (s2 << 16) | s1;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

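/* Return the per-bank bitmap that covers the given physical page number,
   or NULL if no bank in the list contains it. */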
static hibernate_bitmap_t *
hibernate_page_bitmap(hibernate_page_list_t * list, uint32_t page)
{
    uint32_t bank;
    hibernate_bitmap_t * bitmap = &list->bank_bitmap[0];

    for (bank = 0; bank < list->bank_count; bank++)
    {
        if ((page >= bitmap->first_page) && (page <= bitmap->last_page))
            break;
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
    if (bank == list->bank_count)
        bitmap = NULL;

    return (bitmap);
}

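/* Like hibernate_page_bitmap(), but when *pPage falls in a gap before a bank,
   round *pPage up to that bank's first page and return that bank's bitmap.
   Returns NULL if the page lies beyond the last bank. */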
hibernate_bitmap_t *
hibernate_page_bitmap_pin(hibernate_page_list_t * list, uint32_t * pPage)
{
    uint32_t bank, page = *pPage;
    hibernate_bitmap_t * bitmap = &list->bank_bitmap[0];

    for (bank = 0; bank < list->bank_count; bank++)
    {
        if (page <= bitmap->first_page)
        {
            *pPage = bitmap->first_page;
            break;
        }
        if (page <= bitmap->last_page)
            break;
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
    if (bank == list->bank_count)
        bitmap = NULL;

    return (bitmap);
}

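/* Set (set == TRUE) or clear the free-map bit for a page. Bits are stored
   most-significant-bit first within each 32-bit word. */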
void
hibernate_page_bitset(hibernate_page_list_t * list, boolean_t set, uint32_t page)
{
    hibernate_bitmap_t * bitmap;

    bitmap = hibernate_page_bitmap(list, page);
    if (bitmap)
    {
        page -= bitmap->first_page;
        if (set)
            bitmap->bitmap[page >> 5] |= (0x80000000 >> (page & 31));
            //setbit(page - bitmap->first_page, (int *) &bitmap->bitmap[0]);
        else
            bitmap->bitmap[page >> 5] &= ~(0x80000000 >> (page & 31));
            //clrbit(page - bitmap->first_page, (int *) &bitmap->bitmap[0]);
    }
}

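/* Test the free-map bit for a page; pages outside every bank report TRUE. */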
boolean_t
hibernate_page_bittst(hibernate_page_list_t * list, uint32_t page)
{
    boolean_t result = TRUE;
    hibernate_bitmap_t * bitmap;

    bitmap = hibernate_page_bitmap(list, page);
    if (bitmap)
    {
        page -= bitmap->first_page;
        result = (0 != (bitmap->bitmap[page >> 5] & (0x80000000 >> (page & 31))));
    }
    return (result);
}

// count bits clear or set (set == TRUE) starting at page.
uint32_t
hibernate_page_bitmap_count(hibernate_bitmap_t * bitmap, uint32_t set, uint32_t page)
{
    uint32_t index, bit, bits;
    uint32_t count;

    count = 0;

    index = (page - bitmap->first_page) >> 5;
    bit = (page - bitmap->first_page) & 31;

    bits = bitmap->bitmap[index];
    if (set)
        bits = ~bits;
    bits = (bits << bit);
    if (bits)
        count += __builtin_clz(bits);
    else
    {
        count += 32 - bit;
        while (++index < bitmap->bitmapwords)
        {
            bits = bitmap->bitmap[index];
            if (set)
                bits = ~bits;
            if (bits)
            {
                count += __builtin_clz(bits);
                break;
            }
            count += 32;
        }
    }

    return (count);
}

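/* Return the page number currently in *pNextFree and advance *pNextFree to the next
   free (set) page after it in the map; the entry code primes the sequence with one
   discarded call so that later grabs return usable scratch pages. Calls fatal() if
   no free page can be found. */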
static vm_offset_t
hibernate_page_list_grab(hibernate_page_list_t * list, uint32_t * pNextFree)
{
    uint32_t nextFree = *pNextFree;
    uint32_t nextFreeInBank;
    hibernate_bitmap_t * bitmap;

    nextFreeInBank = nextFree + 1;
    while ((bitmap = hibernate_page_bitmap_pin(list, &nextFreeInBank)))
    {
        nextFreeInBank += hibernate_page_bitmap_count(bitmap, FALSE, nextFreeInBank);
        if (nextFreeInBank <= bitmap->last_page)
        {
            *pNextFree = nextFreeInBank;
            break;
        }
    }

    if (!bitmap)
    {
        debug_code(kIOHibernateRestoreCodeNoMemory, nextFree);
        fatal();
        nextFree = 0;
    }

    return (nextFree);
}

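/* Restore one page: WKdm-decompress into the working buffer if the page was stored
   compressed, checksum it, and hand it to hibernate_restore_phys_page() to place at
   its physical destination. Destinations below 4GB are used directly as the working
   buffer, so the data lands in place and no extra copy is needed. Returns the checksum. */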
static uint32_t
store_one_page(uint32_t procFlags, uint32_t * src, uint32_t compressedSize,
               uint32_t * buffer, uint32_t ppnum)
{
    uint64_t dst;
    uint32_t sum;

    dst = ptoa_64(ppnum);
    if (ppnum < 0x00100000)
        buffer = (uint32_t *) (uintptr_t) dst;

    if (compressedSize != PAGE_SIZE)
    {
        WKdm_decompress((WK_word*) src, (WK_word*) buffer, PAGE_SIZE >> 2);
        src = buffer;
    }

    sum = hibernate_sum((uint8_t *) src, PAGE_SIZE);

    if (((uint64_t) (uintptr_t) src) == dst)
        src = 0;

    hibernate_restore_phys_page((uint64_t) (uintptr_t) src, dst, PAGE_SIZE, procFlags);

    return (sum);
}

// used only for small struct copies
static void
bcopy_internal(const void *src, void *dst, uint32_t len)
{
    const char *s = src;
    char       *d = dst;
    uint32_t    idx = 0;

    while (idx < len)
    {
        d[idx] = s[idx];
        idx++;
    }
}

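/* Compile-time assertion: when e is false the typedef's array size is -1 and the build fails. */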
#define C_ASSERT(e) typedef char __C_ASSERT__[(e) ? 1 : -1]

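/*
 * Main restore routine, entered from the booter with the hibernation image header.
 * It copies the header, graphics info and crypt-wake variables into __HIB globals,
 * locates the preview page index, the page bitmap ("map") and the compressed page
 * data inside the image, removes the image's own pages from the free map, and then
 * unpacks every page. Pages whose destination would overwrite the image, the map or
 * the page index are parked on a copy list of scratch pages and copied into place
 * after the main pass. Finally it records checksums and statistics in the header and
 * jumps to the platform wake entry point.
 */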
long
hibernate_kernel_entrypoint(IOHibernateImageHeader * header,
                            void * p2, void * p3, void * p4)
{
    uint32_t idx;
    uint32_t * src;
    uint32_t * buffer;
    uint32_t * pageIndexSource;
    hibernate_page_list_t * map;
    uint32_t count;
    uint32_t ppnum;
    uint32_t page;
    uint32_t conflictCount;
    uint32_t compressedSize;
    uint32_t uncompressedPages;
    uint32_t copyPageListHead;
    uint32_t * copyPageList;
    uint32_t copyPageIndex;
    uint32_t sum;
    uint32_t nextFree;
    uint32_t lastImagePage;
    uint32_t lastMapPage;
    uint32_t lastPageIndexPage;

    C_ASSERT(sizeof(IOHibernateImageHeader) == 512);

    if ((kIOHibernateDebugRestoreLogs & gIOHibernateDebugFlags) && !debug_probe())
        gIOHibernateDebugFlags &= ~kIOHibernateDebugRestoreLogs;

    debug_code(kIOHibernateRestoreCodeImageStart, (uintptr_t) header);

    bcopy_internal(header,
                   gIOHibernateCurrentHeader,
                   sizeof(IOHibernateImageHeader));

    if (!p2)
    {
        count = header->graphicsInfoOffset;
        if (count)
            p2 = (void *)(((uintptr_t) header) - count);
    }
    if (p2)
        bcopy_internal(p2,
                       gIOHibernateGraphicsInfo,
                       sizeof(hibernate_graphics_t));
    else
        gIOHibernateGraphicsInfo->physicalAddress = gIOHibernateGraphicsInfo->depth = 0;

    if (!p3)
    {
        count = header->cryptVarsOffset;
        if (count)
            p3 = (void *)(((uintptr_t) header) - count);
    }
    if (p3)
        bcopy_internal(p3,
                       gIOHibernateCryptWakeVars,
                       sizeof(hibernate_cryptvars_t));

    src = (uint32_t *)
        (((uintptr_t) &header->fileExtentMap[0])
         + header->fileExtentMapSize
         + ptoa_32(header->restore1PageCount));

    if (header->previewSize)
    {
        pageIndexSource = src;
        map = (hibernate_page_list_t *)(((uintptr_t) pageIndexSource) + header->previewSize);
        src = (uint32_t *) (((uintptr_t) pageIndexSource) + header->previewPageListSize);
    }
    else
    {
        pageIndexSource = 0;
        map = (hibernate_page_list_t *) src;
        src = (uint32_t *) (((uintptr_t) map) + header->bitmapSize);
    }

    lastPageIndexPage = atop_32((uintptr_t) src);

    lastImagePage = atop_32(((uintptr_t) header) + header->image1Size);

    lastMapPage = atop_32(((uintptr_t) map) + header->bitmapSize);

    debug_code(kIOHibernateRestoreCodeImageEnd, ptoa_64(lastImagePage));
    debug_code(kIOHibernateRestoreCodePageIndexStart, (uintptr_t) pageIndexSource);
    debug_code(kIOHibernateRestoreCodePageIndexEnd, ptoa_64(lastPageIndexPage));
    debug_code(kIOHibernateRestoreCodeMapStart, (uintptr_t) map);
    debug_code(kIOHibernateRestoreCodeMapEnd, ptoa_64(lastMapPage));

    // knock all the image pages to be used out of free map
    for (ppnum = atop_32((uintptr_t) header); ppnum <= lastImagePage; ppnum++)
    {
        hibernate_page_bitset(map, FALSE, ppnum);
    }

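    // prime the free-page allocator (the first grab only advances nextFree),
    // then take a free page as the working buffer for decompression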
    nextFree = 0;
    hibernate_page_list_grab(map, &nextFree);
    buffer = (uint32_t *) (uintptr_t) ptoa_32(hibernate_page_list_grab(map, &nextFree));

    if (header->memoryMapSize && (count = header->memoryMapOffset))
    {
        p4 = (void *)(((uintptr_t) header) - count);
        gIOHibernateWakeMap = hibernate_page_list_grab(map, &nextFree);
        gIOHibernateWakeMapSize = header->memoryMapSize;
        debug_code(kIOHibernateRestoreCodeWakeMapSize, gIOHibernateWakeMapSize);
        if (gIOHibernateWakeMapSize > PAGE_SIZE)
            fatal();
        bcopy_internal(p4, (void *) (uintptr_t) ptoa_32(gIOHibernateWakeMap), gIOHibernateWakeMapSize);
    }
    else
        gIOHibernateWakeMapSize = 0;

    sum = gIOHibernateCurrentHeader->actualRestore1Sum;
    gIOHibernateCurrentHeader->diag[0] = (uint32_t)(uintptr_t) header;
    gIOHibernateCurrentHeader->diag[1] = sum;

    uncompressedPages = 0;
    conflictCount     = 0;
    copyPageListHead  = 0;
    copyPageList      = 0;
    copyPageIndex     = PAGE_SIZE >> 2;

    compressedSize    = PAGE_SIZE;

    while (1)
    {
        if (pageIndexSource)
        {
            ppnum = pageIndexSource[0];
            count = pageIndexSource[1];
            pageIndexSource += 2;
            if (!count)
            {
                pageIndexSource = 0;
                src = (uint32_t *) (((uintptr_t) map) + gIOHibernateCurrentHeader->bitmapSize);
                ppnum = src[0];
                count = src[1];
                src += 2;
            }
        }
        else
        {
            ppnum = src[0];
            count = src[1];
            if (!count)
                break;
            src += 2;
        }

        for (page = 0; page < count; page++, ppnum++)
        {
            uint32_t tag;
            int conflicts;

            if (!pageIndexSource)
            {
                tag = *src++;
                compressedSize = kIOHibernateTagLength & tag;
            }

            conflicts = (((ppnum >= atop_32((uintptr_t) map)) && (ppnum <= lastMapPage))
                      || ((ppnum >= atop_32((uintptr_t) src)) && (ppnum <= lastImagePage)));

            if (pageIndexSource)
                conflicts |= ((ppnum >= atop_32((uintptr_t) pageIndexSource)) && (ppnum <= lastPageIndexPage));

            if (!conflicts)
            {
                if (compressedSize)
                    sum += store_one_page(gIOHibernateCurrentHeader->processorFlags,
                                          src, compressedSize, buffer, ppnum);
                uncompressedPages++;
            }
            else
            {
                uint32_t   bufferPage;
                uint32_t * dst;

                // debug_code(kIOHibernateRestoreCodeConflictPage, ppnum);
                // debug_code(kIOHibernateRestoreCodeConflictSource, (uintptr_t) src);

                conflictCount++;

                // alloc new buffer page
                bufferPage = hibernate_page_list_grab(map, &nextFree);

                if (copyPageIndex > ((PAGE_SIZE >> 2) - 3))
                {
                    // alloc new copy list page
                    uint32_t pageListPage = hibernate_page_list_grab(map, &nextFree);
                    // link to current
                    if (copyPageList)
                        copyPageList[1] = pageListPage;
                    else
                        copyPageListHead = pageListPage;
                    copyPageList = (uint32_t *) (uintptr_t) ptoa_32(pageListPage);
                    copyPageList[1] = 0;
                    copyPageIndex = 2;
                }

                copyPageList[copyPageIndex++] = ppnum;
                copyPageList[copyPageIndex++] = bufferPage;
                copyPageList[copyPageIndex++] = compressedSize;
                copyPageList[0] = copyPageIndex;

                dst = (uint32_t *) (uintptr_t) ptoa_32(bufferPage);
                for (idx = 0; idx < ((compressedSize + 3) >> 2); idx++)
                    dst[idx] = src[idx];
            }
            src += ((compressedSize + 3) >> 2);
        }
    }

    // -- copy back conflicts

    copyPageList = (uint32_t *)(uintptr_t) ptoa_32(copyPageListHead);
    while (copyPageList)
    {
        for (copyPageIndex = 2; copyPageIndex < copyPageList[0]; copyPageIndex += 3)
        {
            ppnum = copyPageList[copyPageIndex + 0];
            src = (uint32_t *) (uintptr_t) ptoa_32(copyPageList[copyPageIndex + 1]);
            compressedSize = copyPageList[copyPageIndex + 2];

            sum += store_one_page(gIOHibernateCurrentHeader->processorFlags,
                                  src, compressedSize, buffer, ppnum);
            uncompressedPages++;
        }
        copyPageList = (uint32_t *) (uintptr_t) ptoa_32(copyPageList[1]);
    }

    // -- image has been destroyed...

    gIOHibernateCurrentHeader->actualImage1Sum         = sum;
    gIOHibernateCurrentHeader->actualUncompressedPages = uncompressedPages;
    gIOHibernateCurrentHeader->conflictCount           = conflictCount;
    gIOHibernateCurrentHeader->nextFree                = nextFree;

    gIOHibernateState = kIOHibernateStateWakingFromHibernate;

#if CONFIG_SLEEP
#if defined(__ppc__)
    typedef void (*ResetProc)(void);
    ResetProc proc;
    proc = (ResetProc) 0x100;
    __asm__ volatile("ori 0, 0, 0" : : );
    proc();
#elif defined(__i386__) || defined(__x86_64__)
    typedef void (*ResetProc)(void);
    ResetProc proc;
    proc = (ResetProc) acpi_wake_prot_entry;
    // flush caches
    __asm__("wbinvd");
    proc();
#else
// implement me
#endif
#endif

    return -1;
}