2 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 #include <sys/param.h>
31 #include <mach/mach_types.h>
32 #include <mach/vm_param.h>
33 #include <IOKit/IOHibernatePrivate.h>
34 #include <IOKit/IOLib.h>
35 #include <pexpert/boot.h>
36 #include <libkern/libkern.h>
38 #include "IOHibernateInternal.h"
40 #include <machine/pal_hibernate.h>
43 * This code is linked into the kernel but part of the "__HIB" section, which means
44 * its used by code running in the special context of restoring the kernel text and data
45 * from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything
46 * it calls or references needs to be careful to only touch memory also in the "__HIB" section.
49 #define HIB_ROUND_PAGE(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
51 uint32_t gIOHibernateState
;
53 uint32_t gIOHibernateDebugFlags
;
55 static IOHibernateImageHeader _hibernateHeader
;
56 IOHibernateImageHeader
* gIOHibernateCurrentHeader
= &_hibernateHeader
;
58 ppnum_t gIOHibernateHandoffPages
[64];
59 const uint32_t gIOHibernateHandoffPageCount
= sizeof(gIOHibernateHandoffPages
)
60 / sizeof(gIOHibernateHandoffPages
[0]);
63 void hibprintf(const char *fmt
, ...);
65 #define hibprintf(x...)
70 #if defined(__i386__) || defined(__x86_64__)
71 extern void acpi_wake_prot_entry(void);
75 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
77 #if defined(__i386__) || defined(__x86_64__)
78 #include <i386/proc_reg.h>
81 static inline uint64_t
87 #endif /* defined(__i386__) || defined(__x86_64__) */
89 #if defined(__i386__) || defined(__x86_64__)
93 #include <architecture/i386/pio.h>
95 /* standard port addresses */
97 COM1_PORT_ADDR
= 0x3f8,
98 COM2_PORT_ADDR
= 0x2f8
101 /* UART register offsets */
103 UART_RBR
= 0, /* receive buffer Register (R) */
104 UART_THR
= 0, /* transmit holding register (W) */
105 UART_DLL
= 0, /* DLAB = 1, divisor latch (LSB) */
106 UART_IER
= 1, /* interrupt enable register */
107 UART_DLM
= 1, /* DLAB = 1, divisor latch (MSB) */
108 UART_IIR
= 2, /* interrupt ident register (R) */
109 UART_FCR
= 2, /* fifo control register (W) */
110 UART_LCR
= 3, /* line control register */
111 UART_MCR
= 4, /* modem control register */
112 UART_LSR
= 5, /* line status register */
113 UART_MSR
= 6, /* modem status register */
114 UART_SCR
= 7 /* scratch register */
118 UART_LCR_8BITS
= 0x03,
125 UART_MCR_OUT1
= 0x04,
126 UART_MCR_OUT2
= 0x08,
139 hib_uart_putc(char c
)
141 while (!(inb(COM1_PORT_ADDR
+ UART_LSR
) & UART_LSR_THRE
)) {
143 outb(COM1_PORT_ADDR
+ UART_THR
, c
);
149 /* Verify that the Scratch Register is accessible */
150 outb(COM1_PORT_ADDR
+ UART_SCR
, 0x5a);
151 if (inb(COM1_PORT_ADDR
+ UART_SCR
) != 0x5a) {
154 outb(COM1_PORT_ADDR
+ UART_SCR
, 0xa5);
155 if (inb(COM1_PORT_ADDR
+ UART_SCR
) != 0xa5) {
162 #elif defined(__arm64__)
166 #include <pexpert/arm/dockchannel.h>
167 #include <pexpert/arm/S3cUART.h>
168 #define dockchannel_uart_base gHibernateGlobals.dockChannelRegBase
169 #define uart_base gHibernateGlobals.hibUartRegBase
172 hib_uart_putc(char c
)
174 if (dockchannel_uart_base
) {
175 while ((rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL
) & gHibernateGlobals
.dockChannelWstatMask
) == 0) {
177 rDOCKCHANNELS_DEV_WDATA1(DOCKCHANNEL_UART_CHANNEL
) = c
;
180 while ((rUTRSTAT0
& 0x04) == 0) {
181 // wait for space in the uart
194 #endif /* defined(__arm64__) */
196 #if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
/*
 * Write a NUL-terminated string to the debug UART, one byte at a time.
 * No newline translation is performed.
 */
static void
uart_putstring(const char *str)
{
	while (*str) {
		hib_uart_putc(*str++);
	}
}
/*
 * Write an unsigned 64-bit value to the debug UART in decimal,
 * suppressing leading zeros (a plain 0 still prints one '0').
 */
static void
uart_putdec(uint64_t num)
{
	bool leading = true;

	/* 10^19 is the largest power of ten that fits in a uint64_t */
	for (uint64_t pos = 10000000000000000000ull; pos != 0; pos /= 10) {
		char c = (char) (num / pos);
		if (c) {
			leading = false;
			num -= c * pos;
		} else if (leading && (pos != 1)) {
			/* skip leading zeros, but always emit the ones digit */
			continue;
		}
		hib_uart_putc(c + '0');
	}
}
/*
 * Write an unsigned 64-bit value to the debug UART in lowercase hex,
 * suppressing leading zero nibbles (a plain 0 still prints one '0').
 */
static void
uart_puthex(uint64_t num)
{
	char c;
	int bit;
	bool leading = true;

	for (bit = 60; bit >= 0; bit -= 4) {
		c = 0xf & (num >> bit);
		if (c) {
			leading = false;
		} else if (leading && bit) {
			/* skip leading zeros, but always emit the final nibble */
			continue;
		}
		if (c <= 9) {
			c += '0';
		} else {
			c += 'a' - 10;
		}
		hib_uart_putc(c);
	}
}
246 debug_code(uint32_t code
, uint64_t value
)
251 if (!(kIOHibernateDebugRestoreLogs
& gIOHibernateDebugFlags
)) {
255 for (bit
= 24; bit
>= 0; bit
-= 8) {
256 c
= 0xFF & (code
>> bit
);
267 #endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */
270 #define debug_probe() (false)
271 #define debug_code(c, v) {}
275 kIOHibernateRestoreCodeImageStart
= 'imgS',
276 kIOHibernateRestoreCodeImageEnd
= 'imgE',
277 kIOHibernateRestoreCodePageIndexStart
= 'pgiS',
278 kIOHibernateRestoreCodePageIndexEnd
= 'pgiE',
279 kIOHibernateRestoreCodeMapStart
= 'mapS',
280 kIOHibernateRestoreCodeMapEnd
= 'mapE',
281 kIOHibernateRestoreCodeWakeMapSize
= 'wkms',
282 kIOHibernateRestoreCodeConflictPage
= 'cfpg',
283 kIOHibernateRestoreCodeConflictSource
= 'cfsr',
284 kIOHibernateRestoreCodeNoMemory
= 'nomm',
285 kIOHibernateRestoreCodeTag
= 'tag ',
286 kIOHibernateRestoreCodeSignature
= 'sign',
287 kIOHibernateRestoreCodeMapVirt
= 'mapV',
288 kIOHibernateRestoreCodeHandoffPages
= 'hand',
289 kIOHibernateRestoreCodeHandoffCount
= 'hndc',
292 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Assertion failure handler for the __HIB restore context: prints
 * "file:line Assertion failed: expr" on the debug UART, then halts
 * (after attempting a platform reset on x86).
 */
void
__hib_assert(const char *file, int line, const char *expression)
{
	uart_putstring(file);
	uart_putstring(":");
	uart_putdec(line);
	uart_putstring(" Assertion failed: ");
	uart_putstring(expression);
	uart_putstring("\n");
#if defined(__i386__) || defined(__x86_64__)
	/* NOTE(review): reconstructed — presumably a CF9 full reset here; confirm against upstream */
	outb(0xcf9, 0x06);
#endif /* defined(__i386__) || defined(__x86_64__) */
	/* nothing sane to return to; spin forever */
	while (true) {
	}
}
311 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
314 hibernate_sum_page(uint8_t *buf
, uint32_t ppnum
)
316 return ((uint32_t *)buf
)[((PAGE_SIZE
>> 2) - 1) & ppnum
];
319 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
321 static hibernate_bitmap_t
*
322 hibernate_page_bitmap(hibernate_page_list_t
* list
, uint32_t page
)
325 hibernate_bitmap_t
* bitmap
= &list
->bank_bitmap
[0];
327 for (bank
= 0; bank
< list
->bank_count
; bank
++) {
328 if ((page
>= bitmap
->first_page
) && (page
<= bitmap
->last_page
)) {
331 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
333 if (bank
== list
->bank_count
) {
341 hibernate_page_bitmap_pin(hibernate_page_list_t
* list
, uint32_t * pPage
)
343 uint32_t bank
, page
= *pPage
;
344 hibernate_bitmap_t
* bitmap
= &list
->bank_bitmap
[0];
346 for (bank
= 0; bank
< list
->bank_count
; bank
++) {
347 if (page
<= bitmap
->first_page
) {
348 *pPage
= bitmap
->first_page
;
351 if (page
<= bitmap
->last_page
) {
354 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
356 if (bank
== list
->bank_count
) {
364 hibernate_page_bitset(hibernate_page_list_t
* list
, boolean_t set
, uint32_t page
)
366 hibernate_bitmap_t
* bitmap
;
368 bitmap
= hibernate_page_bitmap(list
, page
);
370 page
-= bitmap
->first_page
;
372 bitmap
->bitmap
[page
>> 5] |= (0x80000000 >> (page
& 31));
374 //setbit(page - bitmap->first_page, (int *) &bitmap->bitmap[0]);
376 bitmap
->bitmap
[page
>> 5] &= ~(0x80000000 >> (page
& 31));
378 //clrbit(page - bitmap->first_page, (int *) &bitmap->bitmap[0]);
383 hibernate_page_bittst(hibernate_page_list_t
* list
, uint32_t page
)
385 boolean_t result
= TRUE
;
386 hibernate_bitmap_t
* bitmap
;
388 bitmap
= hibernate_page_bitmap(list
, page
);
390 page
-= bitmap
->first_page
;
391 result
= (0 != (bitmap
->bitmap
[page
>> 5] & (0x80000000 >> (page
& 31))));
396 // count bits clear or set (set == TRUE) starting at page.
398 hibernate_page_bitmap_count(hibernate_bitmap_t
* bitmap
, uint32_t set
, uint32_t page
)
400 uint32_t index
, bit
, bits
;
405 index
= (page
- bitmap
->first_page
) >> 5;
406 bit
= (page
- bitmap
->first_page
) & 31;
408 bits
= bitmap
->bitmap
[index
];
412 bits
= (bits
<< bit
);
414 count
+= __builtin_clz(bits
);
417 while (++index
< bitmap
->bitmapwords
) {
418 bits
= bitmap
->bitmap
[index
];
423 count
+= __builtin_clz(bits
);
430 if ((page
+ count
) > (bitmap
->last_page
+ 1)) {
431 count
= (bitmap
->last_page
+ 1) - page
;
438 hibernate_page_list_grab(hibernate_page_list_t
* list
, uint32_t * pNextFree
)
440 uint32_t nextFree
= *pNextFree
;
441 uint32_t nextFreeInBank
;
442 hibernate_bitmap_t
* bitmap
;
444 nextFreeInBank
= nextFree
+ 1;
445 while ((bitmap
= hibernate_page_bitmap_pin(list
, &nextFreeInBank
))) {
446 nextFreeInBank
+= hibernate_page_bitmap_count(bitmap
, FALSE
, nextFreeInBank
);
447 if (nextFreeInBank
<= bitmap
->last_page
) {
448 *pNextFree
= nextFreeInBank
;
454 debug_code(kIOHibernateRestoreCodeNoMemory
, nextFree
);
462 #pragma mark hibernate_scratch
465 hibernate_scratch_init(hibernate_scratch_t
* scratch
, hibernate_page_list_t
* map
, uint32_t * nextFree
)
467 // initialize "scratch" so we can start writing into it
468 __nosan_bzero(scratch
, sizeof(*scratch
));
470 scratch
->nextFree
= nextFree
;
471 scratch
->headPage
= hibernate_page_list_grab(scratch
->map
, scratch
->nextFree
);
472 scratch
->curPage
= (uint8_t *)pal_hib_map(SCRATCH_AREA
, ptoa_64(scratch
->headPage
));
476 hibernate_scratch_start_read(hibernate_scratch_t
* scratch
)
478 // re-initialize "scratch" so we can start reading from it it
479 hibernate_scratch_t result
;
480 __nosan_bzero(&result
, sizeof(result
));
481 result
.headPage
= scratch
->headPage
;
482 result
.curPage
= (uint8_t *)pal_hib_map(SCRATCH_AREA
, ptoa_64(result
.headPage
));
483 result
.totalLength
= scratch
->curPos
;
488 hibernate_scratch_io(hibernate_scratch_t
* scratch
, void * buffer
, size_t size
, bool write
)
490 // copy data to or from "scratch" based on the value of "write"
492 // check that we are in bounds
493 HIB_ASSERT(scratch
->curPos
+ size
<= scratch
->totalLength
);
496 // if we got to the end of a page (leaving room for our chain pointer), advance to the next page
497 if (scratch
->curPagePos
== PAGE_SIZE
- sizeof(ppnum_t
)) {
498 ppnum_t
*nextPage
= (ppnum_t
*)(scratch
->curPage
+ scratch
->curPagePos
);
500 // allocate the next page and store the page number
501 *nextPage
= hibernate_page_list_grab(scratch
->map
, scratch
->nextFree
);
503 scratch
->curPage
= (uint8_t *)pal_hib_map(SCRATCH_AREA
, ptoa_64(*nextPage
));
504 scratch
->curPagePos
= 0;
506 size_t curPageRemaining
= PAGE_SIZE
- sizeof(ppnum_t
) - scratch
->curPagePos
;
507 size_t toCopy
= MIN(size
, curPageRemaining
);
509 // copy from "buffer" into "scratch"
510 __nosan_memcpy(scratch
->curPage
+ scratch
->curPagePos
, buffer
, toCopy
);
512 // copy from "scratch" into "buffer"
513 __nosan_memcpy(buffer
, scratch
->curPage
+ scratch
->curPagePos
, toCopy
);
515 scratch
->curPos
+= toCopy
;
516 scratch
->curPagePos
+= toCopy
;
523 hibernate_scratch_write(hibernate_scratch_t
* scratch
, const void * buffer
, size_t size
)
525 hibernate_scratch_io(scratch
, (void *)(uintptr_t)buffer
, size
, true);
529 hibernate_scratch_read(hibernate_scratch_t
* scratch
, void * buffer
, size_t size
)
531 hibernate_scratch_io(scratch
, buffer
, size
, false);
537 store_one_page(uint32_t procFlags
, uint32_t * src
, uint32_t compressedSize
,
538 uint8_t * scratch
, uint32_t ppnum
)
540 uint64_t dst
= ptoa_64(ppnum
);
542 if (compressedSize
!= PAGE_SIZE
) {
543 dst
= pal_hib_map(DEST_COPY_AREA
, dst
);
544 if (compressedSize
!= 4) {
545 pal_hib_decompress_page(src
, (void *)dst
, scratch
, compressedSize
);
551 d
= (uint32_t *)(uintptr_t)dst
;
553 __nosan_bzero((void *) dst
, PAGE_SIZE
);
555 for (i
= 0; i
< (PAGE_SIZE
/ sizeof(int32_t)); i
++) {
561 dst
= hibernate_restore_phys_page((uint64_t) (uintptr_t) src
, dst
, PAGE_SIZE
, procFlags
);
564 return hibernate_sum_page((uint8_t *)(uintptr_t)dst
, ppnum
);
568 hibernate_reserve_restore_pages(uint64_t headerPhys
, IOHibernateImageHeader
*header
, hibernate_page_list_t
* map
)
570 uint32_t lastImagePage
= atop_64_ppnum(HIB_ROUND_PAGE(headerPhys
+ header
->image1Size
));
571 uint32_t handoffPages
= header
->handoffPages
;
572 uint32_t handoffPageCount
= header
->handoffPageCount
;
575 // knock all the image pages to be used out of free map
576 for (ppnum
= atop_64_ppnum(headerPhys
); ppnum
<= lastImagePage
; ppnum
++) {
577 hibernate_page_bitset(map
, FALSE
, ppnum
);
579 // knock all the handoff pages to be used out of free map
580 for (ppnum
= handoffPages
; ppnum
< (handoffPages
+ handoffPageCount
); ppnum
++) {
581 hibernate_page_bitset(map
, FALSE
, ppnum
);
586 hibernate_kernel_entrypoint(uint32_t p1
,
587 uint32_t p2
, uint32_t p3
, uint32_t p4
)
592 uint64_t imageReadPhys
;
593 uint64_t pageIndexPhys
;
594 uint32_t * pageIndexSource
;
595 hibernate_page_list_t
* map
;
596 pal_hib_restore_stage_t stage
;
600 uint32_t conflictCount
;
601 uint32_t compressedSize
;
602 uint32_t uncompressedPages
;
607 uint32_t lastImagePage
;
608 uint32_t lastMapPage
;
609 uint32_t lastPageIndexPage
;
610 uint32_t handoffPages
;
611 uint32_t handoffPageCount
;
612 uint8_t * wkdmScratch
;
613 hibernate_scratch_t conflictList
;
614 pal_hib_ctx_t palHibCtx
;
617 timeStart
= rdtsc64();
619 #if !defined(__arm64__)
620 static_assert(sizeof(IOHibernateImageHeader
) == 512);
621 #endif /* !defined(__arm64__) */
623 headerPhys
= ptoa_64(p1
);
625 if ((kIOHibernateDebugRestoreLogs
& gIOHibernateDebugFlags
) && !debug_probe()) {
626 gIOHibernateDebugFlags
&= ~kIOHibernateDebugRestoreLogs
;
629 debug_code(kIOHibernateRestoreCodeImageStart
, headerPhys
);
631 __nosan_memcpy(gIOHibernateCurrentHeader
,
632 (void *) pal_hib_map(IMAGE_AREA
, headerPhys
),
633 sizeof(IOHibernateImageHeader
));
635 debug_code(kIOHibernateRestoreCodeSignature
, gIOHibernateCurrentHeader
->signature
);
638 + (offsetof(IOHibernateImageHeader
, fileExtentMap
)
639 + gIOHibernateCurrentHeader
->fileExtentMapSize
640 + ptoa_32(gIOHibernateCurrentHeader
->restore1PageCount
)
641 + gIOHibernateCurrentHeader
->previewSize
);
643 map
= (hibernate_page_list_t
*) pal_hib_map(BITMAP_AREA
, mapPhys
);
646 // make the rest of the image is safe for atop()
648 if (os_add_overflow(headerPhys
, gIOHibernateCurrentHeader
->image1Size
, &imageEnd
) || (imageEnd
> IO_MAX_PAGE_ADDR
)) {
652 lastImagePage
= atop_64_ppnum(HIB_ROUND_PAGE(headerPhys
+ gIOHibernateCurrentHeader
->image1Size
));
653 lastMapPage
= atop_64_ppnum(HIB_ROUND_PAGE(mapPhys
+ gIOHibernateCurrentHeader
->bitmapSize
));
655 handoffPages
= gIOHibernateCurrentHeader
->handoffPages
;
656 handoffPageCount
= gIOHibernateCurrentHeader
->handoffPageCount
;
658 debug_code(kIOHibernateRestoreCodeImageEnd
, ptoa_64(lastImagePage
));
659 debug_code(kIOHibernateRestoreCodeMapStart
, mapPhys
);
660 debug_code(kIOHibernateRestoreCodeMapEnd
, ptoa_64(lastMapPage
));
662 debug_code(kIOHibernateRestoreCodeMapVirt
, (uintptr_t) map
);
663 debug_code(kIOHibernateRestoreCodeHandoffPages
, ptoa_64(handoffPages
));
664 debug_code(kIOHibernateRestoreCodeHandoffCount
, handoffPageCount
);
666 #if defined(__arm64__)
667 // on arm64 we've already done this in pal_hib_resume_tramp
668 #else /* !defined(__arm64__) */
669 hibernate_reserve_restore_pages(headerPhys
, gIOHibernateCurrentHeader
, map
);
670 #endif /* !defined(__arm64__) */
673 hibernate_page_list_grab(map
, &nextFree
);
675 pal_hib_resume_init(&palHibCtx
, map
, &nextFree
);
677 // allocate scratch space for wkdm
678 wkdmScratch
= (uint8_t *)pal_hib_map(WKDM_AREA
, ptoa_64(hibernate_page_list_grab(map
, &nextFree
)));
680 sum
= gIOHibernateCurrentHeader
->actualRestore1Sum
;
681 gIOHibernateCurrentHeader
->diag
[0] = atop_64_ppnum(headerPhys
);
682 gIOHibernateCurrentHeader
->diag
[1] = sum
;
683 gIOHibernateCurrentHeader
->trampolineTime
= 0;
685 uncompressedPages
= 0;
688 compressedSize
= PAGE_SIZE
;
689 stage
= pal_hib_restore_stage_handoff_data
;
693 if (gIOHibernateCurrentHeader
->previewSize
) {
694 pageIndexPhys
= headerPhys
695 + (offsetof(IOHibernateImageHeader
, fileExtentMap
)
696 + gIOHibernateCurrentHeader
->fileExtentMapSize
697 + ptoa_32(gIOHibernateCurrentHeader
->restore1PageCount
));
698 imageReadPhys
= (pageIndexPhys
+ gIOHibernateCurrentHeader
->previewPageListSize
);
699 lastPageIndexPage
= atop_64_ppnum(HIB_ROUND_PAGE(imageReadPhys
));
700 pageIndexSource
= (uint32_t *) pal_hib_map(IMAGE2_AREA
, pageIndexPhys
);
703 lastPageIndexPage
= 0;
704 imageReadPhys
= (mapPhys
+ gIOHibernateCurrentHeader
->bitmapSize
);
707 debug_code(kIOHibernateRestoreCodePageIndexStart
, pageIndexPhys
);
708 debug_code(kIOHibernateRestoreCodePageIndexEnd
, ptoa_64(lastPageIndexPage
));
712 case pal_hib_restore_stage_handoff_data
:
714 count
= srcPhys
? 0 : handoffPageCount
;
718 if (count
> gIOHibernateHandoffPageCount
) {
719 count
= gIOHibernateHandoffPageCount
;
721 srcPhys
= ptoa_64(handoffPages
);
724 case pal_hib_restore_stage_preview_pages
:
725 // copy pageIndexSource pages == preview image data
727 if (!pageIndexPhys
) {
730 srcPhys
= imageReadPhys
;
732 ppnum
= pageIndexSource
[0];
733 count
= pageIndexSource
[1];
734 pageIndexSource
+= 2;
735 pageIndexPhys
+= 2 * sizeof(pageIndexSource
[0]);
736 imageReadPhys
= srcPhys
;
739 case pal_hib_restore_stage_dram_pages
:
742 srcPhys
= (mapPhys
+ gIOHibernateCurrentHeader
->bitmapSize
);
744 src
= (uint32_t *) pal_hib_map(IMAGE_AREA
, srcPhys
);
747 srcPhys
+= 2 * sizeof(*src
);
748 imageReadPhys
= srcPhys
;
754 if (stage
== pal_hib_restore_stage_dram_pages
) {
762 for (page
= 0; page
< count
; page
++, ppnum
++) {
766 src
= (uint32_t *) pal_hib_map(IMAGE_AREA
, srcPhys
);
768 if (stage
== pal_hib_restore_stage_handoff_data
) {
769 ppnum
= gIOHibernateHandoffPages
[page
];
770 } else if (stage
== pal_hib_restore_stage_dram_pages
) {
772 HIB_ASSERT((tag
& ~kIOHibernateTagLength
) == kIOHibernateTagSignature
);
773 // debug_code(kIOHibernateRestoreCodeTag, (uintptr_t) tag);
774 srcPhys
+= sizeof(*src
);
775 compressedSize
= kIOHibernateTagLength
& tag
;
776 HIB_ASSERT(compressedSize
<= PAGE_SIZE
);
779 conflicts
= (ppnum
>= atop_64_ppnum(mapPhys
)) && (ppnum
<= lastMapPage
);
781 conflicts
|= ((ppnum
>= atop_64_ppnum(imageReadPhys
)) && (ppnum
<= lastImagePage
));
783 if (stage
>= pal_hib_restore_stage_handoff_data
) {
784 conflicts
|= ((ppnum
>= atop_64_ppnum(srcPhys
)) && (ppnum
<= (handoffPages
+ handoffPageCount
- 1)));
787 if (stage
>= pal_hib_restore_stage_preview_pages
) {
788 conflicts
|= ((ppnum
>= atop_64_ppnum(pageIndexPhys
)) && (ppnum
<= lastPageIndexPage
));
792 pageSum
= store_one_page(gIOHibernateCurrentHeader
->processorFlags
,
793 src
, compressedSize
, wkdmScratch
, ppnum
);
794 if (stage
!= pal_hib_restore_stage_handoff_data
) {
799 // debug_code(kIOHibernateRestoreCodeConflictPage, ppnum);
800 // debug_code(kIOHibernateRestoreCodeConflictSource, (uintptr_t) src);
802 if (!conflictList
.headPage
) {
803 hibernate_scratch_init(&conflictList
, map
, &nextFree
);
805 hibernate_scratch_write(&conflictList
, &ppnum
, sizeof(ppnum
));
806 hibernate_scratch_write(&conflictList
, &compressedSize
, sizeof(compressedSize
));
807 hibernate_scratch_write(&conflictList
, &stage
, sizeof(stage
));
808 hibernate_scratch_write(&conflictList
, src
, compressedSize
);
810 srcPhys
+= ((compressedSize
+ 3) & ~3);
811 src
+= ((compressedSize
+ 3) >> 2);
812 pal_hib_restored_page(&palHibCtx
, stage
, ppnum
);
816 /* src points to the last page restored, so we need to skip over that */
817 pal_hib_restore_pal_state(src
);
819 // -- copy back conflicts
822 src
= (uint32_t *)pal_hib_map(COPY_PAGE_AREA
, ptoa_64(hibernate_page_list_grab(map
, &nextFree
)));
823 hibernate_scratch_start_read(&conflictList
);
824 for (uint32_t i
= 0; i
< conflictCount
; i
++) {
825 hibernate_scratch_read(&conflictList
, &ppnum
, sizeof(ppnum
));
826 hibernate_scratch_read(&conflictList
, &compressedSize
, sizeof(compressedSize
));
827 hibernate_scratch_read(&conflictList
, &stage
, sizeof(stage
));
828 HIB_ASSERT(compressedSize
<= PAGE_SIZE
);
829 hibernate_scratch_read(&conflictList
, src
, compressedSize
);
830 pageSum
= store_one_page(gIOHibernateCurrentHeader
->processorFlags
,
831 src
, compressedSize
, wkdmScratch
, ppnum
);
832 if (stage
!= pal_hib_restore_stage_handoff_data
) {
839 pal_hib_patchup(&palHibCtx
);
841 // -- image has been destroyed...
843 gIOHibernateCurrentHeader
->actualImage1Sum
= sum
;
844 gIOHibernateCurrentHeader
->actualUncompressedPages
= uncompressedPages
;
845 gIOHibernateCurrentHeader
->conflictCount
= conflictCount
;
846 gIOHibernateCurrentHeader
->nextFree
= nextFree
;
848 gIOHibernateState
= kIOHibernateStateWakingFromHibernate
;
850 gIOHibernateCurrentHeader
->trampolineTime
= ((uint32_t) (((rdtsc64() - timeStart
)) >> 8));
852 // debug_code('done', 0);
855 #if defined(__i386__) || defined(__x86_64__)
856 typedef void (*ResetProc
)(void);
858 proc
= HIB_ENTRYPOINT
;
863 #elif defined(__arm64__)
864 // return control to hibernate_machine_entrypoint
873 /* standalone printf implementation */
875 * Copyright (c) 1986, 1988, 1991, 1993
876 * The Regents of the University of California. All rights reserved.
877 * (c) UNIX System Laboratories, Inc.
878 * All or some portions of this file are derived from material licensed
879 * to the University of California by American Telephone and Telegraph
880 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
881 * the permission of UNIX System Laboratories, Inc.
883 * Redistribution and use in source and binary forms, with or without
884 * modification, are permitted provided that the following conditions
886 * 1. Redistributions of source code must retain the above copyright
887 * notice, this list of conditions and the following disclaimer.
888 * 2. Redistributions in binary form must reproduce the above copyright
889 * notice, this list of conditions and the following disclaimer in the
890 * documentation and/or other materials provided with the distribution.
891 * 4. Neither the name of the University nor the names of its contributors
892 * may be used to endorse or promote products derived from this software
893 * without specific prior written permission.
895 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
896 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
897 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
898 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
899 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
900 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
901 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
902 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
903 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
904 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
907 * @(#)subr_prf.c 8.3 (Berkeley) 1/21/94
910 typedef long ptrdiff_t;
911 char const hibhex2ascii_data
[] = "0123456789abcdefghijklmnopqrstuvwxyz";
912 #define hibhex2ascii(hex) (hibhex2ascii_data[hex])
913 #define toupper(c) ((c) - 0x20 * (((c) >= 'a') && ((c) <= 'z')))
915 hibstrlen(const char *s
)
924 /* Max number conversion buffer length: a u_quad_t in base 2, plus NUL byte. */
925 #define MAXNBUF (sizeof(intmax_t) * NBBY + 1)
928 * Put a NUL-terminated ASCII number (base <= 36) in a buffer in reverse
929 * order; return an optional length and a pointer to the last character
930 * written in the buffer (i.e., the first character of the string).
931 * The buffer pointed to by `nbuf' must have length >= MAXNBUF.
934 ksprintn(char *nbuf
, uintmax_t num
, int base
, int *lenp
, int upper
)
938 /* Truncate so we don't call umoddi3, which isn't in __HIB */
939 #if !defined(__LP64__)
940 uint32_t num2
= (uint32_t) num
;
942 uintmax_t num2
= num
;
948 c
= hibhex2ascii(num2
% base
);
949 *++p
= upper
? toupper(c
) : c
;
950 } while (num2
/= base
);
952 *lenp
= (int)(p
- nbuf
);
958 * Scaled down version of printf(3).
960 * Two additional formats:
962 * The format %b is supported to decode error registers.
965 * printf("reg=%b\n", regval, "<base><arg>*");
967 * where <base> is the output base expressed as a control character, e.g.
968 * \10 gives octal; \20 gives hex. Each arg is a sequence of characters,
969 * the first of which gives the bit number to be inspected (origin 1), and
970 * the next characters (up to a control character, i.e. a character <= 32),
971 * give the name of the register. Thus:
973 * kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE");
975 * would produce output:
977 * reg=3<BITTWO,BITONE>
979 * XXX: %D -- Hexdump, takes pointer and separator string:
980 * ("%6D", ptr, ":") -> XX:XX:XX:XX:XX:XX
981 * ("%*D", len, ptr, " " -> XX XX XX XX ...
984 hibkvprintf(char const *fmt
, void (*func
)(int, void*), void *arg
, int radix
, va_list ap
)
986 #define PCHAR(c) {int cc=(c); if (func) (*func)(cc,arg); else *d++ = (char)cc; retval++; }
989 const char *p
, *percent
, *q
;
993 int base
, lflag
, qflag
, tmp
, width
, ladjust
, sharpflag
, neg
, sign
, dot
;
994 int cflag
, hflag
, jflag
, tflag
, zflag
;
997 int stop
= 0, retval
= 0;
1007 fmt
= "(fmt null)\n";
1010 if (radix
< 2 || radix
> 36) {
1017 while ((ch
= (u_char
) * fmt
++) != '%' || stop
) {
1024 qflag
= 0; lflag
= 0; ladjust
= 0; sharpflag
= 0; neg
= 0;
1025 sign
= 0; dot
= 0; dwidth
= 0; upper
= 0;
1026 cflag
= 0; hflag
= 0; jflag
= 0; tflag
= 0; zflag
= 0;
1027 reswitch
: switch (ch
= (u_char
) * fmt
++) {
1045 width
= va_arg(ap
, int);
1051 dwidth
= va_arg(ap
, int);
1059 case '1': case '2': case '3': case '4':
1060 case '5': case '6': case '7': case '8': case '9':
1061 for (n
= 0;; ++fmt
) {
1062 n
= n
* 10 + ch
- '0';
1064 if (ch
< '0' || ch
> '9') {
1075 num
= (u_int
)va_arg(ap
, int);
1076 p
= va_arg(ap
, char *);
1077 for (q
= ksprintn(nbuf
, num
, *p
++, NULL
, 0); *q
;) {
1085 for (tmp
= 0; *p
;) {
1087 if (num
& (1 << (n
- 1))) {
1088 PCHAR(tmp
? ',' : '<');
1089 for (; (n
= *p
) > ' '; ++p
) {
1094 for (; *p
> ' '; ++p
) {
1104 PCHAR(va_arg(ap
, int));
1107 up
= va_arg(ap
, u_char
*);
1108 p
= va_arg(ap
, char *);
1113 PCHAR(hibhex2ascii(*up
>> 4));
1114 PCHAR(hibhex2ascii(*up
& 0x0f));
1117 for (q
= p
; *q
; q
++) {
1149 *(va_arg(ap
, intmax_t *)) = retval
;
1151 *(va_arg(ap
, quad_t
*)) = retval
;
1153 *(va_arg(ap
, long *)) = retval
;
1155 *(va_arg(ap
, size_t *)) = retval
;
1157 *(va_arg(ap
, short *)) = (short)retval
;
1159 *(va_arg(ap
, char *)) = (char)retval
;
1161 *(va_arg(ap
, int *)) = retval
;
1169 sharpflag
= (width
== 0);
1171 num
= (uintptr_t)va_arg(ap
, void *);
1183 p
= va_arg(ap
, char *);
1188 n
= (typeof(n
))hibstrlen(p
);
1190 for (n
= 0; n
< dwidth
&& p
[n
]; n
++) {
1197 if (!ladjust
&& width
> 0) {
1205 if (ladjust
&& width
> 0) {
1232 num
= va_arg(ap
, uintmax_t);
1234 num
= va_arg(ap
, u_quad_t
);
1236 num
= va_arg(ap
, ptrdiff_t);
1238 num
= va_arg(ap
, u_long
);
1240 num
= va_arg(ap
, size_t);
1242 num
= (u_short
)va_arg(ap
, int);
1244 num
= (u_char
)va_arg(ap
, int);
1246 num
= va_arg(ap
, u_int
);
1251 num
= va_arg(ap
, intmax_t);
1253 num
= va_arg(ap
, quad_t
);
1255 num
= va_arg(ap
, ptrdiff_t);
1257 num
= va_arg(ap
, long);
1259 num
= va_arg(ap
, ssize_t
);
1261 num
= (short)va_arg(ap
, int);
1263 num
= (char)va_arg(ap
, int);
1265 num
= va_arg(ap
, int);
1268 if (sign
&& (intmax_t)num
< 0) {
1270 num
= -(intmax_t)num
;
1272 p
= ksprintn(nbuf
, num
, base
, &tmp
, upper
);
1273 if (sharpflag
&& num
!= 0) {
1276 } else if (base
== 16) {
1284 if (!ladjust
&& padc
!= '0' && width
1285 && (width
-= tmp
) > 0) {
1293 if (sharpflag
&& num
!= 0) {
1296 } else if (base
== 16) {
1301 if (!ladjust
&& width
&& (width
-= tmp
) > 0) {
1311 if (ladjust
&& width
&& (width
-= tmp
) > 0) {
1319 while (percent
< fmt
) {
1323 * Since we ignore a formatting argument it is no
1324 * longer safe to obey the remaining formatting
1325 * arguments as the arguments will no longer match
/*
 * Output callback for hibkvprintf(): forwards one character to the debug
 * UART. "arg" is the opaque context pointer required by the callback
 * signature and is unused here.
 */
static void
putchar(int c, void *arg)
{
	(void)arg;
	hib_uart_putc((char)c);
}
1344 hibprintf(const char *fmt
, ...)
1346 /* http://www.pagetable.com/?p=298 */
1350 hibkvprintf(fmt
, putchar
, NULL
, 10, ap
);
1353 #endif /* CONFIG_DEBUG */
1355 #if __arm64__ && HIBERNATE_TRAP_HANDLER
1357 hibernate_trap(__unused arm_context_t
*context
, __unused
uint64_t trap_addr
)
1358 __attribute__((optnone
))
1361 gIOHibernateDebugFlags
|= kIOHibernateDebugRestoreLogs
;
1363 // dump some interesting registers
1364 for (int i
= 0; i
< 29; i
++) {
1365 debug_code(' r00' + (i
/ 10 * 256) + (i
% 10), context
->ss
.ss_64
.x
[i
]);
1367 debug_code(' fp', context
->ss
.ss_64
.fp
);
1368 debug_code(' lr', context
->ss
.ss_64
.lr
);
1369 debug_code(' sp', context
->ss
.ss_64
.sp
);
1370 debug_code(' pc', context
->ss
.ss_64
.pc
);
1371 debug_code('cpsr', context
->ss
.ss_64
.cpsr
);
1372 debug_code('asps', context
->ss
.ss_64
.aspsr
);
1373 debug_code(' far', context
->ss
.ss_64
.far
);
1374 debug_code(' esr', context
->ss
.ss_64
.esr
);
1376 // dump the trap_addr
1377 debug_code('trap', trap_addr
);
1379 // dump the kernel slide
1380 debug_code('slid', _hibernateHeader
.kernVirtSlide
);
1387 #endif /* __arm64__ && HIBERNATE_TRAP_HANDLER */