2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
24 #include <ppc/proc_reg.h>
28 This code is linked into the kernel but part of the "__HIB" section, which means
29 it's used by code running in the special context of restoring the kernel text and data
30 from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything
31 it calls or references (i.e. hibernate_restore_phys_page())
32 needs to be careful to only touch memory also in the "__HIB" section.
37 hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags);
41 .globl EXT(hibernate_restore_phys_page)
42 .globl EXT(hibernate_machine_entrypoint)
44 LEXT(hibernate_restore_phys_page)
47 bne hibernate_restore_phys_page64
49 srwi r10,r7,5 ; r10 <- 32-byte chunks to xfer
52 beq hibernate_restore_phys_pageFlush
54 hibernate_restore_phys_pageCopy:
64 dcbz 0,r6 ; avoid prefetch of next cache line
83 bdnz hibernate_restore_phys_pageCopy ; loop if more chunks
86 hibernate_restore_phys_pageFlush:
94 bdnz hibernate_restore_phys_pageFlush ; loop if more chunks
98 hibernate_restore_phys_page64:
99 rlwinm r3,r3,0,1,0 ; Duplicate high half of long long paddr into top of reg
100 rlwimi r3,r4,0,0,31 ; Combine bottom of long long to full 64-bits
101 rlwinm r4,r5,0,1,0 ; Duplicate high half of long long paddr into top of reg
102 rlwimi r4,r6,0,0,31 ; Combine bottom of long long to full 64-bits
104 mfmsr r9 ; Get the MSR
105 li r0,1 ; Note - we use this in a couple places below
106 rldimi r9,r0,63,MSR_SF_BIT ; set SF on in MSR we will copy with
107 mtmsrd r9 ; turn 64-bit addressing on
108 isync ; wait for it to happen
110 srwi r10,r7,7 ; r10 <- 128-byte chunks to xfer
113 beq hibernate_restore_phys_page64Flush
115 hibernate_restore_phys_page64Copy:
125 dcbz128 0,r4 ; avoid prefetch of next cache line
135 ld r0,64(r3) ; load 2nd half of chunk
162 bdnz hibernate_restore_phys_page64Copy ; loop if more chunks
165 hibernate_restore_phys_page64Done:
166 mfmsr r9 ; Get the MSR we used to copy
167 rldicl r9,r9,0,MSR_SF_BIT+1 ; clear SF
168 mtmsrd r9 ; turn 64-bit mode off
169 isync ; wait for it to happen
172 hibernate_restore_phys_page64Flush:
181 bdnz hibernate_restore_phys_page64Flush ; loop if more chunks
182 b hibernate_restore_phys_page64Done
184 LEXT(hibernate_machine_entrypoint)
185 b EXT(hibernate_kernel_entrypoint)