/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <assym.s>

/*
This code is linked into the kernel, but it is part of the "__HIB" section, which means
it is used by code running in the special context of restoring the kernel text and data
from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything
it calls or references (e.g., hibernate_restore_phys_page())
must be careful to touch only memory that is also in the "__HIB" section.
*/

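/*
Accordingly, the routines below use no stack and no writable globals; they work
entirely in registers on the physical addresses supplied by the caller.
*/
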
/*
void
hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags);
*/

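/*
As used by the code below:
  src       - physical source address; a src of zero requests a flush-only pass
              over dst with no copy.
  dst       - physical destination address.
  len       - byte count; it is simply shifted into a chunk count, so it is
              expected to be a multiple of the cache line size (32 bytes on
              32-bit processors, 128 bytes on 64-bit processors).
  procFlags - per-processor feature flags; only pf64Bit is examined, to choose
              between the 32-bit and 64-bit copy paths.

An illustrative call from C (hypothetical values, not taken from this file):

    hibernate_restore_phys_page(src_paddr, dst_paddr, page_size, procFlags);
*/
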
        .align  5
        .globl  EXT(hibernate_restore_phys_page)
        .globl  EXT(hibernate_machine_entrypoint)

LEXT(hibernate_restore_phys_page)

        andi.   r0, r8, pf64Bit                         ; check for a 64-bit processor
        bne     hibernate_restore_phys_page64           ; yes, use the 128-byte-line path

        srwi    r10,r7,5                                ; r10 <- 32-byte chunks to xfer
        mtctr   r10                                     ; set up the loop count
        cmpwi   r4, 0                                   ; src == 0 selects the flush-only path
        beq     hibernate_restore_phys_pageFlush

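; Copy loop: moves one 32-byte cache line per iteration.  dcbz establishes the
; destination line in the data cache without fetching its old contents; dcbf,
; sync, icbi and isync then push the line out to memory and toss any stale copy
; in the instruction cache, since restored pages may contain kernel text.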
hibernate_restore_phys_pageCopy:
        lwz     r0,0(r4)
        lwz     r2,4(r4)
        lwz     r7,8(r4)
        lwz     r8,12(r4)
        lwz     r9,16(r4)
        lwz     r10,20(r4)
        lwz     r11,24(r4)
        lwz     r12,28(r4)

        dcbz    0,r6                                    ; avoid prefetch of next cache line
        stw     r0,0(r6)
        stw     r2,4(r6)
        stw     r7,8(r6)
        stw     r8,12(r6)
        stw     r9,16(r6)
        stw     r10,20(r6)
        stw     r11,24(r6)
        stw     r12,28(r6)

        dcbf    0, r6
        sync
        icbi    0, r6
        isync
        sync

        addi    r4,r4,32
        addi    r6,r6,32

        bdnz    hibernate_restore_phys_pageCopy         ; loop if more chunks
        blr

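; Flush-only path (src == 0): no data is copied; each destination line is simply
; flushed from the data cache and invalidated in the instruction cache.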
hibernate_restore_phys_pageFlush:
        dcbf    0, r6
        sync
        icbi    0, r6
        isync
        sync

        addi    r6,r6,32
        bdnz    hibernate_restore_phys_pageFlush        ; loop if more chunks
        blr


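; 64-bit processor path.  The 32-bit PowerPC ABI passes each 64-bit argument in
; a register pair (src in r3/r4, dst in r5/r6); these are merged into single
; 64-bit registers, 64-bit addressing is enabled via MSR[SF], and the copy then
; proceeds 128 bytes at a time to match the 128-byte cache line of these parts.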
hibernate_restore_phys_page64:
        rlwinm  r3,r3,0,1,0                             ; Duplicate high half of long long paddr into top of reg
        rlwimi  r3,r4,0,0,31                            ; Combine bottom of long long to full 64-bits
        rlwinm  r4,r5,0,1,0                             ; Duplicate high half of long long paddr into top of reg
        rlwimi  r4,r6,0,0,31                            ; Combine bottom of long long to full 64-bits

        mfmsr   r9                                      ; Get the MSR
        li      r0,1                                    ; used to set SF below
        rldimi  r9,r0,63,MSR_SF_BIT                     ; set SF on in MSR we will copy with
        mtmsrd  r9                                      ; turn 64-bit addressing on
        isync                                           ; wait for it to happen

        srwi    r10,r7,7                                ; r10 <- 128-byte chunks to xfer
        mtctr   r10                                     ; set up the loop count
        cmpdi   r3, 0                                   ; src == 0 selects the flush-only path
        beq     hibernate_restore_phys_page64Flush

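; Copy loop: moves one 128-byte cache line per iteration.  dcbz128 establishes
; the full destination line without fetching it; the two 64-byte halves are
; then copied with doubleword loads and stores, and the line is flushed and its
; instruction cache copy invalidated as in the 32-bit path.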
hibernate_restore_phys_page64Copy:
        ld      r0,0(r3)
        ld      r2,8(r3)
        ld      r7,16(r3)
        ld      r8,24(r3)
        ld      r9,32(r3)
        ld      r10,40(r3)
        ld      r11,48(r3)
        ld      r12,56(r3)

        dcbz128 0,r4                                    ; avoid prefetch of next cache line
        std     r0,0(r4)
        std     r2,8(r4)
        std     r7,16(r4)
        std     r8,24(r4)
        std     r9,32(r4)
        std     r10,40(r4)
        std     r11,48(r4)
        std     r12,56(r4)

        ld      r0,64(r3)                               ; load 2nd half of chunk
        ld      r2,72(r3)
        ld      r7,80(r3)
        ld      r8,88(r3)
        ld      r9,96(r3)
        ld      r10,104(r3)
        ld      r11,112(r3)
        ld      r12,120(r3)

        std     r0,64(r4)
        std     r2,72(r4)
        std     r7,80(r4)
        std     r8,88(r4)
        std     r9,96(r4)
        std     r10,104(r4)
        std     r11,112(r4)
        std     r12,120(r4)

        dcbf    0, r4
        sync
        icbi    0, r4
        isync
        sync

        addi    r3,r3,128
        addi    r4,r4,128

        bdnz    hibernate_restore_phys_page64Copy       ; loop if more chunks


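; Common exit for the 64-bit copy and flush paths: fall through (or branch) here
; to drop back out of 64-bit addressing before returning to the caller.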
hibernate_restore_phys_page64Done:
        mfmsr   r9                                      ; Get the MSR we used to copy
        rldicl  r9,r9,0,MSR_SF_BIT+1                    ; clear SF
        mtmsrd  r9                                      ; turn 64-bit mode off
        isync                                           ; wait for it to happen
        blr

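; Flush-only path (src == 0), 128 bytes per line: no data is copied; each
; destination line is flushed from the data cache and invalidated in the
; instruction cache.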
hibernate_restore_phys_page64Flush:
        dcbf    0, r4
        sync
        icbi    0, r4
        isync
        sync

        addi    r4,r4,128

        bdnz    hibernate_restore_phys_page64Flush      ; loop if more chunks
        b       hibernate_restore_phys_page64Done

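; hibernate_machine_entrypoint simply tail-branches to the C routine
; hibernate_kernel_entrypoint(), which (per the note at the top of this file)
; must likewise confine itself to memory in the __HIB section.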
LEXT(hibernate_machine_entrypoint)
        b       EXT(hibernate_kernel_entrypoint)
