]>
Commit | Line | Data |
---|---|---|
3a60a9f5 A |
1 | /* |
2 | * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
37839358 A |
6 | * The contents of this file constitute Original Code as defined in and |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
3a60a9f5 | 11 | * |
37839358 A |
12 | * This Original Code and all software distributed under the License are |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
3a60a9f5 A |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
37839358 A |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
3a60a9f5 A |
19 | * |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | ||
23 | #include <ppc/asm.h> | |
24 | #include <ppc/proc_reg.h> | |
25 | #include <assym.s> | |
26 | ||
/*
 This code is linked into the kernel but is part of the "__HIB" section, which means
 it's used by code running in the special context of restoring the kernel text and data
 from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything
 it calls or references (i.e., hibernate_restore_phys_page())
 needs to be careful to only touch memory also in the "__HIB" section.
*/
34 | ||
/*
 C prototype of the routine defined below:

 void
 hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags);
*/
39 | ||
40 | .align 5 | |
41 | .globl EXT(hibernate_restore_phys_page) | |
42 | .globl EXT(hibernate_machine_entrypoint) | |
43 | ||
44 | LEXT(hibernate_restore_phys_page) | |
45 | ||
46 | andi. r0, r8, pf64Bit | |
47 | bne hibernate_restore_phys_page64 | |
48 | ||
49 | srwi r10,r7,5 ; r10 <- 32-byte chunks to xfer | |
50 | mtctr r10 | |
51 | cmpwi r4, 0 | |
52 | beq hibernate_restore_phys_pageFlush | |
53 | ||
54 | hibernate_restore_phys_pageCopy: | |
55 | lwz r0,0(r4) | |
56 | lwz r2,4(r4) | |
57 | lwz r7,8(r4) | |
58 | lwz r8,12(r4) | |
59 | lwz r9,16(r4) | |
60 | lwz r10,20(r4) | |
61 | lwz r11,24(r4) | |
62 | lwz r12,28(r4) | |
63 | ||
64 | dcbz 0,r6 ; avoid prefetch of next cache line | |
65 | stw r0,0(r6) | |
66 | stw r2,4(r6) | |
67 | stw r7,8(r6) | |
68 | stw r8,12(r6) | |
69 | stw r9,16(r6) | |
70 | stw r10,20(r6) | |
71 | stw r11,24(r6) | |
72 | stw r12,28(r6) | |
73 | ||
74 | dcbf 0, r6 | |
75 | sync | |
76 | icbi 0, r6 | |
77 | isync | |
78 | sync | |
79 | ||
80 | addi r4,r4,32 | |
81 | addi r6,r6,32 | |
82 | ||
83 | bdnz hibernate_restore_phys_pageCopy ; loop if more chunks | |
84 | blr | |
85 | ||
86 | hibernate_restore_phys_pageFlush: | |
87 | dcbf 0, r6 | |
88 | sync | |
89 | icbi 0, r6 | |
90 | isync | |
91 | sync | |
92 | ||
93 | addi r6,r6,32 | |
94 | bdnz hibernate_restore_phys_pageFlush ; loop if more chunks | |
95 | blr | |
96 | ||
97 | ||
98 | hibernate_restore_phys_page64: | |
99 | rlwinm r3,r3,0,1,0 ; Duplicate high half of long long paddr into top of reg | |
100 | rlwimi r3,r4,0,0,31 ; Combine bottom of long long to full 64-bits | |
101 | rlwinm r4,r5,0,1,0 ; Duplicate high half of long long paddr into top of reg | |
102 | rlwimi r4,r6,0,0,31 ; Combine bottom of long long to full 64-bits | |
103 | ||
104 | mfmsr r9 ; Get the MSR | |
105 | li r0,1 ; Note - we use this in a couple places below | |
106 | rldimi r9,r0,63,MSR_SF_BIT ; set SF on in MSR we will copy with | |
107 | mtmsrd r9 ; turn 64-bit addressing on | |
108 | isync ; wait for it to happen | |
109 | ||
110 | srwi r10,r7,7 ; r10 <- 128-byte chunks to xfer | |
111 | mtctr r10 | |
112 | cmpdi r3, 0 | |
113 | beq hibernate_restore_phys_page64Flush | |
114 | ||
115 | hibernate_restore_phys_page64Copy: | |
116 | ld r0,0(r3) | |
117 | ld r2,8(r3) | |
118 | ld r7,16(r3) | |
119 | ld r8,24(r3) | |
120 | ld r9,32(r3) | |
121 | ld r10,40(r3) | |
122 | ld r11,48(r3) | |
123 | ld r12,56(r3) | |
124 | ||
125 | dcbz128 0,r4 ; avoid prefetch of next cache line | |
126 | std r0,0(r4) | |
127 | std r2,8(r4) | |
128 | std r7,16(r4) | |
129 | std r8,24(r4) | |
130 | std r9,32(r4) | |
131 | std r10,40(r4) | |
132 | std r11,48(r4) | |
133 | std r12,56(r4) | |
134 | ||
135 | ld r0,64(r3) ; load 2nd half of chunk | |
136 | ld r2,72(r3) | |
137 | ld r7,80(r3) | |
138 | ld r8,88(r3) | |
139 | ld r9,96(r3) | |
140 | ld r10,104(r3) | |
141 | ld r11,112(r3) | |
142 | ld r12,120(r3) | |
143 | ||
144 | std r0,64(r4) | |
145 | std r2,72(r4) | |
146 | std r7,80(r4) | |
147 | std r8,88(r4) | |
148 | std r9,96(r4) | |
149 | std r10,104(r4) | |
150 | std r11,112(r4) | |
151 | std r12,120(r4) | |
152 | ||
153 | dcbf 0, r4 | |
154 | sync | |
155 | icbi 0, r4 | |
156 | isync | |
157 | sync | |
158 | ||
159 | addi r3,r3,128 | |
160 | addi r4,r4,128 | |
161 | ||
162 | bdnz hibernate_restore_phys_page64Copy ; loop if more chunks | |
163 | ||
164 | ||
165 | hibernate_restore_phys_page64Done: | |
166 | mfmsr r9 ; Get the MSR we used to copy | |
167 | rldicl r9,r9,0,MSR_SF_BIT+1 ; clear SF | |
168 | mtmsrd r9 ; turn 64-bit mode off | |
169 | isync ; wait for it to happen | |
170 | blr | |
171 | ||
172 | hibernate_restore_phys_page64Flush: | |
173 | dcbf 0, r4 | |
174 | sync | |
175 | icbi 0, r4 | |
176 | isync | |
177 | sync | |
178 | ||
179 | addi r4,r4,128 | |
180 | ||
181 | bdnz hibernate_restore_phys_page64Flush ; loop if more chunks | |
182 | b hibernate_restore_phys_page64Done | |
183 | ||
184 | LEXT(hibernate_machine_entrypoint) | |
185 | b EXT(hibernate_kernel_entrypoint) | |
186 |