/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <platforms.h>
#include <mach_kdp.h>
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/asm.h>
#include <i386/cpuid.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/trap.h>
#include <assym.s>
#include <mach/exception_types.h>
#include <config_dtrace.h>

#define _ARCH_I386_ASM_HELP_H_		/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>

#include <i386/mp.h>

/*
 * Fault recovery.
 */

#ifdef __MACHO__
#define RECOVERY_SECTION	.section __VECTORS, __recover
#else
#define RECOVERY_SECTION	.text
#endif

#define RECOVER_TABLE_START	\
	.align 3		; \
	.globl	EXT(recover_table) ;\
LEXT(recover_table)		;\
	.text

#define RECOVER(addr)		\
	.align 3;		\
	.quad	9f		;\
	.quad	addr		;\
	.text			;\
9:

#define RECOVER_TABLE_END	\
	.align 3		;\
	.globl	EXT(recover_table_end) ;\
LEXT(recover_table_end)		;\
	.text

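/*
 * How the table is consumed (summary; the consumer is not defined in this
 * file): each RECOVER() invocation emits a (fault address, recovery address)
 * pair into the recovery section between recover_table and recover_table_end.
 * The kernel trap handler is expected to scan those pairs when a fault is
 * taken in kernel mode and, on a match with the faulting instruction pointer,
 * resume execution at the recovery address.  A sketch of the entry layout
 * implied by the .quad pairs above:
 *
 *	struct recovery {
 *		uint64_t	fault_addr;	// instruction that may fault
 *		uint64_t	recover_addr;	// where to resume if it does
 *	};
 */
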
/*
 * Allocate the recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_START

/*
 * int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi)
 */
ENTRY(rdmsr_carefully)
	movl	%edi, %ecx		/* msr number */
	movq	%rdx, %rdi		/* save hi pointer; rdmsr overwrites %edx */
	RECOVERY_SECTION
	RECOVER(rdmsr_fail)
	rdmsr
	movl	%eax, (%rsi)		/* store low 32 bits */
	movl	%edx, (%rdi)		/* store high 32 bits */
	xorl	%eax, %eax		/* return 0 on success */
	ret

rdmsr_fail:
	movq	$1, %rax		/* return 1 if the rdmsr faulted */
	ret

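/*
 * Illustrative C-level use of the prototype above (sketch only; the real
 * callers live elsewhere in the kernel):
 *
 *	uint32_t lo, hi;
 *	if (rdmsr_carefully(msr, &lo, &hi) != 0)
 *		return;		// rdmsr faulted, e.g. MSR not implemented
 */
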
/*
 * int rdmsr64_carefully(uint32_t msr, uint64_t *val);
 */

ENTRY(rdmsr64_carefully)
	movl	%edi, %ecx		/* msr number */
	RECOVERY_SECTION
	RECOVER(rdmsr64_carefully_fail)
	rdmsr
	movl	%eax, (%rsi)		/* store low 32 bits */
	movl	%edx, 4(%rsi)		/* store high 32 bits */
	xorl	%eax, %eax		/* return 0 on success */
	ret
rdmsr64_carefully_fail:
	movl	$1, %eax		/* return 1 if the rdmsr faulted */
	ret

/*
 * int wrmsr_carefully(uint32_t msr, uint64_t val);
 */

ENTRY(wrmsr_carefully)
	movl	%edi, %ecx		/* msr number */
	movl	%esi, %eax		/* low 32 bits of value */
	shr	$32, %rsi
	movl	%esi, %edx		/* high 32 bits of value */
	RECOVERY_SECTION
	RECOVER(wrmsr_fail)
	wrmsr
	xorl	%eax, %eax		/* return 0 on success */
	ret
wrmsr_fail:
	movl	$1, %eax		/* return 1 if the wrmsr faulted */
	ret

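/*
 * Illustrative C-level use of the two routines above (sketch only):
 *
 *	uint64_t val;
 *	if (rdmsr64_carefully(msr, &val) == 0) {
 *		// read succeeded; write a value back, again tolerating
 *		// a fault if the MSR rejects the write
 *		if (wrmsr_carefully(msr, val) != 0)
 *			return;		// wrmsr faulted
 *	}
 */
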
/*
 * thread_bootstrap_return falls through into thread_exception_return.
 */
	.globl	EXT(thread_exception_return)
	.globl	EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	call	EXT(dtrace_thread_bootstrap)
#endif

LEXT(thread_exception_return)
	cli
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)

/*
 * Copyin/out from user/kernel address space.
 *	rdi:	source address
 *	rsi:	destination address
 *	rdx:	byte count
 */
Entry(_bcopy)
	// TODO not pop regs; movq; think about 32 bit or 64 bit byte count
	xchgq	%rdi, %rsi		/* source %rsi, dest %rdi */

	cld				/* count up */
	movl	%edx, %ecx		/* move by quadwords first */
	shrl	$3, %ecx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsq				/* move quadwords */

	movl	%edx, %ecx		/* now move remaining bytes */
	andl	$7, %ecx
	RECOVERY_SECTION
	RECOVER(_bcopy_fail)
	rep
	movsb

	xorl	%eax, %eax		/* return 0 for success */
	ret				/* and return */

_bcopy_fail:
	movl	$(EFAULT), %eax		/* return error for failure */
	ret

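/*
 * _bcopy is intended to be reached via the kernel's copyin/copyout path;
 * a direct call would look like the sketch below (illustrative only, the
 * return value is either 0 or EFAULT as set above):
 *
 *	int err = _bcopy(src, dst, nbytes);
 *	if (err != 0)
 *		return err;		// a page fault occurred mid-copy
 */
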
/*
 * pmap_safe_read: fetch one 64-bit word, tolerating a fault.
 *	rdi:	source address
 *	rsi:	destination address
 * Returns 1 on success, 0 if the read faulted.
 */
Entry(pmap_safe_read)
	RECOVERY_SECTION
	RECOVER(_pmap_safe_read_fail)
	movq	(%rdi), %rcx		/* attempt the read */
	mov	%rcx, (%rsi)		/* store the value for the caller */
	mov	$1, %eax		/* success */
	ret
_pmap_safe_read_fail:
	xor	%eax, %eax		/* read faulted: return 0 */
	ret

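/*
 * Illustrative C-level use, assuming a prototype shaped like
 * "int pmap_safe_read(uint64_t *src, uint64_t *dst)" (hypothetical; the real
 * declaration lives in the pmap headers, only the register behavior above is
 * authoritative):
 *
 *	uint64_t word;
 *	if (!pmap_safe_read(candidate, &word))
 *		return;			// address was not readable
 */
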

/*
 * Copyin string from user/kern address space.
 *	rdi:	source address
 *	rsi:	destination address
 *	rdx:	max byte count
 *	rcx:	actual byte count (OUT)
 */
Entry(_bcopystr)
	pushq	%rdi
	xchgq	%rdi, %rsi		/* source %rsi, dest %rdi */

	xorl	%eax, %eax		/* set to 0 here so that high 24 bits */
					/* are 0 for the testl against 0 */
2:
	RECOVERY_SECTION
	RECOVER(_bcopystr_fail)		/* copy bytes... */
	movb	(%rsi), %al
	incq	%rsi
	testq	%rdi, %rdi		/* if the destination pointer is NULL... */
	jz	3f			/* ...skip the store */
	movb	%al, (%rdi)		/* copy the byte */
	incq	%rdi
3:
	testl	%eax, %eax		/* did we just stuff the 0-byte? */
	jz	4f			/* yes, return 0 already in %eax */
	decq	%rdx			/* decrement #bytes left in buffer */
	jnz	2b			/* buffer not full, copy another byte */
	movl	$(ENAMETOOLONG), %eax	/* buffer full, no \0: ENAMETOOLONG */
4:
	cmpq	$0, %rcx		/* get OUT len ptr */
	jz	_bcopystr_ret		/* if null, just return */
	subq	(%rsp), %rsi
	movq	%rsi, (%rcx)		/* else set OUT arg to xfer len */
	popq	%rdi			/* restore registers */
_bcopystr_ret:
	ret				/* and return */

_bcopystr_fail:
	popq	%rdi			/* restore registers */
	movl	$(EFAULT), %eax		/* return error for failure */
	ret

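/*
 * Illustrative C-level use of _bcopystr (sketch only): the routine copies at
 * most "max" bytes including the terminating NUL, returns 0 on success,
 * ENAMETOOLONG if no NUL was found within "max" bytes, or EFAULT on a fault,
 * and stores the number of bytes transferred (including the NUL) through the
 * OUT pointer when it is non-NULL:
 *
 *	vm_size_t done;
 *	int err = _bcopystr(user_src, kbuf, sizeof(kbuf), &done);
 *	if (err != 0)
 *		return err;
 */
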
/*
 * Done with recovery table.
 */
	RECOVERY_SECTION
	RECOVER_TABLE_END