/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <i386/vmx.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

#if 0

#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#define KDEBUG 1

#endif

/* XXX - should be gone from here */
extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t phys_page_exists(ppnum_t);
extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void pmap_set_reference(ppnum_t pn);
extern void mapping_set_mod(ppnum_t pa);
extern void mapping_set_ref(ppnum_t pn);

extern void ovbcopy(const char	*from,
		    char	*to,
		    vm_size_t	nbytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);


#define value_64bit(value)	((value) & 0xFFFFFFFF00000000LL)
#define low32(x)		((unsigned int)((x) & 0x00000000FFFFFFFFLL))

#define JOE_DEBUG 0

void
bzero_phys_nc(
	addr64_t src64,
	uint32_t bytes)
{
	bzero_phys(src64, bytes);
}

void
bzero_phys(
	addr64_t src64,
	uint32_t bytes)
{
	mapwindow_t *map;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));

	bzero((void *)((uintptr_t)map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)), bytes);

	pmap_put_mapwindow(map);

	mp_enable_preemption();
}


/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
	addr64_t src64,
	addr64_t dst64,
	vm_size_t bytes)
{
	mapwindow_t *src_map, *dst_map;

	/* ensure we stay within a page */
	if ( ((((uint32_t)src64 & (NBPG-1)) + bytes) > NBPG) || ((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) ) {
		panic("bcopy_phys alignment");
	}
	mp_disable_preemption();

	src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
	dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) |
						  INTEL_PTE_REF | INTEL_PTE_MOD));

	bcopy((void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)),
	      (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK)), bytes);

	pmap_put_mapwindow(src_map);
	pmap_put_mapwindow(dst_map);

	mp_enable_preemption();
}
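
/*
 * Illustrative sketch, not part of the build: bcopy_phys() panics if either
 * range crosses a page boundary, so a caller moving more than a page of
 * physical memory has to split the transfer itself.  The helper below is
 * hypothetical and only shows one way to do that splitting.
 */
#if 0
static void
copy_phys_chunked_example(addr64_t src64, addr64_t dst64, vm_size_t bytes)
{
	while (bytes > 0) {
		/* room left in the current source and destination pages */
		vm_size_t src_room = NBPG - ((uint32_t)src64 & (NBPG - 1));
		vm_size_t dst_room = NBPG - ((uint32_t)dst64 & (NBPG - 1));
		vm_size_t chunk = (src_room < dst_room) ? src_room : dst_room;

		if (chunk > bytes)
			chunk = bytes;

		bcopy_phys(src64, dst64, chunk);	/* never crosses a page */

		src64 += chunk;
		dst64 += chunk;
		bytes -= chunk;
	}
}
#endif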

/*
 * allow a function to get a quick virtual mapping of a physical page
 */

int
apply_func_phys(
	addr64_t dst64,
	vm_size_t bytes,
	int (*func)(void * buffer, vm_size_t bytes, void * arg),
	void * arg)
{
	mapwindow_t *dst_map;
	int rc = -1;

	/* ensure we stay within a page */
	if ( ((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) ) {
		panic("apply_func_phys alignment");
	}
	mp_disable_preemption();

	dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) |
						  INTEL_PTE_REF | INTEL_PTE_MOD));

	rc = func((void *)((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK)), bytes, arg);

	pmap_put_mapwindow(dst_map);

	mp_enable_preemption();

	return rc;
}
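
/*
 * Illustrative sketch, not part of the build: apply_func_phys() hands the
 * callback a kernel-virtual window onto the physical range, so arbitrary
 * work can be done on a page without setting up a permanent mapping.  Both
 * helpers below are hypothetical; as with bcopy_phys(), the range must not
 * cross a page boundary.
 */
#if 0
static int
sum_bytes_func(void *buffer, vm_size_t bytes, void *arg)
{
	unsigned char	*p = (unsigned char *)buffer;
	unsigned int	*sum = (unsigned int *)arg;

	while (bytes-- > 0)
		*sum += *p++;
	return 0;
}

static unsigned int
sum_phys_range_example(addr64_t pa, vm_size_t bytes)
{
	unsigned int sum = 0;

	(void) apply_func_phys(pa, bytes, sum_bytes_func, &sum);
	return sum;
}
#endif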

/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
	const char	*from,
	char		*to,
	vm_size_t	bytes)		/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op*/
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}
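
/*
 * Illustrative sketch, not part of the build: when the destination overlaps
 * and sits above the source, ovbcopy() copies right-to-left, which a plain
 * left-to-right bcopy would get wrong.
 */
#if 0
static void
ovbcopy_shift_example(void)
{
	char buf[8] = "abcdefg";

	/* to > from and the ranges overlap, so the copy runs right-to-left;
	 * buf ends up as "aabcdef" instead of having 'a' smeared across it. */
	ovbcopy(buf, buf + 1, 6);
}
#endif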


/*
 * Read data from a physical address.
 */


static unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	mapwindow_t *map;
	unsigned int result;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));

	switch (size) {
		unsigned char s1;
		unsigned short s2;
	case 1:
		s1 = *(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
		result = s1;
		break;
	case 2:
		s2 = *(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
		result = s2;
		break;
	case 4:
	default:
		result = *(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
		break;
	}
	pmap_put_mapwindow(map);

	mp_enable_preemption();

	return result;
}

static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
	mapwindow_t *map;
	unsigned long long result;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));

	result = *(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));

	pmap_put_mapwindow(map);

	mp_enable_preemption();

	return result;
}

unsigned int ml_phys_read(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
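
/*
 * Illustrative sketch, not part of the build: the byte/half/word readers are
 * just different access widths onto the same physical memory.  On ordinary
 * RAM (little-endian x86) four byte reads assemble to the same value as one
 * word read; device registers with read side effects are another matter.
 */
#if 0
static unsigned int
read_word_bytewise_example(addr64_t pa)
{
	unsigned int	val = 0;
	int		i;

	for (i = 0; i < 4; i++)
		val |= ml_phys_read_byte_64(pa + i) << (8 * i);

	/* for aligned RAM this equals ml_phys_read_64(pa) */
	return val;
}
#endif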



/*
 * Write data to a physical address.
 */

static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
	mapwindow_t *map;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
					      INTEL_PTE_REF | INTEL_PTE_MOD));

	switch (size) {
	case 1:
		*(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned char)data;
		break;
	case 2:
		*(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned short)data;
		break;
	case 4:
	default:
		*(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (uint32_t)data;
		break;
	}
	pmap_put_mapwindow(map);

	mp_enable_preemption();
}

static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
	mapwindow_t *map;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
					      INTEL_PTE_REF | INTEL_PTE_MOD));

	*(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = data;

	pmap_put_mapwindow(map);

	mp_enable_preemption();
}



void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
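
/*
 * Illustrative sketch, not part of the build: a 64-bit store can be expressed
 * as two 32-bit stores laid out little-endian, which is what
 * ml_phys_write_double_64() accomplishes in a single access.  The helper is
 * hypothetical.
 */
#if 0
static void
write_double_split_example(addr64_t pa, unsigned long long data)
{
	ml_phys_write_64(pa,     (unsigned int)data);		/* low word  */
	ml_phys_write_64(pa + 4, (unsigned int)(data >> 32));	/* high word */
}
#endif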


/* PCI config cycle probing
 *
 *
 *  Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4)
		return FALSE;

	*val = ml_phys_read(paddr);

	return TRUE;
}

/*
 *  Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr64, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4)
		return FALSE;

	*val = ml_phys_read_64((pmap_paddr_t)paddr64);
	return TRUE;
}
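
/*
 * Illustrative sketch, not part of the build: ml_probe_read() returns FALSE
 * instead of reading when the 4-byte access would cross a page boundary, so
 * callers must check the result before trusting *val.  The probe routine and
 * address below are hypothetical.
 */
#if 0
static void
probe_example(vm_offset_t candidate_reg)
{
	unsigned int value;

	if (ml_probe_read(candidate_reg, &value))
		kprintf("probe 0x%x -> 0x%x\n", (unsigned int)candidate_reg, value);
	else
		kprintf("probe 0x%x skipped (crosses a page)\n", (unsigned int)candidate_reg);
}
#endif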


int bcmp(
	const void	*pa,
	const void	*pb,
	size_t		len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return (int)len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return (0);
}

/*
 * Abstract:
 * strlen returns the number of characters in "string" preceding
 * the terminating null character.
 */

size_t
strlen(
	register const char *string)
{
	register const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}

uint32_t
hw_compare_and_store(uint32_t oldval, uint32_t newval, volatile uint32_t *dest)
{
	return OSCompareAndSwap((UInt32)oldval,
				(UInt32)newval,
				(volatile UInt32 *)dest);
}
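
/*
 * Illustrative sketch, not part of the build: the usual compare-and-swap
 * retry loop.  hw_compare_and_store() succeeds (returns non-zero) only if
 * *dest still holds oldval, so the update is recomputed and retried whenever
 * another CPU got there first.  The helper is hypothetical.
 */
#if 0
static void
atomic_add_example(volatile uint32_t *counter, uint32_t delta)
{
	uint32_t oldval, newval;

	do {
		oldval = *counter;
		newval = oldval + delta;
	} while (!hw_compare_and_store(oldval, newval, counter));
}
#endif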

#if MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
	__unused natural_t	*buf,
	__unused vm_size_t	callstack_max)
{
}

#endif	/* MACH_ASSERT */

void fillPage(ppnum_t pa, unsigned int fill)
{
	mapwindow_t	*map;
	pmap_paddr_t	src;
	int		i;
	int		cnt = PAGE_SIZE / sizeof(unsigned int);
	unsigned int	*addr;

	mp_disable_preemption();

	src = i386_ptob(pa);
	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
					      INTEL_PTE_REF | INTEL_PTE_MOD));

	for (i = 0, addr = (unsigned int *)map->prv_CADDR; i < cnt; i++)
		*addr++ = fill;

	pmap_put_mapwindow(map);

	mp_enable_preemption();
}

static inline void __sfence(void)
{
	__asm__ volatile("sfence");
}
static inline void __mfence(void)
{
	__asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
	__asm__ volatile("wbinvd");
}
static inline void __clflush(void *ptr)
{
	__asm__ volatile("clflush (%0)" : : "r" (ptr));
}

void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	mapwindow_t *map;
	uint32_t  linesize = cpuid_info()->cache_linesize;
	addr64_t  addr;
	uint32_t  offset, chunk;
	boolean_t istate;

	__mfence();

	istate = ml_set_interrupts_enabled(FALSE);

	offset = (uint32_t)(pa & (linesize - 1));
	addr   = pa - offset;

	map = pmap_get_mapwindow((pt_entry_t)(i386_ptob(atop_64(addr)) | INTEL_PTE_VALID));

	count += offset;
	offset = (uint32_t)(addr & ((addr64_t) (page_size - 1)));
	chunk  = (uint32_t)page_size - offset;

	do
	{
		if (chunk > count)
			chunk = count;

		for (; offset < chunk; offset += linesize)
			__clflush((void *)(((uintptr_t)map->prv_CADDR) + offset));

		count -= chunk;
		addr  += chunk;
		chunk  = (uint32_t) page_size;
		offset = 0;

		if (count) {
			pmap_store_pte(map->prv_CMAP, (pt_entry_t)(i386_ptob(atop_64(addr)) | INTEL_PTE_VALID));
			invlpg((uintptr_t)map->prv_CADDR);
		}
	}
	while (count);

	pmap_put_mapwindow(map);

	(void) ml_set_interrupts_enabled(istate);

	__mfence();
}

void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
	return(dcache_incoherent_io_store64(pa, count));
}


void
flush_dcache64(addr64_t addr, unsigned count, int phys)
{
	if (phys) {
		dcache_incoherent_io_flush64(addr, count);
	}
	else {
		uint32_t  linesize = cpuid_info()->cache_linesize;
		addr64_t  bound = (addr + count + linesize - 1) & ~(linesize - 1);
		__mfence();
		while (addr < bound) {
			__clflush((void *) (uintptr_t) addr);
			addr += linesize;
		}
		__mfence();
	}
}

void
invalidate_icache64(__unused addr64_t addr,
		    __unused unsigned count,
		    __unused int phys)
{
}


addr64_t	vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}

void
cache_flush_page_phys(ppnum_t pa)
{
	mapwindow_t	*map;
	boolean_t	istate;
	int		i;
	unsigned char	*cacheline_addr;
	int		cacheline_size = cpuid_info()->cache_linesize;
	int		cachelines_in_page = PAGE_SIZE / cacheline_size;

	__mfence();

	istate = ml_set_interrupts_enabled(FALSE);

	map = pmap_get_mapwindow((pt_entry_t)(i386_ptob(pa) | INTEL_PTE_VALID));

	for (i = 0, cacheline_addr = (unsigned char *)map->prv_CADDR;
	     i < cachelines_in_page;
	     i++, cacheline_addr += cacheline_size) {
		__clflush((void *) cacheline_addr);
	}
	pmap_put_mapwindow(map);

	(void) ml_set_interrupts_enabled(istate);

	__mfence();
}


#if !MACH_KDP
void
kdp_register_callout(void)
{
}
#endif

#if !CONFIG_VMX
int host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}

void host_vmxoff(void)
{
	return;
}
#endif

#ifdef __LP64__

#define INT_SIZE	(BYTE_SIZE * sizeof (int))

/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}

/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int offset;

	for (offset = 0; !*s; offset += (int)INT_SIZE, ++s);
	return offset + __builtin_ctz(*s);
}
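
/*
 * Illustrative sketch, not part of the build: a small bitmap built from the
 * helpers above.  Note that ffsbit() assumes at least one bit is set; on an
 * all-zero string it would walk past the end of the array.
 */
#if 0
static void
bitmap_example(void)
{
	int map[2] = { 0, 0 };		/* 2 x 32 bits with INT_SIZE == 32 */

	setbit(37, map);		/* sets bit 5 of map[1] */
	/* testbit(37, map) is now non-zero and ffsbit(map) returns 37 */
	clrbit(37, map);		/* testbit(37, map) is zero again */
}
#endif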

int
ffs(unsigned int mask)
{
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}
#endif