/*
 * apple/xnu (release xnu-4903.270.47) -- osfmk/x86_64/loose_ends.c
 */
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 #include <mach_assert.h>
59
60 #include <string.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/kern_types.h>
65 #include <kern/misc_protos.h>
66 #include <sys/errno.h>
67 #include <i386/param.h>
68 #include <i386/misc_protos.h>
69 #include <i386/cpu_data.h>
70 #include <i386/machine_routines.h>
71 #include <i386/cpuid.h>
72 #include <i386/vmx.h>
73 #include <vm/pmap.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_fault.h>
77
78 #include <libkern/OSAtomic.h>
79 #include <libkern/OSDebug.h>
80 #include <sys/kdebug.h>
81
82 #if !MACH_KDP
83 #include <kdp/kdp_callout.h>
84 #endif /* !MACH_KDP */
85
86 #include <architecture/i386/pio.h>
87
88 #include <libkern/OSDebug.h>
89 #if CONFIG_DTRACE
90 #include <mach/sdt.h>
91 #endif
92
93 #if 0
94
95 #undef KERNEL_DEBUG
96 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
97 #define KDEBUG 1
98
99 #endif
100
101 /* prevent infinite recursion when memmove calls bcopy; in string.h, bcopy is defined to call memmove */
102 #undef bcopy
103
104 /* XXX - should be gone from here */
105 extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
106 extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
107 extern boolean_t phys_page_exists(ppnum_t);
108 extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
109 extern void pmap_set_reference(ppnum_t pn);
110 extern void mapping_set_mod(ppnum_t pa);
111 extern void mapping_set_ref(ppnum_t pn);
112
113 extern void ovbcopy(const char *from,
114 char *to,
115 vm_size_t nbytes);
116 void machine_callstack(uintptr_t *buf, vm_size_t callstack_max);
117
118
119 #define value_64bit(value) ((value) & 0xFFFFFFFF00000000ULL)
120 #define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFULL))
121
122 #define INT_SIZE (BYTE_SIZE * sizeof (int))
123
124 /*
125 * Set indicated bit in bit string.
126 */
127 void
128 setbit(int bitno, int *s)
129 {
130 s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
131 }
132
133 /*
134 * Clear indicated bit in bit string.
135 */
136 void
137 clrbit(int bitno, int *s)
138 {
139 s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
140 }
141
142 /*
143 * Test if indicated bit is set in bit string.
144 */
145 int
146 testbit(int bitno, int *s)
147 {
148 return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
149 }
150
151 /*
152 * Find first bit set in bit string.
153 */
154 int
155 ffsbit(int *s)
156 {
157 int offset;
158
159 for (offset = 0; !*s; offset += (int)INT_SIZE, ++s) {
160 ;
161 }
162 return offset + __builtin_ctz(*s);
163 }
164
/*
 * Find-first-set: index (1-based) of the least-significant set bit,
 * or 0 when no bit is set.
 *
 * __builtin_ffs is avoided because the compiler may lower it to a
 * call to 'ffs' -- i.e. back into this function.
 */
int
ffs(unsigned int mask)
{
	return (mask == 0) ? 0 : 1 + __builtin_ctz(mask);
}
178
/*
 * 64-bit find-first-set: index (1-based) of the least-significant
 * set bit, or 0 when no bit is set.
 *
 * __builtin_ffsll is avoided because the compiler may lower it to a
 * call to 'ffsll' -- i.e. back into this function.
 */
int
ffsll(unsigned long long mask)
{
	return (mask == 0) ? 0 : 1 + __builtin_ctzll(mask);
}
192
/*
 * Find last bit set: index (1-based) of the most-significant set
 * bit, or 0 when no bit is set.
 */
int
fls(unsigned int mask)
{
	return (mask == 0) ? 0 : (int)(sizeof(mask) << 3) - __builtin_clz(mask);
}
205
/*
 * 64-bit find last bit set: index (1-based) of the most-significant
 * set bit, or 0 when no bit is set.
 */
int
flsll(unsigned long long mask)
{
	return (mask == 0) ? 0 : (int)(sizeof(mask) << 3) - __builtin_clzll(mask);
}
215
/*
 * Zero physical memory, non-cached variant.  On x86_64 no special
 * non-cacheable path exists here: this simply forwards to bzero_phys.
 */
void
bzero_phys_nc(
	addr64_t src64,
	uint32_t bytes)
{
	bzero_phys(src64, bytes);
}
223
224 void
225 bzero_phys(
226 addr64_t src64,
227 uint32_t bytes)
228 {
229 bzero(PHYSMAP_PTOV(src64), bytes);
230 }
231
232
233 /*
234 * bcopy_phys - like bcopy but copies from/to physical addresses.
235 */
236
237 void
238 bcopy_phys(
239 addr64_t src64,
240 addr64_t dst64,
241 vm_size_t bytes)
242 {
243 /* Not necessary for K64 - but ensure we stay within a page */
244 if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
245 ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
246 panic("bcopy_phys alignment");
247 }
248 bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
249 }
250
251 /*
252 * allow a function to get a quick virtual mapping of a physical page
253 */
254
255 int
256 apply_func_phys(
257 addr64_t dst64,
258 vm_size_t bytes,
259 int (*func)(void * buffer, vm_size_t bytes, void * arg),
260 void * arg)
261 {
262 /* Not necessary for K64 - but ensure we stay within a page */
263 if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
264 panic("apply_func_phys alignment");
265 }
266
267 return func(PHYSMAP_PTOV(dst64), bytes, arg);
268 }
269
270 /*
271 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
272 * them correctly.
273 */
274
275 void
276 ovbcopy(
277 const char *from,
278 char *to,
279 vm_size_t bytes) /* num bytes to copy */
280 {
281 /* Assume that bcopy copies left-to-right (low addr first). */
282 if (from + bytes <= to || to + bytes <= from || to == from) {
283 bcopy_no_overwrite(from, to, bytes); /* non-overlapping or no-op*/
284 } else if (from > to) {
285 bcopy_no_overwrite(from, to, bytes); /* overlapping but OK */
286 } else {
287 /* to > from: overlapping, and must copy right-to-left. */
288 from += bytes - 1;
289 to += bytes - 1;
290 while (bytes-- > 0) {
291 *to-- = *from--;
292 }
293 }
294 }
295
296
297 /*
298 * Read data from a physical address. Memory should not be cache inhibited.
299 */
300
/* Latency ceilings, in mach_absolute_time units, above which a slow
 * physical/port read or write is reported (panic/backtrace/DTrace);
 * 0 disables the timing machinery entirely. */
uint64_t reportphyreaddelayabs;
uint64_t reportphywritedelayabs;
/* When nonzero, over-ceiling accesses also log an OSReportWithBacktrace. */
uint32_t reportphyreadosbt;
uint32_t reportphywriteosbt;

#if DEVELOPMENT || DEBUG
/* Development kernels panic on over-ceiling accesses by default and
 * kdebug-trace accesses slower than 50us (NSEC_PER_USEC-based; assumes
 * an ~1ns timebase -- TODO confirm).  simulate_stretched_io backdates
 * the measured start time to exercise the slow-I/O reporting paths. */
uint32_t phyreadpanic = 1;
uint32_t phywritepanic = 1;
uint64_t tracephyreaddelayabs = 50 * NSEC_PER_USEC;
uint64_t tracephywritedelayabs = 50 * NSEC_PER_USEC;
uint64_t simulate_stretched_io = 0;
#else
/* Release kernels: no panic, no trace threshold. */
uint32_t phyreadpanic = 0;
uint32_t phywritepanic = 0;
uint64_t tracephyreaddelayabs = 0;
uint64_t tracephywritedelayabs = 0;
#endif
318
/*
 * Read a 1/2/4/8-byte datum from physical address 'paddr' through the
 * kernel physical map.  Memory should not be cache inhibited.
 *
 * paddr: physical address; must lie inside the physmap or we panic.
 * size:  1, 2, 4 or 8; any other value panics.
 *
 * When reportphyreaddelayabs is nonzero the access is timed with
 * interrupts disabled.  Over-ceiling reads panic (if phyreadpanic and
 * timeouts are not suspended), optionally log a backtrace, and fire a
 * DTrace probe; merely-slow reads (> tracephyreaddelayabs) emit a
 * kdebug event.  Interrupts are restored on every exit path.
 */
__private_extern__ uint64_t
ml_phys_read_data(uint64_t paddr, int size)
{
	uint64_t result = 0;
	unsigned char s1;
	unsigned short s2;
	boolean_t istate = TRUE, timeread = FALSE;
	uint64_t sabs = 0, eabs;

	if (__improbable(!physmap_enclosed(paddr))) {
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
	}

	if (__improbable(reportphyreaddelayabs != 0)) {
		/* Time the access with interrupts off so the measurement
		 * reflects the access itself, not preemption. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		/* Backdate the start time to exercise the reporting paths. */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	case 8:
		result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
			/* Re-enable interrupts before the (possibly slow)
			 * panic/backtrace reporting below. */
			(void)ml_set_interrupts_enabled(istate);

			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
				panic_io_port_read();
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    paddr, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_phys_read_data took %lluus",
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
401
/* 8-byte physical read helper backing the *_double wrappers below. */
static unsigned long long
ml_phys_read_long_long(uint64_t paddr)
{
	return ml_phys_read_data(paddr, 8);
}

/* 4-byte physical read, 32-bit physical address. */
unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}

/* 4-byte physical read, 32-bit physical address. */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}

/* 4-byte physical read, 64-bit physical address. */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}

/* 4-byte physical read, 64-bit physical address. */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}

/* 2-byte physical read, 32-bit physical address. */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 2);
}

/* 2-byte physical read, 64-bit physical address. */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 2);
}

/* 1-byte physical read, 32-bit physical address. */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 1);
}

/* 1-byte physical read, 64-bit physical address. */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 1);
}

/* 8-byte physical read, 32-bit physical address. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long(paddr);
}

/* 8-byte physical read, 64-bit physical address. */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long(paddr64);
}
467
468
469
470 /*
471 * Write data to a physical address. Memory should not be cache inhibited.
472 */
473
474 __private_extern__ void
475 ml_phys_write_data(uint64_t paddr, unsigned long long data, int size)
476 {
477 boolean_t istate = TRUE, timewrite = FALSE;
478 uint64_t sabs = 0, eabs;
479
480 if (__improbable(!physmap_enclosed(paddr))) {
481 panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
482 }
483
484 if (__improbable(reportphywritedelayabs != 0)) {
485 istate = ml_set_interrupts_enabled(FALSE);
486 sabs = mach_absolute_time();
487 timewrite = TRUE;
488 }
489 #if DEVELOPMENT || DEBUG
490 if (__improbable(timewrite && simulate_stretched_io)) {
491 sabs -= simulate_stretched_io;
492 }
493 #endif /* x86_64 DEVELOPMENT || DEBUG */
494
495 switch (size) {
496 case 1:
497 *(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
498 break;
499 case 2:
500 *(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
501 break;
502 case 4:
503 *(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
504 break;
505 case 8:
506 *(volatile unsigned long *)PHYSMAP_PTOV(paddr) = data;
507 break;
508 default:
509 panic("Invalid size %d for ml_phys_write_data", size);
510 break;
511 }
512
513 if (__improbable(timewrite == TRUE)) {
514 eabs = mach_absolute_time();
515
516 #if DEVELOPMENT || DEBUG
517 iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);
518 #endif
519
520 if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
521 (void)ml_set_interrupts_enabled(istate);
522
523 if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
524 panic_io_port_read();
525 panic("Write to physical addr 0x%llx took %llu ns, "
526 "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
527 paddr, (eabs - sabs), data, sabs, eabs,
528 reportphywritedelayabs);
529 }
530
531 if (reportphywriteosbt) {
532 OSReportWithBacktrace("ml_phys_write_data (%p, 0x%llx) "
533 "took %lluus",
534 paddr, data, (eabs - sabs) / NSEC_PER_USEC);
535 }
536 #if CONFIG_DTRACE
537 DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
538 uint64_t, paddr, uint32_t, size, uint64_t, data);
539 #endif /* CONFIG_DTRACE */
540 } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
541 KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
542 (eabs - sabs), sabs, paddr, data);
543
544 (void)ml_set_interrupts_enabled(istate);
545 } else {
546 (void)ml_set_interrupts_enabled(istate);
547 }
548 }
549 }
550
/* 1-byte physical write, 32-bit physical address. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}

/* 1-byte physical write, 64-bit physical address. */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 1);
}

/* 2-byte physical write, 32-bit physical address. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}

/* 2-byte physical write, 64-bit physical address. */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 2);
}

/* 4-byte physical write, 32-bit physical address. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

/* 4-byte physical write, 64-bit physical address. */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

/* 4-byte physical write, 32-bit physical address. */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

/* 4-byte physical write, 64-bit physical address. */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

/* 8-byte physical write, 32-bit physical address. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data(paddr, data, 8);
}

/* 8-byte physical write, 64-bit physical address. */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data(paddr64, data, 8);
}
610
611 uint32_t
612 ml_port_io_read(uint16_t ioport, int size)
613 {
614 uint32_t result = 0;
615
616 uint64_t sabs, eabs;
617 boolean_t istate, timeread = FALSE;
618
619 if (__improbable(reportphyreaddelayabs != 0)) {
620 istate = ml_set_interrupts_enabled(FALSE);
621 sabs = mach_absolute_time();
622 timeread = TRUE;
623 }
624
625 #if DEVELOPMENT || DEBUG
626 if (__improbable(timeread && simulate_stretched_io)) {
627 sabs -= simulate_stretched_io;
628 }
629 #endif /* x86_64 DEVELOPMENT || DEBUG */
630
631 switch (size) {
632 case 1:
633 result = inb(ioport);
634 break;
635 case 2:
636 result = inw(ioport);
637 break;
638 case 4:
639 result = inl(ioport);
640 break;
641 default:
642 panic("Invalid size %d for ml_port_io_read(0x%x)", size, (unsigned)ioport);
643 break;
644 }
645
646 if (__improbable(timeread == TRUE)) {
647 eabs = mach_absolute_time();
648
649 #if DEVELOPMENT || DEBUG
650 iotrace(IOTRACE_PORTIO_READ, 0, ioport, size, result, sabs, eabs - sabs);
651 #endif
652
653 if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
654 (void)ml_set_interrupts_enabled(istate);
655
656 if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
657 panic_io_port_read();
658 panic("Read from IO port 0x%x took %llu ns, "
659 "result: 0x%x (start: %llu, end: %llu), ceiling: %llu",
660 ioport, (eabs - sabs), result, sabs, eabs,
661 reportphyreaddelayabs);
662 }
663
664 if (reportphyreadosbt) {
665 OSReportWithBacktrace("ml_port_io_read(0x%x) took %lluus",
666 ioport, (eabs - sabs) / NSEC_PER_USEC);
667 }
668 #if CONFIG_DTRACE
669 DTRACE_PHYSLAT3(portioread, uint64_t, (eabs - sabs),
670 uint16_t, ioport, uint32_t, size);
671 #endif /* CONFIG_DTRACE */
672 } else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
673 KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_READ),
674 (eabs - sabs), sabs, ioport, result);
675
676 (void)ml_set_interrupts_enabled(istate);
677 } else {
678 (void)ml_set_interrupts_enabled(istate);
679 }
680 }
681
682 return result;
683 }
684
685 void
686 ml_port_io_write(uint16_t ioport, uint32_t val, int size)
687 {
688 uint64_t sabs, eabs;
689 boolean_t istate, timewrite = FALSE;
690
691 if (__improbable(reportphywritedelayabs != 0)) {
692 istate = ml_set_interrupts_enabled(FALSE);
693 sabs = mach_absolute_time();
694 timewrite = TRUE;
695 }
696 #if DEVELOPMENT || DEBUG
697 if (__improbable(timewrite && simulate_stretched_io)) {
698 sabs -= simulate_stretched_io;
699 }
700 #endif /* x86_64 DEVELOPMENT || DEBUG */
701
702 switch (size) {
703 case 1:
704 outb(ioport, (uint8_t)val);
705 break;
706 case 2:
707 outw(ioport, (uint16_t)val);
708 break;
709 case 4:
710 outl(ioport, (uint32_t)val);
711 break;
712 default:
713 panic("Invalid size %d for ml_port_io_write(0x%x)", size, (unsigned)ioport);
714 break;
715 }
716
717 if (__improbable(timewrite == TRUE)) {
718 eabs = mach_absolute_time();
719
720 #if DEVELOPMENT || DEBUG
721 iotrace(IOTRACE_PORTIO_WRITE, 0, ioport, size, val, sabs, eabs - sabs);
722 #endif
723
724 if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
725 (void)ml_set_interrupts_enabled(istate);
726
727 if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
728 panic_io_port_read();
729 panic("Write to IO port 0x%x took %llu ns, val: 0x%x"
730 " (start: %llu, end: %llu), ceiling: %llu",
731 ioport, (eabs - sabs), val, sabs, eabs,
732 reportphywritedelayabs);
733 }
734
735 if (reportphywriteosbt) {
736 OSReportWithBacktrace("ml_port_io_write(0x%x, %d, 0x%llx) "
737 "took %lluus",
738 ioport, size, val, (eabs - sabs) / NSEC_PER_USEC);
739 }
740
741 #if CONFIG_DTRACE
742 DTRACE_PHYSLAT4(portiowrite, uint64_t, (eabs - sabs),
743 uint16_t, ioport, uint32_t, size, uint64_t, val);
744 #endif /* CONFIG_DTRACE */
745 } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
746 KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_WRITE),
747 (eabs - sabs), sabs, ioport, val);
748
749 (void)ml_set_interrupts_enabled(istate);
750 } else {
751 (void)ml_set_interrupts_enabled(istate);
752 }
753 }
754 }
755
/* 1-byte port read. */
uint8_t
ml_port_io_read8(uint16_t ioport)
{
	return ml_port_io_read(ioport, 1);
}

/* 2-byte port read. */
uint16_t
ml_port_io_read16(uint16_t ioport)
{
	return ml_port_io_read(ioport, 2);
}

/* 4-byte port read. */
uint32_t
ml_port_io_read32(uint16_t ioport)
{
	return ml_port_io_read(ioport, 4);
}

/* 1-byte port write. */
void
ml_port_io_write8(uint16_t ioport, uint8_t val)
{
	ml_port_io_write(ioport, val, 1);
}

/* 2-byte port write. */
void
ml_port_io_write16(uint16_t ioport, uint16_t val)
{
	ml_port_io_write(ioport, val, 2);
}

/* 4-byte port write. */
void
ml_port_io_write32(uint16_t ioport, uint32_t val)
{
	ml_port_io_write(ioport, val, 4);
}
791
792 /* PCI config cycle probing
793 *
794 *
795 * Read the memory location at physical address paddr.
796 * *Does not* recover from machine checks, unlike the PowerPC implementation.
797 * Should probably be deprecated.
798 */
799
800 boolean_t
801 ml_probe_read(vm_offset_t paddr, unsigned int *val)
802 {
803 if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) {
804 return FALSE;
805 }
806
807 *val = ml_phys_read(paddr);
808
809 return TRUE;
810 }
811
812 /*
813 * Read the memory location at physical address paddr.
814 * This is a part of a device probe, so there is a good chance we will
815 * have a machine check here. So we have to be able to handle that.
816 * We assume that machine checks are enabled both in MSR and HIDs
817 */
818 boolean_t
819 ml_probe_read_64(addr64_t paddr64, unsigned int *val)
820 {
821 if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) {
822 return FALSE;
823 }
824
825 *val = ml_phys_read_64(paddr64);
826 return TRUE;
827 }
828
829
830 #undef bcmp
/*
 * Compare two byte ranges.  Returns 0 when they match; on mismatch,
 * returns the (nonzero) number of bytes not yet known equal,
 * counting the mismatching byte -- preserving this function's
 * historical return-value convention.
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *x = (const char *)pa;
	const char *y = (const char *)pb;
	size_t remaining = len;

	while (remaining != 0) {
		if (*x != *y) {
			break;
		}
		x++;
		y++;
		remaining--;
	}

	return (int)remaining;
}
852
853 #undef memcmp
/*
 * Standard memcmp: lexicographic comparison of n bytes, as unsigned
 * chars.  Returns <0, 0, or >0 (here: the difference of the first
 * mismatching pair).
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *a = s1;
	const unsigned char *b = s2;

	for (; n != 0; n--, a++, b++) {
		if (*a != *b) {
			return *a - *b;
		}
	}
	return 0;
}
868
869 #undef memmove
/*
 * memmove: copy 'ulen' bytes from src to dst and return dst.
 * Delegates to bcopy (un-#define'd above so this does not recurse via
 * the string.h bcopy->memmove macro).
 * NOTE(review): correctness for overlapping ranges relies on the
 * platform bcopy tolerating overlap -- confirm against the x86_64
 * bcopy implementation.
 */
void *
memmove(void *dst, const void *src, size_t ulen)
{
	bcopy(src, dst, ulen);
	return dst;
}
876
877 /*
878 * Abstract:
879 * strlen returns the number of characters in "string" preceeding
880 * the terminating null character.
881 */
882
883 #undef strlen
/*
 * Return the number of characters in "string" preceding the
 * terminating NUL.
 */
size_t
strlen(
	const char *string)
{
	const char *p = string;

	while (*p != '\0') {
		p++;
	}

	return (size_t)(p - string);
}
895
896 #if MACH_ASSERT
897
898 /*
899 * Machine-dependent routine to fill in an array with up to callstack_max
900 * levels of return pc information.
901 */
/*
 * Machine-dependent routine to fill in an array with up to
 * callstack_max levels of return pc information.  Not implemented on
 * x86_64: the buffer is left untouched.
 */
void
machine_callstack(
	__unused uintptr_t *buf,
	__unused vm_size_t callstack_max)
{
}
908
909 #endif /* MACH_ASSERT */
910
911 void
912 fillPage(ppnum_t pa, unsigned int fill)
913 {
914 uint64_t src;
915 int i;
916 int cnt = PAGE_SIZE / sizeof(unsigned int);
917 unsigned int *addr;
918
919 src = i386_ptob(pa);
920 for (i = 0, addr = (unsigned int *)PHYSMAP_PTOV(src); i < cnt; i++) {
921 *addr++ = fill;
922 }
923 }
924
/*
 * Flush the cache line containing 'ptr' (x86 CLFLUSH instruction).
 * NOTE(review): the asm has no "memory" clobber; all callers in this
 * file bracket their flush loops with mfence(), which appears to
 * provide the required ordering -- confirm before adding call sites
 * that rely on ordering without fences.
 */
static inline void
__clflush(void *ptr)
{
	__asm__ volatile ("clflush (%0)" : : "r" (ptr));
}
930
931 void
932 dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
933 {
934 addr64_t linesize = cpuid_info()->cache_linesize;
935 addr64_t bound = (pa + count + linesize - 1) & ~(linesize - 1);
936
937 mfence();
938
939 while (pa < bound) {
940 __clflush(PHYSMAP_PTOV(pa));
941 pa += linesize;
942 }
943
944 mfence();
945 }
946
947 void
948 dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
949 {
950 return dcache_incoherent_io_store64(pa, count);
951 }
952
953 void
954 flush_dcache64(addr64_t addr, unsigned count, int phys)
955 {
956 if (phys) {
957 dcache_incoherent_io_flush64(addr, count);
958 } else {
959 uint64_t linesize = cpuid_info()->cache_linesize;
960 addr64_t bound = (addr + count + linesize - 1) & ~(linesize - 1);
961 mfence();
962 while (addr < bound) {
963 __clflush((void *) (uintptr_t) addr);
964 addr += linesize;
965 }
966 mfence();
967 }
968 }
969
/*
 * No-op on x86_64; provided only to satisfy the machine-independent
 * cache interface.
 */
void
invalidate_icache64(__unused addr64_t addr,
    __unused unsigned count,
    __unused int phys)
{
}
976
977
978 addr64_t vm_last_addr;
979
/* Mark physical page 'pn' modified (dirty) in the pmap layer. */
void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

/* Mark physical page 'pn' referenced in the pmap layer. */
void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
991
992 extern i386_cpu_info_t cpuid_cpu_info;
993 void
994 cache_flush_page_phys(ppnum_t pa)
995 {
996 boolean_t istate;
997 unsigned char *cacheline_addr;
998 i386_cpu_info_t *cpuid_infop = cpuid_info();
999 int cacheline_size;
1000 int cachelines_to_flush;
1001
1002 cacheline_size = cpuid_infop->cache_linesize;
1003 if (cacheline_size == 0) {
1004 panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop);
1005 }
1006 cachelines_to_flush = PAGE_SIZE / cacheline_size;
1007
1008 mfence();
1009
1010 istate = ml_set_interrupts_enabled(FALSE);
1011
1012 for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
1013 cachelines_to_flush > 0;
1014 cachelines_to_flush--, cacheline_addr += cacheline_size) {
1015 __clflush((void *) cacheline_addr);
1016 }
1017
1018 (void) ml_set_interrupts_enabled(istate);
1019
1020 mfence();
1021 }
1022
1023
1024 #if !MACH_KDP
/*
 * Stub for kernels built without MACH_KDP: debugger callout
 * registrations are accepted and silently ignored.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
1030 #endif
1031
1032 #if !CONFIG_VMX
/*
 * Stub for kernels built without CONFIG_VMX: VMX can never be
 * entered, so the request is always rejected.
 */
int
host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}

/* Stub for kernels built without CONFIG_VMX: nothing to turn off. */
void
host_vmxoff(void)
{
	return;
}
1044 #endif