]> git.saurik.com Git - apple/xnu.git/blob - osfmk/x86_64/loose_ends.c
xnu-7195.60.75.tar.gz
[apple/xnu.git] / osfmk / x86_64 / loose_ends.c
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 #include <mach_assert.h>
59
60 #include <string.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/kern_types.h>
65 #include <kern/misc_protos.h>
66 #include <kern/locks.h>
67 #include <sys/errno.h>
68 #include <i386/param.h>
69 #include <i386/misc_protos.h>
70 #include <i386/panic_notify.h>
71 #include <i386/cpu_data.h>
72 #include <i386/machine_routines.h>
73 #include <i386/cpuid.h>
74 #include <i386/vmx.h>
75 #include <vm/pmap.h>
76 #include <vm/vm_map.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_fault.h>
79
80 #include <libkern/OSAtomic.h>
81 #include <libkern/OSDebug.h>
82 #include <sys/kdebug.h>
83
84 #if !MACH_KDP
85 #include <kdp/kdp_callout.h>
86 #endif /* !MACH_KDP */
87
88 #include <architecture/i386/pio.h>
89
90 #include <libkern/OSDebug.h>
91 #if CONFIG_DTRACE
92 #include <mach/sdt.h>
93 #endif
94
95 #if 0
96
97 #undef KERNEL_DEBUG
98 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
99 #define KDEBUG 1
100
101 #endif
102
103 /* prevent infinite recursion when memmove calls bcopy; in string.h, bcopy is defined to call memmove */
104 #undef bcopy
105
106 /* XXX - should be gone from here */
107 extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
108 extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
109 extern boolean_t phys_page_exists(ppnum_t);
110 extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
111 extern void pmap_set_reference(ppnum_t pn);
112 extern void mapping_set_mod(ppnum_t pa);
113 extern void mapping_set_ref(ppnum_t pn);
114
115 extern void ovbcopy(const char *from,
116 char *to,
117 vm_size_t nbytes);
118 void machine_callstack(uintptr_t *buf, vm_size_t callstack_max);
119
120
121 #define value_64bit(value) ((value) & 0xFFFFFFFF00000000ULL)
122 #define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFULL))
123
124 #define INT_SIZE (BYTE_SIZE * sizeof (int))
125
126 /*
127 * Set indicated bit in bit string.
128 */
129 void
130 setbit(int bitno, int *s)
131 {
132 s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
133 }
134
135 /*
136 * Clear indicated bit in bit string.
137 */
138 void
139 clrbit(int bitno, int *s)
140 {
141 s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
142 }
143
144 /*
145 * Test if indicated bit is set in bit string.
146 */
147 int
148 testbit(int bitno, int *s)
149 {
150 return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
151 }
152
153 /*
154 * Find first bit set in bit string.
155 */
int
ffsbit(int *s)
{
	int offset;

	/*
	 * Skip whole all-zero words.  NOTE: there is no length bound --
	 * the caller must guarantee at least one bit is set somewhere,
	 * or this loop walks off the end of the array.
	 */
	for (offset = 0; !*s; offset += (int)INT_SIZE, ++s) {
		;
	}
	/* *s != 0 here, so __builtin_ctz is well-defined. */
	return offset + __builtin_ctz(*s);
}
166
int
ffs(unsigned int mask)
{
	/*
	 * Return the 1-based position of the least significant set bit,
	 * or 0 when no bit is set.  __builtin_ctz is used directly since
	 * __builtin_ffs may lower to a call to 'ffs' and recurse.
	 */
	return (mask == 0) ? 0 : 1 + __builtin_ctz(mask);
}
180
int
ffsll(unsigned long long mask)
{
	/*
	 * Return the 1-based position of the least significant set bit,
	 * or 0 when no bit is set.  __builtin_ctzll is used directly
	 * since __builtin_ffsll may lower to a call to 'ffsll' and
	 * recurse.
	 */
	return (mask == 0) ? 0 : 1 + __builtin_ctzll(mask);
}
194
195 /*
196 * Find last bit set in bit string.
197 */
int
fls(unsigned int mask)
{
	/* 1-based position of the most significant set bit; 0 if none. */
	if (mask == 0) {
		return 0;
	}

	return (int)(sizeof(mask) * 8) - __builtin_clz(mask);
}
207
int
flsll(unsigned long long mask)
{
	/* 1-based position of the most significant set bit; 0 if none. */
	if (mask == 0) {
		return 0;
	}

	return (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
}
217
/*
 * bzero_phys_nc: zero `bytes` bytes of physical memory starting at
 * src64.  On x86_64 this simply forwards to bzero_phys; there is no
 * separate non-cached path here.
 */
void
bzero_phys_nc(
	addr64_t src64,
	uint32_t bytes)
{
	bzero_phys(src64, bytes);
}
225
/*
 * bzero_phys: zero `bytes` bytes of physical memory starting at src64,
 * accessed through the kernel physical map (physmap) window.
 */
void
bzero_phys(
	addr64_t src64,
	uint32_t bytes)
{
	bzero(PHYSMAP_PTOV(src64), bytes);
}
233
234
235 /*
236 * bcopy_phys - like bcopy but copies from/to physical addresses.
237 */
238
void
bcopy_phys(
	addr64_t src64,
	addr64_t dst64,
	vm_size_t bytes)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
	    ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		/* Either range would cross a page boundary: refuse. */
		panic("bcopy_phys alignment");
	}
	/* Both ranges fit in single pages; copy through the physmap. */
	bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
}
252
253 /*
254 * allow a function to get a quick virtual mapping of a physical page
255 */
256
/*
 * apply_func_phys: invoke `func` on a virtual mapping (via the physmap)
 * of the physical range [dst64, dst64 + bytes), passing `arg` through.
 * The range must not cross a page boundary (panics otherwise).
 * Returns whatever `func` returns.
 */
int
apply_func_phys(
	addr64_t dst64,
	vm_size_t bytes,
	int (*func)(void * buffer, vm_size_t bytes, void * arg),
	void * arg)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("apply_func_phys alignment");
	}

	return func(PHYSMAP_PTOV(dst64), bytes, arg);
}
271
272 /*
273 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
274 * them correctly.
275 */
276
277 void
278 ovbcopy(
279 const char *from,
280 char *to,
281 vm_size_t bytes) /* num bytes to copy */
282 {
283 /* Assume that bcopy copies left-to-right (low addr first). */
284 if (from + bytes <= to || to + bytes <= from || to == from) {
285 bcopy_no_overwrite(from, to, bytes); /* non-overlapping or no-op*/
286 } else if (from > to) {
287 bcopy_no_overwrite(from, to, bytes); /* overlapping but OK */
288 } else {
289 /* to > from: overlapping, and must copy right-to-left. */
290 from += bytes - 1;
291 to += bytes - 1;
292 while (bytes-- > 0) {
293 *to-- = *from--;
294 }
295 }
296 }
297
298
299 /*
300 * Read data from a physical address. Memory should not be cache inhibited.
301 */
302
/*
 * Latency thresholds for timed physical-memory and port-I/O accesses.
 * A non-zero report*delayabs enables timing of each access; an access
 * exceeding the threshold panics (when phy*panic is set and timeouts
 * are not suspended) or is reported with a backtrace (when the
 * report*osbt counters are set).  trace*delayabs thresholds emit
 * kdebug events for moderately slow accesses.
 */
uint64_t reportphyreaddelayabs;
uint64_t reportphywritedelayabs;
uint32_t reportphyreadosbt;
uint32_t reportphywriteosbt;

#if DEVELOPMENT || DEBUG
/* Development kernels panic on slow accesses by default and trace
 * accesses over 50us; simulate_stretched_io is a test hook that
 * artificially inflates the measured latency. */
uint32_t phyreadpanic = 1;
uint32_t phywritepanic = 1;
uint64_t tracephyreaddelayabs = 50 * NSEC_PER_USEC;
uint64_t tracephywritedelayabs = 50 * NSEC_PER_USEC;
uint64_t simulate_stretched_io = 0;
#else
uint32_t phyreadpanic = 0;
uint32_t phywritepanic = 0;
uint64_t tracephyreaddelayabs = 0;
uint64_t tracephywritedelayabs = 0;
#endif
320
/*
 * ml_phys_read_data: read a 1-, 2-, 4- or 8-byte datum from physical
 * address `paddr` through the physmap and return it zero-extended to
 * 64 bits.  Panics if paddr is outside the physmap or size is not one
 * of 1/2/4/8.
 *
 * When reportphyreaddelayabs != 0 the access is timed with interrupts
 * disabled; an access exceeding that threshold panics (subject to
 * phyreadpanic and machine_timeout_suspended()) or is reported with a
 * backtrace, and accesses over tracephyreaddelayabs emit a kdebug
 * event.
 */
__private_extern__ uint64_t
ml_phys_read_data(uint64_t paddr, int size)
{
	uint64_t result = 0;
	unsigned char s1;
	unsigned short s2;
	boolean_t istate = TRUE, timeread = FALSE;
	uint64_t sabs = 0, eabs;

	/* Guard against reads outside the physmap window. */
	if (__improbable(!physmap_enclosed(paddr))) {
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
	}

	if (__improbable(reportphyreaddelayabs != 0)) {
		/* Disable interrupts so the latency measurement is not
		 * inflated by interrupt handling. */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		/* Test hook: back-date the start time to fake a slow access. */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	case 8:
		result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
			/* Re-enable interrupts before potentially panicking
			 * or taking a backtrace. */
			(void)ml_set_interrupts_enabled(istate);

			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    paddr, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_phys_read_data took %lluus",
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
403
/* Internal helper: 8-byte physical read via ml_phys_read_data. */
static unsigned long long
ml_phys_read_long_long(uint64_t paddr)
{
	return ml_phys_read_data(paddr, 8);
}
409
/*
 * Width-specific wrappers around ml_phys_read_data.  The `_64` variants
 * take a 64-bit physical address; all results are zero-extended into
 * the declared return type.
 */

/* 4-byte physical read. */
unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}

/* 4-byte physical read (alias of ml_phys_read). */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}

/* 4-byte physical read, 64-bit address. */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}

/* 4-byte physical read, 64-bit address. */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}

/* 2-byte physical read. */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 2);
}

/* 2-byte physical read, 64-bit address. */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 2);
}

/* 1-byte physical read. */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 1);
}

/* 1-byte physical read, 64-bit address. */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 1);
}

/* 8-byte physical read. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long(paddr);
}

/* 8-byte physical read, 64-bit address. */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long(paddr64);
}
469
470
471
472 /*
473 * Write data to a physical address. Memory should not be cache inhibited.
474 */
475
476 __private_extern__ void
477 ml_phys_write_data(uint64_t paddr, unsigned long long data, int size)
478 {
479 boolean_t istate = TRUE, timewrite = FALSE;
480 uint64_t sabs = 0, eabs;
481
482 if (__improbable(!physmap_enclosed(paddr))) {
483 panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
484 }
485
486 if (__improbable(reportphywritedelayabs != 0)) {
487 istate = ml_set_interrupts_enabled(FALSE);
488 sabs = mach_absolute_time();
489 timewrite = TRUE;
490 }
491 #if DEVELOPMENT || DEBUG
492 if (__improbable(timewrite && simulate_stretched_io)) {
493 sabs -= simulate_stretched_io;
494 }
495 #endif /* x86_64 DEVELOPMENT || DEBUG */
496
497 switch (size) {
498 case 1:
499 *(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
500 break;
501 case 2:
502 *(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
503 break;
504 case 4:
505 *(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
506 break;
507 case 8:
508 *(volatile unsigned long *)PHYSMAP_PTOV(paddr) = data;
509 break;
510 default:
511 panic("Invalid size %d for ml_phys_write_data", size);
512 break;
513 }
514
515 if (__improbable(timewrite == TRUE)) {
516 eabs = mach_absolute_time();
517
518 #if DEVELOPMENT || DEBUG
519 iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);
520 #endif
521
522 if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
523 (void)ml_set_interrupts_enabled(istate);
524
525 if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
526 panic_notify();
527 panic("Write to physical addr 0x%llx took %llu ns, "
528 "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
529 paddr, (eabs - sabs), data, sabs, eabs,
530 reportphywritedelayabs);
531 }
532
533 if (reportphywriteosbt) {
534 OSReportWithBacktrace("ml_phys_write_data (%p, 0x%llx) "
535 "took %lluus",
536 paddr, data, (eabs - sabs) / NSEC_PER_USEC);
537 }
538 #if CONFIG_DTRACE
539 DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
540 uint64_t, paddr, uint32_t, size, uint64_t, data);
541 #endif /* CONFIG_DTRACE */
542 } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
543 KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
544 (eabs - sabs), sabs, paddr, data);
545
546 (void)ml_set_interrupts_enabled(istate);
547 } else {
548 (void)ml_set_interrupts_enabled(istate);
549 }
550 }
551 }
552
/*
 * Width-specific wrappers around ml_phys_write_data.  The `_64`
 * variants take a 64-bit physical address; `data` is truncated to the
 * access width (1, 2, 4 or 8 bytes).
 */

/* 1-byte physical write. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}

/* 1-byte physical write, 64-bit address. */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 1);
}

/* 2-byte physical write. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}

/* 2-byte physical write, 64-bit address. */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 2);
}

/* 4-byte physical write. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

/* 4-byte physical write, 64-bit address. */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

/* 4-byte physical write (alias of ml_phys_write). */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

/* 4-byte physical write, 64-bit address. */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

/* 8-byte physical write. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data(paddr, data, 8);
}

/* 8-byte physical write, 64-bit address. */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data(paddr64, data, 8);
}
612
/*
 * ml_port_io_read: read a 1-, 2- or 4-byte value from x86 I/O port
 * `ioport` (IN instruction); panics on any other size.  When
 * reportphyreaddelayabs != 0 the access is timed with interrupts
 * disabled and slow accesses panic, are reported with a backtrace, or
 * emit a kdebug event, exactly as in ml_phys_read_data.
 */
uint32_t
ml_port_io_read(uint16_t ioport, int size)
{
	uint32_t result = 0;

	uint64_t sabs, eabs;
	/* istate is only read on paths where timeread was set below. */
	boolean_t istate, timeread = FALSE;

	if (__improbable(reportphyreaddelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		/* Test hook: back-date the start time to fake a slow access. */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		result = inb(ioport);
		break;
	case 2:
		result = inw(ioport);
		break;
	case 4:
		result = inl(ioport);
		break;
	default:
		panic("Invalid size %d for ml_port_io_read(0x%x)", size, (unsigned)ioport);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PORTIO_READ, 0, ioport, size, result, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
			(void)ml_set_interrupts_enabled(istate);

			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from IO port 0x%x took %llu ns, "
				    "result: 0x%x (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_port_io_read(0x%x) took %lluus",
				    ioport, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT3(portioread, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_READ),
			    (eabs - sabs), sabs, ioport, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
686
687 void
688 ml_port_io_write(uint16_t ioport, uint32_t val, int size)
689 {
690 uint64_t sabs, eabs;
691 boolean_t istate, timewrite = FALSE;
692
693 if (__improbable(reportphywritedelayabs != 0)) {
694 istate = ml_set_interrupts_enabled(FALSE);
695 sabs = mach_absolute_time();
696 timewrite = TRUE;
697 }
698 #if DEVELOPMENT || DEBUG
699 if (__improbable(timewrite && simulate_stretched_io)) {
700 sabs -= simulate_stretched_io;
701 }
702 #endif /* x86_64 DEVELOPMENT || DEBUG */
703
704 switch (size) {
705 case 1:
706 outb(ioport, (uint8_t)val);
707 break;
708 case 2:
709 outw(ioport, (uint16_t)val);
710 break;
711 case 4:
712 outl(ioport, (uint32_t)val);
713 break;
714 default:
715 panic("Invalid size %d for ml_port_io_write(0x%x)", size, (unsigned)ioport);
716 break;
717 }
718
719 if (__improbable(timewrite == TRUE)) {
720 eabs = mach_absolute_time();
721
722 #if DEVELOPMENT || DEBUG
723 iotrace(IOTRACE_PORTIO_WRITE, 0, ioport, size, val, sabs, eabs - sabs);
724 #endif
725
726 if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
727 (void)ml_set_interrupts_enabled(istate);
728
729 if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
730 panic_notify();
731 panic("Write to IO port 0x%x took %llu ns, val: 0x%x"
732 " (start: %llu, end: %llu), ceiling: %llu",
733 ioport, (eabs - sabs), val, sabs, eabs,
734 reportphywritedelayabs);
735 }
736
737 if (reportphywriteosbt) {
738 OSReportWithBacktrace("ml_port_io_write(0x%x, %d, 0x%llx) "
739 "took %lluus",
740 ioport, size, val, (eabs - sabs) / NSEC_PER_USEC);
741 }
742
743 #if CONFIG_DTRACE
744 DTRACE_PHYSLAT4(portiowrite, uint64_t, (eabs - sabs),
745 uint16_t, ioport, uint32_t, size, uint64_t, val);
746 #endif /* CONFIG_DTRACE */
747 } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
748 KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_WRITE),
749 (eabs - sabs), sabs, ioport, val);
750
751 (void)ml_set_interrupts_enabled(istate);
752 } else {
753 (void)ml_set_interrupts_enabled(istate);
754 }
755 }
756 }
757
/* Width-specific wrappers around ml_port_io_read / ml_port_io_write. */

/* 1-byte port read. */
uint8_t
ml_port_io_read8(uint16_t ioport)
{
	return ml_port_io_read(ioport, 1);
}

/* 2-byte port read. */
uint16_t
ml_port_io_read16(uint16_t ioport)
{
	return ml_port_io_read(ioport, 2);
}

/* 4-byte port read. */
uint32_t
ml_port_io_read32(uint16_t ioport)
{
	return ml_port_io_read(ioport, 4);
}

/* 1-byte port write. */
void
ml_port_io_write8(uint16_t ioport, uint8_t val)
{
	ml_port_io_write(ioport, val, 1);
}

/* 2-byte port write. */
void
ml_port_io_write16(uint16_t ioport, uint16_t val)
{
	ml_port_io_write(ioport, val, 2);
}

/* 4-byte port write. */
void
ml_port_io_write32(uint16_t ioport, uint32_t val)
{
	ml_port_io_write(ioport, val, 4);
}
793
794 /* PCI config cycle probing
795 *
796 *
797 * Read the memory location at physical address paddr.
798 * *Does not* recover from machine checks, unlike the PowerPC implementation.
799 * Should probably be deprecated.
800 */
801
802 boolean_t
803 ml_probe_read(vm_offset_t paddr, unsigned int *val)
804 {
805 if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) {
806 return FALSE;
807 }
808
809 *val = ml_phys_read(paddr);
810
811 return TRUE;
812 }
813
814 /*
815 * Read the memory location at physical address paddr.
816 * This is a part of a device probe, so there is a good chance we will
817 * have a machine check here. So we have to be able to handle that.
818 * We assume that machine checks are enabled both in MSR and HIDs
819 */
820 boolean_t
821 ml_probe_read_64(addr64_t paddr64, unsigned int *val)
822 {
823 if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) {
824 return FALSE;
825 }
826
827 *val = ml_phys_read_64(paddr64);
828 return TRUE;
829 }
830
831
832 #undef bcmp
833 int
834 bcmp(
835 const void *pa,
836 const void *pb,
837 size_t len)
838 {
839 const char *a = (const char *)pa;
840 const char *b = (const char *)pb;
841
842 if (len == 0) {
843 return 0;
844 }
845
846 do {
847 if (*a++ != *b++) {
848 break;
849 }
850 } while (--len);
851
852 /*
853 * Check for the overflow case but continue to handle the non-overflow
854 * case the same way just in case someone is using the return value
855 * as more than zero/non-zero
856 */
857 if (__improbable(!(len & 0x00000000FFFFFFFFULL) && (len & 0xFFFFFFFF00000000ULL))) {
858 return 0xFFFFFFFF;
859 } else {
860 return (int)len;
861 }
862 }
863
864 #undef memcmp
/*
 * Compare two byte ranges as unsigned chars.  Returns 0 when equal,
 * otherwise the (signed) difference of the first mismatching pair,
 * per the C standard memcmp contract.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *p1 = s1;
	const unsigned char *p2 = s2;

	for (; n != 0; n--, p1++, p2++) {
		if (*p1 != *p2) {
			return (int)*p1 - (int)*p2;
		}
	}
	return 0;
}
879
/*
 * memcmp_zero_ptr_aligned: return 0 iff the `size` bytes at `addr` are
 * all zero; otherwise the non-zero OR of the words scanned.
 *
 * NOTE(review): addr is read as uint64_t words and p[0] is loaded
 * unconditionally, so this assumes addr is 8-byte aligned and
 * size >= 8 (and presumably a multiple of 8) -- confirm against
 * callers.
 */
unsigned long
memcmp_zero_ptr_aligned(const void *addr, size_t size)
{
	const uint64_t *p = (const uint64_t *)addr;
	uint64_t a = p[0];

	/* The unsigned long return must hold the 64-bit accumulator. */
	static_assert(sizeof(unsigned long) == sizeof(uint64_t));

	if (size < 4 * sizeof(uint64_t)) {
		/* 8..31 bytes: OR in up to the first three words. */
		if (size > 1 * sizeof(uint64_t)) {
			a |= p[1];
			if (size > 2 * sizeof(uint64_t)) {
				a |= p[2];
			}
		}
	} else {
		/* >= 32 bytes: four parallel accumulators, walking from the
		 * end of the buffer back toward words 0..3. */
		size_t count = size / sizeof(uint64_t);
		uint64_t b = p[1];
		uint64_t c = p[2];
		uint64_t d = p[3];

		/*
		 * note: for sizes not a multiple of 32 bytes, this will load
		 * the bytes [size % 32 .. 32) twice which is ok
		 */
		while (count > 4) {
			count -= 4;
			a |= p[count + 0];
			b |= p[count + 1];
			c |= p[count + 2];
			d |= p[count + 3];
		}

		a |= b | c | d;
	}

	return a;
}
918
919 #undef memmove
/*
 * memmove: overlap-safe copy of ulen bytes from src to dst; returns
 * dst per the C standard contract.  Delegates to the kernel's bcopy
 * (bcopy was #undef'd above, so this cannot recurse through the
 * string.h bcopy-calls-memmove macro).
 */
void *
memmove(void *dst, const void *src, size_t ulen)
{
	bcopy(src, dst, ulen);
	return dst;
}
926
927 /*
928 * Abstract:
929 * strlen returns the number of characters in "string" preceeding
930 * the terminating null character.
931 */
932
933 #undef strlen
/*
 * Return the number of characters in `string` preceding the
 * terminating NUL.
 */
size_t
strlen(
	const char *string)
{
	const char *p = string;

	while (*p != '\0') {
		p++;
	}
	return (size_t)(p - string);
}
945
946 #if MACH_ASSERT
947
948 /*
949 * Machine-dependent routine to fill in an array with up to callstack_max
950 * levels of return pc information.
951 */
void
machine_callstack(
	__unused uintptr_t *buf,
	__unused vm_size_t callstack_max)
{
	/* No-op on x86_64: no callstack information is recorded. */
}
958
959 #endif /* MACH_ASSERT */
960
/*
 * fillPage: fill physical page `pa` with the 32-bit pattern `fill`,
 * writing PAGE_SIZE / sizeof(int) words through the physmap.
 */
void
fillPage(ppnum_t pa, unsigned int fill)
{
	uint64_t src;
	int cnt = PAGE_SIZE / sizeof(unsigned int);

	src = i386_ptob(pa);
	memset_word((int *)PHYSMAP_PTOV(src), fill, cnt);
}
970
/*
 * __clflush: flush the cache line containing virtual address `ptr`
 * (CLFLUSH instruction).
 */
static inline void
__clflush(void *ptr)
{
	__asm__ volatile ("clflush (%0)" : : "r" (ptr));
}
976
/*
 * dcache_incoherent_io_store64: CLFLUSH every cache line overlapping
 * the physical range [pa, pa + count), through the physmap.  The
 * mfence before and after orders the flushes with respect to
 * surrounding memory accesses.
 */
void
dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	addr64_t linesize = cpuid_info()->cache_linesize;
	/* Round the end of the range up to a cache-line boundary. */
	addr64_t bound = (pa + count + linesize - 1) & ~(linesize - 1);

	mfence();

	while (pa < bound) {
		__clflush(PHYSMAP_PTOV(pa));
		pa += linesize;
	}

	mfence();
}
992
993 void
994 dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
995 {
996 return dcache_incoherent_io_store64(pa, count);
997 }
998
/*
 * flush_dcache64: flush the cache lines covering [addr, addr + count).
 * When `phys` is set the address is physical (flushed through the
 * physmap); otherwise it is treated as a directly dereferenceable
 * virtual address.
 */
void
flush_dcache64(addr64_t addr, unsigned count, int phys)
{
	if (phys) {
		dcache_incoherent_io_flush64(addr, count);
	} else {
		uint64_t linesize = cpuid_info()->cache_linesize;
		/* Round the end of the range up to a cache-line boundary. */
		addr64_t bound = (addr + count + linesize - 1) & ~(linesize - 1);
		mfence();
		while (addr < bound) {
			__clflush((void *) (uintptr_t) addr);
			addr += linesize;
		}
		mfence();
	}
}
1015
/*
 * invalidate_icache64: no-op on x86_64 -- x86 keeps the instruction
 * cache coherent with stores, so there is nothing to do.
 */
void
invalidate_icache64(__unused addr64_t addr,
    __unused unsigned count,
    __unused int phys)
{
}
1022
1023
1024 addr64_t vm_last_addr;
1025
/* Mark physical page pn "modified" in the pmap layer. */
void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}
1031
/* Mark physical page pn "referenced" in the pmap layer. */
void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
1037
1038 extern i386_cpu_info_t cpuid_cpu_info;
/*
 * cache_flush_page_phys: CLFLUSH every cache line of physical page
 * `pa`, via the physmap.  Panics if cpuid reports a zero cache line
 * size.  Interrupts are disabled across the flush loop -- presumably
 * to keep the walk uninterrupted; the fences on either side order the
 * flushes with surrounding accesses.
 */
void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t istate;
	unsigned char *cacheline_addr;
	i386_cpu_info_t *cpuid_infop = cpuid_info();
	int cacheline_size;
	int cachelines_to_flush;

	cacheline_size = cpuid_infop->cache_linesize;
	if (cacheline_size == 0) {
		panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop);
	}
	cachelines_to_flush = PAGE_SIZE / cacheline_size;

	mfence();

	istate = ml_set_interrupts_enabled(FALSE);

	for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
	    cachelines_to_flush > 0;
	    cachelines_to_flush--, cacheline_addr += cacheline_size) {
		__clflush((void *) cacheline_addr);
	}

	(void) ml_set_interrupts_enabled(istate);

	mfence();
}
1068
1069
1070 #if !MACH_KDP
/*
 * kdp_register_callout: stub for kernels built without MACH_KDP; the
 * callout registration is silently dropped.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
1076 #endif
1077
1078 #if !CONFIG_VMX
/*
 * VMX stubs for kernels built without CONFIG_VMX: VMXON reports the
 * feature as unsupported and VMXOFF is a no-op.
 */
int
host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}

void
host_vmxoff(void)
{
	return;
}
1090 #endif
1091
1092 static lck_grp_t xcpm_lck_grp;
1093 static lck_grp_attr_t xcpm_lck_grp_attr;
1094 static lck_attr_t xcpm_lck_attr;
1095 static lck_spin_t xcpm_lock;
1096
1097 void xcpm_bootstrap(void);
1098 void xcpm_mbox_lock(void);
1099 void xcpm_mbox_unlock(void);
1100 uint32_t xcpm_bios_mbox_cmd_read(uint32_t cmd);
1101 uint32_t xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd);
1102 void xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data);
1103 boolean_t xcpm_is_hwp_enabled(void);
1104
/*
 * xcpm_bootstrap: initialize the lock group, attributes, and spinlock
 * guarding the xcpm BIOS mailbox state.
 */
void
xcpm_bootstrap(void)
{
	lck_grp_attr_setdefault(&xcpm_lck_grp_attr);
	lck_grp_init(&xcpm_lck_grp, "xcpm", &xcpm_lck_grp_attr);
	lck_attr_setdefault(&xcpm_lck_attr);
	lck_spin_init(&xcpm_lock, &xcpm_lck_grp, &xcpm_lck_attr);
}
1113
/* Acquire the xcpm mailbox spinlock. */
void
xcpm_mbox_lock(void)
{
	lck_spin_lock(&xcpm_lock);
}

/* Release the xcpm mailbox spinlock. */
void
xcpm_mbox_unlock(void)
{
	lck_spin_unlock(&xcpm_lock);
}
1125
1126 static uint32_t __xcpm_state[64] = {};
1127
/*
 * xcpm_bios_mbox_cmd_read: read the mailbox state word for `cmd`, with
 * interrupts disabled and the mailbox spinlock held for the access.
 */
uint32_t
xcpm_bios_mbox_cmd_read(uint32_t cmd)
{
	uint32_t reg;
	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	xcpm_mbox_lock();
	reg = xcpm_bios_mbox_cmd_unsafe_read(cmd);
	xcpm_mbox_unlock();
	ml_set_interrupts_enabled(istate);
	return reg;
}
1139
1140 uint32_t
1141 xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd)
1142 {
1143 return __xcpm_state[cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]))];
1144 }
1145
/*
 * xcpm_bios_mbox_cmd_write: store `data` into the mailbox slot for
 * `cmd`, with interrupts disabled and the mailbox spinlock held.
 *
 * NOTE(review): `idx &= ~0x1` forces writes into even slots only,
 * while reads may index odd slots -- confirm this asymmetry is
 * intentional.
 */
void
xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data)
{
	uint32_t idx = cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]));
	idx &= ~0x1;

	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	xcpm_mbox_lock();
	__xcpm_state[idx] = data;
	xcpm_mbox_unlock();
	ml_set_interrupts_enabled(istate);
}
1158
/* HWP (hardware-controlled P-states) is never reported enabled here. */
boolean_t
xcpm_is_hwp_enabled(void)
{
	return FALSE;
}
1164