/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <kern/locks.h>
#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/panic_notify.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>
#include <sys/kdebug.h>

#if MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */

#include <architecture/i386/pio.h>

#if 0
#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#define KDEBUG 1
#endif
/* prevent infinite recursion when memmove calls bcopy; in string.h, bcopy is defined to call memmove */
#undef bcopy
/* XXX - should be gone from here */
extern void             invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void             flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t        phys_page_exists(ppnum_t);
extern void             bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void             pmap_set_reference(ppnum_t pn);
extern void             mapping_set_mod(ppnum_t pa);
extern void             mapping_set_ref(ppnum_t pn);

extern void             ovbcopy(const char *from, char *to, vm_size_t bytes);
void machine_callstack(uintptr_t *buf, vm_size_t callstack_max);
#define value_64bit(value)  ((value) & 0xFFFFFFFF00000000ULL)
#define low32(x)  ((unsigned int)((x) & 0x00000000FFFFFFFFULL))

#define INT_SIZE        (BYTE_SIZE * sizeof (int))
/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}
/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}
/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}
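
/*
 * Example (illustrative only, not part of this file): the three helpers
 * above treat an int array as a flat bit string, INT_SIZE bits per word,
 * so a 128-bit bitmap needs four ints:
 *
 *	int map[4] = { 0 };
 *
 *	setbit(70, map);            // sets bit 6 of map[2]
 *	if (testbit(70, map)) {
 *		clrbit(70, map);    // clears it again
 *	}
 */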
/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int offset;

	for (offset = 0; !*s; offset += (int)INT_SIZE, ++s) {
		;
	}
	return offset + __builtin_ctz(*s);
}
int
ffs(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}
int
ffsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return 1 + __builtin_ctzll(mask);
}
/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clz(mask);
}
int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clzll(mask);
}
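
/*
 * Example (illustrative only): ffs()/fls() return 1-based bit positions
 * and 0 for an empty mask, which is why the mask == 0 checks above cannot
 * be replaced by bare __builtin_ctz/__builtin_clz (undefined for 0):
 *
 *	ffs(0x10) == 5            // lowest set bit is bit 4
 *	fls(0x10) == 5            // highest set bit is bit 4
 *	ffs(0)    == 0
 *	flsll(1ULL << 40) == 41
 */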
void
bzero_phys_nc(
	addr64_t src64,
	uint32_t bytes)
{
	bzero_phys(src64, bytes);
}

void
bzero_phys(
	addr64_t src64,
	uint32_t bytes)
{
	bzero(PHYSMAP_PTOV(src64), bytes);
}
/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
	addr64_t src64,
	addr64_t dst64,
	vm_size_t bytes)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
	    ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("bcopy_phys alignment");
	}
	bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
}
/*
 * allow a function to get a quick virtual mapping of a physical page
 */

int
apply_func_phys(
	addr64_t dst64,
	vm_size_t bytes,
	int (*func)(void * buffer, vm_size_t bytes, void * arg),
	void * arg)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("apply_func_phys alignment");
	}

	return func(PHYSMAP_PTOV(dst64), bytes, arg);
}
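
/*
 * Example (illustrative sketch, not part of this file's API): the caller
 * supplies a callback that runs against the physmap alias of the page.
 * The callback name and checksum logic below are hypothetical.
 *
 *	static int
 *	sum_bytes(void *buffer, vm_size_t bytes, void *arg)
 *	{
 *		uint32_t *sum = (uint32_t *)arg;
 *		for (vm_size_t i = 0; i < bytes; i++) {
 *			*sum += ((uint8_t *)buffer)[i];
 *		}
 *		return 0;
 *	}
 *
 *	uint32_t sum = 0;
 *	int rc = apply_func_phys(page_pa, PAGE_SIZE, sum_bytes, &sum);
 *
 * page_pa must be page-aligned here, since the range may not cross a page.
 */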
/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
	const char      *from,
	char            *to,
	vm_size_t       bytes)          /* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from) {
		bcopy_no_overwrite(from, to, bytes);    /* non-overlapping or no-op */
	} else if (from > to) {
		bcopy_no_overwrite(from, to, bytes);    /* overlapping but OK */
	} else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0) {
			*to-- = *from--;
		}
	}
}
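
/*
 * Example (illustrative only): shifting a buffer right within itself is
 * the overlapping case that forces the right-to-left copy above.
 *
 *	char buf[8] = "abcdef";
 *	ovbcopy(buf, buf + 2, 5);   // buf is now "ababcde"
 *
 * A left-to-right copy would have clobbered buf[2..4] before reading them.
 */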
/*
 *  Read data from a physical address. Memory should not be cache inhibited.
 */

uint64_t reportphyreaddelayabs;
uint64_t reportphywritedelayabs;
uint32_t reportphyreadosbt;
uint32_t reportphywriteosbt;

#if DEVELOPMENT || DEBUG
uint32_t phyreadpanic = 1;
uint32_t phywritepanic = 1;
uint64_t tracephyreaddelayabs = 50 * NSEC_PER_USEC;
uint64_t tracephywritedelayabs = 50 * NSEC_PER_USEC;
uint64_t simulate_stretched_io = 0;
#else
uint32_t phyreadpanic = 0;
uint32_t phywritepanic = 0;
uint64_t tracephyreaddelayabs = 0;
uint64_t tracephywritedelayabs = 0;
#endif
__private_extern__ uint64_t
ml_phys_read_data(uint64_t paddr, int size)
{
	uint64_t result = 0;
	unsigned char s1;
	unsigned short s2;
	boolean_t istate = TRUE, timeread = FALSE;
	uint64_t sabs = 0, eabs;

	if (__improbable(!physmap_enclosed(paddr))) {
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
	}

	if (__improbable(reportphyreaddelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	case 8:
		result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs);
#endif /* DEVELOPMENT || DEBUG */

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
			(void)ml_set_interrupts_enabled(istate);

			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    paddr, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_phys_read_data took %lluus",
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
static unsigned long long
ml_phys_read_long_long(uint64_t paddr)
{
	return ml_phys_read_data(paddr, 8);
}
unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}

unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 4);
}

unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}

unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 4);
}

unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 2);
}

unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 2);
}

unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data(paddr, 1);
}

unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data(paddr64, 1);
}

unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long(paddr);
}

unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long(paddr64);
}
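
/*
 * Example (illustrative only, not part of this file): every fixed-width
 * reader above funnels into ml_phys_read_data() with an explicit size.
 * kvtophys() is used here purely to obtain a physical address; the
 * variable name is hypothetical.
 *
 *	uint64_t pa = kvtophys((vm_offset_t)&some_kernel_var);
 *	unsigned int       w = ml_phys_read_word(pa);       // 4-byte read
 *	unsigned long long d = ml_phys_read_double_64(pa);  // 8-byte read
 *
 * Any address outside the physmap panics via the physmap_enclosed() check.
 */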
/*
 *  Write data to a physical address. Memory should not be cache inhibited.
 */

__private_extern__ void
ml_phys_write_data(uint64_t paddr, unsigned long long data, int size)
{
	boolean_t istate = TRUE, timewrite = FALSE;
	uint64_t sabs = 0, eabs;

	if (__improbable(!physmap_enclosed(paddr))) {
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
	}

	if (__improbable(reportphywritedelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		*(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
		break;
	case 8:
		*(volatile unsigned long *)PHYSMAP_PTOV(paddr) = data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data", size);
		break;
	}

	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);
#endif /* DEVELOPMENT || DEBUG */

		if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
			(void)ml_set_interrupts_enabled(istate);

			if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Write to physical addr 0x%llx took %llu ns, "
				    "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    paddr, (eabs - sabs), data, sabs, eabs,
				    reportphywritedelayabs);
			}

			if (reportphywriteosbt) {
				OSReportWithBacktrace("ml_phys_write_data (%p, 0x%llx) "
				    "took %lluus",
				    paddr, data, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, data);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
			    (eabs - sabs), sabs, paddr, data);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
}
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 1);
}

void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 2);
}

void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(paddr64, data, 4);
}

void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data(paddr, data, 8);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data(paddr64, data, 8);
}
uint32_t
ml_port_io_read(uint16_t ioport, int size)
{
	uint32_t result = 0;

	uint64_t sabs, eabs;
	boolean_t istate, timeread = FALSE;

	if (__improbable(reportphyreaddelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		result = inb(ioport);
		break;
	case 2:
		result = inw(ioport);
		break;
	case 4:
		result = inl(ioport);
		break;
	default:
		panic("Invalid size %d for ml_port_io_read(0x%x)", size, (unsigned)ioport);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PORTIO_READ, 0, ioport, size, result, sabs, eabs - sabs);
#endif /* DEVELOPMENT || DEBUG */

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
			(void)ml_set_interrupts_enabled(istate);

			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from IO port 0x%x took %llu ns, "
				    "result: 0x%x (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_port_io_read(0x%x) took %lluus",
				    ioport, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT3(portioread, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_READ),
			    (eabs - sabs), sabs, ioport, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}

	return result;
}
void
ml_port_io_write(uint16_t ioport, uint32_t val, int size)
{
	uint64_t sabs, eabs;
	boolean_t istate, timewrite = FALSE;

	if (__improbable(reportphywritedelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}
#if DEVELOPMENT || DEBUG
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

	switch (size) {
	case 1:
		outb(ioport, (uint8_t)val);
		break;
	case 2:
		outw(ioport, (uint16_t)val);
		break;
	case 4:
		outl(ioport, (uint32_t)val);
		break;
	default:
		panic("Invalid size %d for ml_port_io_write(0x%x)", size, (unsigned)ioport);
		break;
	}

	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_PORTIO_WRITE, 0, ioport, size, val, sabs, eabs - sabs);
#endif /* DEVELOPMENT || DEBUG */

		if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
			(void)ml_set_interrupts_enabled(istate);

			if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Write to IO port 0x%x took %llu ns, val: 0x%x"
				    " (start: %llu, end: %llu), ceiling: %llu",
				    ioport, (eabs - sabs), val, sabs, eabs,
				    reportphywritedelayabs);
			}

			if (reportphywriteosbt) {
				OSReportWithBacktrace("ml_port_io_write(0x%x, %d, 0x%llx) "
				    "took %lluus",
				    ioport, size, val, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(portiowrite, uint64_t, (eabs - sabs),
			    uint16_t, ioport, uint32_t, size, uint64_t, val);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_WRITE),
			    (eabs - sabs), sabs, ioport, val);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
}
uint8_t
ml_port_io_read8(uint16_t ioport)
{
	return (uint8_t)ml_port_io_read(ioport, 1);
}

uint16_t
ml_port_io_read16(uint16_t ioport)
{
	return (uint16_t)ml_port_io_read(ioport, 2);
}

uint32_t
ml_port_io_read32(uint16_t ioport)
{
	return (uint32_t)ml_port_io_read(ioport, 4);
}

void
ml_port_io_write8(uint16_t ioport, uint8_t val)
{
	ml_port_io_write(ioport, val, 1);
}

void
ml_port_io_write16(uint16_t ioport, uint16_t val)
{
	ml_port_io_write(ioport, val, 2);
}

void
ml_port_io_write32(uint16_t ioport, uint32_t val)
{
	ml_port_io_write(ioport, val, 4);
}
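
/*
 * Example (illustrative only, not part of this file): a classic use of
 * these wrappers is the legacy CMOS/RTC register pair: write a register
 * index to port 0x70, then read that register's data from port 0x71.
 *
 *	ml_port_io_write8(0x70, 0x0A);           // select RTC status register A
 *	uint8_t sra = ml_port_io_read8(0x71);    // read it
 *
 * Like the physical-address accessors, the port accessors are timed when
 * reportphyreaddelayabs / reportphywritedelayabs are non-zero.
 */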
/* PCI config cycle probing
 *
 *      Read the memory location at physical address paddr.
 *  *Does not* recover from machine checks, unlike the PowerPC implementation.
 *  Should probably be deprecated.
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) {
		return FALSE;
	}

	*val = ml_phys_read(paddr);

	return TRUE;
}
/*
 *  Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr64, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) {
		return FALSE;
	}

	*val = ml_phys_read_64(paddr64);
	return TRUE;
}
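
/*
 * Example (illustrative only): unlike the ml_phys_read_* family, the probe
 * routines return a status instead of panicking, and refuse reads that
 * would straddle a page boundary.
 *
 *	unsigned int val;
 *	if (ml_probe_read_64(paddr64, &val)) {
 *		// val holds the 32-bit word at paddr64
 *	}
 *
 * ml_probe_read_64(page_base + PAGE_SIZE - 2, &val) returns FALSE.
 */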
int
bcmp(
	const void      *pa,
	const void      *pb,
	size_t          len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0) {
		return 0;
	}

	do {
		if (*a++ != *b++) {
			break;
		}
	} while (--len);

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if (__improbable(!(len & 0x00000000FFFFFFFFULL) && (len & 0xFFFFFFFF00000000ULL))) {
		return 0xFFFFFFFFL;
	} else {
		return (int)len;
	}
}
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++) {
				return *--p1 - *--p2;
			}
		} while (--n != 0);
	}
	return 0;
}
/*
 * Returns 0 if the pointer-aligned buffer at addr contains only zero
 * bytes, non-zero otherwise. addr and size must be 8-byte aligned.
 */
unsigned long
memcmp_zero_ptr_aligned(const void *addr, size_t size)
{
	const uint64_t *p = (const uint64_t *)addr;
	uint64_t a = p[0];

	static_assert(sizeof(unsigned long) == sizeof(uint64_t));

	if (size < 4 * sizeof(uint64_t)) {
		if (size > 1 * sizeof(uint64_t)) {
			a |= p[1];
			if (size > 2 * sizeof(uint64_t)) {
				a |= p[2];
			}
		}
	} else {
		size_t count = size / sizeof(uint64_t);
		uint64_t a1 = a;
		uint64_t a2 = p[1];
		uint64_t a3 = p[2];
		uint64_t a4 = p[3];

		/*
		 * note: for sizes not a multiple of 32 bytes, this will load
		 * the bytes [size % 32 .. 32) twice which is ok
		 */
		while (count > 4) {
			count -= 4;
			a1 |= p[count + 0];
			a2 |= p[count + 1];
			a3 |= p[count + 2];
			a4 |= p[count + 3];
		}
		a = a1 | a2 | a3 | a4;
	}

	return a;
}
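
/*
 * Example (illustrative only): checking whether an aligned buffer is all
 * zeroes without an early-exit branch per word in the hot loop.
 *
 *	uint64_t words[512] = { 0 };    // 4 KB, 8-byte aligned
 *	if (memcmp_zero_ptr_aligned(words, sizeof(words)) == 0) {
 *		// buffer contains only zero bytes
 *	}
 */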
void *
memmove(void *dst, const void *src, size_t ulen)
{
	bcopy(src, dst, ulen);
	return dst;
}
/*
 * Abstract:
 *      strlen returns the number of characters in "string" preceding
 *      the terminating null character.
 */
size_t
strlen(
	const char      *string)
{
	const char *ret = string;

	while (*string++ != '\0') {
		continue;
	}
	return string - 1 - ret;
}
#if MACH_ASSERT
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
	__unused uintptr_t      *buf,
	__unused vm_size_t      callstack_max)
{
}
#endif /* MACH_ASSERT */
void
fillPage(ppnum_t pa, unsigned int fill)
{
	uint64_t        src;
	int             cnt = PAGE_SIZE / sizeof(unsigned int);

	src = i386_ptob(pa);
	memset_word((int *)PHYSMAP_PTOV(src), fill, cnt);
}
static inline void
__clflush(void *ptr)
{
	__asm__ volatile ("clflush (%0)" : : "r" (ptr));
}
void
dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	addr64_t  linesize = cpuid_info()->cache_linesize;
	addr64_t  bound = (pa + count + linesize - 1) & ~(linesize - 1);

	mfence();

	while (pa < bound) {
		__clflush(PHYSMAP_PTOV(pa));
		pa += linesize;
	}

	mfence();
}
void
dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
	return dcache_incoherent_io_store64(pa, count);
}
void
flush_dcache64(addr64_t addr, unsigned count, int phys)
{
	if (phys) {
		dcache_incoherent_io_flush64(addr, count);
	} else {
		uint64_t  linesize = cpuid_info()->cache_linesize;
		addr64_t  bound = (addr + count + linesize - 1) & ~(linesize - 1);
		mfence();
		while (addr < bound) {
			__clflush((void *) (uintptr_t) addr);
			addr += linesize;
		}
		mfence();
	}
}
void
invalidate_icache64(__unused addr64_t addr,
    __unused unsigned count,
    __unused int phys)
{
}
addr64_t         vm_last_addr;
void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
extern i386_cpu_info_t cpuid_cpu_info;

void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t       istate;
	unsigned char   *cacheline_addr;
	i386_cpu_info_t *cpuid_infop = cpuid_info();
	int             cacheline_size;
	int             cachelines_to_flush;

	cacheline_size = cpuid_infop->cache_linesize;
	if (cacheline_size == 0) {
		panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop);
	}
	cachelines_to_flush = PAGE_SIZE / cacheline_size;

	mfence();

	istate = ml_set_interrupts_enabled(FALSE);

	for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
	    cachelines_to_flush > 0;
	    cachelines_to_flush--, cacheline_addr += cacheline_size) {
		__clflush((void *) cacheline_addr);
	}

	(void) ml_set_interrupts_enabled(istate);

	mfence();
}
#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif /* !MACH_KDP */
int
host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}
static lck_grp_t        xcpm_lck_grp;
static lck_grp_attr_t   xcpm_lck_grp_attr;
static lck_attr_t       xcpm_lck_attr;
static lck_spin_t       xcpm_lock;

void xcpm_bootstrap(void);
void xcpm_mbox_lock(void);
void xcpm_mbox_unlock(void);
uint32_t xcpm_bios_mbox_cmd_read(uint32_t cmd);
uint32_t xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd);
void xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data);
boolean_t xcpm_is_hwp_enabled(void);
void
xcpm_bootstrap(void)
{
	lck_grp_attr_setdefault(&xcpm_lck_grp_attr);
	lck_grp_init(&xcpm_lck_grp, "xcpm", &xcpm_lck_grp_attr);
	lck_attr_setdefault(&xcpm_lck_attr);
	lck_spin_init(&xcpm_lock, &xcpm_lck_grp, &xcpm_lck_attr);
}
void
xcpm_mbox_lock(void)
{
	lck_spin_lock(&xcpm_lock);
}

void
xcpm_mbox_unlock(void)
{
	lck_spin_unlock(&xcpm_lock);
}
static uint32_t __xcpm_state[64] = {};
uint32_t
xcpm_bios_mbox_cmd_read(uint32_t cmd)
{
	uint32_t reg;
	boolean_t istate = ml_set_interrupts_enabled(FALSE);

	xcpm_mbox_lock();
	reg = xcpm_bios_mbox_cmd_unsafe_read(cmd);
	xcpm_mbox_unlock();

	ml_set_interrupts_enabled(istate);
	return reg;
}
uint32_t
xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd)
{
	return __xcpm_state[cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]))];
}
void
xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data)
{
	uint32_t idx = cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]));

	boolean_t istate = ml_set_interrupts_enabled(FALSE);

	xcpm_mbox_lock();
	__xcpm_state[idx] = data;
	xcpm_mbox_unlock();

	ml_set_interrupts_enabled(istate);
}
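
/*
 * Example (illustrative only): the mailbox "registers" here are backed by
 * the static __xcpm_state array, and commands alias modulo its 64 entries,
 * so a write is immediately visible to a read of the same command:
 *
 *	xcpm_bios_mbox_cmd_write(5, 0xCAFE);
 *	uint32_t v = xcpm_bios_mbox_cmd_read(5);    // v == 0xCAFE
 *
 * The locked read/write paths also disable interrupts around the access.
 */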
boolean_t
xcpm_is_hwp_enabled(void)
{
	return FALSE;
}