/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
58 #include <mach_assert.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/kern_types.h>
65 #include <kern/misc_protos.h>
66 #include <sys/errno.h>
67 #include <i386/param.h>
68 #include <i386/misc_protos.h>
69 #include <i386/cpu_data.h>
70 #include <i386/machine_routines.h>
71 #include <i386/cpuid.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_fault.h>
78 #include <libkern/OSAtomic.h>
79 #include <sys/kdebug.h>
82 #include <kdp/kdp_callout.h>
83 #endif /* !MACH_KDP */
85 #include <libkern/OSDebug.h>
93 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
98 /* prevent infinite recursion when memmove calls bcopy; in string.h, bcopy is defined to call memmove */
101 /* XXX - should be gone from here */
102 extern void invalidate_icache64(addr64_t addr
, unsigned cnt
, int phys
);
103 extern void flush_dcache64(addr64_t addr
, unsigned count
, int phys
);
104 extern boolean_t
phys_page_exists(ppnum_t
);
105 extern void bcopy_no_overwrite(const char *from
, char *to
,vm_size_t bytes
);
106 extern void pmap_set_reference(ppnum_t pn
);
107 extern void mapping_set_mod(ppnum_t pa
);
108 extern void mapping_set_ref(ppnum_t pn
);
110 extern void ovbcopy(const char *from
,
113 void machine_callstack(uintptr_t *buf
, vm_size_t callstack_max
);
/* Nonzero iff 'value' has any bits set above the low 32 bits. */
#define value_64bit(value) ((value) & 0xFFFFFFFF00000000ULL)
/* Low 32 bits of a 64-bit value. */
#define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFULL))

/* Bits per int; BYTE_SIZE (bits per byte) comes from mach headers -- presumably 8. */
#define INT_SIZE (BYTE_SIZE * sizeof (int))
122 * Set indicated bit in bit string.
125 setbit(int bitno
, int *s
)
127 s
[bitno
/ INT_SIZE
] |= 1 << (bitno
% INT_SIZE
);
131 * Clear indicated bit in bit string.
134 clrbit(int bitno
, int *s
)
136 s
[bitno
/ INT_SIZE
] &= ~(1 << (bitno
% INT_SIZE
));
140 * Test if indicated bit is set in bit string.
143 testbit(int bitno
, int *s
)
145 return s
[bitno
/ INT_SIZE
] & (1 << (bitno
% INT_SIZE
));
149 * Find first bit set in bit string.
156 for (offset
= 0; !*s
; offset
+= (int)INT_SIZE
, ++s
);
157 return offset
+ __builtin_ctz(*s
);
/*
 * ffs - find first (least significant) set bit, 1-indexed; 0 if mask == 0.
 */
int
ffs(unsigned int mask)
{
	/* __builtin_ctz(0) is undefined, so handle zero explicitly. */
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}
/*
 * ffsll - 64-bit variant of ffs(); 1-indexed, 0 if mask == 0.
 */
int
ffsll(unsigned long long mask)
{
	/* __builtin_ctzll(0) is undefined, so handle zero explicitly. */
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return 1 + __builtin_ctzll(mask);
}
/*
 * Find last bit set in bit string.
 * 1-indexed position of the most significant set bit; 0 if mask == 0.
 */
int
fls(unsigned int mask)
{
	/* __builtin_clz(0) is undefined, so handle zero explicitly. */
	if (mask == 0)
		return 0;

	return (sizeof (mask) << 3) - __builtin_clz(mask);
}
/*
 * flsll - 64-bit variant of fls(); 1-indexed, 0 if mask == 0.
 */
int
flsll(unsigned long long mask)
{
	/* __builtin_clzll(0) is undefined, so handle zero explicitly. */
	if (mask == 0)
		return 0;

	return (sizeof (mask) << 3) - __builtin_clzll(mask);
}
212 bzero_phys(src64
,bytes
);
220 bzero(PHYSMAP_PTOV(src64
), bytes
);
225 * bcopy_phys - like bcopy but copies from/to physical addresses.
234 /* Not necessary for K64 - but ensure we stay within a page */
235 if (((((uint32_t)src64
& (NBPG
-1)) + bytes
) > NBPG
) ||
236 ((((uint32_t)dst64
& (NBPG
-1)) + bytes
) > NBPG
) ) {
237 panic("bcopy_phys alignment");
239 bcopy(PHYSMAP_PTOV(src64
), PHYSMAP_PTOV(dst64
), bytes
);
243 * allow a function to get a quick virtual mapping of a physical page
250 int (*func
)(void * buffer
, vm_size_t bytes
, void * arg
),
253 /* Not necessary for K64 - but ensure we stay within a page */
254 if (((((uint32_t)dst64
& (NBPG
-1)) + bytes
) > NBPG
) ) {
255 panic("apply_func_phys alignment");
258 return func(PHYSMAP_PTOV(dst64
), bytes
, arg
);
262 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
270 vm_size_t bytes
) /* num bytes to copy */
272 /* Assume that bcopy copies left-to-right (low addr first). */
273 if (from
+ bytes
<= to
|| to
+ bytes
<= from
|| to
== from
)
274 bcopy_no_overwrite(from
, to
, bytes
); /* non-overlapping or no-op*/
276 bcopy_no_overwrite(from
, to
, bytes
); /* overlapping but OK */
278 /* to > from: overlapping, and must copy right-to-left. */
/*
 * Read data from a physical address. Memory should not be cache inhibited.
 */

/* Threshold (mach absolute time) above which a physical read is reported; 0 disables timing. */
uint64_t reportphyreaddelayabs;
/* When nonzero, emit an OSReportWithBacktrace for slow physical reads. */
uint32_t reportphyreadosbt;

#if DEVELOPMENT || DEBUG
uint32_t phyreadpanic = 1;	/* panic on slow reads in development/debug kernels */
#else
uint32_t phyreadpanic = 0;
#endif
300 __private_extern__
uint64_t
301 ml_phys_read_data(pmap_paddr_t paddr
, int size
) {
305 boolean_t istate
= TRUE
, timeread
= FALSE
;
306 uint64_t sabs
= 0, eabs
;
308 if (__improbable(!physmap_enclosed(paddr
)))
309 panic("%s: 0x%llx out of bounds\n", __FUNCTION__
, paddr
);
311 if (__improbable(reportphyreaddelayabs
!= 0)) {
312 istate
= ml_set_interrupts_enabled(FALSE
);
313 sabs
= mach_absolute_time();
319 s1
= *(volatile unsigned char *)PHYSMAP_PTOV(paddr
);
323 s2
= *(volatile unsigned short *)PHYSMAP_PTOV(paddr
);
327 result
= *(volatile unsigned int *)PHYSMAP_PTOV(paddr
);
330 result
= *(volatile unsigned long long *)PHYSMAP_PTOV(paddr
);
333 panic("Invalid size %d for ml_phys_read_data\n", size
);
337 if (__improbable(timeread
== TRUE
)) {
338 eabs
= mach_absolute_time();
339 (void)ml_set_interrupts_enabled(istate
);
341 if (__improbable((eabs
- sabs
) > reportphyreaddelayabs
)) {
342 if (phyreadpanic
&& (machine_timeout_suspended() == FALSE
)) {
343 panic_io_port_read();
344 panic("Read from physical addr 0x%llx took %llu ns, result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", paddr
, (eabs
- sabs
), result
, sabs
, eabs
, reportphyreaddelayabs
);
347 if (reportphyreadosbt
) {
348 OSReportWithBacktrace("ml_phys_read_data took %lluus\n", (eabs
- sabs
) / 1000);
351 DTRACE_PHYSLAT3(physread
, uint64_t, (eabs
- sabs
),
352 pmap_paddr_t
, paddr
, uint32_t, size
);
360 static unsigned long long
361 ml_phys_read_long_long(pmap_paddr_t paddr
) {
362 return ml_phys_read_data(paddr
, 8);
365 unsigned int ml_phys_read( vm_offset_t paddr
)
367 return (unsigned int) ml_phys_read_data((pmap_paddr_t
)paddr
, 4);
370 unsigned int ml_phys_read_word(vm_offset_t paddr
) {
372 return (unsigned int) ml_phys_read_data((pmap_paddr_t
)paddr
, 4);
375 unsigned int ml_phys_read_64(addr64_t paddr64
)
377 return (unsigned int) ml_phys_read_data((pmap_paddr_t
)paddr64
, 4);
380 unsigned int ml_phys_read_word_64(addr64_t paddr64
)
382 return (unsigned int) ml_phys_read_data((pmap_paddr_t
)paddr64
, 4);
385 unsigned int ml_phys_read_half(vm_offset_t paddr
)
387 return (unsigned int) ml_phys_read_data((pmap_paddr_t
)paddr
, 2);
390 unsigned int ml_phys_read_half_64(addr64_t paddr64
)
392 return (unsigned int) ml_phys_read_data((pmap_paddr_t
)paddr64
, 2);
395 unsigned int ml_phys_read_byte(vm_offset_t paddr
)
397 return (unsigned int) ml_phys_read_data((pmap_paddr_t
)paddr
, 1);
400 unsigned int ml_phys_read_byte_64(addr64_t paddr64
)
402 return (unsigned int) ml_phys_read_data((pmap_paddr_t
)paddr64
, 1);
405 unsigned long long ml_phys_read_double(vm_offset_t paddr
)
407 return ml_phys_read_long_long((pmap_paddr_t
)paddr
);
410 unsigned long long ml_phys_read_double_64(addr64_t paddr64
)
412 return ml_phys_read_long_long((pmap_paddr_t
)paddr64
);
418 * Write data to a physical address. Memory should not be cache inhibited.
422 ml_phys_write_data(pmap_paddr_t paddr
, unsigned long data
, int size
)
424 if (!physmap_enclosed(paddr
))
425 panic("%s: 0x%llx out of bounds\n", __FUNCTION__
, paddr
);
429 *(volatile unsigned char *)PHYSMAP_PTOV(paddr
) = (unsigned char)data
;
432 *(volatile unsigned short *)PHYSMAP_PTOV(paddr
) = (unsigned short)data
;
435 *(volatile unsigned int *)PHYSMAP_PTOV(paddr
) = (unsigned int)data
;
438 panic("Invalid size %d for ml_phys_write_data\n", size
);
444 ml_phys_write_long_long(pmap_paddr_t paddr
, unsigned long long data
)
446 if (!physmap_enclosed(paddr
))
447 panic("%s: 0x%llx out of bounds\n", __FUNCTION__
, paddr
);
449 *(volatile unsigned long long *)PHYSMAP_PTOV(paddr
) = data
;
452 void ml_phys_write_byte(vm_offset_t paddr
, unsigned int data
)
454 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 1);
457 void ml_phys_write_byte_64(addr64_t paddr64
, unsigned int data
)
459 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 1);
462 void ml_phys_write_half(vm_offset_t paddr
, unsigned int data
)
464 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 2);
467 void ml_phys_write_half_64(addr64_t paddr64
, unsigned int data
)
469 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 2);
472 void ml_phys_write(vm_offset_t paddr
, unsigned int data
)
474 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 4);
477 void ml_phys_write_64(addr64_t paddr64
, unsigned int data
)
479 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 4);
482 void ml_phys_write_word(vm_offset_t paddr
, unsigned int data
)
484 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 4);
487 void ml_phys_write_word_64(addr64_t paddr64
, unsigned int data
)
489 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 4);
492 void ml_phys_write_double(vm_offset_t paddr
, unsigned long long data
)
494 ml_phys_write_long_long((pmap_paddr_t
)paddr
, data
);
497 void ml_phys_write_double_64(addr64_t paddr64
, unsigned long long data
)
499 ml_phys_write_long_long((pmap_paddr_t
)paddr64
, data
);
503 /* PCI config cycle probing
506 * Read the memory location at physical address paddr.
507 * *Does not* recover from machine checks, unlike the PowerPC implementation.
508 * Should probably be deprecated.
512 ml_probe_read(vm_offset_t paddr
, unsigned int *val
)
514 if ((PAGE_SIZE
- (paddr
& PAGE_MASK
)) < 4)
517 *val
= ml_phys_read((pmap_paddr_t
)paddr
);
523 * Read the memory location at physical address paddr.
524 * This is a part of a device probe, so there is a good chance we will
525 * have a machine check here. So we have to be able to handle that.
526 * We assume that machine checks are enabled both in MSR and HIDs
529 ml_probe_read_64(addr64_t paddr64
, unsigned int *val
)
531 if ((PAGE_SIZE
- (paddr64
& PAGE_MASK
)) < 4)
534 *val
= ml_phys_read_64((pmap_paddr_t
)paddr64
);
/*
 * bcmp - compare 'size' bytes; returns 0 iff the ranges are identical.
 * A nonzero return only signals inequality; it is not ordered like memcmp.
 */
int bcmp(
	const void	*pa,
	const void	*pb,
	size_t	size)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (size == 0)
		return 0;

	while (size-- != 0)
		if (*a++ != *b++)
			break;

	/* On full match size has wrapped to SIZE_MAX, so +1 yields 0. */
	return (int)size + 1;
}
/*
 * memcmp - lexicographic comparison of n bytes as unsigned chars.
 * Returns <0, 0 or >0 as s1 is less than, equal to, or greater than s2.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return (0);
}
/*
 * memmove - copy ulen bytes from src to dst, tolerating overlap.
 * Forwards to bcopy (the #undef bcopy above prevents the string.h
 * bcopy-calls-memmove macro from recursing back here). Returns dst.
 */
void *
memmove(void *dst, const void *src, size_t ulen)
{
	bcopy(src, dst, ulen);

	return dst;
}
/*
 * strlen returns the number of characters in "string" preceeding
 * the terminating null character.
 */
size_t
strlen(
	const char *string)
{
	const char *ret = string;

	while (*string++ != '\0')
		continue;
	/* string now points one past the NUL; back up one before differencing. */
	return string - 1 - ret;
}
#if	MACH_ASSERT
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 * Stub on this platform: the buffer is left untouched.
 */
void machine_callstack(
	__unused uintptr_t	*buf,
	__unused vm_size_t	callstack_max)
{
}
#endif	/* MACH_ASSERT */
614 void fillPage(ppnum_t pa
, unsigned int fill
)
618 int cnt
= PAGE_SIZE
/ sizeof(unsigned int);
622 for (i
= 0, addr
= (unsigned int *)PHYSMAP_PTOV(src
); i
< cnt
; i
++)
/* Flush the cache line containing 'ptr' (x86 CLFLUSH instruction). */
static inline void __clflush(void *ptr)
{
	__asm__ volatile("clflush (%0)" : : "r" (ptr));
}
631 void dcache_incoherent_io_store64(addr64_t pa
, unsigned int count
)
633 addr64_t linesize
= cpuid_info()->cache_linesize
;
634 addr64_t bound
= (pa
+ count
+ linesize
- 1) & ~(linesize
- 1);
639 __clflush(PHYSMAP_PTOV(pa
));
646 void dcache_incoherent_io_flush64(addr64_t pa
, unsigned int count
)
648 return(dcache_incoherent_io_store64(pa
,count
));
652 flush_dcache64(addr64_t addr
, unsigned count
, int phys
)
655 dcache_incoherent_io_flush64(addr
, count
);
658 uint64_t linesize
= cpuid_info()->cache_linesize
;
659 addr64_t bound
= (addr
+ count
+ linesize
-1) & ~(linesize
- 1);
661 while (addr
< bound
) {
662 __clflush((void *) (uintptr_t) addr
);
670 invalidate_icache64(__unused addr64_t addr
,
671 __unused
unsigned count
,
677 addr64_t vm_last_addr
;
680 mapping_set_mod(ppnum_t pn
)
686 mapping_set_ref(ppnum_t pn
)
688 pmap_set_reference(pn
);
691 extern i386_cpu_info_t cpuid_cpu_info
;
693 cache_flush_page_phys(ppnum_t pa
)
696 unsigned char *cacheline_addr
;
697 i386_cpu_info_t
*cpuid_infop
= cpuid_info();
699 int cachelines_to_flush
;
701 cacheline_size
= cpuid_infop
->cache_linesize
;
702 if (cacheline_size
== 0)
703 panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop
);
704 cachelines_to_flush
= PAGE_SIZE
/cacheline_size
;
708 istate
= ml_set_interrupts_enabled(FALSE
);
710 for (cacheline_addr
= (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa
));
711 cachelines_to_flush
> 0;
712 cachelines_to_flush
--, cacheline_addr
+= cacheline_size
) {
713 __clflush((void *) cacheline_addr
);
716 (void) ml_set_interrupts_enabled(istate
);
724 kdp_register_callout(kdp_callout_fn_t fn
, void *arg
)
726 #pragma unused(fn,arg)
731 int host_vmxon(boolean_t exclusive __unused
)
733 return VMX_UNSUPPORTED
;
736 void host_vmxoff(void)