2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
58 #include <mach_assert.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/kern_types.h>
65 #include <kern/misc_protos.h>
66 #include <sys/errno.h>
67 #include <i386/param.h>
68 #include <i386/misc_protos.h>
69 #include <i386/cpu_data.h>
70 #include <i386/machine_routines.h>
71 #include <i386/cpuid.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_fault.h>
78 #include <libkern/OSAtomic.h>
79 #include <sys/kdebug.h>
82 #include <kdp/kdp_callout.h>
83 #endif /* !MACH_KDP */
88 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
93 /* XXX - should be gone from here */
94 extern void invalidate_icache64(addr64_t addr
, unsigned cnt
, int phys
);
95 extern void flush_dcache64(addr64_t addr
, unsigned count
, int phys
);
96 extern boolean_t
phys_page_exists(ppnum_t
);
97 extern void bcopy_no_overwrite(const char *from
, char *to
,vm_size_t bytes
);
98 extern void pmap_set_reference(ppnum_t pn
);
99 extern void mapping_set_mod(ppnum_t pa
);
100 extern void mapping_set_ref(ppnum_t pn
);
102 extern void ovbcopy(const char *from
,
105 void machine_callstack(uintptr_t *buf
, vm_size_t callstack_max
);
108 #define value_64bit(value) ((value) & 0xFFFFFFFF00000000ULL)
109 #define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFULL))
111 #define INT_SIZE (BYTE_SIZE * sizeof (int))
114 * Set indicated bit in bit string.
117 setbit(int bitno
, int *s
)
119 s
[bitno
/ INT_SIZE
] |= 1 << (bitno
% INT_SIZE
);
123 * Clear indicated bit in bit string.
126 clrbit(int bitno
, int *s
)
128 s
[bitno
/ INT_SIZE
] &= ~(1 << (bitno
% INT_SIZE
));
132 * Test if indicated bit is set in bit string.
135 testbit(int bitno
, int *s
)
137 return s
[bitno
/ INT_SIZE
] & (1 << (bitno
% INT_SIZE
));
141 * Find first bit set in bit string.
148 for (offset
= 0; !*s
; offset
+= (int)INT_SIZE
, ++s
);
149 return offset
+ __builtin_ctz(*s
);
/*
 * ffs - find first set bit in "mask", counting from 1 at the least
 * significant bit. Returns 0 when mask is 0.
 */
int
ffs(unsigned int mask)
{
	/*
	 * Guard the zero case explicitly: __builtin_ctz(0) is undefined,
	 * and ffs(0) must return 0.  (The guard is not visible in this
	 * fragment but is required for correctness.)
	 */
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}
170 bzero_phys(src64
,bytes
);
178 bzero(PHYSMAP_PTOV(src64
), bytes
);
183 * bcopy_phys - like bcopy but copies from/to physical addresses.
192 /* Not necessary for K64 - but ensure we stay within a page */
193 if (((((uint32_t)src64
& (NBPG
-1)) + bytes
) > NBPG
) ||
194 ((((uint32_t)dst64
& (NBPG
-1)) + bytes
) > NBPG
) ) {
195 panic("bcopy_phys alignment");
197 bcopy(PHYSMAP_PTOV(src64
), PHYSMAP_PTOV(dst64
), bytes
);
201 * allow a function to get a quick virtual mapping of a physical page
208 int (*func
)(void * buffer
, vm_size_t bytes
, void * arg
),
211 /* Not necessary for K64 - but ensure we stay within a page */
212 if (((((uint32_t)dst64
& (NBPG
-1)) + bytes
) > NBPG
) ) {
213 panic("apply_func_phys alignment");
216 return func(PHYSMAP_PTOV(dst64
), bytes
, arg
);
220 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
228 vm_size_t bytes
) /* num bytes to copy */
230 /* Assume that bcopy copies left-to-right (low addr first). */
231 if (from
+ bytes
<= to
|| to
+ bytes
<= from
|| to
== from
)
232 bcopy_no_overwrite(from
, to
, bytes
); /* non-overlapping or no-op*/
234 bcopy_no_overwrite(from
, to
, bytes
); /* overlapping but OK */
236 /* to > from: overlapping, and must copy right-to-left. */
246 * Read data from a physical address. Memory should not be cache inhibited.
250 static inline unsigned int
251 ml_phys_read_data(pmap_paddr_t paddr
, int size
)
253 unsigned int result
= 0;
255 if (!physmap_enclosed(paddr
))
256 panic("%s: 0x%llx out of bounds\n", __FUNCTION__
, paddr
);
262 s1
= *(volatile unsigned char *)PHYSMAP_PTOV(paddr
);
266 s2
= *(volatile unsigned short *)PHYSMAP_PTOV(paddr
);
270 result
= *(volatile unsigned int *)PHYSMAP_PTOV(paddr
);
273 panic("Invalid size %d for ml_phys_read_data\n", size
);
279 static unsigned long long
280 ml_phys_read_long_long(pmap_paddr_t paddr
)
282 if (!physmap_enclosed(paddr
))
283 panic("%s: 0x%llx out of bounds\n", __FUNCTION__
, paddr
);
284 return *(volatile unsigned long long *)PHYSMAP_PTOV(paddr
);
287 unsigned int ml_phys_read( vm_offset_t paddr
)
289 return ml_phys_read_data((pmap_paddr_t
)paddr
, 4);
292 unsigned int ml_phys_read_word(vm_offset_t paddr
) {
294 return ml_phys_read_data((pmap_paddr_t
)paddr
, 4);
297 unsigned int ml_phys_read_64(addr64_t paddr64
)
299 return ml_phys_read_data((pmap_paddr_t
)paddr64
, 4);
302 unsigned int ml_phys_read_word_64(addr64_t paddr64
)
304 return ml_phys_read_data((pmap_paddr_t
)paddr64
, 4);
307 unsigned int ml_phys_read_half(vm_offset_t paddr
)
309 return ml_phys_read_data((pmap_paddr_t
)paddr
, 2);
312 unsigned int ml_phys_read_half_64(addr64_t paddr64
)
314 return ml_phys_read_data((pmap_paddr_t
)paddr64
, 2);
317 unsigned int ml_phys_read_byte(vm_offset_t paddr
)
319 return ml_phys_read_data((pmap_paddr_t
)paddr
, 1);
322 unsigned int ml_phys_read_byte_64(addr64_t paddr64
)
324 return ml_phys_read_data((pmap_paddr_t
)paddr64
, 1);
327 unsigned long long ml_phys_read_double(vm_offset_t paddr
)
329 return ml_phys_read_long_long((pmap_paddr_t
)paddr
);
332 unsigned long long ml_phys_read_double_64(addr64_t paddr64
)
334 return ml_phys_read_long_long((pmap_paddr_t
)paddr64
);
340 * Write data to a physical address. Memory should not be cache inhibited.
344 ml_phys_write_data(pmap_paddr_t paddr
, unsigned long data
, int size
)
346 if (!physmap_enclosed(paddr
))
347 panic("%s: 0x%llx out of bounds\n", __FUNCTION__
, paddr
);
351 *(volatile unsigned char *)PHYSMAP_PTOV(paddr
) = (unsigned char)data
;
354 *(volatile unsigned short *)PHYSMAP_PTOV(paddr
) = (unsigned short)data
;
357 *(volatile unsigned int *)PHYSMAP_PTOV(paddr
) = (unsigned int)data
;
360 panic("Invalid size %d for ml_phys_write_data\n", size
);
366 ml_phys_write_long_long(pmap_paddr_t paddr
, unsigned long long data
)
368 if (!physmap_enclosed(paddr
))
369 panic("%s: 0x%llx out of bounds\n", __FUNCTION__
, paddr
);
371 *(volatile unsigned long long *)PHYSMAP_PTOV(paddr
) = data
;
374 void ml_phys_write_byte(vm_offset_t paddr
, unsigned int data
)
376 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 1);
379 void ml_phys_write_byte_64(addr64_t paddr64
, unsigned int data
)
381 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 1);
384 void ml_phys_write_half(vm_offset_t paddr
, unsigned int data
)
386 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 2);
389 void ml_phys_write_half_64(addr64_t paddr64
, unsigned int data
)
391 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 2);
394 void ml_phys_write(vm_offset_t paddr
, unsigned int data
)
396 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 4);
399 void ml_phys_write_64(addr64_t paddr64
, unsigned int data
)
401 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 4);
404 void ml_phys_write_word(vm_offset_t paddr
, unsigned int data
)
406 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 4);
409 void ml_phys_write_word_64(addr64_t paddr64
, unsigned int data
)
411 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 4);
414 void ml_phys_write_double(vm_offset_t paddr
, unsigned long long data
)
416 ml_phys_write_long_long((pmap_paddr_t
)paddr
, data
);
419 void ml_phys_write_double_64(addr64_t paddr64
, unsigned long long data
)
421 ml_phys_write_long_long((pmap_paddr_t
)paddr64
, data
);
425 /* PCI config cycle probing
428 * Read the memory location at physical address paddr.
429 * *Does not* recover from machine checks, unlike the PowerPC implementation.
430 * Should probably be deprecated.
434 ml_probe_read(vm_offset_t paddr
, unsigned int *val
)
436 if ((PAGE_SIZE
- (paddr
& PAGE_MASK
)) < 4)
439 *val
= ml_phys_read((pmap_paddr_t
)paddr
);
445 * Read the memory location at physical address paddr.
446 * This is a part of a device probe, so there is a good chance we will
447 * have a machine check here. So we have to be able to handle that.
448 * We assume that machine checks are enabled both in MSR and HIDs
451 ml_probe_read_64(addr64_t paddr64
, unsigned int *val
)
453 if ((PAGE_SIZE
- (paddr64
& PAGE_MASK
)) < 4)
456 *val
= ml_phys_read_64((pmap_paddr_t
)paddr64
);
466 const char *a
= (const char *)pa
;
467 const char *b
= (const char *)pb
;
/*
 * memcmp - compare the first n bytes of s1 and s2 as unsigned chars.
 * Returns <0, 0, or >0 according to the first differing byte; 0 when
 * n == 0.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *p1 = s1;
	const unsigned char *p2 = s2;

	while (n-- != 0) {
		if (*p1 != *p2)
			return *p1 - *p2;
		p1++;
		p2++;
	}
	return 0;
}
/*
 * strlen returns the number of characters in "string" preceding
 * the terminating null character.
 */
size_t
strlen(const char *string)
{
	const char *cursor = string;

	/* Walk to the terminator, then report the distance covered. */
	while (*cursor != '\0')
		cursor++;
	return (size_t)(cursor - string);
}
512 hw_compare_and_store(uint32_t oldval
, uint32_t newval
, volatile uint32_t *dest
)
514 return OSCompareAndSwap((UInt32
)oldval
,
516 (volatile UInt32
*)dest
);
522 * Machine-dependent routine to fill in an array with up to callstack_max
523 * levels of return pc information.
525 void machine_callstack(
526 __unused
uintptr_t *buf
,
527 __unused vm_size_t callstack_max
)
531 #endif /* MACH_ASSERT */
533 void fillPage(ppnum_t pa
, unsigned int fill
)
537 int cnt
= PAGE_SIZE
/ sizeof(unsigned int);
541 for (i
= 0, addr
= (unsigned int *)PHYSMAP_PTOV(src
); i
< cnt
; i
++)
/* Issue CLFLUSH for the cache line containing *ptr (flushes and
 * invalidates that line from all cache levels). */
static inline void __clflush(void *ptr)
{
	__asm__ volatile("clflush (%0)" : : "r" (ptr));
}
550 void dcache_incoherent_io_store64(addr64_t pa
, unsigned int count
)
552 addr64_t linesize
= cpuid_info()->cache_linesize
;
553 addr64_t bound
= (pa
+ count
+ linesize
- 1) & ~(linesize
- 1);
558 __clflush(PHYSMAP_PTOV(pa
));
565 void dcache_incoherent_io_flush64(addr64_t pa
, unsigned int count
)
567 return(dcache_incoherent_io_store64(pa
,count
));
571 flush_dcache64(addr64_t addr
, unsigned count
, int phys
)
574 dcache_incoherent_io_flush64(addr
, count
);
577 uint64_t linesize
= cpuid_info()->cache_linesize
;
578 addr64_t bound
= (addr
+ count
+ linesize
-1) & ~(linesize
- 1);
580 while (addr
< bound
) {
581 __clflush((void *) (uintptr_t) addr
);
589 invalidate_icache64(__unused addr64_t addr
,
590 __unused
unsigned count
,
/* NOTE(review): presumably the highest usable kernel virtual address,
 * set during VM bootstrap — confirm against the VM initialization code. */
addr64_t         vm_last_addr;
599 mapping_set_mod(ppnum_t pn
)
605 mapping_set_ref(ppnum_t pn
)
607 pmap_set_reference(pn
);
610 extern i386_cpu_info_t cpuid_cpu_info
;
612 cache_flush_page_phys(ppnum_t pa
)
615 unsigned char *cacheline_addr
;
616 i386_cpu_info_t
*cpuid_infop
= cpuid_info();
618 int cachelines_to_flush
;
620 cacheline_size
= cpuid_infop
->cache_linesize
;
621 if (cacheline_size
== 0)
622 panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop
);
623 cachelines_to_flush
= PAGE_SIZE
/cacheline_size
;
627 istate
= ml_set_interrupts_enabled(FALSE
);
629 for (cacheline_addr
= (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa
));
630 cachelines_to_flush
> 0;
631 cachelines_to_flush
--, cacheline_addr
+= cacheline_size
) {
632 __clflush((void *) cacheline_addr
);
635 (void) ml_set_interrupts_enabled(istate
);
643 kdp_register_callout(kdp_callout_fn_t fn
, void *arg
)
645 #pragma unused(fn,arg)
/*
 * Return a uniformly distributed 64-bit random number.
 *
 * This interface should have minimal dependencies on kernel
 * services, and thus be available very early in the life
 * of the kernel. But as a result, it may not be very random.
 *
 * NOTE(review): the return type is not visible in this fragment;
 * uint64_t matches the "64-bit random number" contract and
 * ml_early_random() — confirm against the declaration.
 */
uint64_t
early_random(void)
{
	return ml_early_random();
}
664 int host_vmxon(boolean_t exclusive __unused
)
666 return VMX_UNSUPPORTED
;
669 void host_vmxoff(void)