]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm64/loose_ends.c
495cc7c03d9fb24197330c33df18a0b553020f68
[apple/xnu.git] / osfmk / arm64 / loose_ends.c
1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_assert.h>
30 #include <mach/vm_types.h>
31 #include <mach/mach_time.h>
32 #include <kern/timer.h>
33 #include <kern/clock.h>
34 #include <kern/machine.h>
35 #include <mach/machine.h>
36 #include <mach/machine/vm_param.h>
37 #include <mach_kdp.h>
38 #include <kdp/kdp_udp.h>
39 #if !MACH_KDP
40 #include <kdp/kdp_callout.h>
41 #endif /* !MACH_KDP */
42 #include <arm/cpu_data.h>
43 #include <arm/cpu_data_internal.h>
44 #include <arm/caches_internal.h>
45
46 #include <vm/vm_kern.h>
47 #include <vm/vm_map.h>
48 #include <vm/pmap.h>
49
50 #include <arm/misc_protos.h>
51
52 #include <sys/errno.h>
53
54 #define INT_SIZE (BYTE_SIZE * sizeof (int))
55
56 #define BCOPY_PHYS_SRC_IS_PHYS(flags) (((flags) & cppvPsrc) != 0)
57 #define BCOPY_PHYS_DST_IS_PHYS(flags) (((flags) & cppvPsnk) != 0)
58 #define BCOPY_PHYS_SRC_IS_USER(flags) (((flags) & (cppvPsrc | cppvKmap)) == 0)
59 #define BCOPY_PHYS_DST_IS_USER(flags) (((flags) & (cppvPsnk | cppvKmap)) == 0)
60
/*
 * Copy 'bytes' bytes from 'src' to 'dst'.
 *
 * 'flags' selects the address space of each endpoint:
 *   cppvPsrc / cppvPsnk  - the corresponding address is physical
 *   cppvKmap             - virtual addresses are kernel virtual
 *   (neither bit)        - virtual addresses are user virtual
 *
 * Physical endpoints are normally accessed through the physical aperture
 * (phystokv_range()).  A temporary per-CPU copy window is used instead
 * when the page is not covered by the aperture, when (on configurations
 * without coherent IO or a PTE physmap) its cache attributes are not
 * VM_WIMG_DEFAULT, or when the destination's aperture mapping is not
 * writable (write preflighting, e.g. for dtrace writing to read-only
 * mappings).  Copy-window chunks are limited to one page, and preemption
 * is disabled while a window is mapped.
 *
 * Returns KERN_SUCCESS, or the error from copyin()/copyout() when a user
 * endpoint is involved.
 */
static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
	unsigned int src_index;
	unsigned int dst_index;
	vm_offset_t src_offset;
	vm_offset_t dst_offset;
	unsigned int wimg_bits_src, wimg_bits_dst;
	unsigned int cpu_num = 0;
	ppnum_t pn_src;
	ppnum_t pn_dst;
	addr64_t end __assert_only;
	kern_return_t res = KERN_SUCCESS;

	/* Neither range may wrap the 64-bit address space. */
	assert(!__improbable(os_add_overflow(src, bytes, &end)));
	assert(!__improbable(os_add_overflow(dst, bytes, &end)));

	while ((bytes > 0) && (res == KERN_SUCCESS)) {
		src_offset = src & PAGE_MASK;
		dst_offset = dst & PAGE_MASK;
		boolean_t use_copy_window_src = FALSE;
		boolean_t use_copy_window_dst = FALSE;
		/* 'count'/'count2' bound this iteration's chunk for src/dst. */
		vm_size_t count = bytes;
		vm_size_t count2 = bytes;
		if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			use_copy_window_src = !pmap_valid_address(src);
			pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count = PAGE_SIZE - src_offset;
			wimg_bits_src = pmap_cache_attributes(pn_src);
			/* Non-default cache attributes must go through a window. */
			if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_src = TRUE;
			}
#else
			if (use_copy_window_src) {
				wimg_bits_src = pmap_cache_attributes(pn_src);
				count = PAGE_SIZE - src_offset;
			}
#endif
		}
		if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			// write preflighting needed for things like dtrace which may write static read-only mappings
			use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
			pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count2 = PAGE_SIZE - dst_offset;
			wimg_bits_dst = pmap_cache_attributes(pn_dst);
			if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_dst = TRUE;
			}
#else
			if (use_copy_window_dst) {
				wimg_bits_dst = pmap_cache_attributes(pn_dst);
				count2 = PAGE_SIZE - dst_offset;
			}
#endif
		}

		char *tmp_src;
		char *tmp_dst;

		/* Copy windows are per-CPU, so pin this thread to a CPU. */
		if (use_copy_window_src || use_copy_window_dst) {
			mp_disable_preemption();
			cpu_num = cpu_number();
		}

		if (use_copy_window_src) {
			src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
			tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
		} else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			/* phystokv_range() may shrink 'count' to the contiguous run. */
			tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
		} else {
			tmp_src = (char*)src;
		}
		if (use_copy_window_dst) {
			dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
			tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
		} else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
		} else {
			tmp_dst = (char*)dst;
		}

		/* Chunk size = min(count, count2, bytes). */
		if (count > count2) {
			count = count2;
		}
		if (count > bytes) {
			count = bytes;
		}

		/* User virtual endpoints must go through copyin()/copyout(). */
		if (BCOPY_PHYS_SRC_IS_USER(flags)) {
			res = copyin((user_addr_t)src, tmp_dst, count);
		} else if (BCOPY_PHYS_DST_IS_USER(flags)) {
			res = copyout(tmp_src, (user_addr_t)dst, count);
		} else {
			bcopy(tmp_src, tmp_dst, count);
		}

		if (use_copy_window_src) {
			pmap_unmap_cpu_windows_copy(src_index);
		}
		if (use_copy_window_dst) {
			pmap_unmap_cpu_windows_copy(dst_index);
		}
		if (use_copy_window_src || use_copy_window_dst) {
			mp_enable_preemption();
		}

		src += count;
		dst += count;
		bytes -= count;
	}
	return res;
}
175
/* Copy 'bytes' bytes between two physical addresses. */
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}
181
/*
 * "Non-cached" zero of physical memory; on this platform it simply
 * forwards to bzero_phys().
 */
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
187
/*
 * Zero 'bytes' bytes starting at physical address 'src'.
 *
 * Pages reachable through the physical aperture (with default cache
 * attributes, on non-coherent-IO configs) are zeroed directly; other
 * pages are zeroed one page at a time through a temporary per-CPU copy
 * window, with preemption disabled while the window is mapped.
 */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int wimg_bits;
	/* NOTE(review): this initial value appears unused — cpu_num is
	 * re-read after mp_disable_preemption() on the copy-window path. */
	unsigned int cpu_num = cpu_number();
	ppnum_t pn;
	addr64_t end __assert_only;

	/* The range must not wrap the 64-bit address space. */
	assert(!__improbable(os_add_overflow(src, bytes, &end)));

	vm_offset_t offset = src & PAGE_MASK;
	while (bytes > 0) {
		vm_size_t count = bytes;

		boolean_t use_copy_window = !pmap_valid_address(src);
		pn = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
		/* Work page-at-a-time; non-default cache attributes force a
		 * copy window. */
		count = PAGE_SIZE - offset;
		wimg_bits = pmap_cache_attributes(pn);
		if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			use_copy_window = TRUE;
		}
#else
		if (use_copy_window) {
			wimg_bits = pmap_cache_attributes(pn);
			count = PAGE_SIZE - offset;
		}
#endif
		char *buf;
		unsigned int index;
		if (use_copy_window) {
			mp_disable_preemption();
			cpu_num = cpu_number();
			index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
			buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
		} else {
			/* phystokv_range() may shrink 'count' to the contiguous
			 * aperture run containing 'src'. */
			buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
		}

		if (count > bytes) {
			count = bytes;
		}

		bzero(buf, count);

		if (use_copy_window) {
			pmap_unmap_cpu_windows_copy(index);
			mp_enable_preemption();
		}

		src += count;
		bytes -= count;
		/* Only the first chunk can start mid-page. */
		offset = 0;
	}
}
244
245 /*
246 * Read data from a physical address.
247 */
248
249
/*
 * Read a 1-, 2-, 4- or 8-byte value from physical address 'paddr' and
 * return it zero-extended to unsigned long long.  Panics if the access
 * would span a page boundary, or on an unsupported 'size'.
 *
 * On configurations with a usable physical aperture (coherent IO or PTE
 * physmap), covered addresses are read directly through phystokv().
 * Otherwise the page is mapped through a temporary per-CPU copy window
 * carrying the page's cache attributes, with preemption disabled while
 * the window is in use.
 */
static unsigned long long
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	unsigned long long result = 0;
	vm_offset_t copywindow_vaddr = 0;
	unsigned char s1;
	unsigned short s2;
	unsigned int s4;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	/* Fast path: read through the physical aperture. */
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			s1 = *(volatile unsigned char *)phystokv(paddr);
			result = s1;
			break;
		case 2:
			s2 = *(volatile unsigned short *)phystokv(paddr);
			result = s2;
			break;
		case 4:
			s4 = *(volatile unsigned int *)phystokv(paddr);
			result = s4;
			break;
		case 8:
			result = *(volatile unsigned long long *)phystokv(paddr);
			break;
		default:
			panic("Invalid size %d for ml_phys_read_data\n", size);
			break;
		}
		return result;
	}
#endif

	/* Slow path: map the page into a per-CPU copy window. */
	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)copywindow_vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)copywindow_vaddr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile unsigned int *)copywindow_vaddr;
		result = s4;
		break;
	case 8:
		result = *(volatile unsigned long long*)copywindow_vaddr;
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}
324
/* Read a 32-bit word at physical address 'paddr'. */
unsigned int
ml_phys_read( vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
330
/* Read a 32-bit word at physical address 'paddr'. */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
336
/* Read a 32-bit word at 64-bit physical address 'paddr64'. */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
342
/* Read a 32-bit word at 64-bit physical address 'paddr64'. */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
348
/* Read a 16-bit halfword at physical address 'paddr'. */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}
354
/* Read a 16-bit halfword at 64-bit physical address 'paddr64'. */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}
360
/* Read one byte at physical address 'paddr'. */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}
366
/* Read one byte at 64-bit physical address 'paddr64'. */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}
372
/* Read a 64-bit doubleword at physical address 'paddr'. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}
378
/* Read a 64-bit doubleword at 64-bit physical address 'paddr64'. */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}
384
385
386
387 /*
388 * Write data to a physical address.
389 */
390
/*
 * Write the low 'size' (1, 2, 4 or 8) bytes of 'data' to physical address
 * 'paddr'.  Panics if the access would span a page boundary, or on an
 * unsupported 'size'.
 *
 * On configurations with a usable physical aperture (coherent IO or PTE
 * physmap), covered addresses are written directly through phystokv().
 * Otherwise the page is mapped through a temporary per-CPU copy window
 * carrying the page's cache attributes, with preemption disabled while
 * the window is in use.
 */
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	vm_offset_t copywindow_vaddr = 0;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	/* Fast path: write through the physical aperture. */
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			*(volatile unsigned char *)phystokv(paddr) = (unsigned char)data;
			return;
		case 2:
			*(volatile unsigned short *)phystokv(paddr) = (unsigned short)data;
			return;
		case 4:
			*(volatile unsigned int *)phystokv(paddr) = (unsigned int)data;
			return;
		case 8:
			*(volatile unsigned long long *)phystokv(paddr) = data;
			return;
		default:
			panic("Invalid size %d for ml_phys_write_data\n", size);
		}
	}
#endif

	/* Slow path: map the page into a per-CPU copy window. */
	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) =
		    (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) =
		    (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)(copywindow_vaddr) =
		    (uint32_t)data;
		break;
	case 8:
		*(volatile unsigned long long *)(copywindow_vaddr) =
		    (unsigned long long)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
455
/* Write one byte to physical address 'paddr'. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}
461
/* Write one byte to 64-bit physical address 'paddr64'. */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}
467
/* Write a 16-bit halfword to physical address 'paddr'. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}
473
/* Write a 16-bit halfword to 64-bit physical address 'paddr64'. */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}
479
/* Write a 32-bit word to physical address 'paddr'. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
485
/* Write a 32-bit word to 64-bit physical address 'paddr64'. */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
491
/* Write a 32-bit word to physical address 'paddr'. */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
497
/* Write a 32-bit word to 64-bit physical address 'paddr64'. */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
503
/* Write a 64-bit doubleword to physical address 'paddr'. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}
509
/* Write a 64-bit doubleword to 64-bit physical address 'paddr64'. */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}
515
516
517 /*
518 * Set indicated bit in bit string.
519 */
520 void
521 setbit(int bitno, int *s)
522 {
523 s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
524 }
525
526 /*
527 * Clear indicated bit in bit string.
528 */
529 void
530 clrbit(int bitno, int *s)
531 {
532 s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
533 }
534
535 /*
536 * Test if indicated bit is set in bit string.
537 */
538 int
539 testbit(int bitno, int *s)
540 {
541 return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
542 }
543
544 /*
545 * Find first bit set in bit string.
546 */
547 int
548 ffsbit(int *s)
549 {
550 int offset;
551
552 for (offset = 0; !*s; offset += INT_SIZE, ++s) {
553 ;
554 }
555 return offset + __builtin_ctz(*s);
556 }
557
/*
 * Find first (least-significant) bit set; 1-based position, 0 when no
 * bit is set.
 */
int
ffs(unsigned int mask)
{
	if (mask != 0) {
		/*
		 * NOTE: cannot use __builtin_ffs because it generates a call
		 * to 'ffs'
		 */
		return __builtin_ctz(mask) + 1;
	}
	return 0;
}
571
/*
 * 64-bit variant of ffs(): 1-based position of the least-significant
 * set bit, 0 when no bit is set.
 */
int
ffsll(unsigned long long mask)
{
	if (mask != 0) {
		/*
		 * NOTE: cannot use __builtin_ffsll because it generates a call
		 * to 'ffsll'
		 */
		return __builtin_ctzll(mask) + 1;
	}
	return 0;
}
585
/*
 * Find last (most-significant) bit set; 1-based position, 0 when no bit
 * is set.
 */
int
fls(unsigned int mask)
{
	if (mask != 0) {
		return (int)(sizeof(mask) * 8) - __builtin_clz(mask);
	}
	return 0;
}
598
/*
 * 64-bit variant of fls(): 1-based position of the most-significant set
 * bit, 0 when no bit is set.
 */
int
flsll(unsigned long long mask)
{
	if (mask != 0) {
		return (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
	}
	return 0;
}
608
#undef bcmp
/*
 * Compare two byte ranges.  Returns 0 when they match; on a mismatch
 * the result is non-zero (the count of bytes not yet examined).
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *lhs = (const char *) pa;
	const char *rhs = (const char *) pb;

	if (len == 0) {
		return 0;
	}

	while (len != 0 && *lhs == *rhs) {
		lhs++;
		rhs++;
		len--;
	}

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
		return 0xFFFFFFFFL;
	}
	return (int)len;
}
640
#undef memcmp
/*
 * Standard memcmp: compare n bytes as unsigned chars.  Returns a value
 * <0, 0 or >0 reflecting the first differing byte.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *p1 = s1;
	const unsigned char *p2 = s2;

	for (; n != 0; n--, p1++, p2++) {
		if (*p1 != *p2) {
			return *p1 - *p2;
		}
	}
	return 0;
}
656
/*
 * Copy between physical and/or virtual addresses.  'which' uses the
 * cppv* flags: cppvPsrc/cppvPsnk mark the physical endpoint(s), cppvKmap
 * marks kernel (rather than user) virtual addresses, and cppvFsrc /
 * cppvFsnk request a dcache flush of the corresponding range afterwards
 * on non-coherent-IO configurations.  At least one endpoint must be
 * physical, otherwise this panics.
 */
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that only one is virtual */
		panic("%s: no more than 1 parameter may be virtual", __func__);
	}

	kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
	/* Non-coherent IO: honor requested dcache flushes.  The third
	 * argument tells flush_dcache64 whether the address is physical. */
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	}
#endif

	return res;
}
678
679 #if MACH_ASSERT
680
681 extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
682
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.  Unfilled slots are zeroed.
 */
void
machine_callstack(
	uintptr_t * buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;

	struct arm_saved_state *state = find_user_regs(current_thread());

	if (!state) {
		/* No user register state: report an all-zero callstack. */
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	} else {
		if (is_saved_state64(state)) {
			/* 64-bit frames: frame[0] = saved fp, frame[1] = saved lr. */
			uint64_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = get_saved_state_fp(state);
			/* Walk user frames until copyinframe() fails or fp is 0. */
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, TRUE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		} else {
			/* 32-bit process: frames hold 32-bit fp/lr pairs. */
			uint32_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = (uint32_t)get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, FALSE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		}

		/* Zero-fill any remaining slots. */
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	}
}
729
730 #endif /* MACH_ASSERT */
731
/* Big-endian mode is not supported here; always panics. */
int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}
738
/* Unimplemented on this platform; always panics. */
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}
747
/* Unimplemented on this platform; always panics. */
boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
756
757
/* Thread policy hints are not implemented on this platform; no-op. */
void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}
767
/* Stub for unimplemented functionality; never returns. */
__dead2
void
panic_unimplemented(void)
{
	panic("Not yet implemented.");
}
774
/* ARM64_TODO <rdar://problem/9198953> */
void abort(void) __dead2;

/* Kernel abort(): unconditionally panics. */
void
abort(void)
{
	panic("Abort.");
}
783
784
#if !MACH_KDP
/* Stub when the kernel debugger (KDP) is not built in: callouts are
 * silently dropped. */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif