/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>

#define INT_SIZE        (BYTE_SIZE * sizeof (int))

#define BCOPY_PHYS_SRC_IS_PHYS(flags) (((flags) & cppvPsrc) != 0)
#define BCOPY_PHYS_DST_IS_PHYS(flags) (((flags) & cppvPsnk) != 0)
#define BCOPY_PHYS_SRC_IS_USER(flags) (((flags) & (cppvPsrc | cppvKmap)) == 0)
#define BCOPY_PHYS_DST_IS_USER(flags) (((flags) & (cppvPsnk | cppvKmap)) == 0)

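/*
 * Copy 'bytes' bytes between any combination of physical, kernel-virtual,
 * and user-virtual addresses, as selected by 'flags' (see the cppv* masks
 * above).  Physical pages that cannot be reached through the kernel's
 * physical aperture, or that (on some configurations) have non-default
 * cache attributes, are accessed through temporary per-CPU copy windows
 * with preemption disabled.
 */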
static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
        unsigned int    src_index;
        unsigned int    dst_index;
        vm_offset_t     src_offset;
        vm_offset_t     dst_offset;
        unsigned int    wimg_bits_src, wimg_bits_dst;
        unsigned int    cpu_num = 0;
        ppnum_t         pn_src;
        ppnum_t         pn_dst;
        addr64_t        end __assert_only;
        kern_return_t   res = KERN_SUCCESS;

        assert(!__improbable(os_add_overflow(src, bytes, &end)));
        assert(!__improbable(os_add_overflow(dst, bytes, &end)));

        while ((bytes > 0) && (res == KERN_SUCCESS)) {
                src_offset = src & PAGE_MASK;
                dst_offset = dst & PAGE_MASK;
                boolean_t use_copy_window_src = FALSE;
                boolean_t use_copy_window_dst = FALSE;
                vm_size_t count = bytes;
                vm_size_t count2 = bytes;
                if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
                        use_copy_window_src = !pmap_valid_address(src);
                        pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
                        count = PAGE_SIZE - src_offset;
                        wimg_bits_src = pmap_cache_attributes(pn_src);
                        if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
                                use_copy_window_src = TRUE;
                        }
#else
                        if (use_copy_window_src) {
                                wimg_bits_src = pmap_cache_attributes(pn_src);
                                count = PAGE_SIZE - src_offset;
                        }
#endif
                }
                if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
                        // Write preflighting is needed for clients such as dtrace, which may write to static read-only mappings.
                        use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
                        pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
                        count2 = PAGE_SIZE - dst_offset;
                        wimg_bits_dst = pmap_cache_attributes(pn_dst);
                        if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
                                use_copy_window_dst = TRUE;
                        }
#else
                        if (use_copy_window_dst) {
                                wimg_bits_dst = pmap_cache_attributes(pn_dst);
                                count2 = PAGE_SIZE - dst_offset;
                        }
#endif
                }

                char *tmp_src;
                char *tmp_dst;

                if (use_copy_window_src || use_copy_window_dst) {
                        mp_disable_preemption();
                        cpu_num = cpu_number();
                }

                if (use_copy_window_src) {
                        src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
                        tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
                } else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
                        tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
                } else {
                        tmp_src = (char*)src;
                }
                if (use_copy_window_dst) {
                        dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
                        tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
                } else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
                        tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
                } else {
                        tmp_dst = (char*)dst;
                }

                if (count > count2) {
                        count = count2;
                }
                if (count > bytes) {
                        count = bytes;
                }

                if (BCOPY_PHYS_SRC_IS_USER(flags)) {
                        res = copyin((user_addr_t)src, tmp_dst, count);
                } else if (BCOPY_PHYS_DST_IS_USER(flags)) {
                        res = copyout(tmp_src, (user_addr_t)dst, count);
                } else {
                        bcopy(tmp_src, tmp_dst, count);
                }

                if (use_copy_window_src) {
                        pmap_unmap_cpu_windows_copy(src_index);
                }
                if (use_copy_window_dst) {
                        pmap_unmap_cpu_windows_copy(dst_index);
                }
                if (use_copy_window_src || use_copy_window_dst) {
                        mp_enable_preemption();
                }

                src += count;
                dst += count;
                bytes -= count;
        }
        return res;
}

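/*
 * Copy 'bytes' bytes from one physical address to another.
 */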
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
        bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}

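/*
 * Zero 'bytes' bytes at physical address 'src64'.  Despite the "_nc"
 * (presumably "non-cached") suffix, this currently just forwards to
 * bzero_phys(), which picks a safe zeroing method from the page's cache
 * attributes.
 */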
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
        bzero_phys(src64, bytes);
}

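/*
 * secure_memset() is used by bzero_phys() below for device memory, where
 * the 'dc zva' zeroing performed by bzero() is not safe.
 */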
extern void *secure_memset(void *, int, size_t);

/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
        unsigned int    wimg_bits;
        unsigned int    cpu_num = cpu_number();
        ppnum_t         pn;
        addr64_t        end __assert_only;

        assert(!__improbable(os_add_overflow(src, bytes, &end)));

        vm_offset_t offset = src & PAGE_MASK;
        while (bytes > 0) {
                vm_size_t count = bytes;

                boolean_t use_copy_window = !pmap_valid_address(src);
                pn = (ppnum_t)(src >> PAGE_SHIFT);
                wimg_bits = pmap_cache_attributes(pn);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
                count = PAGE_SIZE - offset;
                if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
                        use_copy_window = TRUE;
                }
#else
                if (use_copy_window) {
                        count = PAGE_SIZE - offset;
                }
#endif
                char *buf;
                unsigned int index;
                if (use_copy_window) {
                        mp_disable_preemption();
                        cpu_num = cpu_number();
                        index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
                        buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
                } else {
                        buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
                }

                if (count > bytes) {
                        count = bytes;
                }

                switch (wimg_bits & VM_WIMG_MASK) {
                case VM_WIMG_DEFAULT:
                case VM_WIMG_WCOMB:
                case VM_WIMG_INNERWBACK:
                case VM_WIMG_WTHRU:
                        bzero(buf, count);
                        break;
                default:
                        /* 'dc zva' performed by bzero is not safe for device memory */
                        secure_memset((void*)buf, 0, count);
                }

                if (use_copy_window) {
                        pmap_unmap_cpu_windows_copy(index);
                        mp_enable_preemption();
                }

                src += count;
                bytes -= count;
                offset = 0;
        }
}

/*
 * Read data from a physical address.  'size' must be 1, 2, 4, or 8 bytes,
 * and the access must not cross a page boundary.
 */

static unsigned long long
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
        unsigned int   index;
        unsigned int   wimg_bits;
        ppnum_t        pn = (ppnum_t)(paddr >> PAGE_SHIFT);
        ppnum_t        pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
        unsigned long long result = 0;
        vm_offset_t    copywindow_vaddr = 0;
        unsigned char  s1;
        unsigned short s2;
        unsigned int   s4;

        if (__improbable(pn_end != pn)) {
                panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
        }

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
        if (pmap_valid_address(paddr)) {
                switch (size) {
                case 1:
                        s1 = *(volatile unsigned char *)phystokv(paddr);
                        result = s1;
                        break;
                case 2:
                        s2 = *(volatile unsigned short *)phystokv(paddr);
                        result = s2;
                        break;
                case 4:
                        s4 = *(volatile unsigned int *)phystokv(paddr);
                        result = s4;
                        break;
                case 8:
                        result = *(volatile unsigned long long *)phystokv(paddr);
                        break;
                default:
                        panic("Invalid size %d for ml_phys_read_data\n", size);
                        break;
                }
                return result;
        }
#endif

        mp_disable_preemption();
        wimg_bits = pmap_cache_attributes(pn);
        index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
        copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

        switch (size) {
        case 1:
                s1 = *(volatile unsigned char *)copywindow_vaddr;
                result = s1;
                break;
        case 2:
                s2 = *(volatile unsigned short *)copywindow_vaddr;
                result = s2;
                break;
        case 4:
                s4 = *(volatile unsigned int *)copywindow_vaddr;
                result = s4;
                break;
        case 8:
                result = *(volatile unsigned long long*)copywindow_vaddr;
                break;
        default:
                panic("Invalid size %d for ml_phys_read_data\n", size);
                break;
        }

        pmap_unmap_cpu_windows_copy(index);
        mp_enable_preemption();

        return result;
}

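/*
 * In the wrappers below, the "_64" suffix refers to the width of the
 * physical address argument (addr64_t), not to the access size; note that
 * ml_phys_read_64() and ml_phys_read_word_64() still read only 4 bytes.
 */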
unsigned int
ml_phys_read(vm_offset_t paddr)
{
        return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
        return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_64(addr64_t paddr64)
{
        return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
        return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
        return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
        return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
        return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
        return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
        return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}

unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
        return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}

/*
 * Write data to a physical address.  'size' must be 1, 2, 4, or 8 bytes,
 * and the access must not cross a page boundary.
 */

static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size)
{
        unsigned int    index;
        unsigned int    wimg_bits;
        ppnum_t         pn = (ppnum_t)(paddr >> PAGE_SHIFT);
        ppnum_t         pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
        vm_offset_t     copywindow_vaddr = 0;

        if (__improbable(pn_end != pn)) {
                panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
        }

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
        if (pmap_valid_address(paddr)) {
                switch (size) {
                case 1:
                        *(volatile unsigned char *)phystokv(paddr) = (unsigned char)data;
                        return;
                case 2:
                        *(volatile unsigned short *)phystokv(paddr) = (unsigned short)data;
                        return;
                case 4:
                        *(volatile unsigned int *)phystokv(paddr) = (unsigned int)data;
                        return;
                case 8:
                        *(volatile unsigned long long *)phystokv(paddr) = data;
                        return;
                default:
                        panic("Invalid size %d for ml_phys_write_data\n", size);
                }
        }
#endif

        mp_disable_preemption();
        wimg_bits = pmap_cache_attributes(pn);
        index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
        copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

        switch (size) {
        case 1:
                *(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data;
                break;
        case 2:
                *(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data;
                break;
        case 4:
                *(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data;
                break;
        case 8:
                *(volatile unsigned long long *)(copywindow_vaddr) = data;
                break;
        default:
                panic("Invalid size %d for ml_phys_write_data\n", size);
                break;
        }

        pmap_unmap_cpu_windows_copy(index);
        mp_enable_preemption();
}

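/*
 * As with the read wrappers, "_64" below refers only to the address width:
 * ml_phys_write_64() writes 4 bytes; the "double" variants write 8.
 */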
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
        ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
        ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}

/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
        s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
        s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
        return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
}

/*
 * Find first bit set in bit string.  Assumes at least one bit is set;
 * if the string is all zeroes, the scan runs past the end of it.
 */
int
ffsbit(int *s)
{
        int offset;

        for (offset = 0; !*s; offset += INT_SIZE, ++s) {
                ;
        }
        return offset + __builtin_ctz(*s);
}

int
ffs(unsigned int mask)
{
        if (mask == 0) {
                return 0;
        }

        /*
         * NOTE: cannot use __builtin_ffs because it generates a call to
         * 'ffs'
         */
        return 1 + __builtin_ctz(mask);
}

int
ffsll(unsigned long long mask)
{
        if (mask == 0) {
                return 0;
        }

        /*
         * NOTE: cannot use __builtin_ffsll because it generates a call to
         * 'ffsll'
         */
        return 1 + __builtin_ctzll(mask);
}

/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
        if (mask == 0) {
                return 0;
        }

        return (sizeof(mask) << 3) - __builtin_clz(mask);
}

int
flsll(unsigned long long mask)
{
        if (mask == 0) {
                return 0;
        }

        return (sizeof(mask) << 3) - __builtin_clzll(mask);
}

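/*
 * Both ffs/ffsll and fls/flsll use 1-based bit positions and return 0 for
 * a zero mask; for example, ffs(0x10) == 5 and fls(0x10) == 5.
 */

/*
 * bcmp() reports only equality: the result is 0 if the buffers match and
 * non-zero otherwise; it is not a lexicographic comparison.
 */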
#undef bcmp
int
bcmp(
        const void *pa,
        const void *pb,
        size_t len)
{
        const char *a = (const char *) pa;
        const char *b = (const char *) pb;

        if (len == 0) {
                return 0;
        }

        do {
                if (*a++ != *b++) {
                        break;
                }
        } while (--len);

        /*
         * If the residual length is non-zero but its low 32 bits are clear,
         * (int)len would truncate to 0 and falsely report equality, so
         * return a non-zero value instead.  Otherwise return the residual
         * length as-is, in case a caller uses the return value as more than
         * a zero/non-zero indicator.
         */
        if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
                return 0xFFFFFFFFL;
        } else {
                return (int)len;
        }
}

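/*
 * memcmp() returns the difference between the first pair of mismatching
 * bytes (compared as unsigned char), or 0 if the buffers are equal.
 */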
#undef memcmp
int
memcmp(const void *s1, const void *s2, size_t n)
{
        if (n != 0) {
                const unsigned char *p1 = s1, *p2 = s2;

                do {
                        if (*p1++ != *p2++) {
                                return *--p1 - *--p2;
                        }
                } while (--n != 0);
        }
        return 0;
}

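/*
 * Copy between a physical and a virtual address, or between two physical
 * addresses, as selected by the cppvPsrc/cppvPsnk bits of 'which'; at least
 * one side must be physical.  On non-coherent platforms, cppvFsrc/cppvFsnk
 * additionally request a data-cache flush of the source/sink range after
 * the copy.
 */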
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
        if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that only one is virtual */
                panic("%s: no more than 1 parameter may be virtual", __func__);
        }

        kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
        if (which & cppvFsrc) {
                flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
        }

        if (which & cppvFsnk) {
                flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
        }
#endif

        return res;
}

#if MACH_ASSERT

extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
        uintptr_t * buf,
        vm_size_t callstack_max)
{
        /* Captures the USER call stack */
        uint32_t i = 0;

        struct arm_saved_state *state = find_user_regs(current_thread());

        if (!state) {
                while (i < callstack_max) {
                        buf[i++] = 0;
                }
        } else {
                if (is_saved_state64(state)) {
                        uint64_t frame[2];
                        buf[i++] = (uintptr_t)get_saved_state_pc(state);
                        frame[0] = get_saved_state_fp(state);
                        while (i < callstack_max && frame[0] != 0) {
                                if (copyinframe(frame[0], (void*) frame, TRUE)) {
                                        break;
                                }
                                buf[i++] = (uintptr_t)frame[1];
                        }
                } else {
                        uint32_t frame[2];
                        buf[i++] = (uintptr_t)get_saved_state_pc(state);
                        frame[0] = (uint32_t)get_saved_state_fp(state);
                        while (i < callstack_max && frame[0] != 0) {
                                if (copyinframe(frame[0], (void*) frame, FALSE)) {
                                        break;
                                }
                                buf[i++] = (uintptr_t)frame[1];
                        }
                }

                while (i < callstack_max) {
                        buf[i++] = 0;
                }
        }
}

#endif /* MACH_ASSERT */

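/*
 * What follows are unimplemented stubs and no-ops, presumably kept to
 * satisfy machine-independent callers of these interfaces.
 */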
int
clr_be_bit(void)
{
        panic("clr_be_bit");
        return 0;
}

boolean_t
ml_probe_read(
        __unused vm_offset_t paddr,
        __unused unsigned int *val)
{
        panic("ml_probe_read() unimplemented");
        return 1;
}

boolean_t
ml_probe_read_64(
        __unused addr64_t paddr,
        __unused unsigned int *val)
{
        panic("ml_probe_read_64() unimplemented");
        return 1;
}

void
ml_thread_policy(
        __unused thread_t thread,
        __unused unsigned policy_id,
        __unused unsigned policy_info)
{
        // <rdar://problem/7141284>: Reduce print noise
        // kprintf("ml_thread_policy() unimplemented\n");
}

__dead2
void
panic_unimplemented(void)
{
        panic("Not yet implemented.");
}

/* ARM64_TODO <rdar://problem/9198953> */
void abort(void) __dead2;

void
abort(void)
{
        panic("Abort.");
}

#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn, arg)
}
#endif