/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>
#include <libkern/section_keywords.h>
#define INT_SIZE        (BYTE_SIZE * sizeof (int))

#define BCOPY_PHYS_SRC_IS_PHYS(flags)   (((flags) & cppvPsrc) != 0)
#define BCOPY_PHYS_DST_IS_PHYS(flags)   (((flags) & cppvPsnk) != 0)
#define BCOPY_PHYS_SRC_IS_USER(flags)   (((flags) & (cppvPsrc | cppvKmap)) == 0)
#define BCOPY_PHYS_DST_IS_USER(flags)   (((flags) & (cppvPsnk | cppvKmap)) == 0)

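/*
 * Copy 'bytes' bytes between any combination of physical, kernel-virtual,
 * and user-virtual addresses, as selected by 'flags': cppvPsrc/cppvPsnk
 * mark the source/sink as physical, and a side that is neither physical
 * nor kernel-mapped (cppvKmap) is treated as a user virtual address.
 * Pages outside the kernel's physical aperture, or with non-default cache
 * attributes, are accessed a page at a time through per-CPU copy windows.
 */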
static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
    unsigned int    src_index;
    unsigned int    dst_index;
    vm_offset_t     src_offset;
    vm_offset_t     dst_offset;
    unsigned int    wimg_bits_src, wimg_bits_dst;
    unsigned int    cpu_num = 0;
    ppnum_t         pn_src;
    ppnum_t         pn_dst;
    addr64_t        end __assert_only;
    kern_return_t   res = KERN_SUCCESS;

    if (!BCOPY_PHYS_SRC_IS_USER(flags)) {
        assert(!__improbable(os_add_overflow(src, bytes, &end)));
    }
    if (!BCOPY_PHYS_DST_IS_USER(flags)) {
        assert(!__improbable(os_add_overflow(dst, bytes, &end)));
    }

    while ((bytes > 0) && (res == KERN_SUCCESS)) {
        src_offset = src & PAGE_MASK;
        dst_offset = dst & PAGE_MASK;
        boolean_t use_copy_window_src = FALSE;
        boolean_t use_copy_window_dst = FALSE;
        vm_size_t count = bytes;
        vm_size_t count2 = bytes;
        if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
            use_copy_window_src = !pmap_valid_address(src);
            pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
            count = PAGE_SIZE - src_offset;
            wimg_bits_src = pmap_cache_attributes(pn_src);
            if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
                use_copy_window_src = TRUE;
            }
#else
            if (use_copy_window_src) {
                wimg_bits_src = pmap_cache_attributes(pn_src);
                count = PAGE_SIZE - src_offset;
            }
#endif
        }
        if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
            // write preflighting needed for things like dtrace which may write static read-only mappings
            use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
            pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
            count2 = PAGE_SIZE - dst_offset;
            wimg_bits_dst = pmap_cache_attributes(pn_dst);
            if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
                use_copy_window_dst = TRUE;
            }
#else
            if (use_copy_window_dst) {
                wimg_bits_dst = pmap_cache_attributes(pn_dst);
                count2 = PAGE_SIZE - dst_offset;
            }
#endif
        }

        char *tmp_src;
        char *tmp_dst;

        if (use_copy_window_src || use_copy_window_dst) {
            mp_disable_preemption();
            cpu_num = cpu_number();
        }

        if (use_copy_window_src) {
            src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
            tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
        } else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
            tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
        } else {
            tmp_src = (char*)src;
        }
        if (use_copy_window_dst) {
            dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
            tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
        } else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
            tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
        } else {
            tmp_dst = (char*)dst;
        }

        if (count > count2) {
            count = count2;
        }
        if (count > bytes) {
            count = bytes;
        }

        if (BCOPY_PHYS_SRC_IS_USER(flags)) {
            res = copyin((user_addr_t)src, tmp_dst, count);
        } else if (BCOPY_PHYS_DST_IS_USER(flags)) {
            res = copyout(tmp_src, (user_addr_t)dst, count);
        } else {
            bcopy(tmp_src, tmp_dst, count);
        }

        if (use_copy_window_src) {
            pmap_unmap_cpu_windows_copy(src_index);
        }
        if (use_copy_window_dst) {
            pmap_unmap_cpu_windows_copy(dst_index);
        }
        if (use_copy_window_src || use_copy_window_dst) {
            mp_enable_preemption();
        }

        src += count;
        dst += count;
        bytes -= count;
    }
    return res;
}
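/* Copy between two physical addresses. */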
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
    bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}
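/*
 * Zero bytes at a physical address that may be mapped non-cacheable.
 * bzero_phys() below already chooses a zeroing routine that is safe for
 * device memory based on the page's cache attributes, so this simply
 * forwards to it.
 */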
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
    bzero_phys(src64, bytes);
}
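/*
 * memset() variant used here because it avoids the 'dc zva' cacheline
 * zeroing that bzero() may perform (see the comment in bzero_phys() below).
 */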
extern void *secure_memset(void *, int, size_t);

/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
    unsigned int    wimg_bits;
    unsigned int    cpu_num = cpu_number();
    ppnum_t         pn;
    addr64_t        end __assert_only;

    assert(!__improbable(os_add_overflow(src, bytes, &end)));

    vm_offset_t offset = src & PAGE_MASK;
    while (bytes > 0) {
        vm_size_t count = bytes;

        boolean_t use_copy_window = !pmap_valid_address(src);
        pn = (ppnum_t)(src >> PAGE_SHIFT);
        wimg_bits = pmap_cache_attributes(pn);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
        count = PAGE_SIZE - offset;
        if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
            use_copy_window = TRUE;
        }
#else
        if (use_copy_window) {
            count = PAGE_SIZE - offset;
        }
#endif
        char *buf;
        unsigned int index;
        if (use_copy_window) {
            mp_disable_preemption();
            cpu_num = cpu_number();
            index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
            buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
        } else {
            buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
        }

        if (count > bytes) {
            count = bytes;
        }

        switch (wimg_bits & VM_WIMG_MASK) {
        case VM_WIMG_DEFAULT:
        case VM_WIMG_WCOMB:
        case VM_WIMG_INNERWBACK:
        case VM_WIMG_WTHRU:
            bzero(buf, count);
            break;
        default:
            /* 'dc zva' performed by bzero is not safe for device memory */
            secure_memset((void*)buf, 0, count);
        }

        if (use_copy_window) {
            pmap_unmap_cpu_windows_copy(index);
            mp_enable_preemption();
        }

        src += count;
        bytes -= count;
        offset = 0;
    }
}

/*
 * Read data from a physical address.
 */
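/*
 * Reads of 1, 2, 4, or 8 bytes must not span a page boundary; the source
 * is accessed through the physical aperture when it is a kernel-managed
 * address, and through a per-CPU copy window otherwise.
 */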
static unsigned long long
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
    unsigned int   index;
    unsigned int   wimg_bits;
    ppnum_t        pn = (ppnum_t)(paddr >> PAGE_SHIFT);
    ppnum_t        pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
    unsigned long long result = 0;
    vm_offset_t    copywindow_vaddr = 0;
    unsigned char  s1;
    unsigned short s2;
    unsigned int   s4;

    if (__improbable(pn_end != pn)) {
        panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
    }

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
    if (pmap_valid_address(paddr)) {
        switch (size) {
        case 1:
            s1 = *(volatile unsigned char *)phystokv(paddr);
            result = s1;
            break;
        case 2:
            s2 = *(volatile unsigned short *)phystokv(paddr);
            result = s2;
            break;
        case 4:
            s4 = *(volatile unsigned int *)phystokv(paddr);
            result = s4;
            break;
        case 8:
            result = *(volatile unsigned long long *)phystokv(paddr);
            break;
        default:
            panic("Invalid size %d for ml_phys_read_data\n", size);
            break;
        }
        return result;
    }
#endif

    mp_disable_preemption();
    wimg_bits = pmap_cache_attributes(pn);
    index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
    copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

    switch (size) {
    case 1:
        s1 = *(volatile unsigned char *)copywindow_vaddr;
        result = s1;
        break;
    case 2:
        s2 = *(volatile unsigned short *)copywindow_vaddr;
        result = s2;
        break;
    case 4:
        s4 = *(volatile unsigned int *)copywindow_vaddr;
        result = s4;
        break;
    case 8:
        result = *(volatile unsigned long long*)copywindow_vaddr;
        break;
    default:
        panic("Invalid size %d for ml_phys_read_data\n", size);
        break;
    }

    pmap_unmap_cpu_windows_copy(index);
    mp_enable_preemption();

    return result;
}
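/*
 * In the accessors below, the '_64' suffix refers to the width of the
 * physical address argument, not the access size; e.g. ml_phys_read_64()
 * still performs a 4-byte read.
 */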
unsigned int
ml_phys_read(vm_offset_t paddr)
{
    return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
    return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_64(addr64_t paddr64)
{
    return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
    return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
    return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
    return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
    return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
    return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
    return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}

unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
    return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}

/*
 * Write data to a physical address.
 */
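/*
 * Writes of 1, 2, 4, or 8 bytes must not span a page boundary; the
 * destination is accessed through the physical aperture when it is a
 * kernel-managed address, and through a per-CPU copy window otherwise.
 */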
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size)
{
    unsigned int    index;
    unsigned int    wimg_bits;
    ppnum_t         pn = (ppnum_t)(paddr >> PAGE_SHIFT);
    ppnum_t         pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
    vm_offset_t     copywindow_vaddr = 0;

    if (__improbable(pn_end != pn)) {
        panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
    }

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
    if (pmap_valid_address(paddr)) {
        switch (size) {
        case 1:
            *(volatile unsigned char *)phystokv(paddr) = (unsigned char)data;
            return;
        case 2:
            *(volatile unsigned short *)phystokv(paddr) = (unsigned short)data;
            return;
        case 4:
            *(volatile unsigned int *)phystokv(paddr) = (unsigned int)data;
            return;
        case 8:
            *(volatile unsigned long long *)phystokv(paddr) = data;
            return;
        default:
            panic("Invalid size %d for ml_phys_write_data\n", size);
        }
    }
#endif

    mp_disable_preemption();
    wimg_bits = pmap_cache_attributes(pn);
    index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
    copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

    switch (size) {
    case 1:
        *(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data;
        break;
    case 2:
        *(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data;
        break;
    case 4:
        *(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data;
        break;
    case 8:
        *(volatile unsigned long long *)(copywindow_vaddr) = (unsigned long long)data;
        break;
    default:
        panic("Invalid size %d for ml_phys_write_data\n", size);
        break;
    }

    pmap_unmap_cpu_windows_copy(index);
    mp_enable_preemption();
}
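/*
 * As with the read accessors, the '_64' suffix below refers to the width
 * of the physical address, not the store size.
 */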
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
    ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
    ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}

/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
    s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
    s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
    return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
}

/*
 * Find first bit set in bit string.
 */
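/* Note: the bit string must contain at least one set bit, or the scan runs past it. */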
int
ffsbit(int *s)
{
    int offset;

    for (offset = 0; !*s; offset += INT_SIZE, ++s) {
        ;
    }
    return offset + __builtin_ctz(*s);
}

int
ffs(unsigned int mask)
{
    if (mask == 0) {
        return 0;
    }

    /*
     * NOTE: cannot use __builtin_ffs because it generates a call to
     * 'ffs'
     */
    return 1 + __builtin_ctz(mask);
}

int
ffsll(unsigned long long mask)
{
    if (mask == 0) {
        return 0;
    }

    /*
     * NOTE: cannot use __builtin_ffsll because it generates a call to
     * 'ffsll'
     */
    return 1 + __builtin_ctzll(mask);
}

/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
    if (mask == 0) {
        return 0;
    }

    return (sizeof(mask) << 3) - __builtin_clz(mask);
}

int
flsll(unsigned long long mask)
{
    if (mask == 0) {
        return 0;
    }

    return (sizeof(mask) << 3) - __builtin_clzll(mask);
}
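/*
 * Compare byte strings: returns zero when the first 'len' bytes of 'pa'
 * and 'pb' are identical, non-zero otherwise.
 */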
#undef bcmp
int
bcmp(
    const void *pa,
    const void *pb,
    size_t len)
{
    const char *a = (const char *) pa;
    const char *b = (const char *) pb;

    if (len == 0) {
        return 0;
    }

    do {
        if (*a++ != *b++) {
            break;
        }
    } while (--len);

    /*
     * Check for the overflow case but continue to handle the non-overflow
     * case the same way just in case someone is using the return value
     * as more than zero/non-zero
     */
    if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
        return 0xFFFFFFFFL;
    } else {
        return (int)len;
    }
}
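/*
 * Standard memcmp() semantics: returns the difference between the first
 * pair of mismatching bytes, or zero if the buffers are identical.
 * Marked MARK_AS_HIBERNATE_TEXT so it remains usable by the hibernation
 * code paths.
 */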
#undef memcmp
MARK_AS_HIBERNATE_TEXT
int
memcmp(const void *s1, const void *s2, size_t n)
{
    if (n != 0) {
        const unsigned char *p1 = s1, *p2 = s2;

        do {
            if (*p1++ != *p2++) {
                return *--p1 - *--p2;
            }
        } while (--n != 0);
    }
    return 0;
}
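/*
 * Copy between a physical address and a virtual address; at most one of
 * 'source' and 'sink' may be virtual.  cppvPsrc/cppvPsnk mark the physical
 * side(s), and cppvFsrc/cppvFsnk request a dcache flush of the source/sink
 * on non-coherent-IO configurations.
 *
 * Hypothetical example: copy 'size' bytes from a physical page into a
 * kernel-virtual buffer, flushing the source mapping afterwards:
 *
 *     copypv(src_pa, (addr64_t)(uintptr_t)dst_kva, size,
 *         cppvPsrc | cppvKmap | cppvFsrc);
 */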
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
    if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that only one is virtual */
        panic("%s: no more than 1 parameter may be virtual", __func__);
    }

    kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
    if (which & cppvFsrc) {
        flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
    }

    if (which & cppvFsnk) {
        flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
    }
#endif

    return res;
}

#if MACH_ASSERT

extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
    uintptr_t * buf,
    vm_size_t callstack_max)
{
    /* Captures the USER call stack */
    uint32_t i = 0;

    struct arm_saved_state *state = find_user_regs(current_thread());

    if (!state) {
        while (i < callstack_max) {
            buf[i++] = 0;
        }
    } else {
        if (is_saved_state64(state)) {
            uint64_t frame[2];
            buf[i++] = (uintptr_t)get_saved_state_pc(state);
            frame[0] = get_saved_state_fp(state);
            while (i < callstack_max && frame[0] != 0) {
                if (copyinframe(frame[0], (void*) frame, TRUE)) {
                    break;
                }
                buf[i++] = (uintptr_t)frame[1];
            }
        } else {
            uint32_t frame[2];
            buf[i++] = (uintptr_t)get_saved_state_pc(state);
            frame[0] = (uint32_t)get_saved_state_fp(state);
            while (i < callstack_max && frame[0] != 0) {
                if (copyinframe(frame[0], (void*) frame, FALSE)) {
                    break;
                }
                buf[i++] = (uintptr_t)frame[1];
            }
        }

        while (i < callstack_max) {
            buf[i++] = 0;
        }
    }
}

#endif /* MACH_ASSERT */

int
clr_be_bit(void)
{
    panic("clr_be_bit");
    return 0;
}

boolean_t
ml_probe_read(
    __unused vm_offset_t paddr,
    __unused unsigned int *val)
{
    panic("ml_probe_read() unimplemented");
    return 1;
}

boolean_t
ml_probe_read_64(
    __unused addr64_t paddr,
    __unused unsigned int *val)
{
    panic("ml_probe_read_64() unimplemented");
    return 1;
}

void
ml_thread_policy(
    __unused thread_t thread,
    __unused unsigned policy_id,
    __unused unsigned policy_info)
{
    // <rdar://problem/7141284>: Reduce print noise
    // kprintf("ml_thread_policy() unimplemented\n");
}

__dead2
void
panic_unimplemented(void)
{
    panic("Not yet implemented.");
}

/* ARM64_TODO <rdar://problem/9198953> */
void abort(void) __dead2;

void
abort(void)
{
    panic("Abort.");
}

#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif

/*
 * Get a quick virtual mapping of a physical page and run a callback on that
 * page's virtual address.
 *
 * @param dst64 Physical address to access (doesn't need to be page-aligned).
 * @param bytes Number of bytes to be accessed. This cannot cross page boundaries.
 * @param func  Callback function to call with the page's virtual address.
 * @param arg   Argument passed directly to `func`.
 *
 * @return The return value from `func`.
 */
int
apply_func_phys(
    addr64_t dst64,
    vm_size_t bytes,
    int (*func)(void * buffer, vm_size_t bytes, void * arg),
    void * arg)
{
    /* The physical aperture is only guaranteed to work with kernel-managed addresses. */
    if (!pmap_valid_address(dst64)) {
        panic("%s address error: passed in address (%#llx) not a kernel managed address",
            __FUNCTION__, dst64);
    }

    /* Ensure we stay within a single page */
    if ((((uint32_t)dst64 & (ARM_PGBYTES - 1)) + bytes) > ARM_PGBYTES) {
        panic("%s alignment error: tried accessing addresses spanning more than one page %#llx %#lx",
            __FUNCTION__, dst64, bytes);
    }

    return func((void*)phystokv(dst64), bytes, arg);
}