]> git.saurik.com Git - apple/xnu.git/blame - osfmk/arm/loose_ends.c
xnu-4903.270.47.tar.gz
[apple/xnu.git] / osfmk / arm / loose_ends.c
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach_assert.h>
30#include <mach/vm_types.h>
31#include <mach/mach_time.h>
32#include <kern/timer.h>
33#include <kern/clock.h>
34#include <kern/machine.h>
35#include <mach/machine.h>
36#include <mach/machine/vm_param.h>
37#include <mach_kdp.h>
38#include <kdp/kdp_udp.h>
39#if !MACH_KDP
40#include <kdp/kdp_callout.h>
41#endif /* !MACH_KDP */
42#include <arm/cpu_data.h>
43#include <arm/cpu_data_internal.h>
44#include <arm/caches_internal.h>
45
46#include <vm/vm_kern.h>
47#include <vm/vm_map.h>
48#include <vm/pmap.h>
49
50#include <arm/misc_protos.h>
51
52#include <sys/errno.h>
53
54#define INT_SIZE (BYTE_SIZE * sizeof (int))
55
56
/*
 * bcopy_phys: copy 'bytes' bytes between two physical addresses.
 *
 * Fast path: when the destination is writable through the physical
 * aperture and both pages have default (cacheable) WIMG attributes,
 * copy directly through phystokv() mappings.
 *
 * Slow path: map each page through a per-CPU copy window carrying the
 * page's own cache attributes.  Preemption is disabled so the windows
 * stay bound to this CPU for the duration of the copy.  The copy must
 * not cross a page boundary on either side (panics otherwise).
 */
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	unsigned int src_index;
	unsigned int dst_index;
	vm_offset_t src_offset;
	vm_offset_t dst_offset;
	unsigned int cpu_num;
	unsigned int wimg_bits_src, wimg_bits_dst;
	ppnum_t pn_src = (src >> PAGE_SHIFT);
	ppnum_t pn_dst = (dst >> PAGE_SHIFT);

	wimg_bits_src = pmap_cache_attributes(pn_src);
	wimg_bits_dst = pmap_cache_attributes(pn_dst);

	if (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)) &&
	    ((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
	    ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) {
		/* Fast path - dst is writable and both source and destination have default attributes */
		bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
		return;
	}

	src_offset = src & PAGE_MASK;
	dst_offset = dst & PAGE_MASK;

	/* Each copy window maps a single page; reject spans that cross one. */
	if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE) {
		panic("bcopy extends beyond copy windows");
	}

	mp_disable_preemption();
	cpu_num = cpu_number();
	src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
	dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);

	bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset),
	    (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset),
	    bytes);

	pmap_unmap_cpu_windows_copy(src_index);
	pmap_unmap_cpu_windows_copy(dst_index);
	mp_enable_preemption();
}
100
/*
 * bzero_phys_nc: non-cached variant of bzero_phys.  On this platform it
 * simply forwards to bzero_phys(), which already honors the page's
 * cache attributes via the copy-window slow path.
 */
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
106
/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int wimg_bits;
	ppnum_t pn = (src >> PAGE_SHIFT);

	wimg_bits = pmap_cache_attributes(pn);
	if ((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT) {
		/* Fast path - default attributes */
		bzero((char *)phystokv((pmap_paddr_t) src), bytes);
	} else {
		/*
		 * Slow path: zero page-by-page through a per-CPU copy
		 * window that carries the page's cache attributes.
		 * Preemption stays off so the window remains bound to
		 * this CPU.
		 *
		 * NOTE(review): wimg_bits is sampled from the first page
		 * only and reused for every page in the range — confirm
		 * callers never span pages with differing attributes.
		 */
		mp_disable_preemption();

		unsigned int cpu_num = cpu_number();

		while (bytes > 0) {
			vm_offset_t offset = src & PAGE_MASK;
			uint32_t count = PAGE_SIZE - offset;

			/* Clamp to the remaining byte count on the last page. */
			if (count > bytes) {
				count = bytes;
			}

			unsigned int index = pmap_map_cpu_windows_copy(src >> PAGE_SHIFT, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);

			bzero((char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset), count);

			pmap_unmap_cpu_windows_copy(index);

			src += count;
			bytes -= count;
		}

		mp_enable_preemption();
	}
}
144
145/*
146 * Read data from a physical address.
147 */
148
149
150static unsigned int
151ml_phys_read_data(pmap_paddr_t paddr, int size)
152{
153 unsigned int index;
154 unsigned int result;
155 unsigned int wimg_bits;
156 ppnum_t pn = (paddr >> PAGE_SHIFT);
157 unsigned char s1;
158 unsigned short s2;
159 vm_offset_t copywindow_vaddr = 0;
160
161 mp_disable_preemption();
162 wimg_bits = pmap_cache_attributes(pn);
163 index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
164 copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);;
165
166 switch (size) {
0a7de745
A
167 case 1:
168 s1 = *(volatile unsigned char *)(copywindow_vaddr);
169 result = s1;
170 break;
171 case 2:
172 s2 = *(volatile unsigned short *)(copywindow_vaddr);
173 result = s2;
174 break;
175 case 4:
176 default:
177 result = *(volatile unsigned int *)(copywindow_vaddr);
178 break;
5ba3f43e
A
179 }
180
181 pmap_unmap_cpu_windows_copy(index);
182 mp_enable_preemption();
183
184 return result;
185}
186
187static unsigned long long
188ml_phys_read_long_long(pmap_paddr_t paddr)
189{
190 unsigned int index;
191 unsigned int result;
192 unsigned int wimg_bits;
193 ppnum_t pn = (paddr >> PAGE_SHIFT);
194
195 mp_disable_preemption();
196 wimg_bits = pmap_cache_attributes(pn);
197 index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
198
199 result = *(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
0a7de745 200 | ((uint32_t)paddr & PAGE_MASK));
5ba3f43e
A
201
202 pmap_unmap_cpu_windows_copy(index);
203 mp_enable_preemption();
204
205 return result;
206}
207
/*
 * Exported physical-read accessors.  All funnel into the static
 * helpers ml_phys_read_data() / ml_phys_read_long_long(); the *_64
 * variants differ only in accepting a 64-bit physical address type.
 * Sub-word results are zero-extended into the unsigned int return.
 */

/* 32-bit read. */
unsigned int
ml_phys_read( vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

/* 32-bit read (alias of ml_phys_read). */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

/* 32-bit read at a 64-bit physical address. */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

/* 32-bit read at a 64-bit physical address (alias). */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

/* 16-bit read. */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

/* 16-bit read at a 64-bit physical address. */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

/* 8-bit read. */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

/* 8-bit read at a 64-bit physical address. */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

/* 64-bit read. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

/* 64-bit read at a 64-bit physical address. */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
267
268
269
/*
 * Write data to a physical address.
 */

/*
 * Write a 1-, 2-, or 4-byte quantity (any other 'size' is treated as 4)
 * to physical address 'paddr' through a per-CPU copy window mapped
 * read/write with the page's own cache attributes.  Preemption is
 * disabled so the window stays bound to this CPU.
 */
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (paddr >> PAGE_SHIFT);
	vm_offset_t copywindow_vaddr = 0;

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	/* Window base for this CPU, plus the offset within the page. */
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t) paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data;
		break;
	case 4:
	default:
		*(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data;
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
303
/*
 * Write an 8-byte quantity to a physical address through a per-CPU
 * copy window (see ml_phys_write_data for the windowing scheme).
 */
static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (paddr >> PAGE_SHIFT);

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);

	*(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
	| ((uint32_t)paddr & PAGE_MASK)) = data;

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
321
322
323
/*
 * Exported physical-write accessors.  All funnel into the static
 * helpers ml_phys_write_data() / ml_phys_write_long_long(); the *_64
 * variants differ only in accepting a 64-bit physical address type.
 */

/* 8-bit write. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

/* 8-bit write at a 64-bit physical address. */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

/* 16-bit write. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

/* 16-bit write at a 64-bit physical address. */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

/* 32-bit write. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

/* 32-bit write at a 64-bit physical address. */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

/* 32-bit write (alias of ml_phys_write). */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

/* 32-bit write at a 64-bit physical address (alias). */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

/* 64-bit write. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

/* 64-bit write at a 64-bit physical address. */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
383
384
/*
 * Set indicated bit in bit string.
 * The string is an array of int; INT_SIZE is the bit width of one element.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}
393
/*
 * Clear indicated bit in bit string.
 * The string is an array of int; INT_SIZE is the bit width of one element.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}
402
/*
 * Test if indicated bit is set in bit string.
 * Returns non-zero (the isolated bit value) when set, 0 when clear.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}
411
/*
 * Find first bit set in bit string.
 *
 * NOTE: the caller must guarantee that at least one bit is set —
 * the scan loop has no length bound and will run past the string
 * otherwise.
 */
int
ffsbit(int *s)
{
	int offset;

	/* Skip whole all-zero words. */
	for (offset = 0; !*s; offset += INT_SIZE, ++s) {
		;
	}
	return offset + __builtin_ctz(*s);
}
425
/*
 * Find first (least significant) bit set.  The result is 1-based;
 * 0 means no bit was set.
 *
 * NOTE: cannot use __builtin_ffs because it generates a call to
 * 'ffs'.
 */
int
ffs(unsigned int mask)
{
	return (mask == 0) ? 0 : 1 + __builtin_ctz(mask);
}
439
/*
 * 64-bit variant of ffs(): 1-based index of the least significant
 * set bit, or 0 when no bit is set.
 *
 * NOTE: cannot use __builtin_ffsll because it generates a call to
 * 'ffsll'.
 */
int
ffsll(unsigned long long mask)
{
	return (mask == 0) ? 0 : 1 + __builtin_ctzll(mask);
}
453
/*
 * Find last (most significant) bit set.  The result is 1-based;
 * 0 means no bit was set.
 */
int
fls(unsigned int mask)
{
	const int nbits = (int)(sizeof(mask) * 8);

	return (mask == 0) ? 0 : nbits - __builtin_clz(mask);
}
466
/*
 * 64-bit variant of fls(): 1-based index of the most significant
 * set bit, or 0 when no bit is set.
 */
int
flsll(unsigned long long mask)
{
	const int nbits = (int)(sizeof(mask) * 8);

	return (mask == 0) ? 0 : nbits - __builtin_clzll(mask);
}
476
/*
 * Compare two byte strings.  Returns 0 when the first 'len' bytes
 * match; otherwise returns the number of bytes not yet examined,
 * counting the mismatching byte (i.e. any non-zero value means
 * "different", matching the classic bcmp contract).
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *left = (const char *) pa;
	const char *right = (const char *) pb;

	if (len == 0) {
		return 0;
	}

	for (;;) {
		if (*left++ != *right++) {
			break;
		}
		if (--len == 0) {
			break;
		}
	}

	return len;
}
498
/*
 * Lexicographic comparison of two memory regions of 'n' bytes.
 * Returns <0, 0, or >0 — the difference of the first mismatching
 * pair interpreted as unsigned chars; 0 when equal or n == 0.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *a = s1;
	const unsigned char *b = s2;

	while (n-- != 0) {
		if (*a != *b) {
			return *a - *b;
		}
		a++;
		b++;
	}
	return 0;
}
513
/*
 * copypv: copy 'size' bytes between a source and sink, each of which
 * may independently be a physical address (cppvPsrc / cppvPsnk), a
 * kernel-map virtual address (cppvKmap), or a current-map user virtual
 * address.  cppvFsrc / cppvFsnk request explicit cache flushes of the
 * corresponding side.  Returns the copyin/copyout result, or
 * KERN_SUCCESS for physical/kernel copies.
 */
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	kern_return_t retval = KERN_SUCCESS;
	void *from, *to;
	unsigned int from_wimg_bits, to_wimg_bits;

	from = CAST_DOWN(void *, source);
	to = CAST_DOWN(void *, sink);

	if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that only
	                                             * one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */
	}
	/* Translate physical sides through the physical aperture. */
	if (which & cppvPsrc) {
		from = (void *)phystokv((pmap_paddr_t)from);
	}
	if (which & cppvPsnk) {
		to = (void *)phystokv((pmap_paddr_t)to);
	}

	if ((which & (cppvPsrc | cppvKmap)) == 0) { /* Source is virtual in
	                                             * current map */
		retval = copyin((user_addr_t) from, to, size);
	} else if ((which & (cppvPsnk | cppvKmap)) == 0) { /* Sink is virtual in
	                                                    * current map */
		retval = copyout(from, (user_addr_t) to, size);
	} else { /* both addresses are physical or kernel map */
		bcopy(from, to, size);
	}

	/*
	 * Cache maintenance: flush when explicitly requested, or when a
	 * physical page's WIMG attributes indicate the aperture copy may
	 * be incoherent with other mappings.
	 */
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	} else if (which & cppvPsrc) {
		from_wimg_bits = pmap_cache_attributes(source >> PAGE_SHIFT);
		if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU)) {
			flush_dcache64(source, size, TRUE);
		}
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	} else if (which & cppvPsnk) {
		to_wimg_bits = pmap_cache_attributes(sink >> PAGE_SHIFT);
		if (to_wimg_bits != VM_WIMG_COPYBACK) {
			flush_dcache64(sink, size, TRUE);
		}
	}
	return retval;
}
564
/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);
574
/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Called when nbytes is "large" e.g. more than a page. Such sizes are
 * infrequent, and very large sizes are likely indications of attempts
 * to exploit kernel programming errors (bugs).
 *
 * Panics when the kernel range is malformed or the size exceeds
 * copysize_limit_panic (both indicate kernel bugs); returns EFAULT
 * when the user range is outside the current thread's map, 0 on
 * success.
 */
static int
copy_validate(const user_addr_t user_addr,
    uintptr_t kernel_addr, vm_size_t nbytes)
{
	uintptr_t kernel_addr_last = kernel_addr + nbytes;

	/* 'kernel_addr_last < kernel_addr' catches wraparound of the sum. */
	if (__improbable(kernel_addr < VM_MIN_KERNEL_ADDRESS ||
	    kernel_addr > VM_MAX_KERNEL_ADDRESS ||
	    kernel_addr_last < kernel_addr ||
	    kernel_addr_last > VM_MAX_KERNEL_ADDRESS)) {
		panic("%s(%p, %p, %u) - kaddr not in kernel", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	user_addr_t user_addr_last = user_addr + nbytes;

	/* User range must not wrap and must lie within the current map. */
	if (__improbable((user_addr_last < user_addr) || ((user_addr + nbytes) > vm_map_max(current_thread()->map)) ||
	    (user_addr < vm_map_min(current_thread()->map)))) {
		return EFAULT;
	}

	if (__improbable(nbytes > copysize_limit_panic)) {
		panic("%s(%p, %p, %u) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	return 0;
}
610
/* Argument validation for copyin(); see copy_validate(). */
int
copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes);
}
616
/* Argument validation for copyout(); see copy_validate(). */
int
copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes);
}
622
623#if MACH_ASSERT
624
0a7de745 625extern int copyinframe(vm_address_t fp, char *frame);
5ba3f43e
A
626
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 *
 * Walks the USER frame-pointer chain (ARM r7) of the current thread,
 * copying each saved return address into 'buf'.  Unused slots are
 * zero-filled.  Stops early if a frame cannot be copied in.
 */
void
machine_callstack(
	uintptr_t * buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;
	/* frame[0] = caller's frame pointer, frame[1] = saved return address */
	uint32_t frame[2];

	struct arm_saved_state* state = find_user_regs(current_thread());

	if (!state) {
		/* No user state (e.g. kernel-only thread): report all zeros. */
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	} else {
		buf[i++] = (uintptr_t)state->pc;
		frame[0] = state->r[7]; /* r7 is the ARM frame pointer */

		while (i < callstack_max && frame[0] != 0) {
			/* copyinframe returns non-zero on fault; stop walking. */
			if (copyinframe(frame[0], (void*) frame)) {
				break;
			}
			buf[i++] = (uintptr_t)frame[1];
		}

		/* Zero-fill the remainder of the buffer. */
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	}
}
662
0a7de745 663#endif /* MACH_ASSERT */
5ba3f43e
A
664
/* Big-endian mode is not supported on this platform; always panics. */
int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}
671
/* Probe-read of a physical address: unimplemented here; always panics. */
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}
680
/* 64-bit probe-read of a physical address: unimplemented here; always panics. */
boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
689
690
/*
 * Machine-layer thread policy hook: intentionally a no-op on this
 * platform (logging was removed to reduce print noise).
 */
void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}
700
#if !MACH_KDP
/* Stub: no kernel debugger (KDP) in this configuration, so callout
 * registration is a no-op. */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif