]> git.saurik.com Git - apple/xnu.git/blame - osfmk/arm64/loose_ends.c
xnu-7195.50.7.100.1.tar.gz
[apple/xnu.git] / osfmk / arm64 / loose_ends.c
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach_assert.h>
30#include <mach/vm_types.h>
31#include <mach/mach_time.h>
32#include <kern/timer.h>
33#include <kern/clock.h>
34#include <kern/machine.h>
35#include <mach/machine.h>
36#include <mach/machine/vm_param.h>
37#include <mach_kdp.h>
38#include <kdp/kdp_udp.h>
39#if !MACH_KDP
40#include <kdp/kdp_callout.h>
41#endif /* !MACH_KDP */
42#include <arm/cpu_data.h>
43#include <arm/cpu_data_internal.h>
44#include <arm/caches_internal.h>
45
46#include <vm/vm_kern.h>
47#include <vm/vm_map.h>
48#include <vm/pmap.h>
49
50#include <arm/misc_protos.h>
51
52#include <sys/errno.h>
f427ee49 53#include <libkern/section_keywords.h>
5ba3f43e
A
54
#define INT_SIZE (BYTE_SIZE * sizeof (int))

/*
 * Decode the cppv* flag bits passed to bcopy_phys_internal()/copypv():
 * a side is "physical" when its cppvP* bit is set, and a "user" virtual
 * address when neither its cppvP* bit nor cppvKmap is set.
 */
#define BCOPY_PHYS_SRC_IS_PHYS(flags) (((flags) & cppvPsrc) != 0)
#define BCOPY_PHYS_DST_IS_PHYS(flags) (((flags) & cppvPsnk) != 0)
#define BCOPY_PHYS_SRC_IS_USER(flags) (((flags) & (cppvPsrc | cppvKmap)) == 0)
#define BCOPY_PHYS_DST_IS_USER(flags) (((flags) & (cppvPsnk | cppvKmap)) == 0)
61
/*
 * Copy 'bytes' bytes between src and dst, where each side may independently
 * be a physical address, a kernel virtual address, or a user virtual address,
 * as selected by the cppv* bits in 'flags'.  Physical pages are accessed
 * either through the physical aperture (when pmap_valid_address() and the
 * cache attributes allow) or via a per-CPU copy-window mapping.
 * Returns KERN_SUCCESS, or the error from copyin()/copyout() when a user
 * address is involved.
 */
static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
	unsigned int src_index;
	unsigned int dst_index;
	vm_offset_t src_offset;
	vm_offset_t dst_offset;
	unsigned int wimg_bits_src, wimg_bits_dst;
	unsigned int cpu_num = 0;
	ppnum_t pn_src;
	ppnum_t pn_dst;
	addr64_t end __assert_only;
	kern_return_t res = KERN_SUCCESS;

	/* Kernel-side ranges must not wrap around the address space. */
	if (!BCOPY_PHYS_SRC_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(src, bytes, &end)));
	}
	if (!BCOPY_PHYS_DST_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(dst, bytes, &end)));
	}

	while ((bytes > 0) && (res == KERN_SUCCESS)) {
		src_offset = src & PAGE_MASK;
		dst_offset = dst & PAGE_MASK;
		boolean_t use_copy_window_src = FALSE;
		boolean_t use_copy_window_dst = FALSE;
		/* count/count2: bytes copyable this pass from the src/dst side. */
		vm_size_t count = bytes;
		vm_size_t count2 = bytes;
		if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			use_copy_window_src = !pmap_valid_address(src);
			pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count = PAGE_SIZE - src_offset;
			wimg_bits_src = pmap_cache_attributes(pn_src);
			/* Non-default cacheability forces the copy-window path. */
			if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_src = TRUE;
			}
#else
			if (use_copy_window_src) {
				wimg_bits_src = pmap_cache_attributes(pn_src);
				count = PAGE_SIZE - src_offset;
			}
#endif
		}
		if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			// write preflighting needed for things like dtrace which may write static read-only mappings
			use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
			pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count2 = PAGE_SIZE - dst_offset;
			wimg_bits_dst = pmap_cache_attributes(pn_dst);
			if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_dst = TRUE;
			}
#else
			if (use_copy_window_dst) {
				wimg_bits_dst = pmap_cache_attributes(pn_dst);
				count2 = PAGE_SIZE - dst_offset;
			}
#endif
		}

		char *tmp_src;
		char *tmp_dst;

		/* Copy windows are per-CPU: preemption stays off while one is mapped. */
		if (use_copy_window_src || use_copy_window_dst) {
			mp_disable_preemption();
			cpu_num = cpu_number();
		}

		if (use_copy_window_src) {
			src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
			tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
		} else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
		} else {
			tmp_src = (char*)src;
		}
		if (use_copy_window_dst) {
			dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
			tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
		} else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
		} else {
			tmp_dst = (char*)dst;
		}

		/* Clamp this pass to the smaller of both sides and the bytes left. */
		if (count > count2) {
			count = count2;
		}
		if (count > bytes) {
			count = bytes;
		}

		if (BCOPY_PHYS_SRC_IS_USER(flags)) {
			res = copyin((user_addr_t)src, tmp_dst, count);
		} else if (BCOPY_PHYS_DST_IS_USER(flags)) {
			res = copyout(tmp_src, (user_addr_t)dst, count);
		} else {
			bcopy(tmp_src, tmp_dst, count);
		}

		if (use_copy_window_src) {
			pmap_unmap_cpu_windows_copy(src_index);
		}
		if (use_copy_window_dst) {
			pmap_unmap_cpu_windows_copy(dst_index);
		}
		if (use_copy_window_src || use_copy_window_dst) {
			mp_enable_preemption();
		}

		src += count;
		dst += count;
		bytes -= count;
	}
	return res;
}
5ba3f43e 180
d9a64523
A
/* Copy between two physical addresses (both sides physical). */
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}
186
/*
 * Zero physical memory, non-cached variant; on this platform it simply
 * defers to bzero_phys(), which already special-cases device memory.
 */
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
192
94ff46dc
A
193extern void *secure_memset(void *, int, size_t);
194
5ba3f43e
A
/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int wimg_bits;
	unsigned int cpu_num = cpu_number();
	ppnum_t pn;
	addr64_t end __assert_only;

	/* The range must not wrap around the address space. */
	assert(!__improbable(os_add_overflow(src, bytes, &end)));

	vm_offset_t offset = src & PAGE_MASK;
	while (bytes > 0) {
		vm_size_t count = bytes;

		/* Use a per-CPU copy window when the physical aperture can't be used. */
		boolean_t use_copy_window = !pmap_valid_address(src);
		pn = (ppnum_t)(src >> PAGE_SHIFT);
		wimg_bits = pmap_cache_attributes(pn);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
		count = PAGE_SIZE - offset;
		/* Non-default cacheability also forces the copy-window path. */
		if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			use_copy_window = TRUE;
		}
#else
		if (use_copy_window) {
			count = PAGE_SIZE - offset;
		}
#endif
		char *buf;
		unsigned int index;
		if (use_copy_window) {
			/* Copy windows are per-CPU; preemption stays off while mapped. */
			mp_disable_preemption();
			cpu_num = cpu_number();
			index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
			buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
		} else {
			buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
		}

		if (count > bytes) {
			count = bytes;
		}

		switch (wimg_bits & VM_WIMG_MASK) {
		case VM_WIMG_DEFAULT:
		case VM_WIMG_WCOMB:
		case VM_WIMG_INNERWBACK:
		case VM_WIMG_WTHRU:
			bzero(buf, count);
			break;
		default:
			/* 'dc zva' performed by bzero is not safe for device memory */
			secure_memset((void*)buf, 0, count);
		}

		if (use_copy_window) {
			pmap_unmap_cpu_windows_copy(index);
			mp_enable_preemption();
		}

		src += count;
		bytes -= count;
		/* Only the first page can start mid-page. */
		offset = 0;
	}
}
260
/*
 * Read data from a physical address.
 */


/*
 * Read a 1/2/4/8-byte datum from physical address 'paddr'.  The access must
 * not cross a page boundary (panics otherwise).  Uses the physical aperture
 * when available for this address; otherwise maps the page through a per-CPU
 * copy window with preemption disabled.
 */
static unsigned long long
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	unsigned long long result = 0;
	vm_offset_t copywindow_vaddr = 0;
	unsigned char s1;
	unsigned short s2;
	unsigned int s4;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	/* Fast path: access through the physical aperture. */
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			s1 = *(volatile unsigned char *)phystokv(paddr);
			result = s1;
			break;
		case 2:
			s2 = *(volatile unsigned short *)phystokv(paddr);
			result = s2;
			break;
		case 4:
			s4 = *(volatile unsigned int *)phystokv(paddr);
			result = s4;
			break;
		case 8:
			result = *(volatile unsigned long long *)phystokv(paddr);
			break;
		default:
			panic("Invalid size %d for ml_phys_read_data\n", size);
			break;
		}
		return result;
	}
#endif

	/* Slow path: map the page into this CPU's copy window. */
	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)copywindow_vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)copywindow_vaddr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile unsigned int *)copywindow_vaddr;
		result = s4;
		break;
	case 8:
		result = *(volatile unsigned long long*)copywindow_vaddr;
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}
340
0a7de745
A
/* Read a 32-bit word from a physical address. */
unsigned int
ml_phys_read( vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

/* Read a 32-bit word from a physical address. */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

/* Read a 32-bit word from a 64-bit physical address. */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

/* Read a 32-bit word from a 64-bit physical address. */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

/* Read a 16-bit halfword from a physical address. */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

/* Read a 16-bit halfword from a 64-bit physical address. */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

/* Read a byte from a physical address. */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

/* Read a byte from a 64-bit physical address. */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

/* Read a 64-bit doubleword from a physical address. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}

/* Read a 64-bit doubleword from a 64-bit physical address. */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}
400
401
402
/*
 * Write data to a physical address.
 */

/*
 * Write a 1/2/4/8-byte datum to physical address 'paddr'.  The access must
 * not cross a page boundary (panics otherwise).  Uses the physical aperture
 * when available; otherwise maps the page through a per-CPU copy window with
 * preemption disabled.
 */
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	vm_offset_t copywindow_vaddr = 0;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	/* Fast path: access through the physical aperture. */
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			*(volatile unsigned char *)phystokv(paddr) = (unsigned char)data;
			return;
		case 2:
			*(volatile unsigned short *)phystokv(paddr) = (unsigned short)data;
			return;
		case 4:
			*(volatile unsigned int *)phystokv(paddr) = (unsigned int)data;
			return;
		case 8:
			*(volatile unsigned long long *)phystokv(paddr) = data;
			return;
		default:
			panic("Invalid size %d for ml_phys_write_data\n", size);
		}
	}
#endif

	/* Slow path: map the page into this CPU's copy window. */
	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) =
		    (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) =
		    (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)(copywindow_vaddr) =
		    (uint32_t)data;
		break;
	case 8:
		*(volatile unsigned long long *)(copywindow_vaddr) =
		    (unsigned long long)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
471
0a7de745
A
/* Write a byte to a physical address. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

/* Write a byte to a 64-bit physical address. */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

/* Write a 16-bit halfword to a physical address. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

/* Write a 16-bit halfword to a 64-bit physical address. */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

/* Write a 32-bit word to a physical address. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

/* Write a 32-bit word to a 64-bit physical address. */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

/* Write a 32-bit word to a physical address. */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

/* Write a 32-bit word to a 64-bit physical address. */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

/* Write a 64-bit doubleword to a physical address. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}

/* Write a 64-bit doubleword to a 64-bit physical address. */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}
531
532
533/*
534 * Set indicated bit in bit string.
535 */
536void
537setbit(int bitno, int *s)
538{
cb323159 539 s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
5ba3f43e
A
540}
541
542/*
543 * Clear indicated bit in bit string.
544 */
545void
546clrbit(int bitno, int *s)
547{
cb323159 548 s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
5ba3f43e
A
549}
550
551/*
552 * Test if indicated bit is set in bit string.
553 */
554int
555testbit(int bitno, int *s)
556{
cb323159 557 return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
5ba3f43e
A
558}
559
560/*
561 * Find first bit set in bit string.
562 */
563int
564ffsbit(int *s)
565{
566 int offset;
567
0a7de745
A
568 for (offset = 0; !*s; offset += INT_SIZE, ++s) {
569 ;
570 }
5ba3f43e
A
571 return offset + __builtin_ctz(*s);
572}
573
/* Find-first-set: returns 1-based index of the lowest set bit, 0 if none. */
int
ffs(unsigned int mask)
{
	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return mask == 0 ? 0 : 1 + __builtin_ctz(mask);
}
587
/* 64-bit find-first-set: 1-based index of the lowest set bit, 0 if none. */
int
ffsll(unsigned long long mask)
{
	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return mask == 0 ? 0 : 1 + __builtin_ctzll(mask);
}
601
/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
	/* Bit width of the operand minus the number of leading zeroes. */
	return mask == 0 ? 0 : (int)((sizeof(mask) << 3) - __builtin_clz(mask));
}
614
/* 64-bit find-last-set: 1-based index of the highest set bit, 0 if none. */
int
flsll(unsigned long long mask)
{
	/* Bit width of the operand minus the number of leading zeroes. */
	return mask == 0 ? 0 : (int)((sizeof(mask) << 3) - __builtin_clzll(mask));
}
624
#undef bcmp
/*
 * Compare two byte strings: returns zero when the first 'len' bytes are
 * equal, non-zero otherwise (the non-zero value is the count of bytes left
 * unexamined at the first mismatch).
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0) {
		return 0;
	}

	for (;;) {
		if (*a != *b) {
			break;
		}
		a++;
		b++;
		if (--len == 0) {
			break;
		}
	}

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
		return 0xFFFFFFFFL;
	}
	return (int)len;
}
656
657#undef memcmp
f427ee49 658MARK_AS_HIBERNATE_TEXT
5ba3f43e
A
659int
660memcmp(const void *s1, const void *s2, size_t n)
661{
662 if (n != 0) {
663 const unsigned char *p1 = s1, *p2 = s2;
664
665 do {
0a7de745
A
666 if (*p1++ != *p2++) {
667 return *--p1 - *--p2;
668 }
5ba3f43e
A
669 } while (--n != 0);
670 }
0a7de745 671 return 0;
5ba3f43e
A
672}
673
/*
 * Copy between combinations of physical and virtual addresses as selected by
 * the cppv* bits in 'which'; optionally flushes the data cache for either
 * side afterwards (cppvFsrc/cppvFsnk) on non-coherent-IO configurations.
 */
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	if ((which & (cppvPsrc | cppvPsnk)) == 0) {     /* Make sure that only one is virtual */
		panic("%s: no more than 1 parameter may be virtual", __func__);
	}

	kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	}
#endif

	return res;
}
5ba3f43e
A
695
#if MACH_ASSERT

extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
	uintptr_t * buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;

	struct arm_saved_state *state = find_user_regs(current_thread());

	if (!state) {
		/* No saved user state: report an all-zero callstack. */
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	} else {
		if (is_saved_state64(state)) {
			/* 64-bit thread: walk {fp, lr} frame records copied in from user space. */
			uint64_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, TRUE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		} else {
			/* 32-bit thread: same walk with 32-bit frame records. */
			uint32_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = (uint32_t)get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, FALSE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		}

		/* Zero-fill any remaining slots. */
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	}
}

#endif /* MACH_ASSERT */
5ba3f43e
A
748
/* Big-endian mode is not supported on this platform; panics if called. */
int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}
755
/* Unimplemented on this platform; panics if called. */
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}
764
/* Unimplemented on this platform; panics if called. */
boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
773
774
/* Thread policy hints are intentionally ignored on this platform. */
void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}
784
cb323159 785__dead2
5ba3f43e 786void
cb323159 787panic_unimplemented(void)
5ba3f43e
A
788{
789 panic("Not yet implemented.");
790}
791
/* ARM64_TODO <rdar://problem/9198953> */
void abort(void) __dead2;

/* Kernel abort(): unconditionally panics. */
void
abort(void)
{
	panic("Abort.");
}
800
801
#if !MACH_KDP
/* Stub: KDP callouts are a no-op when the kernel debugger is not built in. */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif
f427ee49
A
809
/*
 * Get a quick virtual mapping of a physical page and run a callback on that
 * page's virtual address.
 *
 * @param dst64 Physical address to access (doesn't need to be page-aligned).
 * @param bytes Number of bytes to be accessed. This cannot cross page boundaries.
 * @param func Callback function to call with the page's virtual address.
 * @param arg Argument passed directly to `func`.
 *
 * @return The return value from `func`.
 */
int
apply_func_phys(
	addr64_t dst64,
	vm_size_t bytes,
	int (*func)(void * buffer, vm_size_t bytes, void * arg),
	void * arg)
{
	/* The physical aperture is only guaranteed to work with kernel-managed addresses. */
	if (!pmap_valid_address(dst64)) {
		panic("%s address error: passed in address (%#llx) not a kernel managed address",
		    __FUNCTION__, dst64);
	}

	/* Ensure we stay within a single page */
	/* NOTE(review): the (uint32_t) cast only preserves the page offset; it is safe
	 * because only the low ARM_PGBYTES-1 bits are examined. */
	if (((((uint32_t)dst64 & (ARM_PGBYTES - 1)) + bytes) > ARM_PGBYTES)) {
		panic("%s alignment error: tried accessing addresses spanning more than one page %#llx %#lx",
		    __FUNCTION__, dst64, bytes);
	}

	return func((void*)phystokv(dst64), bytes, arg);
}