2 * Copyright (c) 2015 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 #include <mach/mach_types.h>
31 #include <IOKit/IOLib.h>
32 #include <sys/msgbuf.h>
33 #include <sys/errno.h>
34 #include <arm64/pgtrace.h>
35 #include <libkern/OSDebug.h>
45 #if CONFIG_PGTRACE_NONKEXT
46 #include "pgtrace_decoder.h"
48 //--------------------------------------------
// Log ring-buffer parameters: default entry count and the index-masking
// macro. RBUF_IDX expects (mask) to be size-1 with size a power of two
// (pgtrace_set_size below rounds sizes up accordingly).
51 #define RBUF_DEFAULT_SIZE 1024
52 #define RBUF_IDX(idx, mask) ((idx) & (mask))
55 //--------------------------------------------
// NOTE(review): this extract is lossy — the wrapper lines between the
// numbered fragments (e.g. the "typedef struct {" opening the log entry
// type) are missing from view; fields below belong to log_t per later use.
58 typedef uint8_t RWLOCK
;
// Per-event log entry fields: decoder run result plus captured call stack.
62 pgtrace_run_result_t res
;
63 void *stack
[PGTRACE_STACK_DEPTH
];
66 //--------------------------------------------
// Global pgtrace state. Fields marked "Protect" are guarded by loglock
// (used together in pgtrace_write_log/pgtrace_read_log); the probe queue
// is guarded by probelock — TODO confirm against the full struct, whose
// remaining fields (id, option, enabled, bytes, lock_grp, probelock) are
// outside this extract.
70 log_t
*logs
; // Protect
71 uint32_t size
; // Protect
72 uint64_t rdidx
, wridx
; // Protect
73 decl_simple_lock_data(, loglock
);
80 queue_head_t probes
; // Protect
83 lck_grp_attr_t
*lock_grp_attr
;
84 lck_attr_t
*lock_attr
;
88 //--------------------------------------------
// pgtrace_init (non-kext build): initialize the log spinlock, lock
// group/attributes, probe mutex, probe queue, and allocate the default
// log ring buffer. Function signature line is missing from this extract.
94 simple_lock_init(&pgtrace
.loglock
, 0);
96 pgtrace
.lock_attr
= lck_attr_alloc_init();
97 pgtrace
.lock_grp_attr
= lck_grp_attr_alloc_init();
98 pgtrace
.lock_grp
= lck_grp_alloc_init("pgtrace_lock", pgtrace
.lock_grp_attr
);
100 lck_mtx_init(&pgtrace
.probelock
, pgtrace
.lock_grp
, pgtrace
.lock_attr
);
102 queue_init(&pgtrace
.probes
);
// Allocate the initial ring of RBUF_DEFAULT_SIZE entries.
// NOTE(review): no visible check of the kalloc() result here — confirm
// handling in the full source.
104 pgtrace
.size
= RBUF_DEFAULT_SIZE
;
105 pgtrace
.logs
= kalloc(RBUF_DEFAULT_SIZE
* sizeof(log_t
));
// pgtrace_clear_probe: remove and kfree every probe_t on the global probe
// list, holding probelock for the whole walk. Return type line and the
// p/next declarations are missing from this extract.
109 pgtrace_clear_probe(void)
112 queue_head_t
*q
= &pgtrace
.probes
;
114 lck_mtx_lock(&pgtrace
.probelock
);
// Safe-removal walk: capture the next element before removing/freeing
// the current one.
116 p
= (probe_t
*)queue_first(q
);
117 while (!queue_end(q
, (queue_entry_t
)p
)) {
118 next
= (probe_t
*)queue_next(&(p
->chain
));
120 queue_remove(q
, p
, probe_t
*, chain
);
121 kfree(p
, sizeof(probe_t
));
126 lck_mtx_unlock(&pgtrace
.probelock
);
// pgtrace_add_probe (non-kext build): validate the [start, end) range,
// allocate a probe_t, bind it to the given thread's pmap (the thread ==
// NULL branch body is missing from this extract — presumably the kernel
// pmap; verify against full source), then enqueue it under probelock.
132 pgtrace_add_probe(thread_t thread
, vm_offset_t start
, vm_offset_t end
)
135 queue_head_t
*q
= &pgtrace
.probes
;
// Range-validation failure path (the guarding condition line is missing
// from this extract).
138 kprintf("%s Invalid start=%lx end=%lx\n", __func__
, start
, end
);
142 p
= kalloc(sizeof(probe_t
));
145 if (thread
== NULL
) {
148 p
->pmap
= vm_map_pmap(thread
->map
);
151 lck_mtx_lock(&pgtrace
.probelock
);
152 queue_enter(q
, p
, probe_t
*, chain
);
153 lck_mtx_unlock(&pgtrace
.probelock
);
// pgtrace_start (non-kext build): if tracing is already enabled the
// early-out branch below is taken; otherwise each registered probe's page
// range is handed to the pmap layer. Signature and several body lines are
// missing from this extract.
162 queue_head_t
*q
= &pgtrace
.probes
;
164 kprintf("%s\n", __func__
);
166 if (pgtrace
.enabled
) {
172 lck_mtx_lock(&pgtrace
.probelock
);
174 queue_iterate(q
, p
, probe_t
*, chain
) {
175 pmap_pgtrace_add_page(p
->pmap
, p
->start
, p
->end
);
178 lck_mtx_unlock(&pgtrace
.probelock
);
// pgtrace_stop (non-kext build): unregister every probe's page range from
// the pmap layer under probelock. Signature line is missing from this
// extract.
187 queue_head_t
*q
= &pgtrace
.probes
;
189 kprintf("%s\n", __func__
);
191 lck_mtx_lock(&pgtrace
.probelock
);
193 queue_iterate(q
, p
, probe_t
*, chain
) {
194 pmap_pgtrace_delete_page(p
->pmap
, p
->start
, p
->end
);
197 lck_mtx_unlock(&pgtrace
.probelock
);
// pgtrace_get_size / pgtrace_set_size: query and resize the log ring.
// The requested size is rounded up to the next power of two (required by
// the RBUF_IDX masking scheme) and rejected above 0x100000 entries. The
// buffer swap happens under loglock with both indices reset; the old
// buffer is freed after the lock is dropped. Bodies are partially missing
// from this extract.
203 pgtrace_get_size(void)
209 pgtrace_set_size(uint32_t size
)
211 log_t
*old_buf
, *new_buf
;
212 uint32_t old_size
, new_size
= 1;
214 // round up to next power of 2
215 while (size
> new_size
) {
217 if (new_size
> 0x100000) {
218 // over million entries
219 kprintf("%s: size=%x new_size=%x is too big\n", __func__
, size
, new_size
);
224 new_buf
= kalloc(new_size
* sizeof(log_t
));
225 if (new_buf
== NULL
) {
226 kprintf("%s: can't allocate new_size=%x\n entries", __func__
, new_size
);
// Swap in the new buffer and drop any buffered entries (rdidx/wridx
// reset to 0) atomically with respect to the logger.
232 simple_lock(&pgtrace
.loglock
);
233 old_buf
= pgtrace
.logs
;
234 old_size
= pgtrace
.size
;
235 pgtrace
.logs
= new_buf
;
236 pgtrace
.size
= new_size
;
237 pgtrace
.rdidx
= pgtrace
.wridx
= 0;
238 simple_unlock(&pgtrace
.loglock
);
241 kfree(old_buf
, old_size
* sizeof(log_t
));
// pgtrace_clear_trace: discard all buffered log entries by resetting both
// ring indices under loglock.
248 pgtrace_clear_trace(void)
250 simple_lock(&pgtrace
.loglock
);
251 pgtrace
.rdidx
= pgtrace
.wridx
= 0;
252 simple_unlock(&pgtrace
.loglock
);
// Small accessors. The first fragment is the body of the active/enabled
// predicate (its signature line is missing from this extract), followed
// by the option-bitmask getter and setter.
258 return pgtrace
.enabled
> 0;
262 pgtrace_get_option(void)
264 return pgtrace
.option
;
268 pgtrace_set_option(uint32_t option
)
270 pgtrace
.option
= option
;
273 // pgtrace_write_log() is in interrupt disabled context
// Build a log_t from the decoder run result, optionally kprintf a
// formatted line (PGTRACE_OPTION_KPRINTF) and capture a backtrace
// (PGTRACE_OPTION_STACK), then store the entry into the ring under
// loglock, advancing rdidx on overflow and waking a blocked reader when
// the ring transitions from empty.
275 pgtrace_write_log(pgtrace_run_result_t res
)
279 const char *rwmap
[] = { "R", "W", "PREFETCH" };
281 log
.id
= pgtrace
.id
++;
284 if (pgtrace
.option
& PGTRACE_OPTION_KPRINTF
) {
// Format "<time> <R|W|PREFETCH> <addr>=<data> ..." into msg.
// NOTE(review): the lines advancing p between snprintf calls are missing
// from this extract — confirm the bound arithmetic against full source.
290 snprintf(p
, MSG_MAX
, "%llu %s ", res
.rr_time
, rwmap
[res
.rr_rw
]);
293 for (i
= 0; i
< res
.rr_num
; i
++) {
294 snprintf(p
, MSG_MAX
- (p
- msg
), "%lx=%llx ", res
.rr_addrdata
[i
].ad_addr
, res
.rr_addrdata
[i
].ad_data
);
298 kprintf("%s %s\n", __func__
, msg
);
301 if (pgtrace
.option
& PGTRACE_OPTION_STACK
) {
302 OSBacktrace(log
.stack
, PGTRACE_STACK_DEPTH
);
305 pgtrace
.bytes
+= sizeof(log
);
307 simple_lock(&pgtrace
.loglock
);
309 pgtrace
.logs
[RBUF_IDX(pgtrace
.wridx
, pgtrace
.size
- 1)] = log
;
311 // Advance rdidx if ring is full
312 if (RBUF_IDX(pgtrace
.wridx
, pgtrace
.size
- 1) == RBUF_IDX(pgtrace
.rdidx
, pgtrace
.size
- 1) &&
313 (pgtrace
.wridx
!= pgtrace
.rdidx
)) {
318 // Signal if ring was empty
319 if (pgtrace
.wridx
== (pgtrace
.rdidx
+ 1)) {
// Wake the reader blocked in pgtrace_read_log (same event: pgtrace.logs).
320 thread_wakeup(pgtrace
.logs
);
323 simple_unlock(&pgtrace
.loglock
);
328 // pgtrace_read_log() is in user thread
// Copy up to size bytes of buffered log_t entries into buf, blocking
// abort-safely while the ring is empty, and return the number of bytes
// copied (total * sizeof(log_t)). Several lines (declarations, early
// returns, the retry after wakeup) are missing from this extract.
330 pgtrace_read_log(uint8_t *buf
, uint32_t size
)
332 int total
, front
, back
;
336 if (pgtrace
.enabled
== FALSE
) {
340 total
= size
/ sizeof(log_t
);
342 // Check if buf is too small
343 if (buf
&& total
== 0) {
// Interrupts must be disabled while holding loglock because the writer
// (pgtrace_write_log) runs in interrupt-disabled context.
347 ints
= ml_set_interrupts_enabled(FALSE
);
348 simple_lock(&pgtrace
.loglock
);
350 // Wait if ring is empty
351 if (pgtrace
.rdidx
== pgtrace
.wridx
) {
352 assert_wait(pgtrace
.logs
, THREAD_ABORTSAFE
);
354 simple_unlock(&pgtrace
.loglock
);
355 ml_set_interrupts_enabled(ints
);
357 wr
= thread_block(NULL
);
358 if (wr
!= THREAD_AWAKENED
) {
362 ints
= ml_set_interrupts_enabled(FALSE
);
363 simple_lock(&pgtrace
.loglock
);
// Clamp total to the number of entries actually buffered.
367 if ((pgtrace
.rdidx
+ total
) > pgtrace
.wridx
) {
368 total
= (int)(pgtrace
.wridx
- pgtrace
.rdidx
);
// Two-part copy when the read range wraps past the end of the ring.
372 if ((RBUF_IDX(pgtrace
.rdidx
, pgtrace
.size
- 1) + total
) >= pgtrace
.size
) {
373 front
= pgtrace
.size
- RBUF_IDX(pgtrace
.rdidx
, pgtrace
.size
- 1);
378 memcpy(buf
, &(pgtrace
.logs
[RBUF_IDX(pgtrace
.rdidx
, pgtrace
.size
- 1)]), front
* sizeof(log_t
));
381 back
= total
- front
;
383 buf
+= front
* sizeof(log_t
);
384 memcpy(buf
, pgtrace
.logs
, back
* sizeof(log_t
));
387 pgtrace
.rdidx
+= total
;
389 simple_unlock(&pgtrace
.loglock
);
390 ml_set_interrupts_enabled(ints
);
392 return total
* sizeof(log_t
);
// pgtrace_get_stats: report the logger's cumulative byte count, then let
// the decoder fill in its own statistics.
396 pgtrace_get_stats(pgtrace_stats_t
*stats
)
402 stats
->stat_logger
.sl_bytes
= pgtrace
.bytes
;
403 pgtrace_decoder_get_stats(stats
);
408 #else // CONFIG_PGTRACE_NONKEXT
// Kext-build pgtrace state: lock group/attribute pointers. The rest of
// the struct (decoder, logger, active, lock_grp, probelock, probes) lies
// outside this extract.
417 lck_grp_attr_t
*lock_grp_attr
;
418 lck_attr_t
*lock_attr
;
422 //------------------------------------
423 // functions for pmap fault handler
424 // - pgtrace_decode_and_run
425 // - pgtrace_write_log
426 //------------------------------------
// pgtrace_decode_and_run: decode the faulting instruction via the
// registered decoder, pick the CVA to execute against — the current
// tracing page when the decoded address matches fva, else the front page
// — then timestamp the result and run the instruction. cva_page[0] is the
// front page, cva_page[1] the current page; panic on an address outside
// both. Some branch/brace lines and the pa computation are missing from
// this extract.
428 pgtrace_decode_and_run(uint32_t inst
, vm_offset_t fva
, vm_map_offset_t
*cva_page
, arm_saved_state_t
*ss
, pgtrace_run_result_t
*res
)
431 pgtrace_instruction_info_t info
;
432 vm_offset_t cva_front_page
= cva_page
[0];
433 vm_offset_t cva_cur_page
= cva_page
[1];
435 pgtrace
.decoder
->decode(inst
, ss
, &info
);
437 if (info
.addr
== fva
) {
438 cva
= cva_cur_page
+ (fva
& ARM_PGMASK
);
440 // which means a front page is not a tracing page
441 cva
= cva_front_page
+ (fva
& ARM_PGMASK
);
446 panic("%s: invalid address cva=%lx fva=%lx info.addr=%lx inst=%x", __func__
, cva
, fva
, info
.addr
, inst
);
449 absolutetime_to_nanoseconds(mach_absolute_time(), &res
->rr_time
);
451 pgtrace
.decoder
->run(inst
, pa
, cva
, ss
, res
);
// pgtrace_write_log (kext build): forward the run result to the
// registered logger callback.
457 pgtrace_write_log(pgtrace_run_result_t res
)
459 pgtrace
.logger
->write(res
);
463 //------------------------------------
464 // functions for kext
466 // - pgtrace_add_probe
467 // - pgtrace_clear_probe
471 //------------------------------------
// pgtrace_init (kext build): reject decoder/logger pairs whose magic is
// not 0xfeedface or whose arch string is not "arm64"; otherwise set up
// locks and the probe queue (mirrors the non-kext init) and store the
// callback tables.
473 pgtrace_init(decoder_t
*decoder
, logger_t
*logger
)
475 kprintf("%s decoder=%p logger=%p\n", __func__
, decoder
, logger
);
477 assert(decoder
&& logger
);
479 if (decoder
->magic
!= 0xfeedface || logger
->magic
!= 0xfeedface ||
480 strcmp(decoder
->arch
, "arm64") != 0 || strcmp(logger
->arch
, "arm64") != 0) {
481 kprintf("%s:wrong decoder/logger magic=%llx/%llx arch=%s/%s", __func__
, decoder
->magic
, logger
->magic
, decoder
->arch
, logger
->arch
);
485 pgtrace
.lock_attr
= lck_attr_alloc_init();
486 pgtrace
.lock_grp_attr
= lck_grp_attr_alloc_init();
487 pgtrace
.lock_grp
= lck_grp_alloc_init("pgtrace_lock", pgtrace
.lock_grp_attr
);
489 lck_mtx_init(&pgtrace
.probelock
, pgtrace
.lock_grp
, pgtrace
.lock_attr
);
491 queue_init(&pgtrace
.probes
);
492 pgtrace
.decoder
= decoder
;
493 pgtrace
.logger
= logger
;
// pgtrace_add_probe (kext build): same shape as the non-kext variant —
// log the request, validate the range, allocate a probe_t, bind it to the
// thread's pmap (thread == NULL branch body missing from this extract),
// and enqueue under probelock.
499 pgtrace_add_probe(thread_t thread
, vm_offset_t start
, vm_offset_t end
)
502 queue_head_t
*q
= &pgtrace
.probes
;
504 kprintf("%s start=%lx end=%lx\n", __func__
, start
, end
);
// Range-validation failure path (guarding condition line missing here).
507 kprintf("%s Invalid start=%lx end=%lx\n", __func__
, start
, end
);
511 p
= kalloc(sizeof(probe_t
));
514 if (thread
== NULL
) {
517 p
->pmap
= vm_map_pmap(thread
->map
);
520 lck_mtx_lock(&pgtrace
.probelock
);
521 queue_enter(q
, p
, probe_t
*, chain
);
522 lck_mtx_unlock(&pgtrace
.probelock
);
// pgtrace_clear_probe (kext build): remove and kfree every probe on the
// list under probelock, using the capture-next-then-remove walk.
528 pgtrace_clear_probe(void)
531 queue_head_t
*q
= &pgtrace
.probes
;
533 kprintf("%s\n", __func__
);
535 lck_mtx_lock(&pgtrace
.probelock
);
537 p
= (probe_t
*)queue_first(q
);
538 while (!queue_end(q
, (queue_entry_t
)p
)) {
539 next
= (probe_t
*)queue_next(&(p
->chain
));
541 queue_remove(q
, p
, probe_t
*, chain
);
542 kfree(p
, sizeof(probe_t
));
547 lck_mtx_unlock(&pgtrace
.probelock
);
// pgtrace_start (kext build): early-out when already active, set the
// active flag, then register every probe's page range with the pmap layer
// under probelock. Signature and some brace lines are missing from this
// extract.
556 queue_head_t
*q
= &pgtrace
.probes
;
558 kprintf("%s\n", __func__
);
560 if (pgtrace
.active
== true) {
564 pgtrace
.active
= true;
566 lck_mtx_lock(&pgtrace
.probelock
);
568 queue_iterate(q
, p
, probe_t
*, chain
) {
569 pmap_pgtrace_add_page(p
->pmap
, p
->start
, p
->end
);
572 lck_mtx_unlock(&pgtrace
.probelock
);
// pgtrace_stop (kext build): unregister every probe's page range from the
// pmap layer under probelock, then clear the active flag. Signature line
// is missing from this extract.
581 queue_head_t
*q
= &pgtrace
.probes
;
583 kprintf("%s\n", __func__
);
585 lck_mtx_lock(&pgtrace
.probelock
);
587 queue_iterate(q
, p
, probe_t
*, chain
) {
588 pmap_pgtrace_delete_page(p
->pmap
, p
->start
, p
->end
);
591 lck_mtx_unlock(&pgtrace
.probelock
);
593 pgtrace
.active
= false;
// pgtrace_active (kext build): body fragment returning the active flag;
// the function signature line is missing from this extract.
599 return pgtrace
.active
;
601 #endif // CONFIG_PGTRACE_NONKEXT
603 // empty funcs for release kernel
// Release (non-CONFIG_PGTRACE) build: declare the public entry points so
// callers link; the empty stub definitions that follow are only partially
// visible in this extract.
604 extern void pgtrace_stop(void);
605 extern void pgtrace_start(void);
606 extern void pgtrace_clear_probe(void);
607 extern void pgtrace_add_probe(void);
608 extern void pgtrace_init(void);
609 extern void pgtrace_active(void);
619 pgtrace_clear_probe(void)
623 pgtrace_add_probe(void)