/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
30 #include <mach/mach_types.h>
31 #include <IOKit/IOLib.h>
32 #include <sys/msgbuf.h>
33 #include <sys/errno.h>
34 #include <arm64/pgtrace.h>
35 #include <libkern/OSDebug.h>
45 #if CONFIG_PGTRACE_NONKEXT
46 #include "pgtrace_decoder.h"
48 //--------------------------------------------
51 #define RBUF_DEFAULT_SIZE 1024
52 #define RBUF_IDX(idx, mask) ((idx) & (mask))
55 //--------------------------------------------
58 typedef uint8_t RWLOCK
;
62 pgtrace_run_result_t res
;
63 void *stack
[PGTRACE_STACK_DEPTH
];
66 //--------------------------------------------
70 log_t
*logs
; // Protect
71 uint32_t size
; // Protect
72 uint64_t rdidx
, wridx
; // Protect
73 decl_simple_lock_data(, loglock
);
80 queue_head_t probes
; // Protect
83 lck_grp_attr_t
*lock_grp_attr
;
84 lck_attr_t
*lock_attr
;
88 //--------------------------------------------
91 void pgtrace_init(void)
93 simple_lock_init(&pgtrace
.loglock
, 0);
95 pgtrace
.lock_attr
= lck_attr_alloc_init();
96 pgtrace
.lock_grp_attr
= lck_grp_attr_alloc_init();
97 pgtrace
.lock_grp
= lck_grp_alloc_init("pgtrace_lock", pgtrace
.lock_grp_attr
);
99 lck_mtx_init(&pgtrace
.probelock
, pgtrace
.lock_grp
, pgtrace
.lock_attr
);
101 queue_init(&pgtrace
.probes
);
103 pgtrace
.size
= RBUF_DEFAULT_SIZE
;
104 pgtrace
.logs
= kalloc(RBUF_DEFAULT_SIZE
* sizeof(log_t
));
107 void pgtrace_clear_probe(void)
110 queue_head_t
*q
= &pgtrace
.probes
;
112 lck_mtx_lock(&pgtrace
.probelock
);
114 p
= (probe_t
*)queue_first(q
);
115 while (!queue_end(q
, (queue_entry_t
)p
)) {
116 next
= (probe_t
*)queue_next(&(p
->chain
));
118 queue_remove(q
, p
, probe_t
*, chain
);
119 kfree(p
, sizeof(probe_t
));
124 lck_mtx_unlock(&pgtrace
.probelock
);
129 int pgtrace_add_probe(thread_t thread
, vm_offset_t start
, vm_offset_t end
)
132 queue_head_t
*q
= &pgtrace
.probes
;
135 kprintf("%s Invalid start=%lx end=%lx\n", __func__
, start
, end
);
139 p
= kalloc(sizeof(probe_t
));
142 if (thread
== NULL
) {
145 p
->pmap
= vm_map_pmap(thread
->map
);
148 lck_mtx_lock(&pgtrace
.probelock
);
149 queue_enter(q
, p
, probe_t
*, chain
);
150 lck_mtx_unlock(&pgtrace
.probelock
);
155 void pgtrace_start(void)
158 queue_head_t
*q
= &pgtrace
.probes
;
160 kprintf("%s\n", __func__
);
162 if (pgtrace
.enabled
) {
168 lck_mtx_lock(&pgtrace
.probelock
);
170 queue_iterate(q
, p
, probe_t
*, chain
) {
171 pmap_pgtrace_add_page(p
->pmap
, p
->start
, p
->end
);
174 lck_mtx_unlock(&pgtrace
.probelock
);
179 void pgtrace_stop(void)
182 queue_head_t
*q
= &pgtrace
.probes
;
184 kprintf("%s\n", __func__
);
186 lck_mtx_lock(&pgtrace
.probelock
);
188 queue_iterate(q
, p
, probe_t
*, chain
) {
189 pmap_pgtrace_delete_page(p
->pmap
, p
->start
, p
->end
);
192 lck_mtx_unlock(&pgtrace
.probelock
);
197 uint32_t pgtrace_get_size(void)
202 bool pgtrace_set_size(uint32_t size
)
204 log_t
*old_buf
, *new_buf
;
205 uint32_t old_size
, new_size
= 1;
207 // round up to next power of 2
208 while (size
> new_size
) {
210 if (new_size
> 0x100000) {
211 // over million entries
212 kprintf("%s: size=%x new_size=%x is too big\n", __func__
, size
, new_size
);
217 new_buf
= kalloc(new_size
* sizeof(log_t
));
218 if (new_buf
== NULL
) {
219 kprintf("%s: can't allocate new_size=%x\n entries", __func__
, new_size
);
225 simple_lock(&pgtrace
.loglock
);
226 old_buf
= pgtrace
.logs
;
227 old_size
= pgtrace
.size
;
228 pgtrace
.logs
= new_buf
;
229 pgtrace
.size
= new_size
;
230 pgtrace
.rdidx
= pgtrace
.wridx
= 0;
231 simple_unlock(&pgtrace
.loglock
);
234 kfree(old_buf
, old_size
* sizeof(log_t
));
240 void pgtrace_clear_trace(void)
242 simple_lock(&pgtrace
.loglock
);
243 pgtrace
.rdidx
= pgtrace
.wridx
= 0;
244 simple_unlock(&pgtrace
.loglock
);
247 boolean_t
pgtrace_active(void)
249 return (pgtrace
.enabled
> 0);
252 uint32_t pgtrace_get_option(void)
254 return pgtrace
.option
;
257 void pgtrace_set_option(uint32_t option
)
259 pgtrace
.option
= option
;
262 // pgtrace_write_log() is in interrupt disabled context
263 void pgtrace_write_log(pgtrace_run_result_t res
)
267 const char *rwmap
[] = { "R", "W", "PREFETCH" };
269 log
.id
= pgtrace
.id
++;
272 if (pgtrace
.option
& PGTRACE_OPTION_KPRINTF
) {
278 snprintf(p
, MSG_MAX
, "%llu %s ", res
.rr_time
, rwmap
[res
.rr_rw
]);
281 for (i
= 0; i
< res
.rr_num
; i
++) {
282 snprintf(p
, MSG_MAX
-(p
-msg
), "%lx=%llx ", res
.rr_addrdata
[i
].ad_addr
, res
.rr_addrdata
[i
].ad_data
);
286 kprintf("%s %s\n", __func__
, msg
);
289 if (pgtrace
.option
& PGTRACE_OPTION_STACK
) {
290 OSBacktrace(log
.stack
, PGTRACE_STACK_DEPTH
);
293 pgtrace
.bytes
+= sizeof(log
);
295 simple_lock(&pgtrace
.loglock
);
297 pgtrace
.logs
[RBUF_IDX(pgtrace
.wridx
, pgtrace
.size
-1)] = log
;
299 // Advance rdidx if ring is full
300 if (RBUF_IDX(pgtrace
.wridx
, pgtrace
.size
-1) == RBUF_IDX(pgtrace
.rdidx
, pgtrace
.size
-1) &&
301 (pgtrace
.wridx
!= pgtrace
.rdidx
)) {
306 // Signal if ring was empty
307 if (pgtrace
.wridx
== (pgtrace
.rdidx
+ 1)) {
308 thread_wakeup(pgtrace
.logs
);
311 simple_unlock(&pgtrace
.loglock
);
316 // pgtrace_read_log() is in user thread
317 int64_t pgtrace_read_log(uint8_t *buf
, uint32_t size
)
319 int total
, front
, back
;
323 if (pgtrace
.enabled
== FALSE
) {
327 total
= size
/ sizeof(log_t
);
329 // Check if buf is too small
330 if (buf
&& total
== 0) {
334 ints
= ml_set_interrupts_enabled(FALSE
);
335 simple_lock(&pgtrace
.loglock
);
337 // Wait if ring is empty
338 if (pgtrace
.rdidx
== pgtrace
.wridx
) {
339 assert_wait(pgtrace
.logs
, THREAD_ABORTSAFE
);
341 simple_unlock(&pgtrace
.loglock
);
342 ml_set_interrupts_enabled(ints
);
344 wr
= thread_block(NULL
);
345 if (wr
!= THREAD_AWAKENED
) {
349 ints
= ml_set_interrupts_enabled(FALSE
);
350 simple_lock(&pgtrace
.loglock
);
354 if ((pgtrace
.rdidx
+ total
) > pgtrace
.wridx
) {
355 total
= (int)(pgtrace
.wridx
- pgtrace
.rdidx
);
359 if ((RBUF_IDX(pgtrace
.rdidx
, pgtrace
.size
-1) + total
) >= pgtrace
.size
) {
360 front
= pgtrace
.size
- RBUF_IDX(pgtrace
.rdidx
, pgtrace
.size
-1);
365 memcpy(buf
, &(pgtrace
.logs
[RBUF_IDX(pgtrace
.rdidx
, pgtrace
.size
-1)]), front
*sizeof(log_t
));
370 buf
+= front
* sizeof(log_t
);
371 memcpy(buf
, pgtrace
.logs
, back
*sizeof(log_t
));
374 pgtrace
.rdidx
+= total
;
376 simple_unlock(&pgtrace
.loglock
);
377 ml_set_interrupts_enabled(ints
);
379 return total
*sizeof(log_t
);
382 int pgtrace_get_stats(pgtrace_stats_t
*stats
)
388 stats
->stat_logger
.sl_bytes
= pgtrace
.bytes
;
389 pgtrace_decoder_get_stats(stats
);
394 #else // CONFIG_PGTRACE_NONKEXT
403 lck_grp_attr_t
*lock_grp_attr
;
404 lck_attr_t
*lock_attr
;
408 //------------------------------------
409 // functions for pmap fault handler
410 // - pgtrace_decode_and_run
411 // - pgtrace_write_log
412 //------------------------------------
413 int pgtrace_decode_and_run(uint32_t inst
, vm_offset_t fva
, vm_map_offset_t
*cva_page
, arm_saved_state_t
*ss
, pgtrace_run_result_t
*res
)
416 pgtrace_instruction_info_t info
;
417 vm_offset_t cva_front_page
= cva_page
[0];
418 vm_offset_t cva_cur_page
= cva_page
[1];
420 pgtrace
.decoder
->decode(inst
, ss
, &info
);
422 if (info
.addr
== fva
) {
423 cva
= cva_cur_page
+ (fva
& ARM_PGMASK
);
425 // which means a front page is not a tracing page
426 cva
= cva_front_page
+ (fva
& ARM_PGMASK
);
431 panic("%s: invalid address cva=%lx fva=%lx info.addr=%lx inst=%x", __func__
, cva
, fva
, info
.addr
, inst
);
434 absolutetime_to_nanoseconds(mach_absolute_time(), &res
->rr_time
);
436 pgtrace
.decoder
->run(inst
, pa
, cva
, ss
, res
);
441 int pgtrace_write_log(pgtrace_run_result_t res
)
443 pgtrace
.logger
->write(res
);
447 //------------------------------------
448 // functions for kext
450 // - pgtrace_add_probe
451 // - pgtrace_clear_probe
455 //------------------------------------
456 int pgtrace_init(decoder_t
*decoder
, logger_t
*logger
)
458 kprintf("%s decoder=%p logger=%p\n", __func__
, decoder
, logger
);
460 assert(decoder
&& logger
);
462 if (decoder
->magic
!= 0xfeedface || logger
->magic
!= 0xfeedface ||
463 strcmp(decoder
->arch
, "arm64") != 0 || strcmp(logger
->arch
, "arm64") != 0) {
464 kprintf("%s:wrong decoder/logger magic=%llx/%llx arch=%s/%s", __func__
, decoder
->magic
, logger
->magic
, decoder
->arch
, logger
->arch
);
468 pgtrace
.lock_attr
= lck_attr_alloc_init();
469 pgtrace
.lock_grp_attr
= lck_grp_attr_alloc_init();
470 pgtrace
.lock_grp
= lck_grp_alloc_init("pgtrace_lock", pgtrace
.lock_grp_attr
);
472 lck_mtx_init(&pgtrace
.probelock
, pgtrace
.lock_grp
, pgtrace
.lock_attr
);
474 queue_init(&pgtrace
.probes
);
475 pgtrace
.decoder
= decoder
;
476 pgtrace
.logger
= logger
;
481 int pgtrace_add_probe(thread_t thread
, vm_offset_t start
, vm_offset_t end
)
484 queue_head_t
*q
= &pgtrace
.probes
;
486 kprintf("%s start=%lx end=%lx\n", __func__
, start
, end
);
489 kprintf("%s Invalid start=%lx end=%lx\n", __func__
, start
, end
);
493 p
= kalloc(sizeof(probe_t
));
496 if (thread
== NULL
) {
499 p
->pmap
= vm_map_pmap(thread
->map
);
502 lck_mtx_lock(&pgtrace
.probelock
);
503 queue_enter(q
, p
, probe_t
*, chain
);
504 lck_mtx_unlock(&pgtrace
.probelock
);
509 void pgtrace_clear_probe(void)
512 queue_head_t
*q
= &pgtrace
.probes
;
514 kprintf("%s\n", __func__
);
516 lck_mtx_lock(&pgtrace
.probelock
);
518 p
= (probe_t
*)queue_first(q
);
519 while (!queue_end(q
, (queue_entry_t
)p
)) {
520 next
= (probe_t
*)queue_next(&(p
->chain
));
522 queue_remove(q
, p
, probe_t
*, chain
);
523 kfree(p
, sizeof(probe_t
));
528 lck_mtx_unlock(&pgtrace
.probelock
);
533 void pgtrace_start(void)
536 queue_head_t
*q
= &pgtrace
.probes
;
538 kprintf("%s\n", __func__
);
540 if (pgtrace
.active
== true) {
544 pgtrace
.active
= true;
546 lck_mtx_lock(&pgtrace
.probelock
);
548 queue_iterate(q
, p
, probe_t
*, chain
) {
549 pmap_pgtrace_add_page(p
->pmap
, p
->start
, p
->end
);
552 lck_mtx_unlock(&pgtrace
.probelock
);
557 void pgtrace_stop(void)
560 queue_head_t
*q
= &pgtrace
.probes
;
562 kprintf("%s\n", __func__
);
564 lck_mtx_lock(&pgtrace
.probelock
);
566 queue_iterate(q
, p
, probe_t
*, chain
) {
567 pmap_pgtrace_delete_page(p
->pmap
, p
->start
, p
->end
);
570 lck_mtx_unlock(&pgtrace
.probelock
);
572 pgtrace
.active
= false;
575 bool pgtrace_active(void)
577 return pgtrace
.active
;
579 #endif // CONFIG_PGTRACE_NONKEXT
581 // empty funcs for release kernel
582 extern void pgtrace_stop(void);
583 extern void pgtrace_start(void);
584 extern void pgtrace_clear_probe(void);
585 extern void pgtrace_add_probe(void);
586 extern void pgtrace_init(void);
587 extern void pgtrace_active(void);
588 void pgtrace_stop(void) {}
589 void pgtrace_start(void) {}
590 void pgtrace_clear_probe(void) {}
591 void pgtrace_add_probe(void) {}
592 void pgtrace_init(void) {}
593 void pgtrace_active(void) {}