/* osfmk/arm64/pgtrace.c — Apple XNU (xnu-6153.61.1) */
1 /*
2 * Copyright (c) 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #if CONFIG_PGTRACE
30 #include <mach/mach_types.h>
31 #include <IOKit/IOLib.h>
32 #include <sys/msgbuf.h>
33 #include <sys/errno.h>
34 #include <arm64/pgtrace.h>
35 #include <libkern/OSDebug.h>
36
// A traced virtual-address range registered via pgtrace_add_probe().
typedef struct {
	queue_chain_t chain;    // linkage on the global probes queue

	pmap_t pmap;            // target pmap; NULL when no thread was given — presumably
	                        // meaning "kernel/current pmap", TODO confirm in pmap_pgtrace_add_page()
	vm_offset_t start;      // first VA of the traced range
	vm_offset_t end;        // last VA of the traced range
} probe_t;
44
45 #if CONFIG_PGTRACE_NONKEXT
46 #include "pgtrace_decoder.h"
47
//--------------------------------------------
// Macros
//
#define RBUF_DEFAULT_SIZE 1024      // default ring-buffer capacity, in entries
// Map a free-running index to a ring slot.  Requires the ring size to be a
// power of two so that (size - 1) is a contiguous bit mask.
#define RBUF_IDX(idx, mask) ((idx) & (mask))
#define MSG_MAX 130                 // size of the kprintf mirror buffer, in bytes
54
//--------------------------------------------
// Types
//
// NOTE(review): RWLOCK appears unused in this file — candidate for removal.
typedef uint8_t RWLOCK;

// One record in the trace ring buffer.
typedef struct {
	uint64_t id;                        // sequence number (taken from pgtrace.id)
	pgtrace_run_result_t res;           // decoded/emulated access result
	void *stack[PGTRACE_STACK_DEPTH];   // backtrace; filled only when PGTRACE_OPTION_STACK is set
} log_t;
65
//--------------------------------------------
// Statics
//
static struct {
	log_t *logs;            // Protected by loglock: ring buffer of trace records
	uint32_t size;          // Protected by loglock: ring capacity (power of two)
	uint64_t rdidx, wridx;  // Protected by loglock: free-running read/write indices
	decl_simple_lock_data(, loglock);

	uint64_t id;            // monotonically increasing record id
	uint32_t option;        // PGTRACE_OPTION_* bitmask
	uint32_t enabled;       // nonzero while tracing is active
	uint32_t bytes;         // total bytes ever logged (statistics)

	queue_head_t probes;    // Protected by probelock: list of probe_t

	lck_grp_t *lock_grp;
	lck_grp_attr_t *lock_grp_attr;
	lck_attr_t *lock_attr;
	lck_mtx_t probelock;
} pgtrace = {};
87
88 //--------------------------------------------
89 // Globals
90 //
91 void
92 pgtrace_init(void)
93 {
94 simple_lock_init(&pgtrace.loglock, 0);
95
96 pgtrace.lock_attr = lck_attr_alloc_init();
97 pgtrace.lock_grp_attr = lck_grp_attr_alloc_init();
98 pgtrace.lock_grp = lck_grp_alloc_init("pgtrace_lock", pgtrace.lock_grp_attr);
99
100 lck_mtx_init(&pgtrace.probelock, pgtrace.lock_grp, pgtrace.lock_attr);
101
102 queue_init(&pgtrace.probes);
103
104 pgtrace.size = RBUF_DEFAULT_SIZE;
105 pgtrace.logs = kalloc(RBUF_DEFAULT_SIZE * sizeof(log_t));
106 }
107
108 void
109 pgtrace_clear_probe(void)
110 {
111 probe_t *p, *next;
112 queue_head_t *q = &pgtrace.probes;
113
114 lck_mtx_lock(&pgtrace.probelock);
115
116 p = (probe_t *)queue_first(q);
117 while (!queue_end(q, (queue_entry_t)p)) {
118 next = (probe_t *)queue_next(&(p->chain));
119
120 queue_remove(q, p, probe_t *, chain);
121 kfree(p, sizeof(probe_t));
122
123 p = next;
124 }
125
126 lck_mtx_unlock(&pgtrace.probelock);
127
128 return;
129 }
130
131 int
132 pgtrace_add_probe(thread_t thread, vm_offset_t start, vm_offset_t end)
133 {
134 probe_t *p;
135 queue_head_t *q = &pgtrace.probes;
136
137 if (start > end) {
138 kprintf("%s Invalid start=%lx end=%lx\n", __func__, start, end);
139 return -1;
140 }
141
142 p = kalloc(sizeof(probe_t));
143 p->start = start;
144 p->end = end;
145 if (thread == NULL) {
146 p->pmap = NULL;
147 } else {
148 p->pmap = vm_map_pmap(thread->map);
149 }
150
151 lck_mtx_lock(&pgtrace.probelock);
152 queue_enter(q, p, probe_t *, chain);
153 lck_mtx_unlock(&pgtrace.probelock);
154
155 return 0;
156 }
157
158 void
159 pgtrace_start(void)
160 {
161 probe_t *p;
162 queue_head_t *q = &pgtrace.probes;
163
164 kprintf("%s\n", __func__);
165
166 if (pgtrace.enabled) {
167 return;
168 }
169
170 pgtrace.enabled = 1;
171
172 lck_mtx_lock(&pgtrace.probelock);
173
174 queue_iterate(q, p, probe_t *, chain) {
175 pmap_pgtrace_add_page(p->pmap, p->start, p->end);
176 }
177
178 lck_mtx_unlock(&pgtrace.probelock);
179
180 return;
181 }
182
183 void
184 pgtrace_stop(void)
185 {
186 probe_t *p;
187 queue_head_t *q = &pgtrace.probes;
188
189 kprintf("%s\n", __func__);
190
191 lck_mtx_lock(&pgtrace.probelock);
192
193 queue_iterate(q, p, probe_t *, chain) {
194 pmap_pgtrace_delete_page(p->pmap, p->start, p->end);
195 }
196
197 lck_mtx_unlock(&pgtrace.probelock);
198
199 pgtrace.enabled = 0;
200 }
201
202 uint32_t
203 pgtrace_get_size(void)
204 {
205 return pgtrace.size;
206 }
207
208 bool
209 pgtrace_set_size(uint32_t size)
210 {
211 log_t *old_buf, *new_buf;
212 uint32_t old_size, new_size = 1;
213
214 // round up to next power of 2
215 while (size > new_size) {
216 new_size <<= 1;
217 if (new_size > 0x100000) {
218 // over million entries
219 kprintf("%s: size=%x new_size=%x is too big\n", __func__, size, new_size);
220 return false;
221 }
222 }
223
224 new_buf = kalloc(new_size * sizeof(log_t));
225 if (new_buf == NULL) {
226 kprintf("%s: can't allocate new_size=%x\n entries", __func__, new_size);
227 return false;
228 }
229
230 pgtrace_stop();
231
232 simple_lock(&pgtrace.loglock);
233 old_buf = pgtrace.logs;
234 old_size = pgtrace.size;
235 pgtrace.logs = new_buf;
236 pgtrace.size = new_size;
237 pgtrace.rdidx = pgtrace.wridx = 0;
238 simple_unlock(&pgtrace.loglock);
239
240 if (old_buf) {
241 kfree(old_buf, old_size * sizeof(log_t));
242 }
243
244 return true;
245 }
246
/*
 * Discard all buffered log entries by resetting the ring indices.
 * Takes loglock to stay consistent with pgtrace_write_log(), which
 * runs with interrupts disabled.
 */
void
pgtrace_clear_trace(void)
{
	simple_lock(&pgtrace.loglock);
	pgtrace.rdidx = pgtrace.wridx = 0;
	simple_unlock(&pgtrace.loglock);
}
254
255 boolean_t
256 pgtrace_active(void)
257 {
258 return pgtrace.enabled > 0;
259 }
260
261 uint32_t
262 pgtrace_get_option(void)
263 {
264 return pgtrace.option;
265 }
266
267 void
268 pgtrace_set_option(uint32_t option)
269 {
270 pgtrace.option = option;
271 }
272
// pgtrace_write_log() is in interrupt disabled context
//
// Append one trace record to the ring buffer.  Depending on
// pgtrace.option, also mirrors the record to kprintf and/or captures a
// kernel backtrace.  When the ring is full the oldest unread entry is
// overwritten (rdidx is advanced past it).
void
pgtrace_write_log(pgtrace_run_result_t res)
{
	uint8_t i;
	log_t log = {};
	// Access-type names indexed by res.rr_rw.
	// NOTE(review): assumes rr_rw is always in 0..2 — confirm against the decoder.
	const char *rwmap[] = { "R", "W", "PREFETCH" };

	log.id = pgtrace.id++;
	log.res = res;

	if (pgtrace.option & PGTRACE_OPTION_KPRINTF) {
		char msg[MSG_MAX];
		char *p;

		p = msg;

		snprintf(p, MSG_MAX, "%llu %s ", res.rr_time, rwmap[res.rr_rw]);
		p += strlen(p);

		// Append each address=data pair, bounded by the space left in msg.
		for (i = 0; i < res.rr_num; i++) {
			snprintf(p, MSG_MAX - (p - msg), "%lx=%llx ", res.rr_addrdata[i].ad_addr, res.rr_addrdata[i].ad_data);
			p += strlen(p);
		}

		kprintf("%s %s\n", __func__, msg);
	}

	if (pgtrace.option & PGTRACE_OPTION_STACK) {
		OSBacktrace(log.stack, PGTRACE_STACK_DEPTH);
	}

	pgtrace.bytes += sizeof(log);

	simple_lock(&pgtrace.loglock);

	pgtrace.logs[RBUF_IDX(pgtrace.wridx, pgtrace.size - 1)] = log;

	// Advance rdidx if ring is full
	if (RBUF_IDX(pgtrace.wridx, pgtrace.size - 1) == RBUF_IDX(pgtrace.rdidx, pgtrace.size - 1) &&
	    (pgtrace.wridx != pgtrace.rdidx)) {
		pgtrace.rdidx++;
	}
	pgtrace.wridx++;

	// Signal if ring was empty
	// (wridx == rdidx + 1 means this write made the first unread entry;
	// a reader may be blocked in pgtrace_read_log() on pgtrace.logs.)
	if (pgtrace.wridx == (pgtrace.rdidx + 1)) {
		thread_wakeup(pgtrace.logs);
	}

	simple_unlock(&pgtrace.loglock);

	return;
}
327
// pgtrace_read_log() is in user thread
//
// Copy up to `size` bytes of whole log entries into `buf`, blocking while
// the ring is empty.  Returns the number of bytes copied, -EINVAL if
// tracing is disabled or buf cannot hold a single entry, or -EINTR if the
// wait was aborted.
int64_t
pgtrace_read_log(uint8_t *buf, uint32_t size)
{
	int total, front, back;
	boolean_t ints;
	wait_result_t wr;

	if (pgtrace.enabled == FALSE) {
		return -EINVAL;
	}

	// Number of whole entries the caller's buffer can hold.
	total = size / sizeof(log_t);

	// Check if buf is too small
	if (buf && total == 0) {
		return -EINVAL;
	}

	// The writer runs with interrupts disabled (see pgtrace_write_log),
	// so interrupts must be off before taking loglock.
	ints = ml_set_interrupts_enabled(FALSE);
	simple_lock(&pgtrace.loglock);

	// Wait if ring is empty
	if (pgtrace.rdidx == pgtrace.wridx) {
		assert_wait(pgtrace.logs, THREAD_ABORTSAFE);

		simple_unlock(&pgtrace.loglock);
		ml_set_interrupts_enabled(ints);

		wr = thread_block(NULL);
		if (wr != THREAD_AWAKENED) {
			return -EINTR;
		}

		ints = ml_set_interrupts_enabled(FALSE);
		simple_lock(&pgtrace.loglock);
	}

	// Trim the size to the number of unread entries.
	if ((pgtrace.rdidx + total) > pgtrace.wridx) {
		total = (int)(pgtrace.wridx - pgtrace.rdidx);
	}

	// Copy front: the run of entries from rdidx up to the physical end of
	// the ring (the copy may wrap around to the start of the buffer).
	if ((RBUF_IDX(pgtrace.rdidx, pgtrace.size - 1) + total) >= pgtrace.size) {
		front = pgtrace.size - RBUF_IDX(pgtrace.rdidx, pgtrace.size - 1);
	} else {
		front = total;
	}

	memcpy(buf, &(pgtrace.logs[RBUF_IDX(pgtrace.rdidx, pgtrace.size - 1)]), front * sizeof(log_t));

	// Copy back if any (the wrapped-around portion at the buffer start)
	back = total - front;
	if (back) {
		buf += front * sizeof(log_t);
		memcpy(buf, pgtrace.logs, back * sizeof(log_t));
	}

	pgtrace.rdidx += total;

	simple_unlock(&pgtrace.loglock);
	ml_set_interrupts_enabled(ints);

	return total * sizeof(log_t);
}
394
395 int
396 pgtrace_get_stats(pgtrace_stats_t *stats)
397 {
398 if (!stats) {
399 return -1;
400 }
401
402 stats->stat_logger.sl_bytes = pgtrace.bytes;
403 pgtrace_decoder_get_stats(stats);
404
405 return 0;
406 }
407
408 #else // CONFIG_PGTRACE_NONKEXT
409
static struct {
	bool active;            // true while tracing is running
	decoder_t *decoder;     // instruction decoder registered by the kext
	logger_t *logger;       // log sink registered by the kext
	queue_head_t probes;    // Protected by probelock: list of probe_t

	lck_grp_t *lock_grp;
	lck_grp_attr_t *lock_grp_attr;
	lck_attr_t *lock_attr;
	lck_mtx_t probelock;
} pgtrace = {};
421
422 //------------------------------------
423 // functions for pmap fault handler
424 // - pgtrace_decode_and_run
425 // - pgtrace_write_log
426 //------------------------------------
/*
 * Decode a faulting instruction and emulate it against the trace mapping.
 * cva_page[0] and cva_page[1] are kernel CVAs of two mapped pages;
 * NOTE(review): naming suggests [0] is the "front" (preceding) page and
 * [1] the faulting page — confirm against the pmap fault handler.
 * Panics if the chosen CVA does not translate to a physical address.
 * Always returns 0.
 */
int
pgtrace_decode_and_run(uint32_t inst, vm_offset_t fva, vm_map_offset_t *cva_page, arm_saved_state_t *ss, pgtrace_run_result_t *res)
{
	vm_offset_t pa, cva;
	pgtrace_instruction_info_t info;
	vm_offset_t cva_front_page = cva_page[0];
	vm_offset_t cva_cur_page = cva_page[1];

	pgtrace.decoder->decode(inst, ss, &info);

	// If the decoded target address equals the fault VA, the access lies
	// on the current (traced) page; otherwise use the front page's CVA.
	if (info.addr == fva) {
		cva = cva_cur_page + (fva & ARM_PGMASK);
	} else {
		// which means a front page is not a tracing page
		cva = cva_front_page + (fva & ARM_PGMASK);
	}

	pa = mmu_kvtop(cva);
	if (!pa) {
		panic("%s: invalid address cva=%lx fva=%lx info.addr=%lx inst=%x", __func__, cva, fva, info.addr, inst);
	}

	// Timestamp the access in nanoseconds before running the emulation.
	absolutetime_to_nanoseconds(mach_absolute_time(), &res->rr_time);

	pgtrace.decoder->run(inst, pa, cva, ss, res);

	return 0;
}
455
456 int
457 pgtrace_write_log(pgtrace_run_result_t res)
458 {
459 pgtrace.logger->write(res);
460 return 0;
461 }
462
463 //------------------------------------
464 // functions for kext
465 // - pgtrace_init
466 // - pgtrace_add_probe
467 // - pgtrace_clear_probe
468 // - pgtrace_start
469 // - pgtrace_stop
470 // - pgtrace_active
471 //------------------------------------
472 int
473 pgtrace_init(decoder_t *decoder, logger_t *logger)
474 {
475 kprintf("%s decoder=%p logger=%p\n", __func__, decoder, logger);
476
477 assert(decoder && logger);
478
479 if (decoder->magic != 0xfeedface || logger->magic != 0xfeedface ||
480 strcmp(decoder->arch, "arm64") != 0 || strcmp(logger->arch, "arm64") != 0) {
481 kprintf("%s:wrong decoder/logger magic=%llx/%llx arch=%s/%s", __func__, decoder->magic, logger->magic, decoder->arch, logger->arch);
482 return EINVAL;
483 }
484
485 pgtrace.lock_attr = lck_attr_alloc_init();
486 pgtrace.lock_grp_attr = lck_grp_attr_alloc_init();
487 pgtrace.lock_grp = lck_grp_alloc_init("pgtrace_lock", pgtrace.lock_grp_attr);
488
489 lck_mtx_init(&pgtrace.probelock, pgtrace.lock_grp, pgtrace.lock_attr);
490
491 queue_init(&pgtrace.probes);
492 pgtrace.decoder = decoder;
493 pgtrace.logger = logger;
494
495 return 0;
496 }
497
498 int
499 pgtrace_add_probe(thread_t thread, vm_offset_t start, vm_offset_t end)
500 {
501 probe_t *p;
502 queue_head_t *q = &pgtrace.probes;
503
504 kprintf("%s start=%lx end=%lx\n", __func__, start, end);
505
506 if (start > end) {
507 kprintf("%s Invalid start=%lx end=%lx\n", __func__, start, end);
508 return -1;
509 }
510
511 p = kalloc(sizeof(probe_t));
512 p->start = start;
513 p->end = end;
514 if (thread == NULL) {
515 p->pmap = NULL;
516 } else {
517 p->pmap = vm_map_pmap(thread->map);
518 }
519
520 lck_mtx_lock(&pgtrace.probelock);
521 queue_enter(q, p, probe_t *, chain);
522 lck_mtx_unlock(&pgtrace.probelock);
523
524 return 0;
525 }
526
527 void
528 pgtrace_clear_probe(void)
529 {
530 probe_t *p, *next;
531 queue_head_t *q = &pgtrace.probes;
532
533 kprintf("%s\n", __func__);
534
535 lck_mtx_lock(&pgtrace.probelock);
536
537 p = (probe_t *)queue_first(q);
538 while (!queue_end(q, (queue_entry_t)p)) {
539 next = (probe_t *)queue_next(&(p->chain));
540
541 queue_remove(q, p, probe_t *, chain);
542 kfree(p, sizeof(probe_t));
543
544 p = next;
545 }
546
547 lck_mtx_unlock(&pgtrace.probelock);
548
549 return;
550 }
551
552 void
553 pgtrace_start(void)
554 {
555 probe_t *p;
556 queue_head_t *q = &pgtrace.probes;
557
558 kprintf("%s\n", __func__);
559
560 if (pgtrace.active == true) {
561 return;
562 }
563
564 pgtrace.active = true;
565
566 lck_mtx_lock(&pgtrace.probelock);
567
568 queue_iterate(q, p, probe_t *, chain) {
569 pmap_pgtrace_add_page(p->pmap, p->start, p->end);
570 }
571
572 lck_mtx_unlock(&pgtrace.probelock);
573
574 return;
575 }
576
577 void
578 pgtrace_stop(void)
579 {
580 probe_t *p;
581 queue_head_t *q = &pgtrace.probes;
582
583 kprintf("%s\n", __func__);
584
585 lck_mtx_lock(&pgtrace.probelock);
586
587 queue_iterate(q, p, probe_t *, chain) {
588 pmap_pgtrace_delete_page(p->pmap, p->start, p->end);
589 }
590
591 lck_mtx_unlock(&pgtrace.probelock);
592
593 pgtrace.active = false;
594 }
595
596 bool
597 pgtrace_active(void)
598 {
599 return pgtrace.active;
600 }
601 #endif // CONFIG_PGTRACE_NONKEXT
602 #else
// empty funcs for release kernel
//
// NOTE(review): pgtrace_active() is declared void here, while the
// CONFIG_PGTRACE builds return boolean_t/bool — confirm no release-kernel
// caller consumes its return value before relying on these stubs.
extern void pgtrace_stop(void);
extern void pgtrace_start(void);
extern void pgtrace_clear_probe(void);
extern void pgtrace_add_probe(void);
extern void pgtrace_init(void);
extern void pgtrace_active(void);
void
pgtrace_stop(void)
{
}
void
pgtrace_start(void)
{
}
void
pgtrace_clear_probe(void)
{
}
void
pgtrace_add_probe(void)
{
}
void
pgtrace_init(void)
{
}
void
pgtrace_active(void)
{
}
634 #endif