/* Source: osfmk/arm64/pgtrace.c — apple/xnu (xnu-4570.61.1), via git.saurik.com mirror */
1 /*
2 * Copyright (c) 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #if CONFIG_PGTRACE
30 #include <mach/mach_types.h>
31 #include <IOKit/IOLib.h>
32 #include <sys/msgbuf.h>
33 #include <sys/errno.h>
34 #include <arm64/pgtrace.h>
35 #include <libkern/OSDebug.h>
36
// A single trace probe: a virtual-address range in a target pmap.
// Kept on pgtrace.probes and applied to the pmap layer on pgtrace_start().
typedef struct {
    queue_chain_t chain;    // linkage for the pgtrace.probes queue

    pmap_t pmap;            // pmap of the target thread's map; NULL when no thread was given
    vm_offset_t start;      // first VA of the traced range (inclusive)
    vm_offset_t end;        // last VA of the traced range
} probe_t;
44
45 #if CONFIG_PGTRACE_NONKEXT
46 #include "pgtrace_decoder.h"
47
48 //--------------------------------------------
49 // Macros
50 //
51 #define RBUF_DEFAULT_SIZE 1024
52 #define RBUF_IDX(idx, mask) ((idx) & (mask))
53 #define MSG_MAX 130
54
55 //--------------------------------------------
56 // Types
57 //
58 typedef uint8_t RWLOCK;
59
60 typedef struct {
61 uint64_t id;
62 pgtrace_run_result_t res;
63 void *stack[PGTRACE_STACK_DEPTH];
64 } log_t;
65
66 //--------------------------------------------
67 // Statics
68 //
// Module-wide state. Ring-buffer fields are protected by loglock;
// the probe list is protected by probelock.
static struct {
    log_t *logs; // ring buffer of entries; protected by loglock
    uint32_t size; // number of entries (power of 2); protected by loglock
    uint64_t rdidx, wridx; // free-running read/write indices; protected by loglock
    decl_simple_lock_data(, loglock);

    uint64_t id;        // next log entry id (incremented in pgtrace_write_log)
    uint32_t option;    // PGTRACE_OPTION_* bitmask
    uint32_t enabled;   // nonzero while tracing is started
    uint32_t bytes;     // total bytes logged since boot (stats)

    queue_head_t probes; // list of probe_t; protected by probelock

    lck_grp_t *lock_grp;
    lck_grp_attr_t *lock_grp_attr;
    lck_attr_t *lock_attr;
    lck_mtx_t probelock;
} pgtrace = {};
87
88 //--------------------------------------------
89 // Globals
90 //
91 void pgtrace_init(void)
92 {
93 simple_lock_init(&pgtrace.loglock, 0);
94
95 pgtrace.lock_attr = lck_attr_alloc_init();
96 pgtrace.lock_grp_attr = lck_grp_attr_alloc_init();
97 pgtrace.lock_grp = lck_grp_alloc_init("pgtrace_lock", pgtrace.lock_grp_attr);
98
99 lck_mtx_init(&pgtrace.probelock, pgtrace.lock_grp, pgtrace.lock_attr);
100
101 queue_init(&pgtrace.probes);
102
103 pgtrace.size = RBUF_DEFAULT_SIZE;
104 pgtrace.logs = kalloc(RBUF_DEFAULT_SIZE * sizeof(log_t));
105 }
106
107 void pgtrace_clear_probe(void)
108 {
109 probe_t *p, *next;
110 queue_head_t *q = &pgtrace.probes;
111
112 lck_mtx_lock(&pgtrace.probelock);
113
114 p = (probe_t *)queue_first(q);
115 while (!queue_end(q, (queue_entry_t)p)) {
116 next = (probe_t *)queue_next(&(p->chain));
117
118 queue_remove(q, p, probe_t *, chain);
119 kfree(p, sizeof(probe_t));
120
121 p = next;
122 }
123
124 lck_mtx_unlock(&pgtrace.probelock);
125
126 return;
127 }
128
129 int pgtrace_add_probe(thread_t thread, vm_offset_t start, vm_offset_t end)
130 {
131 probe_t *p;
132 queue_head_t *q = &pgtrace.probes;
133
134 if (start > end) {
135 kprintf("%s Invalid start=%lx end=%lx\n", __func__, start, end);
136 return -1;
137 }
138
139 p = kalloc(sizeof(probe_t));
140 p->start = start;
141 p->end = end;
142 if (thread == NULL) {
143 p->pmap = NULL;
144 } else {
145 p->pmap = vm_map_pmap(thread->map);
146 }
147
148 lck_mtx_lock(&pgtrace.probelock);
149 queue_enter(q, p, probe_t *, chain);
150 lck_mtx_unlock(&pgtrace.probelock);
151
152 return 0;
153 }
154
155 void pgtrace_start(void)
156 {
157 probe_t *p;
158 queue_head_t *q = &pgtrace.probes;
159
160 kprintf("%s\n", __func__);
161
162 if (pgtrace.enabled) {
163 return;
164 }
165
166 pgtrace.enabled = 1;
167
168 lck_mtx_lock(&pgtrace.probelock);
169
170 queue_iterate(q, p, probe_t *, chain) {
171 pmap_pgtrace_add_page(p->pmap, p->start, p->end);
172 }
173
174 lck_mtx_unlock(&pgtrace.probelock);
175
176 return;
177 }
178
179 void pgtrace_stop(void)
180 {
181 probe_t *p;
182 queue_head_t *q = &pgtrace.probes;
183
184 kprintf("%s\n", __func__);
185
186 lck_mtx_lock(&pgtrace.probelock);
187
188 queue_iterate(q, p, probe_t *, chain) {
189 pmap_pgtrace_delete_page(p->pmap, p->start, p->end);
190 }
191
192 lck_mtx_unlock(&pgtrace.probelock);
193
194 pgtrace.enabled = 0;
195 }
196
// Return the current capacity of the log ring buffer, in entries.
uint32_t pgtrace_get_size(void)
{
    return pgtrace.size;
}
201
202 bool pgtrace_set_size(uint32_t size)
203 {
204 log_t *old_buf, *new_buf;
205 uint32_t old_size, new_size = 1;
206
207 // round up to next power of 2
208 while (size > new_size) {
209 new_size <<= 1;
210 if (new_size > 0x100000) {
211 // over million entries
212 kprintf("%s: size=%x new_size=%x is too big\n", __func__, size, new_size);
213 return false;
214 }
215 }
216
217 new_buf = kalloc(new_size * sizeof(log_t));
218 if (new_buf == NULL) {
219 kprintf("%s: can't allocate new_size=%x\n entries", __func__, new_size);
220 return false;
221 }
222
223 pgtrace_stop();
224
225 simple_lock(&pgtrace.loglock);
226 old_buf = pgtrace.logs;
227 old_size = pgtrace.size;
228 pgtrace.logs = new_buf;
229 pgtrace.size = new_size;
230 pgtrace.rdidx = pgtrace.wridx = 0;
231 simple_unlock(&pgtrace.loglock);
232
233 if (old_buf) {
234 kfree(old_buf, old_size * sizeof(log_t));
235 }
236
237 return true;
238 }
239
240 void pgtrace_clear_trace(void)
241 {
242 simple_lock(&pgtrace.loglock);
243 pgtrace.rdidx = pgtrace.wridx = 0;
244 simple_unlock(&pgtrace.loglock);
245 }
246
247 boolean_t pgtrace_active(void)
248 {
249 return (pgtrace.enabled > 0);
250 }
251
// Return the current PGTRACE_OPTION_* bitmask.
uint32_t pgtrace_get_option(void)
{
    return pgtrace.option;
}

// Replace the PGTRACE_OPTION_* bitmask (e.g. KPRINTF mirroring, STACK capture).
void pgtrace_set_option(uint32_t option)
{
    pgtrace.option = option;
}
261
// pgtrace_write_log() is in interrupt disabled context.
// Appends one entry to the ring buffer, optionally mirroring it to
// kprintf and/or attaching a backtrace, and wakes a blocked reader
// when the ring transitions from empty to non-empty.
void pgtrace_write_log(pgtrace_run_result_t res)
{
    uint8_t i;
    log_t log = {};
    const char *rwmap[] = { "R", "W", "PREFETCH" };  // indexed by res.rr_rw; assumes rr_rw is 0..2 -- TODO confirm

    log.id = pgtrace.id++;  // NOTE(review): incremented outside loglock; presumably safe because callers run with interrupts off -- confirm for SMP
    log.res = res;

    // Optionally mirror the entry to kprintf as "<time> <R|W|PREFETCH> addr=data ..."
    if (pgtrace.option & PGTRACE_OPTION_KPRINTF) {
        char msg[MSG_MAX];
        char *p;

        p = msg;

        snprintf(p, MSG_MAX, "%llu %s ", res.rr_time, rwmap[res.rr_rw]);
        p += strlen(p);

        // Append each addr=data pair, bounded by the remaining space in msg.
        for (i = 0; i < res.rr_num; i++) {
            snprintf(p, MSG_MAX-(p-msg), "%lx=%llx ", res.rr_addrdata[i].ad_addr, res.rr_addrdata[i].ad_data);
            p += strlen(p);
        }

        kprintf("%s %s\n", __func__, msg);
    }

    // Optionally capture a kernel backtrace into the entry.
    if (pgtrace.option & PGTRACE_OPTION_STACK) {
        OSBacktrace(log.stack, PGTRACE_STACK_DEPTH);
    }

    pgtrace.bytes += sizeof(log);

    simple_lock(&pgtrace.loglock);

    pgtrace.logs[RBUF_IDX(pgtrace.wridx, pgtrace.size-1)] = log;

    // Advance rdidx if ring is full: the oldest unread entry is overwritten.
    if (RBUF_IDX(pgtrace.wridx, pgtrace.size-1) == RBUF_IDX(pgtrace.rdidx, pgtrace.size-1) &&
        (pgtrace.wridx != pgtrace.rdidx)) {
        pgtrace.rdidx++;
    }
    pgtrace.wridx++;

    // Signal if ring was empty: a reader may be blocked in pgtrace_read_log().
    if (pgtrace.wridx == (pgtrace.rdidx + 1)) {
        thread_wakeup(pgtrace.logs);
    }

    simple_unlock(&pgtrace.loglock);

    return;
}
315
// pgtrace_read_log() is in user thread.
// Copies up to size/sizeof(log_t) whole entries from the ring into buf,
// blocking (THREAD_ABORTSAFE) while the ring is empty. Returns the
// number of bytes copied, or a negative errno-style value on failure.
int64_t pgtrace_read_log(uint8_t *buf, uint32_t size)
{
    int total, front, back;
    boolean_t ints;
    wait_result_t wr;

    if (pgtrace.enabled == FALSE) {
        return -EINVAL;
    }

    // Number of whole entries the caller's buffer can hold.
    total = size / sizeof(log_t);

    // Check if buf is too small for even one entry.
    if (buf && total == 0) {
        return -EINVAL;
    }

    // Interrupts must be off while holding loglock, because
    // pgtrace_write_log() takes the same lock from interrupt-disabled context.
    ints = ml_set_interrupts_enabled(FALSE);
    simple_lock(&pgtrace.loglock);

    // Wait if ring is empty; the writer wakes us on the first insert.
    if (pgtrace.rdidx == pgtrace.wridx) {
        assert_wait(pgtrace.logs, THREAD_ABORTSAFE);

        simple_unlock(&pgtrace.loglock);
        ml_set_interrupts_enabled(ints);

        wr = thread_block(NULL);
        if (wr != THREAD_AWAKENED) {
            return -EINTR;
        }

        ints = ml_set_interrupts_enabled(FALSE);
        simple_lock(&pgtrace.loglock);
    }

    // Trim the size to what is actually available.
    // NOTE(review): emptiness is not re-checked after wakeup; with multiple
    // readers total can trim to 0 and we return 0 bytes -- confirm intended.
    if ((pgtrace.rdidx + total) > pgtrace.wridx) {
        total = (int)(pgtrace.wridx - pgtrace.rdidx);
    }

    // Copy front: from rdidx up to the physical end of the ring.
    if ((RBUF_IDX(pgtrace.rdidx, pgtrace.size-1) + total) >= pgtrace.size) {
        front = pgtrace.size - RBUF_IDX(pgtrace.rdidx, pgtrace.size-1);
    } else {
        front = total;
    }

    // NOTE(review): plain memcpy, so buf must be a kernel-addressable
    // buffer (not a raw user pointer) -- confirm against callers.
    memcpy(buf, &(pgtrace.logs[RBUF_IDX(pgtrace.rdidx, pgtrace.size-1)]), front*sizeof(log_t));

    // Copy back if any: the wrapped portion at the start of the ring.
    back = total-front;
    if (back) {
        buf += front * sizeof(log_t);
        memcpy(buf, pgtrace.logs, back*sizeof(log_t));
    }

    pgtrace.rdidx += total;

    simple_unlock(&pgtrace.loglock);
    ml_set_interrupts_enabled(ints);

    return total*sizeof(log_t);
}
381
382 int pgtrace_get_stats(pgtrace_stats_t *stats)
383 {
384 if (!stats) {
385 return -1;
386 }
387
388 stats->stat_logger.sl_bytes = pgtrace.bytes;
389 pgtrace_decoder_get_stats(stats);
390
391 return 0;
392 }
393
394 #else // CONFIG_PGTRACE_NONKEXT
395
// Module-wide state for the kext-backed variant: decode/log work is
// delegated to plug-ins registered via pgtrace_init().
static struct {
    bool active;                // tracing on/off; toggled by pgtrace_start/stop
    decoder_t *decoder;         // instruction decoder plug-in
    logger_t *logger;           // log sink plug-in
    queue_head_t probes;        // list of probe_t; protected by probelock

    lck_grp_t *lock_grp;
    lck_grp_attr_t *lock_grp_attr;
    lck_attr_t *lock_attr;
    lck_mtx_t probelock;
} pgtrace = {};
407
408 //------------------------------------
409 // functions for pmap fault handler
410 // - pgtrace_decode_and_run
411 // - pgtrace_write_log
412 //------------------------------------
413 int pgtrace_decode_and_run(uint32_t inst, vm_offset_t fva, vm_map_offset_t *cva_page, arm_saved_state_t *ss, pgtrace_run_result_t *res)
414 {
415 vm_offset_t pa, cva;
416 pgtrace_instruction_info_t info;
417 vm_offset_t cva_front_page = cva_page[0];
418 vm_offset_t cva_cur_page = cva_page[1];
419
420 pgtrace.decoder->decode(inst, ss, &info);
421
422 if (info.addr == fva) {
423 cva = cva_cur_page + (fva & ARM_PGMASK);
424 } else {
425 // which means a front page is not a tracing page
426 cva = cva_front_page + (fva & ARM_PGMASK);
427 }
428
429 pa = mmu_kvtop(cva);
430 if (!pa) {
431 panic("%s: invalid address cva=%lx fva=%lx info.addr=%lx inst=%x", __func__, cva, fva, info.addr, inst);
432 }
433
434 absolutetime_to_nanoseconds(mach_absolute_time(), &res->rr_time);
435
436 pgtrace.decoder->run(inst, pa, cva, ss, res);
437
438 return 0;
439 }
440
// Forward a run result to the registered logger plug-in. Always returns 0.
int pgtrace_write_log(pgtrace_run_result_t res)
{
    pgtrace.logger->write(res);
    return 0;
}
446
447 //------------------------------------
448 // functions for kext
449 // - pgtrace_init
450 // - pgtrace_add_probe
451 // - pgtrace_clear_probe
452 // - pgtrace_start
453 // - pgtrace_stop
454 // - pgtrace_active
455 //------------------------------------
456 int pgtrace_init(decoder_t *decoder, logger_t *logger)
457 {
458 kprintf("%s decoder=%p logger=%p\n", __func__, decoder, logger);
459
460 assert(decoder && logger);
461
462 if (decoder->magic != 0xfeedface || logger->magic != 0xfeedface ||
463 strcmp(decoder->arch, "arm64") != 0 || strcmp(logger->arch, "arm64") != 0) {
464 kprintf("%s:wrong decoder/logger magic=%llx/%llx arch=%s/%s", __func__, decoder->magic, logger->magic, decoder->arch, logger->arch);
465 return EINVAL;
466 }
467
468 pgtrace.lock_attr = lck_attr_alloc_init();
469 pgtrace.lock_grp_attr = lck_grp_attr_alloc_init();
470 pgtrace.lock_grp = lck_grp_alloc_init("pgtrace_lock", pgtrace.lock_grp_attr);
471
472 lck_mtx_init(&pgtrace.probelock, pgtrace.lock_grp, pgtrace.lock_attr);
473
474 queue_init(&pgtrace.probes);
475 pgtrace.decoder = decoder;
476 pgtrace.logger = logger;
477
478 return 0;
479 }
480
481 int pgtrace_add_probe(thread_t thread, vm_offset_t start, vm_offset_t end)
482 {
483 probe_t *p;
484 queue_head_t *q = &pgtrace.probes;
485
486 kprintf("%s start=%lx end=%lx\n", __func__, start, end);
487
488 if (start > end) {
489 kprintf("%s Invalid start=%lx end=%lx\n", __func__, start, end);
490 return -1;
491 }
492
493 p = kalloc(sizeof(probe_t));
494 p->start = start;
495 p->end = end;
496 if (thread == NULL) {
497 p->pmap = NULL;
498 } else {
499 p->pmap = vm_map_pmap(thread->map);
500 }
501
502 lck_mtx_lock(&pgtrace.probelock);
503 queue_enter(q, p, probe_t *, chain);
504 lck_mtx_unlock(&pgtrace.probelock);
505
506 return 0;
507 }
508
509 void pgtrace_clear_probe(void)
510 {
511 probe_t *p, *next;
512 queue_head_t *q = &pgtrace.probes;
513
514 kprintf("%s\n", __func__);
515
516 lck_mtx_lock(&pgtrace.probelock);
517
518 p = (probe_t *)queue_first(q);
519 while (!queue_end(q, (queue_entry_t)p)) {
520 next = (probe_t *)queue_next(&(p->chain));
521
522 queue_remove(q, p, probe_t *, chain);
523 kfree(p, sizeof(probe_t));
524
525 p = next;
526 }
527
528 lck_mtx_unlock(&pgtrace.probelock);
529
530 return;
531 }
532
533 void pgtrace_start(void)
534 {
535 probe_t *p;
536 queue_head_t *q = &pgtrace.probes;
537
538 kprintf("%s\n", __func__);
539
540 if (pgtrace.active == true) {
541 return;
542 }
543
544 pgtrace.active = true;
545
546 lck_mtx_lock(&pgtrace.probelock);
547
548 queue_iterate(q, p, probe_t *, chain) {
549 pmap_pgtrace_add_page(p->pmap, p->start, p->end);
550 }
551
552 lck_mtx_unlock(&pgtrace.probelock);
553
554 return;
555 }
556
557 void pgtrace_stop(void)
558 {
559 probe_t *p;
560 queue_head_t *q = &pgtrace.probes;
561
562 kprintf("%s\n", __func__);
563
564 lck_mtx_lock(&pgtrace.probelock);
565
566 queue_iterate(q, p, probe_t *, chain) {
567 pmap_pgtrace_delete_page(p->pmap, p->start, p->end);
568 }
569
570 lck_mtx_unlock(&pgtrace.probelock);
571
572 pgtrace.active = false;
573 }
574
// Report whether tracing is currently started.
bool pgtrace_active(void)
{
    return pgtrace.active;
}
579 #endif // CONFIG_PGTRACE_NONKEXT
580 #else
// empty funcs for release kernel
// NOTE(review): these stub signatures diverge from the CONFIG_PGTRACE
// variants above (pgtrace_add_probe takes no arguments here instead of
// (thread_t, vm_offset_t, vm_offset_t); pgtrace_active returns void
// instead of bool/boolean_t) -- presumably no caller uses the richer
// signatures in release builds; confirm before relying on them.
extern void pgtrace_stop(void);
extern void pgtrace_start(void);
extern void pgtrace_clear_probe(void);
extern void pgtrace_add_probe(void);
extern void pgtrace_init(void);
extern void pgtrace_active(void);
void pgtrace_stop(void) {}
void pgtrace_start(void) {}
void pgtrace_clear_probe(void) {}
void pgtrace_add_probe(void) {}
void pgtrace_init(void) {}
void pgtrace_active(void) {}
594 #endif