]> git.saurik.com Git - apple/xnu.git/blame - osfmk/profiling/i386/profile-md.c
xnu-123.5.tar.gz
[apple/xnu.git] / osfmk / profiling / i386 / profile-md.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * HISTORY
27 *
28 * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez
29 * Import of Mac OS X kernel (~semeria)
30 *
31 * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez
32 * Import of OSF Mach kernel (~mburg)
33 *
34 * Revision 1.1.5.1 1995/01/06 19:53:45 devrcs
35 * mk6 CR668 - 1.3b26 merge
36 * new file for mk6
37 * [1994/10/12 22:25:24 dwm]
38 *
39 * Revision 1.1.2.2 1994/05/16 19:19:22 meissner
40 * Protect against hash_ptr being null in _profile_update_stats.
41 * [1994/05/16 17:23:53 meissner]
42 *
43 * Remove _profile_cnt_to_hex, _profile_strbuffer.
44 * _profile_print_stats now takes const pointers.
45 * Use the new 64-bit arithmetic support instead of converting to double.
46 * Add _profile_merge_stats to merge statistics.
47 * [1994/04/28 21:45:04 meissner]
48 *
49 * If MACH_ASSERT is on in server or kernel, turn on profiling printfs.
50 * Print out fractional digits for average # of hash searches in stats.
51 * Update overflow_ticks for # times the lprofil counter overflows into high word.
52 * Don't make sizes of C/asm structures a const array, since it has pointers in it.
53 * Add support for converting 64 bit ints to a string.
54 * Use PROF_CNT_TO_DECIMAL where possible instead of PROF_CNT_TO_LDOUBLE.
55 * [1994/04/20 15:47:02 meissner]
56 *
57 * Revision 1.1.2.1 1994/04/08 17:51:51 meissner
58 * no change
59 * [1994/04/08 02:11:40 meissner]
60 *
61 * Make most stats 64 bits, except for things like memory allocation.
62 * [1994/04/02 14:58:28 meissner]
63 *
64 * Add some printfs under #idef DEBUG_PROFILE.
65 * [1994/03/29 21:00:11 meissner]
66 *
67 * Further changes for gprof/prof overflow support.
68 * Add overflow support for {gprof,prof,old,dummy}_mcount counters.
69 * [1994/03/17 20:13:31 meissner]
70 *
71 * Add gprof/prof overflow support
72 * [1994/03/17 14:56:51 meissner]
73 *
74 * Use memset instead of bzero.
75 * [1994/02/28 23:56:10 meissner]
76 *
77 * Add size of histogram counters & unused fields to profile_profil struct
78 * [1994/02/17 21:41:50 meissner]
79 *
80 * Allocate slop space for server in addition to microkernel.
81 * Add 3rd argument to _profile_print_stats for profil info.
82 * Print # histogram ticks too low/too high for server/mk.
83 * [1994/02/16 22:38:18 meissner]
84 *
85 * Calculate percentages for # of hash buckets.
86 * [1994/02/11 16:52:04 meissner]
87 *
88 * Print stats as an unsigned number.
89 * [1994/02/07 18:47:05 meissner]
90 *
91 * For kernel and server, include <kern/assert.h> not <assert.h>.
92 * Always do assert on comparing asm vs. C structure sizes.
93 * Add _profile_reset to reset profiling information.
94 * Add _profile_update_stats to update the statistics.
95 * Move _gprof_write code that updates hash stats to _profile_update_stats.
96 * Don't allocate space for basic block support just yet.
97 * Add support for range checking the gprof arc {from,self}pc addresses.
98 * _profile_debug now calls _profile_update_stats.
99 * Print how many times the acontext was locked.
100 * If DEBUG_PROFILE is defined, set pv->debug to 1.
101 * Expand copyright.
102 * [1994/02/07 12:41:03 meissner]
103 *
104 * Keep track of the number of times the kernel overflows the HISTCOUNTER counter.
105 * [1994/02/03 20:13:28 meissner]
106 *
107 * Add stats for {user,kernel,idle} mode in the kernel.
108 * [1994/02/03 15:17:31 meissner]
109 *
110 * Print unused stats in hex as well as decimal.
111 * [1994/02/03 14:52:20 meissner]
112 *
113 * _profile_print_stats no longer takes profile_{vars,md} pointer arguments.
114 * If stream is NULL, _profile_print_stats will use stdout.
115 * Separate _profile_update_stats from _gprof_write.
116 * [1994/02/03 00:58:55 meissner]
117 *
118 * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars.
119 * [1994/02/01 12:04:01 meissner]
120 *
121 * Add allocation flag to _profile_md_init.
122 * Fix core dumps in _profile_print_stats if no profile_vars ptr passed.
123 * Print numbers in 12 columns, not 8.
124 * Print my_cpu/max_cpu if max_cpu != 0.
125 * Make allocations print like other stats.
126 * Use ACONTEXT_FIRST to start loop on, not ACONTEXT_PROF.
127 * [1994/01/28 23:33:26 meissner]
128 *
129 * Move callback pointers into separate allocation context.
130 * Add size fields for other structures to profile-vars.
131 * [1994/01/26 20:23:37 meissner]
132 *
133 * Allocate initial memory at startup.
134 * Print structure sizes and version number when printing stats.
135 * Initialize size fields and version numbers.
136 * Allocation context pointers moved to _profile_vars.
137 * [1994/01/25 01:46:04 meissner]
138 *
139 * Move init code here from assembly language.
140 * [1994/01/22 01:13:21 meissner]
141 *
142 * Include <profile/profile-internal.h> instead of "profile-md.h".
143 * [1994/01/20 20:56:49 meissner]
144 *
145 * Fixup copyright.
146 * [1994/01/18 23:08:02 meissner]
147 *
148 * Rename profile.h -> profile-md.h.
149 * [1994/01/18 19:44:57 meissner]
150 *
151 * Write out stats unused fields.
152 * Make _prof_write write out the prof stats gprof collects.
153 * [1994/01/15 18:40:37 meissner]
154 *
155 * Remove debug code called from profile-asm.s.
156 * Always print out the # of profil buckets.
157 * [1994/01/15 00:59:06 meissner]
158 *
159 * Fix typo.
160 * [1994/01/04 16:34:46 meissner]
161 *
162 * Move max hash bucket calculation into _gprof_write & put info in stats structure.
163 * [1994/01/04 16:15:17 meissner]
164 *
165 * Use _profile_printf to write diagnostics; add diag_stream to hold stream to write to.
166 * [1994/01/04 15:37:46 meissner]
167 *
168 * Correctly handle case where more than one allocation context was
169 * allocated due to multiple threads.
170 * Cast stats to long for output.
171 * Print number of profil buckets field in _profile_stats.
172 * Add support for GFUNC allocation context.
173 * [1994/01/04 14:26:00 meissner]
174 *
175 * CR 10198 - Initial version.
176 * [1994/01/01 22:44:10 meissne
177 *
178 * $EndLog$
179 */
180
181#include <profiling/profile-internal.h>
182#include <stdlib.h>
183#include <string.h>
184
185#if defined(MACH_KERNEL) || defined(_KERNEL)
186
187#include <mach_assert.h>
188#if MACH_ASSERT && !defined(DEBUG_PROFILE)
189#define DEBUG_PROFILE 1
190#endif
191
192extern int printf(const char *, ...);
193extern void panic(const char *);
194#else
195#include <assert.h>
196#define panic(str) exit(1)
197#endif
198
199#ifndef PROFILE_NUM_FUNCS
200#define PROFILE_NUM_FUNCS 2000
201#endif
202
203#ifndef PROFILE_NUM_ARCS
204#define PROFILE_NUM_ARCS 8000
205#endif
206
207/*
208 * Information passed on from profile-asm.s
209 */
210
211extern int _profile_do_stats;
212extern size_t _profile_size;
213extern size_t _profile_stats_size;
214extern size_t _profile_md_size;
215extern size_t _profile_profil_size;
216extern size_t _profile_hash_size;
217
218/*
219 * All profiling variables, and a dummy gprof record.
220 */
221
222struct profile_vars _profile_vars = { 0 };
223struct hasharc _gprof_dummy = { 0 };
224
225/*
226 * Forward references.
227 */
228
229static void *_profile_md_acontext(struct profile_vars *pv,
230 void *ptr,
231 size_t len,
232 acontext_type_t type);
233
234static void _profile_reset_alloc(struct profile_vars *,
235 acontext_type_t);
236
237extern void _bogus_function(void);
238\f
239/*
240 * Function to set up the initial allocation for a context block.
241 */
242
243static void *
244_profile_md_acontext(struct profile_vars *pv,
245 void *ptr,
246 size_t len,
247 acontext_type_t type)
248{
249 struct memory {
250 struct alloc_context context;
251 struct page_list plist;
252 int data[1];
253 };
254
255 struct memory *mptr = (struct memory *)ptr;
256 struct alloc_context *context = &mptr->context;
257 struct page_list *plist = &mptr->plist;
258
259#ifdef DEBUG_PROFILE
260 _profile_printf("_profile_md_acontext: pv= 0x%lx, ptr= 0x%lx, len= %6ld, type= %d\n",
261 (long)pv,
262 (long)ptr,
263 (long)len,
264 (int)type);
265#endif
266
267 /* Fill in context block header */
268 context->next = pv->acontext[type];
269 context->plist = plist;
270 context->lock = 0;
271
272 /* Fill in first page list information */
273 plist->ptr = plist->first = (void *)&mptr->data[0];
274 plist->next = (struct page_list *)0;
275 plist->bytes_free = len - ((char *)plist->ptr - (char *)ptr);
276 plist->bytes_allocated = 0;
277 plist->num_allocations = 0;
278
279 /* Update statistics */
280 pv->stats.num_context[type]++;
281 pv->stats.wasted[type] += plist->bytes_free;
282 pv->stats.overhead[type] += len - plist->bytes_free;
283
284 /* And setup context block */
285 pv->acontext[type] = context;
286
287 return (void *)((char *)ptr+len);
288}
289
290\f
291/*
292 * Machine dependent function to initialize things.
293 */
294
/*
 * Machine-dependent initialization.
 *
 * pv        - the profiling variables block to initialize.
 * type      - which profiling flavor is in use (gprof, prof, or none);
 *             selects which mcount implementation will be installed.
 * alloc_mem - if false, only the fields are initialized and no memory
 *             is allocated (the caller will provide memory later).
 *
 * Also cross-checks that the C compiler and the assembly code
 * (profile-asm.s) agree on the sizes of the shared structures, and
 * panics on any mismatch since that would corrupt memory at runtime.
 */

void
_profile_md_init(struct profile_vars *pv,
		 profile_type_t type,
		 profile_alloc_mem_t alloc_mem)
{
	size_t page_size = pv->page_size;
	size_t arc_size;
	size_t func_size;
	size_t misc_size;
	size_t hash_size;
	size_t extra_arc_size;
	size_t extra_func_size;
	size_t callback_size = page_size;
	void *ptr;
	acontext_type_t ac;
	int i;
	/* Table pairing each structure's C size with the size the assembly
	   code recorded for it; used for the consistency check below. */
	static struct {
		size_t c_size;		/* size C thinks structure is */
		size_t *asm_size_ptr;	/* pointer to size asm thinks struct is */
		const char *name;	/* structure name */
	} sizes[] = {
		{ sizeof(struct profile_profil), &_profile_profil_size, "profile_profil" },
		{ sizeof(struct profile_stats), &_profile_stats_size, "profile_stats" },
		{ sizeof(struct profile_md), &_profile_md_size, "profile_md" },
		{ sizeof(struct profile_vars), &_profile_size, "profile_vars" }};

#ifdef DEBUG_PROFILE
	_profile_printf("_profile_md_init: pv = 0x%lx, type = %d, alloc = %d\n",
			(long) pv,
			(int)type,
			(int)alloc_mem);
#endif

	/* Verify C and asm agree on structure sizes; a mismatch is fatal. */
	for (i = 0; i < sizeof (sizes) / sizeof(sizes[0]); i++) {
		if (sizes[i].c_size != *sizes[i].asm_size_ptr) {
			_profile_printf("C thinks struct %s is %ld bytes, asm thinks it is %ld bytes\n",
					sizes[i].name,
					(long)sizes[i].c_size,
					(long)*sizes[i].asm_size_ptr);

			panic(sizes[i].name);
		}
	}

	/* Figure out which function will handle compiler generated profiling */
	if (type == PROFILE_GPROF) {
		pv->md.save_mcount_ptr = _gprof_mcount;

	} else if (type == PROFILE_PROF) {
		pv->md.save_mcount_ptr = _prof_mcount;

	} else {
		pv->md.save_mcount_ptr = _dummy_mcount;
	}

	/* Record structure sizes and version stamps so consumers of the
	   profiling output can validate what they are reading. */
	pv->vars_size = sizeof(struct profile_vars);
	pv->plist_size = sizeof(struct page_list);
	pv->acontext_size = sizeof(struct alloc_context);
	pv->callback_size = sizeof(struct callback);
	pv->major_version = PROFILE_MAJOR_VERSION;
	pv->minor_version = PROFILE_MINOR_VERSION;
	pv->type = type;
	pv->do_profile = 1;
	pv->use_dci = 1;
	pv->use_profil = 1;
	pv->output_uarea = 1;
	pv->output_stats = (prof_flag_t) _profile_do_stats;
	pv->output_clock = 1;
	pv->multiple_sections = 1;
	pv->init_format = 0;
	pv->bogus_func = _bogus_function;

#ifdef DEBUG_PROFILE
	pv->debug = 1;
#endif

	if (!pv->error_msg) {
		pv->error_msg = "error in profiling";
	}

	/* Default the page size if the caller did not supply one. */
	if (!pv->page_size) {
		pv->page_size = 4096;
	}

	pv->stats.stats_size = sizeof(struct profile_stats);
	pv->stats.major_version = PROFILE_MAJOR_VERSION;
	pv->stats.minor_version = PROFILE_MINOR_VERSION;

	pv->md.md_size = sizeof(struct profile_md);
	pv->md.major_version = PROFILE_MAJOR_VERSION;
	pv->md.minor_version = PROFILE_MINOR_VERSION;
	pv->md.hash_size = _profile_hash_size;
	pv->md.num_cache = MAX_CACHE;
	pv->md.mcount_ptr_ptr = &_mcount_ptr;
	pv->md.dummy_ptr = &_gprof_dummy;
	pv->md.alloc_pages = _profile_alloc_pages;

	/* zero out all allocation context blocks */
	for (ac = ACONTEXT_FIRST; ac < ACONTEXT_MAX; ac++) {
		pv->acontext[ac] = (struct alloc_context *)0;
	}

	/* Don't allocate memory if not desired */
	if (!alloc_mem) {
		return;
	}

	/* Allocate some space for the initial allocations */
	switch (type) {
	default:
		/* No compiler-generated profiling: only misc + callback
		   contexts are needed. */
		misc_size = page_size;
		ptr = _profile_alloc_pages(misc_size + callback_size);
		ptr = _profile_md_acontext(pv, ptr, misc_size, ACONTEXT_MISC);
		ptr = _profile_md_acontext(pv, ptr, callback_size, ACONTEXT_CALLBACK);
		break;

	case PROFILE_GPROF:

#if defined(MACH_KERNEL) || defined(_KERNEL)
		/*
		 * For the MK & server allocate some slop space now for the
		 * secondary context blocks in case allocations are done at
		 * interrupt level when another allocation is being done. This
		 * is done before the main allocation blocks and will be pushed
		 * so that it will only be used when the main allocation block
		 * is locked.
		 */
		extra_arc_size = 4*page_size;
		extra_func_size = 2*page_size;
#else
		extra_arc_size = extra_func_size = 0;
#endif

		/* Set up allocation areas */
		arc_size = ROUNDUP(PROFILE_NUM_ARCS * sizeof(struct hasharc), page_size);
		func_size = ROUNDUP(PROFILE_NUM_FUNCS * sizeof(struct gfuncs), page_size);
		hash_size = _profile_hash_size * sizeof (struct hasharc *);
		misc_size = ROUNDUP(hash_size + page_size, page_size);

		/* One contiguous page allocation, carved into contexts. */
		ptr = _profile_alloc_pages(arc_size
					   + func_size
					   + misc_size
					   + callback_size
					   + extra_arc_size
					   + extra_func_size);

#if defined(MACH_KERNEL) || defined(_KERNEL)
		/* Slop contexts first: _profile_md_acontext pushes onto the
		   list head, so these end up BEHIND the main contexts and are
		   only used when the main block is locked. */
		ptr = _profile_md_acontext(pv, ptr, extra_arc_size, ACONTEXT_GPROF);
		ptr = _profile_md_acontext(pv, ptr, extra_func_size, ACONTEXT_GFUNC);
#endif
		ptr = _profile_md_acontext(pv, ptr, arc_size, ACONTEXT_GPROF);
		ptr = _profile_md_acontext(pv, ptr, func_size, ACONTEXT_GFUNC);
		ptr = _profile_md_acontext(pv, ptr, misc_size, ACONTEXT_MISC);
		ptr = _profile_md_acontext(pv, ptr, callback_size, ACONTEXT_CALLBACK);

		/* Allocate hash table */
		pv->md.hash_ptr = (struct hasharc **) _profile_alloc(pv, hash_size, ACONTEXT_MISC);
		break;

	case PROFILE_PROF:
		/* Set up allocation areas */
		func_size = ROUNDUP(PROFILE_NUM_FUNCS * sizeof(struct prof_ext), page_size);
		misc_size = page_size;

		ptr = _profile_alloc_pages(func_size
					   + misc_size
					   + callback_size);

		ptr = _profile_md_acontext(pv, ptr, func_size, ACONTEXT_PROF);
		ptr = _profile_md_acontext(pv, ptr, misc_size, ACONTEXT_MISC);
		ptr = _profile_md_acontext(pv, ptr, callback_size, ACONTEXT_CALLBACK);
		break;
	}
}
469
470\f
471/*
472 * Machine dependent functions to start and stop profiling.
473 */
474
475int
476_profile_md_start(void)
477{
478 _mcount_ptr = _profile_vars.md.save_mcount_ptr;
479 return 0;
480}
481
482int
483_profile_md_stop(void)
484{
485 _mcount_ptr = _dummy_mcount;
486 return 0;
487}
488
489\f
490/*
491 * Free up all memory in a memory context block.
492 */
493
494static void
495_profile_reset_alloc(struct profile_vars *pv, acontext_type_t ac)
496{
497 struct alloc_context *aptr;
498 struct page_list *plist;
499
500 for (aptr = pv->acontext[ac];
501 aptr != (struct alloc_context *)0;
502 aptr = aptr->next) {
503
504 for (plist = aptr->plist;
505 plist != (struct page_list *)0;
506 plist = plist->next) {
507
508 plist->ptr = plist->first;
509 plist->bytes_free += plist->bytes_allocated;
510 plist->bytes_allocated = 0;
511 plist->num_allocations = 0;
512 memset(plist->first, '\0', plist->bytes_allocated);
513 }
514 }
515}
516
517\f
518/*
519 * Reset profiling. Since the only user of this function is the kernel
520 * and the server, we don't have to worry about other stuff than gprof.
521 */
522
523void
524_profile_reset(struct profile_vars *pv)
525{
526 struct alloc_context *aptr;
527 struct page_list *plist;
528 struct gfuncs *gfunc;
529
530 if (pv->active) {
531 _profile_md_stop();
532 }
533
534 /* Reset all function unique pointers back to 0 */
535 for (aptr = pv->acontext[ACONTEXT_GFUNC];
536 aptr != (struct alloc_context *)0;
537 aptr = aptr->next) {
538
539 for (plist = aptr->plist;
540 plist != (struct page_list *)0;
541 plist = plist->next) {
542
543 for (gfunc = (struct gfuncs *)plist->first;
544 gfunc < (struct gfuncs *)plist->ptr;
545 gfunc++) {
546
547 *(gfunc->unique_ptr) = (struct hasharc *)0;
548 }
549 }
550 }
551
552 /* Release memory */
553 _profile_reset_alloc(pv, ACONTEXT_GPROF);
554 _profile_reset_alloc(pv, ACONTEXT_GFUNC);
555 _profile_reset_alloc(pv, ACONTEXT_PROF);
556
557 memset((void *)pv->profil_buf, '\0', pv->profil_info.profil_len);
558 memset((void *)pv->md.hash_ptr, '\0', pv->md.hash_size * sizeof(struct hasharc *));
559 memset((void *)&pv->stats, '\0', sizeof(pv->stats));
560
561 pv->stats.stats_size = sizeof(struct profile_stats);
562 pv->stats.major_version = PROFILE_MAJOR_VERSION;
563 pv->stats.minor_version = PROFILE_MINOR_VERSION;
564
565 if (pv->active) {
566 _profile_md_start();
567 }
568}
569
570\f
571/*
572 * Machine dependent function to write out gprof records.
573 */
574
/*
 * Write all gprof call-graph arc records to pv->stream via pv->fwrite_func.
 *
 * Walks every hasharc allocated in the ACONTEXT_GPROF contexts.  For each
 * arc, one record is written, plus two extra records per counter overflow
 * with the count field set to 0x80000000 (so the reader can reconstruct
 * the full 63-bit count from 32-bit records).  If range checking is
 * enabled, arcs whose endpoints fall outside [lowpc, highpc] are redirected
 * to the bogus function marker.
 *
 * callback_ptr is unused here (present for the generic writer interface).
 * Returns the total number of bytes written; calls _profile_error on any
 * short write.
 */

size_t
_gprof_write(struct profile_vars *pv, struct callback *callback_ptr)
{
	struct alloc_context *aptr;
	struct page_list *plist;
	size_t bytes = 0;
	struct hasharc *hptr;
	int i;

	for (aptr = pv->acontext[ACONTEXT_GPROF];
	     aptr != (struct alloc_context *)0;
	     aptr = aptr->next) {

		for (plist = aptr->plist; plist != (struct page_list *)0; plist = plist->next) {
			hptr = (struct hasharc *)plist->first;
			for (i = 0; i < plist->num_allocations; (i++, hptr++)) {

				/* Work on a copy so the live arc record is
				   never modified by the range fixup below. */
				struct gprof_arc arc = hptr->arc;
				int nrecs = 1 + (hptr->overflow * 2);
				int j;

				if (pv->check_funcs) {
					/* Out-of-range addresses are mapped to
					   the designated bogus function. */
					if (arc.frompc < pv->profil_info.lowpc ||
					    arc.frompc > pv->profil_info.highpc) {

						arc.frompc = (prof_uptrint_t)pv->bogus_func;
					}

					if (arc.selfpc < pv->profil_info.lowpc ||
					    arc.selfpc > pv->profil_info.highpc) {

						arc.selfpc = (prof_uptrint_t)pv->bogus_func;
					}
				}

				/* For each overflow, emit 2 extra records with the count
				   set to 0x80000000 */
				for (j = 0; j < nrecs; j++) {
					bytes += sizeof (arc);
					if ((*pv->fwrite_func)((void *)&arc,
							       sizeof(arc),
							       1,
							       pv->stream) != 1) {

						_profile_error(pv);
					}

					/* All records after the first carry the
					   overflow marker count. */
					arc.count = 0x80000000;
				}
			}
		}
	}

	return bytes;
}
630
631\f
632/*
633 * Machine dependent function to write out prof records.
634 */
635
/*
 * Write all prof-style (flat profile) records to pv->stream.
 *
 * Two passes are made:
 *   1. the records prof_mcount collected (ACONTEXT_PROF contexts), and
 *   2. the prof information that gprof_mcount collected on the side
 *      (embedded in the gfuncs records of the ACONTEXT_GFUNC contexts).
 *
 * As with _gprof_write, each counter overflow produces two extra copies of
 * the record with the call count field (cncall) set to 0x80000000 so the
 * reader can reconstruct the full count.
 *
 * callback_ptr is unused here (present for the generic writer interface).
 * Returns the total number of bytes written; calls _profile_error on any
 * short write.
 */

size_t
_prof_write(struct profile_vars *pv, struct callback *callback_ptr)
{
	struct alloc_context *aptr;
	struct page_list *plist;
	size_t bytes = 0;
	struct prof_ext prof_st;	/* external-format record being emitted */
	struct prof_int *pptr;
	struct gfuncs *gptr;
	int nrecs;
	int i, j;

	/* Write out information prof_mcount collects */
	for (aptr = pv->acontext[ACONTEXT_PROF];
	     aptr != (struct alloc_context *)0;
	     aptr = aptr->next) {

		for (plist = aptr->plist; plist != (struct page_list *)0; plist = plist->next) {
			pptr = (struct prof_int *)plist->first;

			for (i = 0; i < plist->num_allocations; (i++, pptr++)) {

				/* Write out 2 records for each overflow, each with a
				   count of 0x80000000 + the normal record */
				prof_st = pptr->prof;
				nrecs = 1 + (pptr->overflow * 2);

				for (j = 0; j < nrecs; j++) {
					bytes += sizeof (struct prof_ext);
					if ((*pv->fwrite_func)((void *)&prof_st,
							       sizeof(prof_st),
							       1,
							       pv->stream) != 1) {

						_profile_error(pv);
					}

					/* Records after the first carry the
					   overflow marker count. */
					prof_st.cncall = 0x80000000;
				}
			}
		}
	}

	/* Now write out the prof information that gprof collects */
	for (aptr = pv->acontext[ACONTEXT_GFUNC];
	     aptr != (struct alloc_context *)0;
	     aptr = aptr->next) {

		for (plist = aptr->plist; plist != (struct page_list *)0; plist = plist->next) {
			gptr = (struct gfuncs *)plist->first;

			for (i = 0; i < plist->num_allocations; (i++, gptr++)) {

				/* Write out 2 records for each overflow, each with a
				   count of 0x80000000 + the normal record */
				prof_st = gptr->prof.prof;
				nrecs = 1 + (gptr->prof.overflow * 2);

				for (j = 0; j < nrecs; j++) {
					bytes += sizeof (struct prof_ext);
					if ((*pv->fwrite_func)((void *)&prof_st,
							       sizeof(prof_st),
							       1,
							       pv->stream) != 1) {

						_profile_error(pv);
					}

					prof_st.cncall = 0x80000000;
				}
			}
		}
	}

	return bytes;
}
712
713\f
714/*
715 * Update any statistics. For the 386, calculate the hash table loading factor.
716 * Also figure out how many overflows occured.
717 */
718
/*
 * Recompute derived statistics in pv->stats:
 *   - the hash-chain length histogram and the maximum chain length,
 *   - how many gprof arc endpoints fall outside [lowpc, highpc]
 *     (only when range checking is enabled),
 *   - the total prof/gprof counter overflow counts, and
 *   - how many histogram ticks overflowed into the high word of an
 *     LHISTCOUNTER (only when profil is in use with 64-bit counters).
 *
 * Called before printing or writing statistics so they reflect the
 * current state of the allocation contexts.
 */

void
_profile_update_stats(struct profile_vars *pv)
{
	struct alloc_context *aptr;
	struct page_list *plist;
	struct hasharc *hptr;
	struct prof_int *pptr;
	struct gfuncs *fptr;
	LHISTCOUNTER *lptr;
	int i;

	/* Clear the bucket-length histogram (slot MAX_BUCKETS counts
	   chains of MAX_BUCKETS or more). */
	for(i = 0; i < MAX_BUCKETS+1; i++) {
		pv->stats.buckets[i] = 0;
	}

	pv->stats.hash_buckets = 0;

	/* Hash table may not exist yet (e.g. before allocation). */
	if (pv->md.hash_ptr) {
		for (i = 0; i < pv->md.hash_size; i++) {
			long nbuckets = 0;
			/* NOTE(review): this declaration shadows the outer
			   hptr; harmless, but worth renaming some day. */
			struct hasharc *hptr;

			for (hptr = pv->md.hash_ptr[i]; hptr; hptr = hptr->next) {
				nbuckets++;
			}

			pv->stats.buckets[ (nbuckets < MAX_BUCKETS) ? nbuckets : MAX_BUCKETS ]++;
			if (pv->stats.hash_buckets < nbuckets) {
				pv->stats.hash_buckets = nbuckets;
			}
		}
	}

	/* Count how many times functions are out of bounds */
	if (pv->check_funcs) {
		pv->stats.bogus_count = 0;

		for (aptr = pv->acontext[ACONTEXT_GPROF];
		     aptr != (struct alloc_context *)0;
		     aptr = aptr->next) {

			for (plist = aptr->plist;
			     plist != (struct page_list *)0;
			     plist = plist->next) {

				hptr = (struct hasharc *)plist->first;
				for (i = 0; i < plist->num_allocations; (i++, hptr++)) {

					if (hptr->arc.frompc < pv->profil_info.lowpc ||
					    hptr->arc.frompc > pv->profil_info.highpc) {
						pv->stats.bogus_count++;
					}

					if (hptr->arc.selfpc < pv->profil_info.lowpc ||
					    hptr->arc.selfpc > pv->profil_info.highpc) {
						pv->stats.bogus_count++;
					}
				}
			}
		}
	}

	/* Figure out how many overflows occurred */
	PROF_ULONG_TO_CNT(pv->stats.prof_overflow, 0);
	PROF_ULONG_TO_CNT(pv->stats.gprof_overflow, 0);

	/* Sum overflow counts from the gprof arc records. */
	for (aptr = pv->acontext[ACONTEXT_GPROF];
	     aptr != (struct alloc_context *)0;
	     aptr = aptr->next) {

		for (plist = aptr->plist;
		     plist != (struct page_list *)0;
		     plist = plist->next) {

			hptr = (struct hasharc *)plist->first;
			for (i = 0; i < plist->num_allocations; (i++, hptr++)) {
				PROF_CNT_ADD(pv->stats.gprof_overflow, hptr->overflow);
			}
		}
	}

	/* Sum overflow counts from prof_mcount's records. */
	for (aptr = pv->acontext[ACONTEXT_PROF];
	     aptr != (struct alloc_context *)0;
	     aptr = aptr->next) {

		for (plist = aptr->plist;
		     plist != (struct page_list *)0;
		     plist = plist->next) {

			pptr = (struct prof_int *)plist->first;
			for (i = 0; i < plist->num_allocations; (i++, pptr++)) {
				PROF_CNT_ADD(pv->stats.prof_overflow, pptr->overflow);
			}
		}
	}

	/* gprof also collects prof-style overflow data in its gfuncs. */
	for (aptr = pv->acontext[ACONTEXT_GFUNC];
	     aptr != (struct alloc_context *)0;
	     aptr = aptr->next) {

		for (plist = aptr->plist;
		     plist != (struct page_list *)0;
		     plist = plist->next) {

			fptr = (struct gfuncs *)plist->first;
			for (i = 0; i < plist->num_allocations; (i++, fptr++)) {
				PROF_CNT_ADD(pv->stats.prof_overflow, fptr->prof.overflow);
			}
		}
	}

	/* Now go through & count how many times the LHISTCOUNTER overflowed into a 2nd word */
	lptr = (LHISTCOUNTER *)pv->profil_buf;

	if (pv->use_profil &&
	    pv->profil_info.counter_size == sizeof(LHISTCOUNTER) &&
	    lptr != (LHISTCOUNTER *)0) {

		PROF_ULONG_TO_CNT(pv->stats.overflow_ticks, 0);
		for (i = 0; i < pv->stats.profil_buckets; i++) {
			PROF_CNT_ADD(pv->stats.overflow_ticks, lptr[i].high);
		}
	}
}
843\f
844#if !defined(_KERNEL) && !defined(MACH_KERNEL)
845
846/*
847 * Routine callable from the debugger that prints the statistics.
848 */
849
850int _profile_debug(void)
851{
852 _profile_update_stats(&_profile_vars);
853 _profile_print_stats(stderr, &_profile_vars.stats, &_profile_vars.profil_info);
854 return 0;
855}
856
857/*
858 * Print the statistics structure in a meaningful way.
859 */
860
/*
 * Print the statistics structure in a meaningful way.
 *
 * stream - output stream; if NULL, stdout is used.
 * stats  - statistics to print; if NULL, nothing is printed.
 * pinfo  - optional profil() histogram info; if NULL, that section
 *          is skipped.
 *
 * Most sections are only printed when the corresponding counters are
 * non-zero, so quiescent configurations produce short output.
 */

void _profile_print_stats(FILE *stream,
			  const struct profile_stats *stats,
			  const struct profile_profil *pinfo)
{
	int i;
	prof_cnt_t total_hits;
	acontext_type_t ac;
	/* Column widths for the allocation-context table, computed below. */
	int width_cname = 0;
	int width_alloc = 0;
	int width_wasted = 0;
	int width_overhead = 0;
	int width_context = 0;
	static const char *cname[ACONTEXT_MAX] = ACONTEXT_NAMES;
	char buf[20];

	if (!stats) {
		return;
	}

	if (!stream) {
		stream = stdout;
	}

	/* Header: version and structure sizes. */
	sprintf(buf, "%ld.%ld", (long)stats->major_version, (long)stats->minor_version);
	fprintf(stream, "%12s profiling version number\n", buf);
	fprintf(stream, "%12lu size of profile_vars\n", (long unsigned)sizeof(struct profile_vars));
	fprintf(stream, "%12lu size of profile_stats\n", (long unsigned)sizeof(struct profile_stats));
	fprintf(stream, "%12lu size of profile_md\n", (long unsigned)sizeof(struct profile_md));
	fprintf(stream, "%12s calls to _{,g}prof_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats->cnt));
	fprintf(stream, "%12s calls to old mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats->old_mcount));
	fprintf(stream, "%12s calls to _dummy_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats->dummy));
	fprintf(stream, "%12lu functions profiled\n", (long unsigned)stats->prof_records);
	fprintf(stream, "%12lu gprof arcs\n", (long unsigned)stats->gprof_records);

	/* profil() histogram information, when available. */
	if (pinfo) {
		fprintf(stream, "%12lu profil buckets\n", (long unsigned)stats->profil_buckets);
		fprintf(stream, "%12lu profil lowpc [0x%lx]\n",
			(long unsigned)pinfo->lowpc,
			(long unsigned)pinfo->lowpc);

		fprintf(stream, "%12lu profil highpc [0x%lx]\n",
			(long unsigned)pinfo->highpc,
			(long unsigned)pinfo->highpc);

		fprintf(stream, "%12lu profil highpc-lowpc\n", (long unsigned)(pinfo->highpc - pinfo->lowpc));
		fprintf(stream, "%12lu profil buffer length\n", (long unsigned)pinfo->profil_len);
		fprintf(stream, "%12lu profil sizeof counters\n", (long unsigned)pinfo->counter_size);
		/* scale is a 16.16 fixed-point fraction; also show it as a double. */
		fprintf(stream, "%12lu profil scale (%g)\n",
			(long unsigned)pinfo->scale,
			((double)pinfo->scale) / ((double) 0x10000));


		/* Print reserved fields only if something wrote to them. */
		for (i = 0; i < sizeof (pinfo->profil_unused) / sizeof (pinfo->profil_unused[0]); i++) {
			if (pinfo->profil_unused[i]) {
				fprintf(stream, "%12lu profil unused[%2d] {0x%.8lx}\n",
					(long unsigned)pinfo->profil_unused[i],
					i,
					(long unsigned)pinfo->profil_unused[i]);
			}
		}
	}

	if (stats->max_cpu) {
		fprintf(stream, "%12lu current cpu/thread\n", (long unsigned)stats->my_cpu);
		fprintf(stream, "%12lu max cpu/thread+1\n", (long unsigned)stats->max_cpu);
	}

	if (stats->bogus_count != 0) {
		fprintf(stream,
			"%12lu gprof functions found outside of range\n",
			(long unsigned)stats->bogus_count);
	}

	if (PROF_CNT_NE_0(stats->too_low)) {
		fprintf(stream,
			"%12s histogram ticks were too low\n",
			PROF_CNT_TO_DECIMAL((char *)0, stats->too_low));
	}

	if (PROF_CNT_NE_0(stats->too_high)) {
		fprintf(stream,
			"%12s histogram ticks were too high\n",
			PROF_CNT_TO_DECIMAL((char *)0, stats->too_high));
	}

	if (PROF_CNT_NE_0(stats->acontext_locked)) {
		fprintf(stream,
			"%12s times an allocation context was locked\n",
			PROF_CNT_TO_DECIMAL((char *)0, stats->acontext_locked));
	}

	/* Kernel/user/idle tick breakdown with percentages. */
	if (PROF_CNT_NE_0(stats->kernel_ticks)
	    || PROF_CNT_NE_0(stats->user_ticks)
	    || PROF_CNT_NE_0(stats->idle_ticks)) {

		prof_cnt_t total_ticks;
		long double total_ticks_dbl;

		total_ticks = stats->kernel_ticks;
		PROF_CNT_LADD(total_ticks, stats->user_ticks);
		PROF_CNT_LADD(total_ticks, stats->idle_ticks);
		total_ticks_dbl = PROF_CNT_TO_LDOUBLE(total_ticks);

		fprintf(stream,
			"%12s total ticks\n",
			PROF_CNT_TO_DECIMAL((char *)0, total_ticks));

		fprintf(stream,
			"%12s ticks within the kernel (%5.2Lf%%)\n",
			PROF_CNT_TO_DECIMAL((char *)0, stats->kernel_ticks),
			100.0L * (PROF_CNT_TO_LDOUBLE(stats->kernel_ticks) / total_ticks_dbl));

		fprintf(stream,
			"%12s ticks within user space (%5.2Lf%%)\n",
			PROF_CNT_TO_DECIMAL((char *)0, stats->user_ticks),
			100.0L * (PROF_CNT_TO_LDOUBLE(stats->user_ticks) / total_ticks_dbl));

		fprintf(stream,
			"%12s ticks idle (%5.2Lf%%)\n",
			PROF_CNT_TO_DECIMAL((char *)0, stats->idle_ticks),
			100.0L * (PROF_CNT_TO_LDOUBLE(stats->idle_ticks) / total_ticks_dbl));
	}

	if (PROF_CNT_NE_0(stats->overflow_ticks)) {
		fprintf(stream, "%12s times a HISTCOUNTER counter would have overflowed\n",
			PROF_CNT_TO_DECIMAL((char *)0, stats->overflow_ticks));
	}

	/* Hash-chain length histogram (slot MAX_BUCKETS = "or more"). */
	if (PROF_CNT_NE_0(stats->hash_num)) {
		long double total_buckets = 0.0L;

		for (i = 0; i <= MAX_BUCKETS; i++) {
			total_buckets += (long double)stats->buckets[i];
		}

		fprintf(stream, "%12lu max bucket(s) on hash chain.\n", (long unsigned)stats->hash_buckets);
		for (i = 0; i < MAX_BUCKETS; i++) {
			if (stats->buckets[i] != 0) {
				fprintf(stream, "%12lu bucket(s) had %d entries (%5.2Lf%%)\n",
					(long unsigned)stats->buckets[i], i,
					100.0L * ((long double)stats->buckets[i] / total_buckets));
			}
		}

		if (stats->buckets[MAX_BUCKETS] != 0) {
			fprintf(stream, "%12lu bucket(s) had more than %d entries (%5.2Lf%%)\n",
				(long unsigned)stats->buckets[MAX_BUCKETS], MAX_BUCKETS,
				100.0L * ((long double)stats->buckets[MAX_BUCKETS] / total_buckets));
		}
	}

	/* Per-slot cache hit statistics. */
	PROF_ULONG_TO_CNT(total_hits, 0);
	for (i = 0; i < MAX_CACHE; i++) {
		PROF_CNT_LADD(total_hits, stats->cache_hits[i]);
	}

	if (PROF_CNT_NE_0(total_hits)) {
		long double total = PROF_CNT_TO_LDOUBLE(stats->cnt);
		long double total_hits_dbl = PROF_CNT_TO_LDOUBLE(total_hits);

		fprintf(stream,
			"%12s cache hits (%.2Lf%%)\n",
			PROF_CNT_TO_DECIMAL((char *)0, total_hits),
			100.0L * (total_hits_dbl / total));

		for (i = 0; i < MAX_CACHE; i++) {
			if (PROF_CNT_NE_0(stats->cache_hits[i])) {
				fprintf(stream,
					"%12s times cache#%d matched (%5.2Lf%% of cache hits, %5.2Lf%% total)\n",
					PROF_CNT_TO_DECIMAL((char *)0, stats->cache_hits[i]),
					i+1,
					100.0L * (PROF_CNT_TO_LDOUBLE(stats->cache_hits[i]) / total_hits_dbl),
					100.0L * (PROF_CNT_TO_LDOUBLE(stats->cache_hits[i]) / total));
			}
		}

		if (PROF_CNT_NE_0(stats->hash_num)) {
			fprintf(stream, "%12s times hash table searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats->hash_num));
			fprintf(stream, "%12s hash buckets searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats->hash_search));
			fprintf(stream, "%12.4Lf average buckets searched\n",
				PROF_CNT_TO_LDOUBLE(stats->hash_search) / PROF_CNT_TO_LDOUBLE(stats->hash_num));
		}
	}

	/* Reserved fields, printed only if non-zero. */
	for (i = 0; i < sizeof (stats->stats_unused) / sizeof (stats->stats_unused[0]); i++) {
		if (PROF_CNT_NE_0(stats->stats_unused[i])) {
			fprintf(stream, "%12s unused[%2d] {0x%.8lx 0x%.8lx}\n",
				PROF_CNT_TO_DECIMAL((char *)0, stats->stats_unused[i]),
				i,
				(unsigned long)stats->stats_unused[i].high,
				(unsigned long)stats->stats_unused[i].low);
		}
	}

	/* Get the width for the allocation contexts */
	for (ac = ACONTEXT_FIRST; ac < ACONTEXT_MAX; ac++) {
		int len;

		if (stats->num_context[ac] == 0) {
			continue;
		}

		len = strlen (cname[ac]);
		if (len > width_cname)
			width_cname = len;

		len = sprintf (buf, "%lu", (long unsigned)stats->num_alloc[ac]);
		if (len > width_alloc)
			width_alloc = len;

		len = sprintf (buf, "%lu", (long unsigned)stats->wasted[ac]);
		if (len > width_wasted)
			width_wasted = len;

		len = sprintf (buf, "%lu", (long unsigned)stats->overhead[ac]);
		if (len > width_overhead)
			width_overhead = len;

		len = sprintf (buf, "%lu", (long unsigned)stats->num_context[ac]);
		if (len > width_context)
			width_context = len;
	}

	/* Print info about allocation contexts */
	for (ac = ACONTEXT_FIRST; ac < ACONTEXT_MAX; ac++) {
		if (stats->num_context[ac] == 0) {
			continue;
		}

		fprintf (stream,
			 "%12lu bytes in %-*s %*lu alloc, %*lu unused, %*lu over, %*lu context\n",
			 (long unsigned)stats->bytes_alloc[ac],
			 width_cname, cname[ac],
			 width_alloc, (long unsigned)stats->num_alloc[ac],
			 width_wasted, (long unsigned)stats->wasted[ac],
			 width_overhead, (long unsigned)stats->overhead[ac],
			 width_context, (long unsigned)stats->num_context[ac]);
	}
}
1100
1101\f
1102/*
 * Merge a new statistics structure into an old one.
1104 */
1105
1106void _profile_merge_stats(struct profile_stats *old_stats, const struct profile_stats *new_stats)
1107{
1108 int i;
1109
1110 /* If nothing passed, just return */
1111 if (!old_stats || !new_stats)
1112 return;
1113
1114 /* If the old_stats has not been initialized, just copy in the new stats */
1115 if (old_stats->major_version == 0) {
1116 *old_stats = *new_stats;
1117
1118 /* Otherwise, update stats, field by field */
1119 } else {
1120 if (old_stats->prof_records < new_stats->prof_records)
1121 old_stats->prof_records = new_stats->prof_records;
1122
1123 if (old_stats->gprof_records < new_stats->gprof_records)
1124 old_stats->gprof_records = new_stats->gprof_records;
1125
1126 if (old_stats->hash_buckets < new_stats->hash_buckets)
1127 old_stats->hash_buckets = new_stats->hash_buckets;
1128
1129 if (old_stats->bogus_count < new_stats->bogus_count)
1130 old_stats->bogus_count = new_stats->bogus_count;
1131
1132 PROF_CNT_LADD(old_stats->cnt, new_stats->cnt);
1133 PROF_CNT_LADD(old_stats->dummy, new_stats->dummy);
1134 PROF_CNT_LADD(old_stats->old_mcount, new_stats->old_mcount);
1135 PROF_CNT_LADD(old_stats->hash_search, new_stats->hash_search);
1136 PROF_CNT_LADD(old_stats->hash_num, new_stats->hash_num);
1137 PROF_CNT_LADD(old_stats->user_ticks, new_stats->user_ticks);
1138 PROF_CNT_LADD(old_stats->kernel_ticks, new_stats->kernel_ticks);
1139 PROF_CNT_LADD(old_stats->idle_ticks, new_stats->idle_ticks);
1140 PROF_CNT_LADD(old_stats->overflow_ticks, new_stats->overflow_ticks);
1141 PROF_CNT_LADD(old_stats->acontext_locked, new_stats->acontext_locked);
1142 PROF_CNT_LADD(old_stats->too_low, new_stats->too_low);
1143 PROF_CNT_LADD(old_stats->too_high, new_stats->too_high);
1144 PROF_CNT_LADD(old_stats->prof_overflow, new_stats->prof_overflow);
1145 PROF_CNT_LADD(old_stats->gprof_overflow, new_stats->gprof_overflow);
1146
1147 for (i = 0; i < (int)ACONTEXT_MAX; i++) {
1148 if (old_stats->num_alloc[i] < new_stats->num_alloc[i])
1149 old_stats->num_alloc[i] = new_stats->num_alloc[i];
1150
1151 if (old_stats->bytes_alloc[i] < new_stats->bytes_alloc[i])
1152 old_stats->bytes_alloc[i] = new_stats->bytes_alloc[i];
1153
1154 if (old_stats->num_context[i] < new_stats->num_context[i])
1155 old_stats->num_context[i] = new_stats->num_context[i];
1156
1157 if (old_stats->wasted[i] < new_stats->wasted[i])
1158 old_stats->wasted[i] = new_stats->wasted[i];
1159
1160 if (old_stats->overhead[i] < new_stats->overhead[i])
1161 old_stats->overhead[i] = new_stats->overhead[i];
1162
1163 }
1164
1165 for (i = 0; i < MAX_BUCKETS+1; i++) {
1166 if (old_stats->buckets[i] < new_stats->buckets[i])
1167 old_stats->buckets[i] = new_stats->buckets[i];
1168 }
1169
1170 for (i = 0; i < MAX_CACHE; i++) {
1171 PROF_CNT_LADD(old_stats->cache_hits[i], new_stats->cache_hits[i]);
1172 }
1173
1174 for (i = 0; i < sizeof(old_stats->stats_unused) / sizeof(old_stats->stats_unused[0]); i++) {
1175 PROF_CNT_LADD(old_stats->stats_unused[i], new_stats->stats_unused[i]);
1176 }
1177 }
1178}
1179
1180#endif
1181
1182\f
1183/*
1184 * Invalid function address used when checking of function addresses is
1185 * desired for gprof arcs, and we discover an address out of bounds.
1186 * There should be no callers of this function.
1187 */
1188
void
_bogus_function(void)
{
	/*
	 * Intentionally empty: this is only a landing pad for gprof
	 * arcs whose function address failed validation.  It must
	 * never be called deliberately.
	 */
}