2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
36 * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez
37 * Import of Mac OS X kernel (~semeria)
39 * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez
40 * Import of OSF Mach kernel (~mburg)
42 * Revision 1.1.5.1 1995/01/06 19:53:45 devrcs
43 * mk6 CR668 - 1.3b26 merge
45 * [1994/10/12 22:25:24 dwm]
47 * Revision 1.1.2.2 1994/05/16 19:19:22 meissner
48 * Protect against hash_ptr being null in _profile_update_stats.
49 * [1994/05/16 17:23:53 meissner]
51 * Remove _profile_cnt_to_hex, _profile_strbuffer.
52 * _profile_print_stats now takes const pointers.
53 * Use the new 64-bit arithmetic support instead of converting to double.
54 * Add _profile_merge_stats to merge statistics.
55 * [1994/04/28 21:45:04 meissner]
57 * If MACH_ASSERT is on in server or kernel, turn on profiling printfs.
58 * Print out fractional digits for average # of hash searches in stats.
59 * Update overflow_ticks for # times the lprofil counter overflows into high word.
60 * Don't make sizes of C/asm structures a const array, since it has pointers in it.
61 * Add support for converting 64 bit ints to a string.
62 * Use PROF_CNT_TO_DECIMAL where possible instead of PROF_CNT_TO_LDOUBLE.
63 * [1994/04/20 15:47:02 meissner]
65 * Revision 1.1.2.1 1994/04/08 17:51:51 meissner
67 * [1994/04/08 02:11:40 meissner]
69 * Make most stats 64 bits, except for things like memory allocation.
70 * [1994/04/02 14:58:28 meissner]
72 * Add some printfs under #idef DEBUG_PROFILE.
73 * [1994/03/29 21:00:11 meissner]
75 * Further changes for gprof/prof overflow support.
76 * Add overflow support for {gprof,prof,old,dummy}_mcount counters.
77 * [1994/03/17 20:13:31 meissner]
79 * Add gprof/prof overflow support
80 * [1994/03/17 14:56:51 meissner]
82 * Use memset instead of bzero.
83 * [1994/02/28 23:56:10 meissner]
85 * Add size of histogram counters & unused fields to profile_profil struct
86 * [1994/02/17 21:41:50 meissner]
88 * Allocate slop space for server in addition to microkernel.
89 * Add 3rd argument to _profile_print_stats for profil info.
90 * Print # histogram ticks too low/too high for server/mk.
91 * [1994/02/16 22:38:18 meissner]
93 * Calculate percentages for # of hash buckets.
94 * [1994/02/11 16:52:04 meissner]
96 * Print stats as an unsigned number.
97 * [1994/02/07 18:47:05 meissner]
99 * For kernel and server, include <kern/assert.h> not <assert.h>.
100 * Always do assert on comparing asm vs. C structure sizes.
101 * Add _profile_reset to reset profiling information.
102 * Add _profile_update_stats to update the statistics.
103 * Move _gprof_write code that updates hash stats to _profile_update_stats.
104 * Don't allocate space for basic block support just yet.
105 * Add support for range checking the gprof arc {from,self}pc addresses.
106 * _profile_debug now calls _profile_update_stats.
107 * Print how many times the acontext was locked.
108 * If DEBUG_PROFILE is defined, set pv->debug to 1.
110 * [1994/02/07 12:41:03 meissner]
112 * Keep track of the number of times the kernel overflows the HISTCOUNTER counter.
113 * [1994/02/03 20:13:28 meissner]
115 * Add stats for {user,kernel,idle} mode in the kernel.
116 * [1994/02/03 15:17:31 meissner]
118 * Print unused stats in hex as well as decimal.
119 * [1994/02/03 14:52:20 meissner]
121 * _profile_print_stats no longer takes profile_{vars,md} pointer arguments.
122 * If stream is NULL, _profile_print_stats will use stdout.
123 * Separate _profile_update_stats from _gprof_write.
124 * [1994/02/03 00:58:55 meissner]
126 * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars.
127 * [1994/02/01 12:04:01 meissner]
129 * Add allocation flag to _profile_md_init.
130 * Fix core dumps in _profile_print_stats if no profile_vars ptr passed.
131 * Print numbers in 12 columns, not 8.
132 * Print my_cpu/max_cpu if max_cpu != 0.
133 * Make allocations print like other stats.
134 * Use ACONTEXT_FIRST to start loop on, not ACONTEXT_PROF.
135 * [1994/01/28 23:33:26 meissner]
137 * Move callback pointers into separate allocation context.
138 * Add size fields for other structures to profile-vars.
139 * [1994/01/26 20:23:37 meissner]
141 * Allocate initial memory at startup.
142 * Print structure sizes and version number when printing stats.
143 * Initialize size fields and version numbers.
144 * Allocation context pointers moved to _profile_vars.
145 * [1994/01/25 01:46:04 meissner]
147 * Move init code here from assembly language.
148 * [1994/01/22 01:13:21 meissner]
150 * Include <profile/profile-internal.h> instead of "profile-md.h".
151 * [1994/01/20 20:56:49 meissner]
154 * [1994/01/18 23:08:02 meissner]
156 * Rename profile.h -> profile-md.h.
157 * [1994/01/18 19:44:57 meissner]
159 * Write out stats unused fields.
160 * Make _prof_write write out the prof stats gprof collects.
161 * [1994/01/15 18:40:37 meissner]
163 * Remove debug code called from profile-asm.s.
164 * Always print out the # of profil buckets.
165 * [1994/01/15 00:59:06 meissner]
168 * [1994/01/04 16:34:46 meissner]
170 * Move max hash bucket calculation into _gprof_write & put info in stats structure.
171 * [1994/01/04 16:15:17 meissner]
173 * Use _profile_printf to write diagnostics; add diag_stream to hold stream to write to.
174 * [1994/01/04 15:37:46 meissner]
176 * Correctly handle case where more than one allocation context was
177 * allocated due to multiple threads.
178 * Cast stats to long for output.
179 * Print number of profil buckets field in _profile_stats.
180 * Add support for GFUNC allocation context.
181 * [1994/01/04 14:26:00 meissner]
183 * CR 10198 - Initial version.
184 * [1994/01/01 22:44:10 meissne
189 #include <profiling/profile-internal.h>
190 #include <vm/vm_kern.h>
194 #if defined(MACH_KERNEL) || defined(_KERNEL)
196 #include <mach_assert.h>
197 #if MACH_ASSERT && !defined(DEBUG_PROFILE)
198 #define DEBUG_PROFILE 1
203 #define panic(str) exit(1)
206 #ifndef PROFILE_NUM_FUNCS
207 #define PROFILE_NUM_FUNCS 2000
210 #ifndef PROFILE_NUM_ARCS
211 #define PROFILE_NUM_ARCS 8000
215 * Information passed on from profile-asm.s
218 extern int _profile_do_stats
;
219 extern size_t _profile_size
;
220 extern size_t _profile_stats_size
;
221 extern size_t _profile_md_size
;
222 extern size_t _profile_profil_size
;
223 extern size_t _profile_hash_size
;
226 * All profiling variables, and a dummy gprof record.
229 struct profile_vars _profile_vars
= { 0 };
230 struct hasharc _gprof_dummy
= { 0 };
233 * Forward references.
236 static void *_profile_md_acontext(struct profile_vars
*pv
,
239 acontext_type_t type
);
241 static void _profile_reset_alloc(struct profile_vars
*,
244 extern void _bogus_function(void);
248 struct profile_vars
*_profile_vars_cpus
[NCPUS
] = { &_profile_vars
};
249 struct profile_vars _profile_vars_aux
[NCPUS
-1];
250 #define PROFILE_VARS(cpu) (_profile_vars_cpus[(cpu)])
252 #define PROFILE_VARS(cpu) (&_profile_vars)
256 _profile_alloc_pages (size_t size
)
261 * For the MK, we can't support allocating pages at runtime, because we
262 * might be at interrupt level, so abort if we didn't size the table
266 if (PROFILE_VARS(0)->active
) {
267 panic("Call to _profile_alloc_pages while profiling is running.");
270 if (kmem_alloc(kernel_map
, &addr
, size
)) {
271 panic("Could not allocate memory for profiling");
274 memset((void *)addr
, '\0', size
);
275 if (PROFILE_VARS(0)->debug
) {
276 printf("Allocated %d bytes for profiling, address 0x%x\n", (int)size
, (int)addr
);
279 return((caddr_t
)addr
);
283 _profile_free_pages(void *addr
, size_t size
)
285 if (PROFILE_VARS(0)->debug
) {
286 printf("Freed %d bytes for profiling, address 0x%x\n", (int)size
, (int)addr
);
289 kmem_free(kernel_map
, (vm_offset_t
)addr
, size
);
/*
 * Error hook invoked when profiling hits an unrecoverable condition.
 * The profile_vars argument identifies the failing instance but is not
 * otherwise inspected; inside the kernel the only sane action is panic.
 */
void _profile_error(struct profile_vars *pv)
{
	panic("Fatal error in profiling");
}
300 * Function to set up the initial allocation for a context block.
304 _profile_md_acontext(struct profile_vars
*pv
,
307 acontext_type_t type
)
310 struct alloc_context context
;
311 struct page_list plist
;
315 struct memory
*mptr
= (struct memory
*)ptr
;
316 struct alloc_context
*context
= &mptr
->context
;
317 struct page_list
*plist
= &mptr
->plist
;
320 _profile_printf("_profile_md_acontext: pv= 0x%lx, ptr= 0x%lx, len= %6ld, type= %d\n",
327 /* Fill in context block header */
328 context
->next
= pv
->acontext
[type
];
329 context
->plist
= plist
;
332 /* Fill in first page list information */
333 plist
->ptr
= plist
->first
= (void *)&mptr
->data
[0];
334 plist
->next
= (struct page_list
*)0;
335 plist
->bytes_free
= len
- ((char *)plist
->ptr
- (char *)ptr
);
336 plist
->bytes_allocated
= 0;
337 plist
->num_allocations
= 0;
339 /* Update statistics */
340 pv
->stats
.num_context
[type
]++;
341 pv
->stats
.wasted
[type
] += plist
->bytes_free
;
342 pv
->stats
.overhead
[type
] += len
- plist
->bytes_free
;
344 /* And setup context block */
345 pv
->acontext
[type
] = context
;
347 return (void *)((char *)ptr
+len
);
352 * Machine dependent function to initialize things.
356 _profile_md_init(struct profile_vars
*pv
,
358 profile_alloc_mem_t alloc_mem
)
360 size_t page_size
= pv
->page_size
;
365 size_t extra_arc_size
;
366 size_t extra_func_size
;
367 size_t callback_size
= page_size
;
372 size_t c_size
; /* size C thinks structure is */
373 size_t *asm_size_ptr
; /* pointer to size asm thinks struct is */
374 const char *name
; /* structure name */
376 { sizeof(struct profile_profil
), &_profile_profil_size
, "profile_profil" },
377 { sizeof(struct profile_stats
), &_profile_stats_size
, "profile_stats" },
378 { sizeof(struct profile_md
), &_profile_md_size
, "profile_md" },
379 { sizeof(struct profile_vars
), &_profile_size
, "profile_vars" }};
382 _profile_printf("_profile_md_init: pv = 0x%lx, type = %d, alloc = %d\n",
388 for (i
= 0; i
< sizeof (sizes
) / sizeof(sizes
[0]); i
++) {
389 if (sizes
[i
].c_size
!= *sizes
[i
].asm_size_ptr
) {
390 _profile_printf("C thinks struct %s is %ld bytes, asm thinks it is %ld bytes\n",
392 (long)sizes
[i
].c_size
,
393 (long)*sizes
[i
].asm_size_ptr
);
395 panic(sizes
[i
].name
);
399 /* Figure out which function will handle compiler generated profiling */
400 if (type
== PROFILE_GPROF
) {
401 pv
->md
.save_mcount_ptr
= _gprof_mcount
;
403 } else if (type
== PROFILE_PROF
) {
404 pv
->md
.save_mcount_ptr
= _prof_mcount
;
407 pv
->md
.save_mcount_ptr
= _dummy_mcount
;
410 pv
->vars_size
= sizeof(struct profile_vars
);
411 pv
->plist_size
= sizeof(struct page_list
);
412 pv
->acontext_size
= sizeof(struct alloc_context
);
413 pv
->callback_size
= sizeof(struct callback
);
414 pv
->major_version
= PROFILE_MAJOR_VERSION
;
415 pv
->minor_version
= PROFILE_MINOR_VERSION
;
420 pv
->output_uarea
= 1;
421 pv
->output_stats
= (prof_flag_t
) _profile_do_stats
;
422 pv
->output_clock
= 1;
423 pv
->multiple_sections
= 1;
425 pv
->bogus_func
= _bogus_function
;
431 if (!pv
->error_msg
) {
432 pv
->error_msg
= "error in profiling";
435 if (!pv
->page_size
) {
436 pv
->page_size
= 4096;
439 pv
->stats
.stats_size
= sizeof(struct profile_stats
);
440 pv
->stats
.major_version
= PROFILE_MAJOR_VERSION
;
441 pv
->stats
.minor_version
= PROFILE_MINOR_VERSION
;
443 pv
->md
.md_size
= sizeof(struct profile_md
);
444 pv
->md
.major_version
= PROFILE_MAJOR_VERSION
;
445 pv
->md
.minor_version
= PROFILE_MINOR_VERSION
;
446 pv
->md
.hash_size
= _profile_hash_size
;
447 pv
->md
.num_cache
= MAX_CACHE
;
448 pv
->md
.mcount_ptr_ptr
= &_mcount_ptr
;
449 pv
->md
.dummy_ptr
= &_gprof_dummy
;
450 pv
->md
.alloc_pages
= _profile_alloc_pages
;
452 /* zero out all allocation context blocks */
453 for (ac
= ACONTEXT_FIRST
; ac
< ACONTEXT_MAX
; ac
++) {
454 pv
->acontext
[ac
] = (struct alloc_context
*)0;
457 /* Don't allocate memory if not desired */
462 /* Allocate some space for the initial allocations */
465 misc_size
= page_size
;
466 ptr
= _profile_alloc_pages(misc_size
+ callback_size
);
467 ptr
= _profile_md_acontext(pv
, ptr
, misc_size
, ACONTEXT_MISC
);
468 ptr
= _profile_md_acontext(pv
, ptr
, callback_size
, ACONTEXT_CALLBACK
);
473 #if defined(MACH_KERNEL) || defined(_KERNEL)
475 * For the MK & server allocate some slop space now for the
476 * secondary context blocks in case allocations are done at
477 * interrupt level when another allocation is being done. This
478 * is done before the main allocation blocks and will be pushed
479 * so that it will only be used when the main allocation block
482 extra_arc_size
= 4*page_size
;
483 extra_func_size
= 2*page_size
;
485 extra_arc_size
= extra_func_size
= 0;
488 /* Set up allocation areas */
489 arc_size
= ROUNDUP(PROFILE_NUM_ARCS
* sizeof(struct hasharc
), page_size
);
490 func_size
= ROUNDUP(PROFILE_NUM_FUNCS
* sizeof(struct gfuncs
), page_size
);
491 hash_size
= _profile_hash_size
* sizeof (struct hasharc
*);
492 misc_size
= ROUNDUP(hash_size
+ page_size
, page_size
);
494 ptr
= _profile_alloc_pages(arc_size
501 #if defined(MACH_KERNEL) || defined(_KERNEL)
502 ptr
= _profile_md_acontext(pv
, ptr
, extra_arc_size
, ACONTEXT_GPROF
);
503 ptr
= _profile_md_acontext(pv
, ptr
, extra_func_size
, ACONTEXT_GFUNC
);
505 ptr
= _profile_md_acontext(pv
, ptr
, arc_size
, ACONTEXT_GPROF
);
506 ptr
= _profile_md_acontext(pv
, ptr
, func_size
, ACONTEXT_GFUNC
);
507 ptr
= _profile_md_acontext(pv
, ptr
, misc_size
, ACONTEXT_MISC
);
508 ptr
= _profile_md_acontext(pv
, ptr
, callback_size
, ACONTEXT_CALLBACK
);
510 /* Allocate hash table */
511 pv
->md
.hash_ptr
= (struct hasharc
**) _profile_alloc(pv
, hash_size
, ACONTEXT_MISC
);
515 /* Set up allocation areas */
516 func_size
= ROUNDUP(PROFILE_NUM_FUNCS
* sizeof(struct prof_ext
), page_size
);
517 misc_size
= page_size
;
519 ptr
= _profile_alloc_pages(func_size
523 ptr
= _profile_md_acontext(pv
, ptr
, func_size
, ACONTEXT_PROF
);
524 ptr
= _profile_md_acontext(pv
, ptr
, misc_size
, ACONTEXT_MISC
);
525 ptr
= _profile_md_acontext(pv
, ptr
, callback_size
, ACONTEXT_CALLBACK
);
532 * Machine dependent functions to start and stop profiling.
536 _profile_md_start(void)
538 _mcount_ptr
= _profile_vars
.md
.save_mcount_ptr
;
543 _profile_md_stop(void)
545 _mcount_ptr
= _dummy_mcount
;
551 * Free up all memory in a memory context block.
555 _profile_reset_alloc(struct profile_vars
*pv
, acontext_type_t ac
)
557 struct alloc_context
*aptr
;
558 struct page_list
*plist
;
560 for (aptr
= pv
->acontext
[ac
];
561 aptr
!= (struct alloc_context
*)0;
564 for (plist
= aptr
->plist
;
565 plist
!= (struct page_list
*)0;
566 plist
= plist
->next
) {
568 plist
->ptr
= plist
->first
;
569 plist
->bytes_free
+= plist
->bytes_allocated
;
570 plist
->bytes_allocated
= 0;
571 plist
->num_allocations
= 0;
572 memset(plist
->first
, '\0', plist
->bytes_allocated
);
579 * Reset profiling. Since the only user of this function is the kernel
580 * and the server, we don't have to worry about other stuff than gprof.
584 _profile_reset(struct profile_vars
*pv
)
586 struct alloc_context
*aptr
;
587 struct page_list
*plist
;
588 struct gfuncs
*gfunc
;
594 /* Reset all function unique pointers back to 0 */
595 for (aptr
= pv
->acontext
[ACONTEXT_GFUNC
];
596 aptr
!= (struct alloc_context
*)0;
599 for (plist
= aptr
->plist
;
600 plist
!= (struct page_list
*)0;
601 plist
= plist
->next
) {
603 for (gfunc
= (struct gfuncs
*)plist
->first
;
604 gfunc
< (struct gfuncs
*)plist
->ptr
;
607 *(gfunc
->unique_ptr
) = (struct hasharc
*)0;
613 _profile_reset_alloc(pv
, ACONTEXT_GPROF
);
614 _profile_reset_alloc(pv
, ACONTEXT_GFUNC
);
615 _profile_reset_alloc(pv
, ACONTEXT_PROF
);
617 memset((void *)pv
->profil_buf
, '\0', pv
->profil_info
.profil_len
);
618 memset((void *)pv
->md
.hash_ptr
, '\0', pv
->md
.hash_size
* sizeof(struct hasharc
*));
619 memset((void *)&pv
->stats
, '\0', sizeof(pv
->stats
));
621 pv
->stats
.stats_size
= sizeof(struct profile_stats
);
622 pv
->stats
.major_version
= PROFILE_MAJOR_VERSION
;
623 pv
->stats
.minor_version
= PROFILE_MINOR_VERSION
;
632 * Machine dependent function to write out gprof records.
636 _gprof_write(struct profile_vars
*pv
, struct callback
*callback_ptr
)
638 struct alloc_context
*aptr
;
639 struct page_list
*plist
;
641 struct hasharc
*hptr
;
644 for (aptr
= pv
->acontext
[ACONTEXT_GPROF
];
645 aptr
!= (struct alloc_context
*)0;
648 for (plist
= aptr
->plist
; plist
!= (struct page_list
*)0; plist
= plist
->next
) {
649 hptr
= (struct hasharc
*)plist
->first
;
650 for (i
= 0; i
< plist
->num_allocations
; (i
++, hptr
++)) {
652 struct gprof_arc arc
= hptr
->arc
;
653 int nrecs
= 1 + (hptr
->overflow
* 2);
656 if (pv
->check_funcs
) {
657 if (arc
.frompc
< pv
->profil_info
.lowpc
||
658 arc
.frompc
> pv
->profil_info
.highpc
) {
660 arc
.frompc
= (prof_uptrint_t
)pv
->bogus_func
;
663 if (arc
.selfpc
< pv
->profil_info
.lowpc
||
664 arc
.selfpc
> pv
->profil_info
.highpc
) {
666 arc
.selfpc
= (prof_uptrint_t
)pv
->bogus_func
;
670 /* For each overflow, emit 2 extra records with the count
672 for (j
= 0; j
< nrecs
; j
++) {
673 bytes
+= sizeof (arc
);
674 if ((*pv
->fwrite_func
)((void *)&arc
,
682 arc
.count
= 0x80000000;
693 * Machine dependent function to write out prof records.
697 _prof_write(struct profile_vars
*pv
, struct callback
*callback_ptr
)
699 struct alloc_context
*aptr
;
700 struct page_list
*plist
;
702 struct prof_ext prof_st
;
703 struct prof_int
*pptr
;
708 /* Write out information prof_mcount collects */
709 for (aptr
= pv
->acontext
[ACONTEXT_PROF
];
710 aptr
!= (struct alloc_context
*)0;
713 for (plist
= aptr
->plist
; plist
!= (struct page_list
*)0; plist
= plist
->next
) {
714 pptr
= (struct prof_int
*)plist
->first
;
716 for (i
= 0; i
< plist
->num_allocations
; (i
++, pptr
++)) {
718 /* Write out 2 records for each overflow, each with a
719 count of 0x80000000 + the normal record */
720 prof_st
= pptr
->prof
;
721 nrecs
= 1 + (pptr
->overflow
* 2);
723 for (j
= 0; j
< nrecs
; j
++) {
724 bytes
+= sizeof (struct prof_ext
);
725 if ((*pv
->fwrite_func
)((void *)&prof_st
,
733 prof_st
.cncall
= 0x80000000;
739 /* Now write out the prof information that gprof collects */
740 for (aptr
= pv
->acontext
[ACONTEXT_GFUNC
];
741 aptr
!= (struct alloc_context
*)0;
744 for (plist
= aptr
->plist
; plist
!= (struct page_list
*)0; plist
= plist
->next
) {
745 gptr
= (struct gfuncs
*)plist
->first
;
747 for (i
= 0; i
< plist
->num_allocations
; (i
++, gptr
++)) {
749 /* Write out 2 records for each overflow, each with a
750 count of 0x80000000 + the normal record */
751 prof_st
= gptr
->prof
.prof
;
752 nrecs
= 1 + (gptr
->prof
.overflow
* 2);
754 for (j
= 0; j
< nrecs
; j
++) {
755 bytes
+= sizeof (struct prof_ext
);
756 if ((*pv
->fwrite_func
)((void *)&prof_st
,
764 prof_st
.cncall
= 0x80000000;
775 * Update any statistics. For the 386, calculate the hash table loading factor.
776 * Also figure out how many overflows occurred.
780 _profile_update_stats(struct profile_vars
*pv
)
782 struct alloc_context
*aptr
;
783 struct page_list
*plist
;
784 struct hasharc
*hptr
;
785 struct prof_int
*pptr
;
790 for(i
= 0; i
< MAX_BUCKETS
+1; i
++) {
791 pv
->stats
.buckets
[i
] = 0;
794 pv
->stats
.hash_buckets
= 0;
796 if (pv
->md
.hash_ptr
) {
797 for (i
= 0; i
< pv
->md
.hash_size
; i
++) {
799 struct hasharc
*hptr
;
801 for (hptr
= pv
->md
.hash_ptr
[i
]; hptr
; hptr
= hptr
->next
) {
805 pv
->stats
.buckets
[ (nbuckets
< MAX_BUCKETS
) ? nbuckets
: MAX_BUCKETS
]++;
806 if (pv
->stats
.hash_buckets
< nbuckets
) {
807 pv
->stats
.hash_buckets
= nbuckets
;
812 /* Count how many times functions are out of bounds */
813 if (pv
->check_funcs
) {
814 pv
->stats
.bogus_count
= 0;
816 for (aptr
= pv
->acontext
[ACONTEXT_GPROF
];
817 aptr
!= (struct alloc_context
*)0;
820 for (plist
= aptr
->plist
;
821 plist
!= (struct page_list
*)0;
822 plist
= plist
->next
) {
824 hptr
= (struct hasharc
*)plist
->first
;
825 for (i
= 0; i
< plist
->num_allocations
; (i
++, hptr
++)) {
827 if (hptr
->arc
.frompc
< pv
->profil_info
.lowpc
||
828 hptr
->arc
.frompc
> pv
->profil_info
.highpc
) {
829 pv
->stats
.bogus_count
++;
832 if (hptr
->arc
.selfpc
< pv
->profil_info
.lowpc
||
833 hptr
->arc
.selfpc
> pv
->profil_info
.highpc
) {
834 pv
->stats
.bogus_count
++;
841 /* Figure out how many overflows occurred */
842 PROF_ULONG_TO_CNT(pv
->stats
.prof_overflow
, 0);
843 PROF_ULONG_TO_CNT(pv
->stats
.gprof_overflow
, 0);
845 for (aptr
= pv
->acontext
[ACONTEXT_GPROF
];
846 aptr
!= (struct alloc_context
*)0;
849 for (plist
= aptr
->plist
;
850 plist
!= (struct page_list
*)0;
851 plist
= plist
->next
) {
853 hptr
= (struct hasharc
*)plist
->first
;
854 for (i
= 0; i
< plist
->num_allocations
; (i
++, hptr
++)) {
855 PROF_CNT_ADD(pv
->stats
.gprof_overflow
, hptr
->overflow
);
860 for (aptr
= pv
->acontext
[ACONTEXT_PROF
];
861 aptr
!= (struct alloc_context
*)0;
864 for (plist
= aptr
->plist
;
865 plist
!= (struct page_list
*)0;
866 plist
= plist
->next
) {
868 pptr
= (struct prof_int
*)plist
->first
;
869 for (i
= 0; i
< plist
->num_allocations
; (i
++, pptr
++)) {
870 PROF_CNT_ADD(pv
->stats
.prof_overflow
, pptr
->overflow
);
875 for (aptr
= pv
->acontext
[ACONTEXT_GFUNC
];
876 aptr
!= (struct alloc_context
*)0;
879 for (plist
= aptr
->plist
;
880 plist
!= (struct page_list
*)0;
881 plist
= plist
->next
) {
883 fptr
= (struct gfuncs
*)plist
->first
;
884 for (i
= 0; i
< plist
->num_allocations
; (i
++, fptr
++)) {
885 PROF_CNT_ADD(pv
->stats
.prof_overflow
, fptr
->prof
.overflow
);
890 /* Now go through & count how many times the LHISTCOUNTER overflowed into a 2nd word */
891 lptr
= (LHISTCOUNTER
*)pv
->profil_buf
;
893 if (pv
->use_profil
&&
894 pv
->profil_info
.counter_size
== sizeof(LHISTCOUNTER
) &&
895 lptr
!= (LHISTCOUNTER
*)0) {
897 PROF_ULONG_TO_CNT(pv
->stats
.overflow_ticks
, 0);
898 for (i
= 0; i
< pv
->stats
.profil_buckets
; i
++) {
899 PROF_CNT_ADD(pv
->stats
.overflow_ticks
, lptr
[i
].high
);
904 #if !defined(_KERNEL) && !defined(MACH_KERNEL)
907 * Routine callable from the debugger that prints the statistics.
910 int _profile_debug(void)
912 _profile_update_stats(&_profile_vars
);
913 _profile_print_stats(stderr
, &_profile_vars
.stats
, &_profile_vars
.profil_info
);
918 * Print the statistics structure in a meaningful way.
921 void _profile_print_stats(FILE *stream
,
922 const struct profile_stats
*stats
,
923 const struct profile_profil
*pinfo
)
926 prof_cnt_t total_hits
;
930 int width_wasted
= 0;
931 int width_overhead
= 0;
932 int width_context
= 0;
933 static const char *cname
[ACONTEXT_MAX
] = ACONTEXT_NAMES
;
944 sprintf(buf
, "%ld.%ld", (long)stats
->major_version
, (long)stats
->minor_version
);
945 fprintf(stream
, "%12s profiling version number\n", buf
);
946 fprintf(stream
, "%12lu size of profile_vars\n", (long unsigned)sizeof(struct profile_vars
));
947 fprintf(stream
, "%12lu size of profile_stats\n", (long unsigned)sizeof(struct profile_stats
));
948 fprintf(stream
, "%12lu size of profile_md\n", (long unsigned)sizeof(struct profile_md
));
949 fprintf(stream
, "%12s calls to _{,g}prof_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->cnt
));
950 fprintf(stream
, "%12s calls to old mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->old_mcount
));
951 fprintf(stream
, "%12s calls to _dummy_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->dummy
));
952 fprintf(stream
, "%12lu functions profiled\n", (long unsigned)stats
->prof_records
);
953 fprintf(stream
, "%12lu gprof arcs\n", (long unsigned)stats
->gprof_records
);
956 fprintf(stream
, "%12lu profil buckets\n", (long unsigned)stats
->profil_buckets
);
957 fprintf(stream
, "%12lu profil lowpc [0x%lx]\n",
958 (long unsigned)pinfo
->lowpc
,
959 (long unsigned)pinfo
->lowpc
);
961 fprintf(stream
, "%12lu profil highpc [0x%lx]\n",
962 (long unsigned)pinfo
->highpc
,
963 (long unsigned)pinfo
->highpc
);
965 fprintf(stream
, "%12lu profil highpc-lowpc\n", (long unsigned)(pinfo
->highpc
- pinfo
->lowpc
));
966 fprintf(stream
, "%12lu profil buffer length\n", (long unsigned)pinfo
->profil_len
);
967 fprintf(stream
, "%12lu profil sizeof counters\n", (long unsigned)pinfo
->counter_size
);
968 fprintf(stream
, "%12lu profil scale (%g)\n",
969 (long unsigned)pinfo
->scale
,
970 ((double)pinfo
->scale
) / ((double) 0x10000));
973 for (i
= 0; i
< sizeof (pinfo
->profil_unused
) / sizeof (pinfo
->profil_unused
[0]); i
++) {
974 if (pinfo
->profil_unused
[i
]) {
975 fprintf(stream
, "%12lu profil unused[%2d] {0x%.8lx}\n",
976 (long unsigned)pinfo
->profil_unused
[i
],
978 (long unsigned)pinfo
->profil_unused
[i
]);
983 if (stats
->max_cpu
) {
984 fprintf(stream
, "%12lu current cpu/thread\n", (long unsigned)stats
->my_cpu
);
985 fprintf(stream
, "%12lu max cpu/thread+1\n", (long unsigned)stats
->max_cpu
);
988 if (stats
->bogus_count
!= 0) {
990 "%12lu gprof functions found outside of range\n",
991 (long unsigned)stats
->bogus_count
);
994 if (PROF_CNT_NE_0(stats
->too_low
)) {
996 "%12s histogram ticks were too low\n",
997 PROF_CNT_TO_DECIMAL((char *)0, stats
->too_low
));
1000 if (PROF_CNT_NE_0(stats
->too_high
)) {
1002 "%12s histogram ticks were too high\n",
1003 PROF_CNT_TO_DECIMAL((char *)0, stats
->too_high
));
1006 if (PROF_CNT_NE_0(stats
->acontext_locked
)) {
1008 "%12s times an allocation context was locked\n",
1009 PROF_CNT_TO_DECIMAL((char *)0, stats
->acontext_locked
));
1012 if (PROF_CNT_NE_0(stats
->kernel_ticks
)
1013 || PROF_CNT_NE_0(stats
->user_ticks
)
1014 || PROF_CNT_NE_0(stats
->idle_ticks
)) {
1016 prof_cnt_t total_ticks
;
1017 long double total_ticks_dbl
;
1019 total_ticks
= stats
->kernel_ticks
;
1020 PROF_CNT_LADD(total_ticks
, stats
->user_ticks
);
1021 PROF_CNT_LADD(total_ticks
, stats
->idle_ticks
);
1022 total_ticks_dbl
= PROF_CNT_TO_LDOUBLE(total_ticks
);
1025 "%12s total ticks\n",
1026 PROF_CNT_TO_DECIMAL((char *)0, total_ticks
));
1029 "%12s ticks within the kernel (%5.2Lf%%)\n",
1030 PROF_CNT_TO_DECIMAL((char *)0, stats
->kernel_ticks
),
1031 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->kernel_ticks
) / total_ticks_dbl
));
1034 "%12s ticks within user space (%5.2Lf%%)\n",
1035 PROF_CNT_TO_DECIMAL((char *)0, stats
->user_ticks
),
1036 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->user_ticks
) / total_ticks_dbl
));
1039 "%12s ticks idle (%5.2Lf%%)\n",
1040 PROF_CNT_TO_DECIMAL((char *)0, stats
->idle_ticks
),
1041 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->idle_ticks
) / total_ticks_dbl
));
1044 if (PROF_CNT_NE_0(stats
->overflow_ticks
)) {
1045 fprintf(stream
, "%12s times a HISTCOUNTER counter would have overflowed\n",
1046 PROF_CNT_TO_DECIMAL((char *)0, stats
->overflow_ticks
));
1049 if (PROF_CNT_NE_0(stats
->hash_num
)) {
1050 long double total_buckets
= 0.0L;
1052 for (i
= 0; i
<= MAX_BUCKETS
; i
++) {
1053 total_buckets
+= (long double)stats
->buckets
[i
];
1056 fprintf(stream
, "%12lu max bucket(s) on hash chain.\n", (long unsigned)stats
->hash_buckets
);
1057 for (i
= 0; i
< MAX_BUCKETS
; i
++) {
1058 if (stats
->buckets
[i
] != 0) {
1059 fprintf(stream
, "%12lu bucket(s) had %d entries (%5.2Lf%%)\n",
1060 (long unsigned)stats
->buckets
[i
], i
,
1061 100.0L * ((long double)stats
->buckets
[i
] / total_buckets
));
1065 if (stats
->buckets
[MAX_BUCKETS
] != 0) {
1066 fprintf(stream
, "%12lu bucket(s) had more than %d entries (%5.2Lf%%)\n",
1067 (long unsigned)stats
->buckets
[MAX_BUCKETS
], MAX_BUCKETS
,
1068 100.0L * ((long double)stats
->buckets
[MAX_BUCKETS
] / total_buckets
));
1072 PROF_ULONG_TO_CNT(total_hits
, 0);
1073 for (i
= 0; i
< MAX_CACHE
; i
++) {
1074 PROF_CNT_LADD(total_hits
, stats
->cache_hits
[i
]);
1077 if (PROF_CNT_NE_0(total_hits
)) {
1078 long double total
= PROF_CNT_TO_LDOUBLE(stats
->cnt
);
1079 long double total_hits_dbl
= PROF_CNT_TO_LDOUBLE(total_hits
);
1082 "%12s cache hits (%.2Lf%%)\n",
1083 PROF_CNT_TO_DECIMAL((char *)0, total_hits
),
1084 100.0L * (total_hits_dbl
/ total
));
1086 for (i
= 0; i
< MAX_CACHE
; i
++) {
1087 if (PROF_CNT_NE_0(stats
->cache_hits
[i
])) {
1089 "%12s times cache#%d matched (%5.2Lf%% of cache hits, %5.2Lf%% total)\n",
1090 PROF_CNT_TO_DECIMAL((char *)0, stats
->cache_hits
[i
]),
1092 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->cache_hits
[i
]) / total_hits_dbl
),
1093 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->cache_hits
[i
]) / total
));
1097 if (PROF_CNT_NE_0(stats
->hash_num
)) {
1098 fprintf(stream
, "%12s times hash table searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->hash_num
));
1099 fprintf(stream
, "%12s hash buckets searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->hash_search
));
1100 fprintf(stream
, "%12.4Lf average buckets searched\n",
1101 PROF_CNT_TO_LDOUBLE(stats
->hash_search
) / PROF_CNT_TO_LDOUBLE(stats
->hash_num
));
1105 for (i
= 0; i
< sizeof (stats
->stats_unused
) / sizeof (stats
->stats_unused
[0]); i
++) {
1106 if (PROF_CNT_NE_0(stats
->stats_unused
[i
])) {
1107 fprintf(stream
, "%12s unused[%2d] {0x%.8lx 0x%.8lx}\n",
1108 PROF_CNT_TO_DECIMAL((char *)0, stats
->stats_unused
[i
]),
1110 (unsigned long)stats
->stats_unused
[i
].high
,
1111 (unsigned long)stats
->stats_unused
[i
].low
);
1115 /* Get the width for the allocation contexts */
1116 for (ac
= ACONTEXT_FIRST
; ac
< ACONTEXT_MAX
; ac
++) {
1119 if (stats
->num_context
[ac
] == 0) {
1123 len
= strlen (cname
[ac
]);
1124 if (len
> width_cname
)
1127 len
= sprintf (buf
, "%lu", (long unsigned)stats
->num_alloc
[ac
]);
1128 if (len
> width_alloc
)
1131 len
= sprintf (buf
, "%lu", (long unsigned)stats
->wasted
[ac
]);
1132 if (len
> width_wasted
)
1135 len
= sprintf (buf
, "%lu", (long unsigned)stats
->overhead
[ac
]);
1136 if (len
> width_overhead
)
1137 width_overhead
= len
;
1139 len
= sprintf (buf
, "%lu", (long unsigned)stats
->num_context
[ac
]);
1140 if (len
> width_context
)
1141 width_context
= len
;
1144 /* Print info about allocation contexts */
1145 for (ac
= ACONTEXT_FIRST
; ac
< ACONTEXT_MAX
; ac
++) {
1146 if (stats
->num_context
[ac
] == 0) {
1151 "%12lu bytes in %-*s %*lu alloc, %*lu unused, %*lu over, %*lu context\n",
1152 (long unsigned)stats
->bytes_alloc
[ac
],
1153 width_cname
, cname
[ac
],
1154 width_alloc
, (long unsigned)stats
->num_alloc
[ac
],
1155 width_wasted
, (long unsigned)stats
->wasted
[ac
],
1156 width_overhead
, (long unsigned)stats
->overhead
[ac
],
1157 width_context
, (long unsigned)stats
->num_context
[ac
]);
1163 * Merge a new statistics field into an old one.
1166 void _profile_merge_stats(struct profile_stats
*old_stats
, const struct profile_stats
*new_stats
)
1170 /* If nothing passed, just return */
1171 if (!old_stats
|| !new_stats
)
1174 /* If the old_stats has not been initialized, just copy in the new stats */
1175 if (old_stats
->major_version
== 0) {
1176 *old_stats
= *new_stats
;
1178 /* Otherwise, update stats, field by field */
1180 if (old_stats
->prof_records
< new_stats
->prof_records
)
1181 old_stats
->prof_records
= new_stats
->prof_records
;
1183 if (old_stats
->gprof_records
< new_stats
->gprof_records
)
1184 old_stats
->gprof_records
= new_stats
->gprof_records
;
1186 if (old_stats
->hash_buckets
< new_stats
->hash_buckets
)
1187 old_stats
->hash_buckets
= new_stats
->hash_buckets
;
1189 if (old_stats
->bogus_count
< new_stats
->bogus_count
)
1190 old_stats
->bogus_count
= new_stats
->bogus_count
;
1192 PROF_CNT_LADD(old_stats
->cnt
, new_stats
->cnt
);
1193 PROF_CNT_LADD(old_stats
->dummy
, new_stats
->dummy
);
1194 PROF_CNT_LADD(old_stats
->old_mcount
, new_stats
->old_mcount
);
1195 PROF_CNT_LADD(old_stats
->hash_search
, new_stats
->hash_search
);
1196 PROF_CNT_LADD(old_stats
->hash_num
, new_stats
->hash_num
);
1197 PROF_CNT_LADD(old_stats
->user_ticks
, new_stats
->user_ticks
);
1198 PROF_CNT_LADD(old_stats
->kernel_ticks
, new_stats
->kernel_ticks
);
1199 PROF_CNT_LADD(old_stats
->idle_ticks
, new_stats
->idle_ticks
);
1200 PROF_CNT_LADD(old_stats
->overflow_ticks
, new_stats
->overflow_ticks
);
1201 PROF_CNT_LADD(old_stats
->acontext_locked
, new_stats
->acontext_locked
);
1202 PROF_CNT_LADD(old_stats
->too_low
, new_stats
->too_low
);
1203 PROF_CNT_LADD(old_stats
->too_high
, new_stats
->too_high
);
1204 PROF_CNT_LADD(old_stats
->prof_overflow
, new_stats
->prof_overflow
);
1205 PROF_CNT_LADD(old_stats
->gprof_overflow
, new_stats
->gprof_overflow
);
1207 for (i
= 0; i
< (int)ACONTEXT_MAX
; i
++) {
1208 if (old_stats
->num_alloc
[i
] < new_stats
->num_alloc
[i
])
1209 old_stats
->num_alloc
[i
] = new_stats
->num_alloc
[i
];
1211 if (old_stats
->bytes_alloc
[i
] < new_stats
->bytes_alloc
[i
])
1212 old_stats
->bytes_alloc
[i
] = new_stats
->bytes_alloc
[i
];
1214 if (old_stats
->num_context
[i
] < new_stats
->num_context
[i
])
1215 old_stats
->num_context
[i
] = new_stats
->num_context
[i
];
1217 if (old_stats
->wasted
[i
] < new_stats
->wasted
[i
])
1218 old_stats
->wasted
[i
] = new_stats
->wasted
[i
];
1220 if (old_stats
->overhead
[i
] < new_stats
->overhead
[i
])
1221 old_stats
->overhead
[i
] = new_stats
->overhead
[i
];
1225 for (i
= 0; i
< MAX_BUCKETS
+1; i
++) {
1226 if (old_stats
->buckets
[i
] < new_stats
->buckets
[i
])
1227 old_stats
->buckets
[i
] = new_stats
->buckets
[i
];
1230 for (i
= 0; i
< MAX_CACHE
; i
++) {
1231 PROF_CNT_LADD(old_stats
->cache_hits
[i
], new_stats
->cache_hits
[i
]);
1234 for (i
= 0; i
< sizeof(old_stats
->stats_unused
) / sizeof(old_stats
->stats_unused
[0]); i
++) {
1235 PROF_CNT_LADD(old_stats
->stats_unused
[i
], new_stats
->stats_unused
[i
]);
1244 * Invalid function address used when checking of function addresses is
1245 * desired for gprof arcs, and we discover an address out of bounds.
1246 * There should be no callers of this function.
1250 _bogus_function(void)