]>
Commit | Line | Data |
---|---|---|
1c79356b A |
1 | /* |
2 | * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
6601e61a | 4 | * @APPLE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
6601e61a A |
6 | * The contents of this file constitute Original Code as defined in and |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
8f6c56a5 | 11 | * |
6601e61a A |
12 | * This Original Code and all software distributed under the License are |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
6601e61a A |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
8f6c56a5 | 19 | * |
6601e61a | 20 | * @APPLE_LICENSE_HEADER_END@ |
1c79356b A |
21 | */ |
22 | /* | |
23 | * @OSF_COPYRIGHT@ | |
24 | */ | |
25 | /* | |
26 | * HISTORY | |
27 | * | |
28 | * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez | |
29 | * Import of Mac OS X kernel (~semeria) | |
30 | * | |
31 | * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez | |
32 | * Import of OSF Mach kernel (~mburg) | |
33 | * | |
34 | * Revision 1.1.7.1 1997/09/22 17:41:24 barbou | |
35 | * MP+RT: protect cpu_number() usage against preemption. | |
36 | * [97/09/16 barbou] | |
37 | * | |
38 | * Revision 1.1.5.1 1995/01/06 19:53:37 devrcs | |
39 | * mk6 CR668 - 1.3b26 merge | |
40 | * new file for mk6 | |
41 | * [1994/10/12 22:25:20 dwm] | |
42 | * | |
43 | * Revision 1.1.2.2 1994/05/16 19:19:17 meissner | |
44 | * Add support for converting 64-bit integers to a decimal string. | |
45 | * Use the correct address (selfpc) when creating the prof header for gprof. | |
46 | * [1994/04/28 21:44:59 meissner] | |
47 | * | |
48 | * Revision 1.1.2.1 1994/04/08 17:51:42 meissner | |
49 | * Make most stats 64 bits, except for things like memory allocation. | |
50 | * [1994/04/02 14:58:21 meissner] | |
51 | * | |
52 | * Do not provide old mcount support under MK or server. | |
53 | * Fixup stats size so it is the same as in profile-md.h. | |
54 | * [1994/03/29 21:00:03 meissner] | |
55 | * | |
56 | * Use faster sequence for overflow addition. | |
57 | * Keep {dummy,prof,gprof,old}_mcount counts in double precision. | |
58 | * Add kernel NCPUS > 1 support. | |
59 | * [1994/03/17 20:13:23 meissner] | |
60 | * | |
61 | * Add gprof/prof overflow support | |
62 | * [1994/03/17 14:56:44 meissner] | |
63 | * | |
64 | * Add size of histogram counters & unused fields to profile_profil struct | |
65 | * [1994/02/17 21:41:44 meissner] | |
66 | * | |
67 | * Add too_low/too_high to profile_stats. | |
68 | * [1994/02/16 22:38:11 meissner] | |
69 | * | |
70 | * Bump # allocation contexts to 32 from 16. | |
71 | * Store unique ptr address in gprof function header structure for _profile_reset. | |
72 | * Add new fields from profile-{internal,md}.h. | |
73 | * Align loop looking for an unlocked acontext. | |
74 | * Count # times a locked context block was found. | |
75 | * Expand copyright. | |
76 | * [1994/02/07 12:40:56 meissner] | |
77 | * | |
78 | * Keep track of the number of times the kernel overflows the HISTCOUNTER counter. | |
79 | * [1994/02/03 20:13:23 meissner] | |
80 | * | |
81 | * Add stats for {user,kernel,idle} mode in the kernel. | |
82 | * [1994/02/03 15:17:22 meissner] | |
83 | * | |
84 | * No change. | |
85 | * [1994/02/03 00:58:49 meissner] | |
86 | * | |
87 | * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars. | |
88 | * [1994/02/01 12:03:56 meissner] | |
89 | * | |
90 | * Move _mcount_ptr to be closer to other data declarations. | |
91 | * Add text_len to profile_profil structure for mk. | |
92 | * Split records_cnt into prof_cnt/gprof_cnt. | |
93 | * Always update prof_cnt/gprof_cnt even if not DO_STATS. | |
94 | * Add current/max cpu indicator to stats for kernel. | |
95 | * [1994/01/28 23:33:20 meissner] | |
96 | * | |
97 | * Don't do 4+Lgotoff(lab), use separate labels. | |
98 | * Change GPROF_HASH_SHIFT to 9 (from 8). | |
99 | * [1994/01/26 22:00:59 meissner] | |
100 | * | |
101 | * Fixup NO_RECURSIVE_ALLOC to do byte loads, not word loads. | |
102 | * [1994/01/26 20:30:57 meissner] | |
103 | * | |
104 | * Move callback pointers into separate allocation context. | |
105 | * Add size fields for other structures to profile-vars. | |
106 | * Allocate string table as one large allocation. | |
107 | * Rewrite old mcount code once again. | |
108 | * Use multiply to make hash value, not divide. | |
109 | * Hash table is now a power of two. | |
110 | * [1994/01/26 20:23:32 meissner] | |
111 | * | |
112 | * Cut hash table size back to 16189. | |
113 | * Add size fields to all structures. | |
114 | * Add major/minor version number to _profile_md. | |
115 | * Move allocation context block pointers to _profile_vars. | |
116 | * Move _gprof_dummy after _profile_md. | |
117 | * New function header code now falls into hash an element | |
118 | * to avoid having the hash code duplicated or use a macro. | |
119 | * Fix bug in _gprof_mcount with ELF shared libraries. | |
120 | * [1994/01/25 01:45:59 meissner] | |
121 | * | |
122 | * Move init functions to C code; rearrange profil varaibles. | |
123 | * [1994/01/22 01:11:14 meissner] | |
124 | * | |
125 | * No change. | |
126 | * [1994/01/20 20:56:43 meissner] | |
127 | * | |
128 | * Fixup copyright. | |
129 | * [1994/01/18 23:07:39 meissner] | |
130 | * | |
131 | * Make flags byte-sized. | |
132 | * Add have_bb flag. | |
133 | * Add init_format flag. | |
134 | * Always put word size multipler first in .space. | |
135 | * [1994/01/18 21:57:14 meissner] | |
136 | * | |
137 | * Fix elfpic problems in last change. | |
138 | * [1994/01/16 14:04:26 meissner] | |
139 | * | |
140 | * Rewrite gprof caching to be faster & not need a lock. | |
141 | * Record prof information for gprof too. | |
142 | * Bump reserved stats to 64. | |
143 | * Bump up hash table size 30799. | |
144 | * Conditionally use lock prefix. | |
145 | * Change most #ifdef's to #if. | |
146 | * DEBUG_PROFILE turns on stack frames now. | |
147 | * Conditionally add externs to gprof to determine where time is spent. | |
148 | * Prof_mcount uses xchgl to update function pointer. | |
149 | * [1994/01/15 18:40:33 meissner] | |
150 | * | |
151 | * Fix a comment. | |
152 | * Separate statistics from debugging (though debugging turns it on). | |
153 | * Remove debug code that traces each gprof request. | |
154 | * [1994/01/15 00:59:02 meissner] | |
155 | * | |
156 | * Move max hash bucket calculation into _gprof_write & put info in stats structure. | |
157 | * [1994/01/04 16:15:14 meissner] | |
158 | * | |
159 | * Use _profile_printf to write diagnostics; add diag_stream to hold stream to write to. | |
160 | * [1994/01/04 15:37:44 meissner] | |
161 | * | |
162 | * Add more allocation memory pools (gprof function hdrs in particular). | |
163 | * For prof, gprof arc, and gprof function hdrs, allocate 16 pages at a time. | |
164 | * Add major/minor version numbers to _profile_{vars,stats}. | |
165 | * Add # profil buckets field to _profil_stats. | |
166 | * [19 | |
167 | * | |
168 | * $EndLog$ | |
169 | */ | |
170 | ||
/*
 * Common 386 profiling module that is shared between the kernel, mach
 * servers, and the user space library.  Each environment includes
 * this file.
 */

	.file	"profile-asm.s"

#include <machine/asm.h>

/*
 * By default, debugging turns on statistics and stack frames.
 */

#if DEBUG_PROFILE
#ifndef DO_STATS
#define DO_STATS 1
#endif

#ifndef STACK_FRAMES
#define STACK_FRAMES 1
#endif
#endif

#ifndef OLD_MCOUNT
#define OLD_MCOUNT 0			/* do not compile old code for mcount */
#endif

#ifndef DO_STATS
#define DO_STATS 1			/* compile in statistics code */
#endif

#ifndef DO_LOCK
#define DO_LOCK 0			/* use lock; in front of increments */
#endif

#ifndef LOCK_STATS
#define LOCK_STATS DO_LOCK		/* update stats with lock set */
#endif

#ifndef STACK_FRAMES
#define STACK_FRAMES 0			/* create stack frames for debugger */
#endif

#ifndef NO_RECURSIVE_ALLOC
#define NO_RECURSIVE_ALLOC 0		/* check for recursive allocs */
					/* (not thread safe!) */
#endif

#ifndef MARK_GPROF
#define MARK_GPROF 0			/* add externs for gprof profiling */
#endif

#ifndef OVERFLOW
#define OVERFLOW 1			/* add overflow checking support */
#endif

/*
 * Turn on the use of the lock prefix if desired.
 */

#ifndef LOCK
#if DO_LOCK
#define LOCK lock;
#else
#define LOCK
#endif
#endif

#ifndef SLOCK
#if LOCK_STATS
#define SLOCK LOCK
#else
#define SLOCK
#endif
#endif
247 | ||
/*
 * Double or single precision incrementing.
 *
 * The D* forms bump 64-bit counters (low word add, then adc of the carry
 * into the high word); the S* forms are the same but honor LOCK_STATS
 * via SLOCK.  SDADDNEG subtracts a negative delta and folds the carry in,
 * which is the faster sequence for overflow addition (see HISTORY).
 * When OVERFLOW is off, counters are plain 32-bit increments.
 */

#if OVERFLOW
#define DINC(mem)		LOCK addl $1,mem; LOCK adcl $0,4+mem
#define DINC2(mem,mem2)		LOCK addl $1,mem; LOCK adcl $0,mem2
#define SDINC(mem)		SLOCK addl $1,mem; SLOCK adcl $0,4+mem
#define SDADD(val,mem)		SLOCK addl val,mem; SLOCK adcl $0,4+mem
#define SDADDNEG(val,mem)	SLOCK subl val,mem; SLOCK adcl $0,4+mem
#define SDSUB(val,mem)		SLOCK subl val,mem; SLOCK sbbl $0,4+mem

#else
#define DINC(mem)		LOCK incl mem
#define DINC2(mem,mem2)		LOCK incl mem
#define SDINC(mem)		SLOCK incl mem
#define SDADD(val,mem)		SLOCK addl val,mem
#define SDADDNEG(val,mem)	SLOCK subl val,mem
#define SDSUB(val,mem)		SLOCK subl val,mem
#endif

/*
 * Stack frame support so that debugger traceback works.
 * Estack is the number of extra stack bytes ENTER pushes.
 */

#if STACK_FRAMES
#define	ENTER	pushl %ebp; movl %esp,%ebp
#define	LEAVE0	popl %ebp
#define	Estack	4
#else
#define	ENTER
#define	LEAVE0
#define	Estack	0
#endif

/*
 * Gprof profiling: optionally emit a global marker label so that time
 * spent inside the profiling code itself shows up under gprof.
 */

#if MARK_GPROF
#define MARK(name) .globl EXT(name); ELF_FUNC(EXT(name)); ELF_SIZE(EXT(name),0); LEXT(name)
#else
#define MARK(name)
#endif
292 | ||
/*
 * Profiling allocation context block.  Each time memory is needed, the
 * allocator loops until it finds an unlocked context block, and allocates
 * from that block.  If no context blocks are available, a new memory
 * pool is allocated, and added to the end of the chain.
 *
 * Field offsets below must match the corresponding C structures in
 * profile-internal.h / profile-md.h.
 */

LCL(A_next)		= 0			/* next context block link (must be 0) */
LCL(A_plist)		= LCL(A_next)+4		/* head of page list for context block */
LCL(A_lock)		= LCL(A_plist)+4	/* lock word */
LCL(A_size)		= LCL(A_lock)+4		/* size of context block */

#define A_next		LCL(A_next)
#define A_plist		LCL(A_plist)
#define A_lock		LCL(A_lock)
#define A_size		LCL(A_size)

/*
 * Allocation contexts used.
 */

LCL(C_prof)		= 0			/* prof records */
LCL(C_gprof)		= 1			/* gprof arc records */
LCL(C_gfunc)		= 2			/* gprof function headers */
LCL(C_misc)		= 3			/* misc. allocations */
LCL(C_profil)		= 4			/* memory for profil */
LCL(C_dci)		= 5			/* memory for dci */
LCL(C_bb)		= 6			/* memory for basic blocks */
LCL(C_callback)		= 7			/* memory for callbacks */
LCL(C_max)		= 32			/* # allocation contexts */

#define C_prof		LCL(C_prof)
#define C_gprof		LCL(C_gprof)
#define C_gfunc		LCL(C_gfunc)
#define C_max		LCL(C_max)

/*
 * Linked list of memory allocations (header at the front of each pool page).
 */

LCL(M_first)		= 0			/* pointer to first byte available */
LCL(M_ptr)		= LCL(M_first)+4	/* pointer to next available byte */
LCL(M_next)		= LCL(M_ptr)+4		/* next page allocated */
LCL(M_nfree)		= LCL(M_next)+4		/* # bytes available */
LCL(M_nalloc)		= LCL(M_nfree)+4	/* # bytes allocated */
LCL(M_num)		= LCL(M_nalloc)+4	/* # allocations done on this page */
LCL(M_size)		= LCL(M_num)+4		/* size of page header */

#define M_first		LCL(M_first)
#define M_ptr		LCL(M_ptr)
#define M_next		LCL(M_next)
#define M_nfree		LCL(M_nfree)
#define M_nalloc	LCL(M_nalloc)
#define M_num		LCL(M_num)
#define M_size		LCL(M_size)

/*
 * Prof data type.
 */

LCL(P_addr)		= 0			/* function address */
LCL(P_count)		= LCL(P_addr)+4		/* # times function called */
LCL(P_overflow)		= LCL(P_count)+4	/* # times count overflowed */
LCL(P_size)		= LCL(P_overflow)+4	/* size of prof data type */

#define P_addr		LCL(P_addr)
#define P_count		LCL(P_count)
#define P_overflow	LCL(P_overflow)
#define P_size		LCL(P_size)

/*
 * Gprof data type (one record per caller/callee arc).
 */

LCL(G_next)		= 0			/* next hash link (must be 0) */
LCL(G_frompc)		= LCL(G_next)+4		/* caller's caller */
LCL(G_selfpc)		= LCL(G_frompc)+4	/* caller's address */
LCL(G_count)		= LCL(G_selfpc)+4	/* # times arc traversed */
LCL(G_overflow)		= LCL(G_count)+4	/* # times count overflowed */
LCL(G_size)		= LCL(G_overflow)+4	/* size of gprof data type */

#define G_next		LCL(G_next)
#define G_frompc	LCL(G_frompc)
#define G_selfpc	LCL(G_selfpc)
#define G_count		LCL(G_count)
#define G_overflow	LCL(G_overflow)
#define G_size		LCL(G_size)

/*
 * Gprof header.
 *
 * At least one header is allocated for each unique function that is profiled.
 * In order to save time calculating the hash value, the last H_maxcache
 * distinct arcs are cached within this structure.  Also, to avoid loading
 * the GOT when searching the hash table, we copy the hash pointer to this
 * structure, so that we only load the GOT when we need to allocate an arc.
 */

LCL(H_maxcache)		= 3			/* # of cache table entries */
LCL(H_csize)		= 4*LCL(H_maxcache)	/* size of each cache array */

LCL(H_hash_ptr)		= 0			/* hash table to use */
LCL(H_unique_ptr)	= LCL(H_hash_ptr)+4	/* function unique pointer */
LCL(H_prof)		= LCL(H_unique_ptr)+4	/* prof statistics */
LCL(H_cache_ptr)	= LCL(H_prof)+P_size	/* cache table of element pointers */
LCL(H_size)		= LCL(H_cache_ptr)+LCL(H_csize)	/* size of gprof header type */

#define H_maxcache	LCL(H_maxcache)
#define H_csize		LCL(H_csize)
#define H_hash_ptr	LCL(H_hash_ptr)
#define H_unique_ptr	LCL(H_unique_ptr)
#define H_prof		LCL(H_prof)
#define H_cache_ptr	LCL(H_cache_ptr)
#define H_size		LCL(H_size)

/*
 * Number of digits needed to write a 64 bit number including trailing null.
 * (rounded up to be divisible by 4).
 */

#define N_digit		24
414 | ||
/*
 * Default gprof hash table size, which must be a power of two.
 * The shift specifies how many low order bits to eliminate when
 * calculating the hash value.
 */

	.data

#ifndef GPROF_HASH_SIZE
#define GPROF_HASH_SIZE 16384
#endif

#ifndef GPROF_HASH_SHIFT
#define GPROF_HASH_SHIFT 9
#endif

#define GPROF_HASH_MASK (GPROF_HASH_SIZE-1)

/* Exported so C code can size its view of the hash table. */
DATA(_profile_hash_size)
	.long	GPROF_HASH_SIZE
ENDDATA(_profile_hash_size)
437 | ||
438 | \f | |
439 | ||
440 | /* | |
441 | * Pointer that the compiler uses to call to the appropriate mcount function. | |
442 | */ | |
443 | ||
444 | DATA(_mcount_ptr) | |
445 | .long EXT(_dummy_mcount) | |
446 | ENDDATA(_mcount_ptr) | |
447 | ||
448 | /* | |
449 | * Global profile variables. The structure that accesses this in C is declared | |
450 | * in profile-internal.h. All items in .data that follow this will be used as | |
451 | * one giant record, and each unique machine, thread, kgmon output or what have | |
452 | * you will create a separate instance. Typically there is only one instance | |
453 | * which will be the memory laid out below. | |
454 | */ | |
455 | ||
456 | LCL(var_major_version) = 0 /* major version number */ | |
457 | LCL(var_minor_version) = LCL(var_major_version)+4 /* minor version number */ | |
458 | LCL(vars_size) = LCL(var_minor_version)+4 /* size of _profile_vars structure */ | |
459 | LCL(plist_size) = LCL(vars_size)+4 /* size of page_list structure */ | |
460 | LCL(acontext_size) = LCL(plist_size)+4 /* size of allocation contexts */ | |
461 | LCL(callback_size) = LCL(acontext_size)+4 /* size of callback structure */ | |
462 | LCL(type) = LCL(callback_size)+4 /* profile type (gprof, prof) */ | |
463 | LCL(error_msg) = LCL(type)+4 /* error message for perror */ | |
464 | LCL(filename) = LCL(error_msg)+4 /* filename to write to */ | |
465 | LCL(str_ptr) = LCL(filename)+4 /* string table pointer */ | |
466 | LCL(stream) = LCL(str_ptr)+4 /* stdio stream to write to */ | |
467 | LCL(diag_stream) = LCL(stream)+4 /* stdio stream to write diagnostics to */ | |
468 | LCL(fwrite_func) = LCL(diag_stream)+4 /* function like fwrite to output bytes */ | |
469 | LCL(page_size) = LCL(fwrite_func)+4 /* page size in bytes */ | |
470 | LCL(str_bytes) = LCL(page_size)+4 /* # bytes in string table */ | |
471 | LCL(str_total) = LCL(str_bytes)+4 /* # total bytes allocated for string table */ | |
472 | LCL(clock_ticks) = LCL(str_total)+4 /* # clock ticks per second */ | |
473 | ||
474 | /* profil variables */ | |
475 | LCL(profil_start) = LCL(clock_ticks)+4 /* start of profil variables */ | |
476 | LCL(lowpc) = LCL(clock_ticks)+4 /* lowest address */ | |
477 | LCL(highpc) = LCL(lowpc)+4 /* highest address */ | |
478 | LCL(text_len) = LCL(highpc)+4 /* highpc-lowpc */ | |
479 | LCL(profil_len) = LCL(text_len)+4 /* size of profil buffer */ | |
480 | LCL(counter_size) = LCL(profil_len)+4 /* size of indivual counter */ | |
481 | LCL(scale) = LCL(counter_size)+4 /* scale factor */ | |
482 | LCL(profil_unused) = LCL(scale)+4 /* unused fields */ | |
483 | LCL(profil_end) = LCL(profil_unused)+4*8 /* end of profil_info structure */ | |
484 | LCL(profil_buf) = LCL(profil_end) /* buffer for profil */ | |
485 | ||
486 | /* Output selection func ptrs */ | |
487 | LCL(output_init) = LCL(profil_buf)+4 /* Initialization */ | |
488 | LCL(output) = LCL(output_init)+4 /* Write out profiling info */ | |
489 | LCL(output_ptr) = LCL(output)+4 /* Output specific data ptr */ | |
490 | ||
491 | /* Memory allocation support */ | |
492 | LCL(acontext) = LCL(output_ptr)+4 /* pointers to allocation context blocks */ | |
493 | ||
494 | LCL(bogus_func) = LCL(acontext)+4*C_max /* function to use if gprof arc is bad */ | |
495 | LCL(vars_unused) = LCL(bogus_func)+4 /* future growth */ | |
496 | ||
497 | /* flags */ | |
498 | LCL(init) = LCL(vars_unused)+4*63 /* whether initializations were done */ | |
499 | LCL(active) = LCL(init)+1 /* whether profiling is active */ | |
500 | LCL(do_profile) = LCL(active)+1 /* whether to do profiling */ | |
501 | LCL(use_dci) = LCL(do_profile)+1 /* whether to use DCI */ | |
502 | LCL(use_profil) = LCL(use_dci)+1 /* whether to use profil */ | |
503 | LCL(recursive_alloc) = LCL(use_profil)+1 /* alloc called recursively */ | |
504 | LCL(output_uarea) = LCL(recursive_alloc)+1 /* output uarea */ | |
505 | LCL(output_stats) = LCL(output_uarea)+1 /* output stats info */ | |
506 | LCL(output_clock) = LCL(output_stats)+1 /* output the clock ticks */ | |
507 | LCL(multiple_sections) = LCL(output_clock)+1 /* multiple sections are ok */ | |
508 | LCL(have_bb) = LCL(multiple_sections)+1 /* whether we have basic block data */ | |
509 | LCL(init_format) = LCL(have_bb)+1 /* The output format has been chosen */ | |
510 | LCL(debug) = LCL(init_format)+1 /* Whether or not we are debugging */ | |
511 | LCL(check_funcs) = LCL(debug)+1 /* Whether to check functions for validity */ | |
512 | LCL(flag_unused) = LCL(check_funcs)+1 /* unused flags */ | |
513 | LCL(end_of_vars) = LCL(flag_unused)+62 /* size of machine independent vars */ | |
514 | ||
/*
 * Data that contains profile statistics that can be dumped out
 * into the {,g}mon.out file.  This is defined in profile-md.h.
 */

LCL(stats_start)	= LCL(end_of_vars)			/* start of stats substructure */
LCL(stats_major_version)= LCL(stats_start)			/* major version number */
LCL(stats_minor_version)= LCL(stats_major_version)+4		/* minor version number */
LCL(stats_size)		= LCL(stats_minor_version)+4		/* size of _profile_stats structure */
LCL(profil_buckets)	= LCL(stats_size)+4			/* # profil buckets */
LCL(my_cpu)		= LCL(profil_buckets)+4			/* identify which cpu/thread this is */
LCL(max_cpu)		= LCL(my_cpu)+4				/* max cpu/thread number (NOTE(review): original comment duplicated my_cpu's) */
LCL(prof_records)	= LCL(max_cpu)+4			/* # of profiled functions */
LCL(gprof_records)	= LCL(prof_records)+4			/* # of gprof arcs created */
LCL(hash_buckets)	= LCL(gprof_records)+4			/* max gprof hash buckets on a chain */
LCL(bogus_count)	= LCL(hash_buckets)+4			/* # bogus functions found in gprof */

/* 64-bit counters from here on (8-byte strides) */
LCL(cnt)		= LCL(bogus_count)+4			/* # of _{prof,gprof}_mcount calls */
LCL(dummy)		= LCL(cnt)+8				/* # of _dummy_mcount calls */
LCL(old_mcount)		= LCL(dummy)+8				/* # of old mcount calls */
LCL(hash_search)	= LCL(old_mcount)+8			/* # gprof hash buckets searched */
LCL(hash_num)		= LCL(hash_search)+8			/* # times hash table searched */
LCL(user_ticks)		= LCL(hash_num)+8			/* # ticks within user space */
LCL(kernel_ticks)	= LCL(user_ticks)+8			/* # ticks within kernel space */
LCL(idle_ticks)		= LCL(kernel_ticks)+8			/* # ticks cpu was idle */
LCL(overflow_ticks)	= LCL(idle_ticks)+8			/* # ticks where histcounter overflowed */
LCL(acontext_locked)	= LCL(overflow_ticks)+8			/* # times an acontext was locked */
LCL(too_low)		= LCL(acontext_locked)+8		/* # times histogram tick too low */
LCL(too_high)		= LCL(too_low)+8			/* # times histogram tick too high */
LCL(prof_overflow)	= LCL(too_high)+8			/* # times the prof count field overflowed */
LCL(gprof_overflow)	= LCL(prof_overflow)+8			/* # times the gprof count field overflowed */
LCL(num_alloc)		= LCL(gprof_overflow)+8			/* # allocations in each context */
LCL(bytes_alloc)	= LCL(num_alloc)+4*C_max		/* bytes allocated in each context */
LCL(num_context)	= LCL(bytes_alloc)+4*C_max		/* # allocation context blocks */
LCL(wasted)		= LCL(num_context)+4*C_max		/* # bytes wasted */
LCL(overhead)		= LCL(wasted)+4*C_max			/* # bytes of overhead */
LCL(buckets)		= LCL(overhead)+4*C_max			/* # hash indexes that have n buckets */
LCL(cache_hits1)	= LCL(buckets)+4*10			/* # gprof cache hits in bucket #1 */
LCL(cache_hits2)	= LCL(cache_hits1)+8			/* # gprof cache hits in bucket #2 */
LCL(cache_hits3)	= LCL(cache_hits2)+8			/* # gprof cache hits in bucket #3 */
LCL(stats_unused)	= LCL(cache_hits3)+8			/* reserved for future use */
LCL(stats_end)		= LCL(stats_unused)+8*64		/* end of stats structure */

/*
 * Machine dependent variables that no C file should access (except for
 * profile-md.c).
 */

LCL(md_start)		= LCL(stats_end)			/* start of md structure */
LCL(md_major_version)	= LCL(md_start)				/* major version number */
LCL(md_minor_version)	= LCL(md_major_version)+4		/* minor version number */
LCL(md_size)		= LCL(md_minor_version)+4		/* size of _profile_md structure */
LCL(hash_ptr)		= LCL(md_size)+4			/* gprof hash pointer */
LCL(hash_size)		= LCL(hash_ptr)+4			/* gprof hash size */
LCL(num_cache)		= LCL(hash_size)+4			/* # of cache entries */
LCL(save_mcount_ptr)	= LCL(num_cache)+4			/* save for mcount_ptr when suspending profiling */
LCL(mcount_ptr_ptr)	= LCL(save_mcount_ptr)+4		/* pointer to _mcount_ptr */
LCL(dummy_ptr)		= LCL(mcount_ptr_ptr)+4			/* pointer to gprof_dummy */
LCL(alloc_pages)	= LCL(dummy_ptr)+4			/* allocate more memory */
LCL(num_buffer)		= LCL(alloc_pages)+4			/* buffer to convert 64 bit ints in */
LCL(md_unused)		= LCL(num_buffer)+N_digit		/* unused fields */
LCL(md_end)		= LCL(md_unused)+4*58			/* end of md structure */
LCL(total_size)		= LCL(md_end)				/* size of entire structure */

/*
 * Size of the entire _profile_vars structure.
 */

DATA(_profile_size)
	.long	LCL(total_size)
ENDDATA(_profile_size)

/*
 * Size of the statistics substructure.
 */

DATA(_profile_stats_size)
	.long	LCL(stats_end)-LCL(stats_start)
ENDDATA(_profile_stats_size)

/*
 * Size of the profil info substructure.
 */

DATA(_profile_profil_size)
	.long	LCL(profil_end)-LCL(profil_start)
ENDDATA(_profile_profil_size)

/*
 * Size of the machine dependent substructure.
 */

DATA(_profile_md_size)
	.long	LCL(md_end)-LCL(md_start)
ENDDATA(_profile_md_size)		/* FIX: was ENDDATA(_profile_profil_size), mismatching DATA() above */

/*
 * Whether statistics are supported.
 */

DATA(_profile_do_stats)
	.long	DO_STATS
ENDDATA(_profile_do_stats)
618 | ||
/*
 * Code section follows; map LCL(xxx) offsets into simpler V_xxx names.
 */

	.text

#define V_acontext		LCL(acontext)
#define V_acontext_locked	LCL(acontext_locked)
#define V_alloc_pages		LCL(alloc_pages)
#define V_bogus_func		LCL(bogus_func)
#define V_bytes_alloc		LCL(bytes_alloc)
#define V_cache_hits1		LCL(cache_hits1)
#define V_cache_hits2		LCL(cache_hits2)
#define V_cache_hits3		LCL(cache_hits3)
#define V_cnt			LCL(cnt)
#define V_cnt_overflow		LCL(cnt_overflow)
#define V_check_funcs		LCL(check_funcs)
#define V_dummy			LCL(dummy)
#define V_dummy_overflow	LCL(dummy_overflow)
#define V_dummy_ptr		LCL(dummy_ptr)
#define V_gprof_records		LCL(gprof_records)
#define V_hash_num		LCL(hash_num)
#define V_hash_ptr		LCL(hash_ptr)
#define V_hash_search		LCL(hash_search)
#define V_mcount_ptr_ptr	LCL(mcount_ptr_ptr)
#define V_num_alloc		LCL(num_alloc)
#define V_num_buffer		LCL(num_buffer)
#define V_num_context		LCL(num_context)
#define V_old_mcount		LCL(old_mcount)
#define V_old_mcount_overflow	LCL(old_mcount_overflow)
#define V_overhead		LCL(overhead)
#define V_page_size		LCL(page_size)
#define V_prof_records		LCL(prof_records)
#define V_recursive_alloc	LCL(recursive_alloc)
#define V_wasted		LCL(wasted)

/*
 * Load %ebx with the address of _profile_vars.  On a multiprocessor, this
 * will load the appropriate machine's _profile_vars structure.
 * For ELF shared libraries, rely on the fact that we won't need a GOT,
 * except to load this pointer.
 */

#if defined (MACH_KERNEL)
#define	ASSEMBLER
#include <i386/mp.h>

#if SQT
#include <i386/SQT/asm_macros.h>
#endif

#ifndef CPU_NUMBER
#error "Cannot determine how to get CPU number"
#endif

#define Vload	CPU_NUMBER(%ebx); movl EXT(_profile_vars_cpus)(,%ebx,4),%ebx

#else	/* not kernel */
#define	Vload	Gload; Egaddr(%ebx,_profile_vars)
#endif
679 | ||
680 | \f | |
681 | /* | |
682 | * Allocate some memory for profiling. This memory is guaranteed to | |
683 | * be zero. | |
684 | * %eax contains the memory size requested and will contain ptr on exit. | |
685 | * %ebx contains the address of the appropriate profile_vars structure. | |
686 | * %ecx is the number of the memory pool to allocate from (trashed on exit). | |
687 | * %edx is trashed. | |
688 | * %esi is preserved. | |
689 | * %edi is preserved. | |
690 | * %ebp is preserved. | |
691 | */ | |
692 | ||
693 | Entry(_profile_alloc_asm) | |
694 | ENTER | |
695 | pushl %esi | |
696 | pushl %edi | |
697 | ||
698 | movl %ecx,%edi /* move context number to saved reg */ | |
699 | ||
700 | #if NO_RECURSIVE_ALLOC | |
701 | movb $-1,%cl | |
702 | xchgb %cl,V_recursive_alloc(%ebx) | |
703 | cmpb $0,%cl | |
704 | je LCL(no_recurse) | |
705 | ||
706 | int $3 | |
707 | ||
708 | .align ALIGN | |
709 | LCL(no_recurse): | |
710 | #endif | |
711 | ||
712 | leal V_acontext(%ebx,%edi,4),%ecx | |
713 | ||
714 | /* Loop looking for a free allocation context. */ | |
715 | /* %eax = size, %ebx = vars addr, %ecx = ptr to allocation context to try */ | |
716 | /* %edi = context number */ | |
717 | ||
718 | .align ALIGN | |
719 | LCL(alloc_loop): | |
720 | movl %ecx,%esi /* save ptr in case no more contexts */ | |
721 | movl A_next(%ecx),%ecx /* next context block */ | |
722 | cmpl $0,%ecx | |
723 | je LCL(alloc_context) /* need to allocate a new context block */ | |
724 | ||
725 | movl $-1,%edx | |
726 | xchgl %edx,A_lock(%ecx) /* %edx == 0 if context available */ | |
727 | ||
728 | #if DO_STATS | |
729 | SDADDNEG(%edx,V_acontext_locked(%ebx)) /* increment counter if lock was held */ | |
730 | #endif | |
731 | ||
732 | cmpl $0,%edx | |
733 | jne LCL(alloc_loop) /* go back if this context block is not available */ | |
734 | ||
735 | /* Allocation context found (%ecx), now allocate. */ | |
736 | movl A_plist(%ecx),%edx /* pointer to current block */ | |
737 | cmpl $0,%edx /* first allocation? */ | |
738 | je LCL(alloc_new) | |
739 | ||
740 | cmpl %eax,M_nfree(%edx) /* see if we have enough space */ | |
741 | jl LCL(alloc_new) /* jump if not enough space */ | |
742 | ||
743 | /* Allocate from local block (and common exit) */ | |
744 | /* %eax = bytes to allocate, %ebx = GOT, %ecx = context, %edx = memory block */ | |
745 | /* %edi = context number */ | |
746 | ||
747 | .align ALIGN | |
748 | LCL(alloc_ret): | |
749 | ||
750 | #if DO_STATS | |
751 | SLOCK incl V_num_alloc(%ebx,%edi,4) /* update global counters */ | |
752 | SLOCK addl %eax,V_bytes_alloc(%ebx,%edi,4) | |
753 | SLOCK subl %eax,V_wasted(%ebx,%edi,4) | |
754 | #endif | |
755 | ||
756 | movl M_ptr(%edx),%esi /* pointer return value */ | |
757 | subl %eax,M_nfree(%edx) /* decrement bytes remaining */ | |
758 | addl %eax,M_nalloc(%edx) /* increment bytes allocated */ | |
759 | incl M_num(%edx) /* increment # allocations */ | |
760 | addl %eax,M_ptr(%edx) /* advance pointer */ | |
761 | movl $0,A_lock(%ecx) /* unlock context block */ | |
762 | movl %esi,%eax /* return pointer */ | |
763 | ||
764 | #if NO_RECURSIVE_ALLOC | |
765 | movb $0,V_recursive_alloc(%ebx) | |
766 | #endif | |
767 | ||
768 | popl %edi | |
769 | popl %esi | |
770 | LEAVE0 | |
771 | ret /* return to the caller */ | |
772 | ||
773 | /* Allocate space in whole number of pages */ | |
774 | /* %eax = bytes to allocate, %ebx = vars address, %ecx = context */ | |
775 | /* %edi = context number */ | |
776 | ||
777 | .align ALIGN | |
778 | LCL(alloc_new): | |
779 | pushl %eax /* save regs */ | |
780 | pushl %ecx | |
781 | movl V_page_size(%ebx),%edx | |
782 | addl $(M_size-1),%eax /* add in overhead size & subtract 1 */ | |
783 | decl %edx /* page_size - 1 */ | |
784 | addl %edx,%eax /* round up to whole number of pages */ | |
785 | notl %edx | |
786 | andl %edx,%eax | |
787 | leal -M_size(%eax),%esi /* save allocation size */ | |
788 | pushl %eax /* argument to _profile_alloc_pages */ | |
789 | call *V_alloc_pages(%ebx) /* allocate some memory */ | |
790 | addl $4,%esp /* pop off argument */ | |
791 | ||
792 | #if DO_STATS | |
793 | SLOCK addl %esi,V_wasted(%ebx,%edi,4) /* udpate global counters */ | |
55e303ae | 794 | SLOCK addl $(M_size),V_overhead(%ebx,%edi,4) |
1c79356b A |
795 | #endif |
796 | ||
797 | popl %ecx /* context block */ | |
798 | movl %eax,%edx /* memory block pointer */ | |
799 | movl %esi,M_nfree(%edx) /* # free bytes */ | |
55e303ae | 800 | addl $(M_size),%eax /* bump past overhead */ |
1c79356b A |
801 | movl A_plist(%ecx),%esi /* previous memory block or 0 */ |
802 | movl %eax,M_first(%edx) /* first space available */ | |
803 | movl %eax,M_ptr(%edx) /* current address available */ | |
804 | movl %esi,M_next(%edx) /* next memory block allocated */ | |
805 | movl %edx,A_plist(%ecx) /* update current page list */ | |
806 | popl %eax /* user size request */ | |
807 | jmp LCL(alloc_ret) /* goto common return code */ | |
808 | ||
809 | /* Allocate a context header in addition to memory block header + data */ | |
810 | /* %eax = bytes to allocate, %ebx = GOT, %esi = ptr to store context ptr */ | |
811 | /* %edi = context number */ | |
812 | ||
813 | .align ALIGN | |
814 | LCL(alloc_context): | |
815 | pushl %eax /* save regs */ | |
816 | pushl %esi | |
817 | movl V_page_size(%ebx),%edx | |
818 | addl $(A_size+M_size-1),%eax /* add in overhead size & subtract 1 */ | |
819 | decl %edx /* page_size - 1 */ | |
820 | addl %edx,%eax /* round up to whole number of pages */ | |
821 | notl %edx | |
822 | andl %edx,%eax | |
823 | leal -A_size-M_size(%eax),%esi /* save allocation size */ | |
824 | pushl %eax /* argument to _profile_alloc_pages */ | |
825 | call *V_alloc_pages(%ebx) /* allocate some memory */ | |
826 | addl $4,%esp /* pop off argument */ | |
827 | ||
828 | #if DO_STATS | |
829 | SLOCK incl V_num_context(%ebx,%edi,4) /* bump # context blocks */ | |
830 | SLOCK addl %esi,V_wasted(%ebx,%edi,4) /* update global counters */ | |
831 | SLOCK addl $(A_size+M_size),V_overhead(%ebx,%edi,4) | |
832 | #endif | |
833 | ||
834 | movl %eax,%ecx /* context pointer */ | |
835 | leal A_size(%eax),%edx /* memory block pointer */ | |
836 | movl %esi,M_nfree(%edx) /* # free bytes */ | |
837 | addl $(A_size+M_size),%eax /* bump past overhead */ | |
838 | movl %eax,M_first(%edx) /* first space available */ | |
839 | movl %eax,M_ptr(%edx) /* current address available */ | |
840 | movl $0,M_next(%edx) /* next memory block allocated */ | |
841 | movl %edx,A_plist(%ecx) /* head of memory block list */ | |
842 | movl $1,A_lock(%ecx) /* set lock */ | |
843 | popl %esi /* ptr to store context block link */ | |
844 | movl %ecx,%eax /* context pointer temp */ | |
845 | xchgl %eax,A_next(%esi) /* link into chain */ | |
846 | movl %eax,A_next(%ecx) /* add links in case of threading */ | |
847 | popl %eax /* user size request */ | |
848 | jmp LCL(alloc_ret) /* goto common return code */ | |
849 | ||
850 | END(_profile_alloc_asm) | |
851 | ||
852 | /* | |
853 | * C callable version of the profile memory allocator. | |
854 | * extern void *_profile_alloc(struct profile_vars *, size_t, acontext_type_t); | |
855 | */ | |
856 | ||
/*
 * C-callable wrapper for the profile memory allocator.
 *   extern void *_profile_alloc(struct profile_vars *, size_t, acontext_type_t);
 *
 * Marshals the C stack arguments into the register convention expected by
 * _profile_alloc_asm (%eax = size, %ebx = profile_vars, %ecx = pool) and
 * rounds the size up to a 4-byte multiple.  %ebx is preserved for the caller.
 * NOTE(review): Estack is assumed to absorb any frame adjustment made by
 * ENTER; the explicit 8/12/16 offsets account for the pushed %ebx — confirm
 * against the macro definitions earlier in this file.
 */
Entry(_profile_alloc)
	ENTER
	pushl %ebx				/* callee-saved; used for vars addr */
	movl 12+Estack(%esp),%eax		/* memory size */
	movl 8+Estack(%esp),%ebx		/* profile_vars address */
	addl $3,%eax				/* round up to word boundary: add 3 ... */
	movl 16+Estack(%esp),%ecx		/* which memory pool to allocate from */
	andl $0xfffffffc,%eax			/* ... then clear the low two bits */
	call EXT(_profile_alloc_asm)		/* returns pointer in %eax */
	popl %ebx
	LEAVE0
	ret
END(_profile_alloc)
870 | ||
871 | \f | |
872 | /* | |
873 | * Dummy mcount routine that just returns. | |
874 | * | |
875 | * +-------------------------------+ | |
876 | * | | | |
877 | * | | | |
878 | * | caller's caller stack, | | |
879 | * | saved registers, params. | | |
880 | * | | | |
881 | * | | | |
882 | * +-------------------------------+ | |
883 | * | caller's caller return addr. | | |
884 | * +-------------------------------+ | |
885 | * esp --> | caller's return address | | |
886 | * +-------------------------------+ | |
887 | * | |
 888 | * edx --> function unique LCL | |
889 | */ | |
890 | ||
/*
 * Dummy mcount routine: performs no profiling and simply returns.
 * Installed as the mcount handler when profiling is disabled.
 * When DO_STATS is on, it still counts how often it was entered
 * (V_dummy) with preemption disabled around the shared-counter update.
 * Clobbers nothing visible to the caller (%ebx is saved/restored).
 */
Entry(_dummy_mcount)
	ENTER

#if DO_STATS
	pushl %ebx
	MP_DISABLE_PREEMPTION(%ebx)
	Vload					/* %ebx = profile vars address */
	SDINC(V_dummy(%ebx))			/* count dummy-mcount entries */
	MP_ENABLE_PREEMPTION(%ebx)
	popl %ebx
#endif

	LEAVE0
	ret
END(_dummy_mcount)
906 | ||
907 | \f | |
908 | /* | |
909 | * Entry point for System V based profiling, count how many times each function | |
910 | * is called. The function label is passed in %edx, and the top two words on | |
911 | * the stack are the caller's address, and the caller's return address. | |
912 | * | |
913 | * +-------------------------------+ | |
914 | * | | | |
915 | * | | | |
916 | * | caller's caller stack, | | |
917 | * | saved registers, params. | | |
918 | * | | | |
919 | * | | | |
920 | * +-------------------------------+ | |
921 | * | caller's caller return addr. | | |
922 | * +-------------------------------+ | |
923 | * esp --> | caller's return address | | |
924 | * +-------------------------------+ | |
925 | * | |
926 | * edx --> function unique label | |
927 | * | |
 928 | * We don't worry about the possibility of two threads calling | |
929 | * the same function for the first time simultaneously. If that | |
930 | * happens, two records will be created, and one of the record | |
931 | * addresses will be stored in the function unique label (which | |
932 | * is aligned by the compiler, so we don't have to watch out for | |
933 | * crossing page/cache boundaries). | |
934 | */ | |
935 | ||
/*
 * System V style profiling entry: count calls per function.
 * In:  %edx = address of the function's unique (compiler-aligned) label word,
 *             which holds 0 until a prof record is allocated for the function.
 *      Top of stack (per the diagram above) holds the caller's return address.
 * Fast path: record exists -> double-precision increment of its call count.
 * Slow path (first call): allocate a record from the C_prof pool and publish
 * it into the label word with xchgl (a racing thread may publish first; the
 * loser's record is simply abandoned, as documented above).
 */
Entry(_prof_mcount)
	ENTER

#if DO_STATS
	pushl %ebx
	MP_DISABLE_PREEMPTION(%ebx)
	Vload					/* %ebx = profile vars address */
	SDINC(V_cnt(%ebx))			/* total mcount calls */
#endif

	movl (%edx),%eax			/* initialized? (0 until first call) */
	cmpl $0,%eax
	je LCL(pnew)

	DINC2(P_count(%eax),P_overflow(%eax))	/* bump function count (double precision) */

#if DO_STATS
	MP_ENABLE_PREEMPTION(%ebx)
	popl %ebx
#endif

	LEAVE0
	ret

	.align ALIGN
LCL(pnew):

	/* First call: if DO_STATS, %ebx is already pushed/loaded above; */
	/* otherwise do it here so the epilogue below is unconditional.  */
#if !DO_STATS
	pushl %ebx
	MP_DISABLE_PREEMPTION(%ebx)
	Vload
#endif

	SLOCK incl V_prof_records(%ebx)		/* global record count */
	pushl %edx				/* preserve label addr across call */
	movl $(P_size),%eax			/* allocation size */
	movl $(C_prof),%ecx			/* allocation pool */
	call EXT(_profile_alloc_asm)		/* allocate a new record -> %eax */
	popl %edx

	movl Estack+4(%esp),%ecx		/* caller's address */
	movl %ecx,P_addr(%eax)
	movl $1,P_count(%eax)			/* call count starts at one */
	xchgl %eax,(%edx)			/* atomically publish record in label */
	MP_ENABLE_PREEMPTION(%ebx)		/* balances either push path above */
	popl %ebx
	LEAVE0
	ret

END(_prof_mcount)
986 | ||
987 | \f | |
988 | /* | |
989 | * Entry point for BSD based graph profiling, count how many times each unique | |
990 | * call graph (caller + callee) is called. The function label is passed in | |
991 | * %edx, and the top two words on the stack are the caller's address, and the | |
992 | * caller's return address. | |
993 | * | |
994 | * +-------------------------------+ | |
995 | * | | | |
996 | * | | | |
997 | * | caller's caller stack, | | |
998 | * | saved registers, params. | | |
999 | * | | | |
1000 | * | | | |
1001 | * +-------------------------------+ | |
1002 | * | caller's caller return addr. | | |
1003 | * +-------------------------------+ | |
1004 | * esp --> | caller's return address | | |
1005 | * +-------------------------------+ | |
1006 | * | |
 1007 | * edx --> function unique label | |
1008 | * | |
1009 | * We don't worry about the possibility of two threads calling the same | |
1010 | * function simultaneously. If that happens, two records will be created, and | |
1011 | * one of the record addresses will be stored in the function unique label | |
1012 | * (which is aligned by the compiler). | |
1013 | * | |
1014 | * By design, the gprof header is not locked. Each of the cache pointers is | |
1015 | * always a valid pointer (possibly to a null record), and if another thread | |
1016 | * comes in and modifies the pointer, it does so atomically with a simple store. | |
1017 | * Since all arcs are in the hash table, the caches are just to avoid doing | |
1018 | * a multiplication in the common case, and if they don't match, the arcs will | |
1019 | * still be found. | |
1020 | */ | |
1021 | ||
/*
 * BSD/gprof style profiling entry: count each (caller, callee) arc.
 * In:  %edx = address of the function's unique label word (0 until a gprof
 *             header is allocated); stack top = caller's return address,
 *             next word = caller's caller return address (see diagram above).
 * Strategy: per-function header keeps a 3-entry MRU cache of arcs to avoid
 * the hash computation on repeat arcs; misses fall through to the global
 * hash table, and unknown arcs are allocated and inserted lock-free.
 */
Entry(_gprof_mcount)

	ENTER
	movl Estack+4(%esp),%ecx		/* caller's caller address */

#if DO_STATS
	pushl %ebx
	MP_DISABLE_PREEMPTION(%ebx)
	Vload					/* %ebx = profile vars address */
	SDINC(V_cnt(%ebx))			/* bump profile call counter (double int) */
#endif

	movl (%edx),%eax			/* Gprof header allocated? */
	cmpl $0,%eax
	je LCL(gnew)				/* skip if first call */

	DINC2(H_prof+P_count(%eax),H_prof+P_overflow(%eax))	/* bump function count */

	/* See if this call arc is the same as the last time */
MARK(_gprof_mcount_cache1)
	movl H_cache_ptr(%eax),%edx		/* last arc searched */
	cmpl %ecx,G_frompc(%edx)		/* skip if not equal */
	jne LCL(gcache2)

	/* Same as last time, increment and return */

	DINC2(G_count(%edx),G_overflow(%edx))	/* bump arc count */

#if DO_STATS
	SDINC(V_cache_hits1(%ebx))		/* first-entry cache hit */
	MP_ENABLE_PREEMPTION(%ebx)
	popl %ebx
#endif

	LEAVE0
	ret

	/* Search second cache entry */
	/* %eax = gprof func header, %ebx = vars address if DO_STATS, %ecx = caller's caller */
	/* %edx = first arc searched */
	/* %ebx if DO_STATS pushed on stack */

	.align ALIGN
MARK(_gprof_mcount_cache2)
LCL(gcache2):
	pushl %esi				/* get a saved register */
	movl H_cache_ptr+4(%eax),%esi		/* 2nd arc to be searched */
	cmpl %ecx,G_frompc(%esi)		/* skip if not equal */
	jne LCL(gcache3)

	/* Element found: increment, promote to front of cache, and return */

	DINC2(G_count(%esi),G_overflow(%esi))	/* bump arc count */

	movl %esi,H_cache_ptr+0(%eax)		/* swap 1st and 2nd cached arcs */
	popl %esi
	movl %edx,H_cache_ptr+4(%eax)

#if DO_STATS
	SDINC(V_cache_hits2(%ebx))		/* second-entry cache hit */
	MP_ENABLE_PREEMPTION(%ebx)
	popl %ebx
#endif

	LEAVE0
	ret

	/* Search third cache entry */
	/* %eax = gprof func header, %ebx = vars address if DO_STATS, %ecx = caller's caller */
	/* %edx = first arc searched, %esi = second arc searched */
	/* %esi, %ebx if DO_STATS pushed on stack */

	.align ALIGN
MARK(_gprof_mcount_cache3)
LCL(gcache3):
	pushl %edi
	movl H_cache_ptr+8(%eax),%edi		/* 3rd arc to be searched */
	cmpl %ecx,G_frompc(%edi)		/* skip if not equal */
	jne LCL(gnocache)

	/* Element found: increment, rotate cache (3rd -> 1st), and return */

	DINC2(G_count(%edi),G_overflow(%edi))	/* bump arc count */

	movl %edi,H_cache_ptr+0(%eax)		/* make this 1st cached arc */
	movl %esi,H_cache_ptr+8(%eax)
	movl %edx,H_cache_ptr+4(%eax)
	popl %edi
	popl %esi

#if DO_STATS
	SDINC(V_cache_hits3(%ebx))		/* third-entry cache hit */
	MP_ENABLE_PREEMPTION(%ebx)
	popl %ebx
#endif

	LEAVE0
	ret

	/* No function context: allocate a new gprof header */
	/* %ebx is the variables address if DO_STATS */
	/* %ecx is the caller's caller's address */
	/* %edx is the unique function pointer */
	/* %ebx if DO_STATS pushed on stack */

	.align ALIGN
MARK(_gprof_mcount_new)
LCL(gnew):
	pushl %esi
	pushl %edi

#if !DO_STATS
	pushl %ebx				/* address of vars needed for alloc */
	MP_DISABLE_PREEMPTION(%ebx)
	Vload					/* (DO_STATS path loaded it earlier) */
#endif

	SLOCK incl V_prof_records(%ebx)
	movl %edx,%esi				/* save unique function ptr */
	movl %ecx,%edi				/* and caller's caller address */
	movl $(H_size),%eax			/* memory block size */
	movl $(C_gfunc),%ecx			/* gprof function header memory pool */
	call EXT(_profile_alloc_asm)		/* header -> %eax */

	movl V_hash_ptr(%ebx),%ecx		/* copy hash_ptr to func header */
	movl V_dummy_ptr(%ebx),%edx		/* dummy cache entry */
	movl %ecx,H_hash_ptr(%eax)
	movl %edx,H_cache_ptr+0(%eax)		/* prime all 3 cache slots with the */
	movl %edx,H_cache_ptr+4(%eax)		/* dummy arc so they are always valid */
	movl %edx,H_cache_ptr+8(%eax)		/* pointers (see header comment) */
	movl %esi,H_unique_ptr(%eax)		/* remember function unique ptr */
	movl Estack+12(%esp),%ecx		/* caller's address */
	movl $1,H_prof+P_count(%eax)		/* function called once so far */
	movl %ecx,H_prof+P_addr(%eax)		/* set up prof information */
	movl %eax,(%esi)			/* publish header in unique label */
	movl %edi,%ecx				/* caller's caller address */
	movl %edx,%esi				/* 2nd cached arc */

#if !DO_STATS
	popl %ebx
#endif

	/* Fall through to add element to the hash table. This may involve */
	/* searching a few hash table elements that don't need to be searched */
	/* since we have a new element, but it allows the hash table function */
	/* to be specified in only one place */

	/* Didn't find entry in cache, search the global hash table */
	/* %eax = gprof func header, %ebx = vars address if DO_STATS */
	/* %ecx = caller's caller */
	/* %edx, %esi = cached arcs that were searched */
	/* %edi, %esi, %ebx if DO_STATS pushed on stack */

	.align ALIGN
MARK(_gprof_mcount_hash)
LCL(gnocache):

	pushl %esi				/* save 2nd arc searched */
	pushl %edx				/* save 1st arc searched */
	movl %eax,%esi				/* save gprof func header */

#if DO_STATS
	SDINC(V_hash_num(%ebx))
	movl Estack+20(%esp),%edi		/* caller's address (extra %ebx push) */
#else
	movl Estack+16(%esp),%edi		/* caller's address */
#endif
	movl %ecx,%eax				/* caller's caller address */
	imull %edi,%eax				/* multiply to get hash */
	movl H_hash_ptr(%esi),%edx		/* hash pointer */
	shrl $(GPROF_HASH_SHIFT),%eax		/* eliminate low order bits */
	andl $(GPROF_HASH_MASK),%eax		/* mask to get hash value */
	leal 0(%edx,%eax,4),%eax		/* pointer to hash bucket */
	movl %eax,%edx				/* save hash bucket address */

	/* %eax = old arc, %ebx = vars address if DO_STATS, %ecx = caller's caller */
	/* %edx = hash bucket address, %esi = gfunc ptr, %edi = caller's addr */
	/* 2 old arcs, %edi, %esi, %ebx if DO_STATS pushed on stack */

	.align ALIGN
LCL(ghash):
	movl G_next(%eax),%eax			/* get next hash element */
	cmpl $0,%eax				/* end of line? */
	je LCL(ghashnew)			/* skip if allocate new hash */

#if DO_STATS
	SDINC(V_hash_search(%ebx))
#endif

	cmpl G_selfpc(%eax),%edi		/* loop back if not one we want */
	jne LCL(ghash)

	cmpl G_frompc(%eax),%ecx		/* loop back if not one we want */
	jne LCL(ghash)

	/* Found an entry: increment count, refresh the MRU cache, and return */
	/* %eax = arc, %ebx = vars address if DO_STATS, %esi = func header */
	/* 2 old arcs, %edi, %esi, %ebx if DO_STATS pushed on stack */

	DINC2(G_count(%eax),G_overflow(%eax))	/* bump arc count */

	popl %ecx				/* previous 1st arc searched */
	movl %eax,H_cache_ptr+0(%esi)		/* this element is now 1st arc */
	popl %edi				/* previous 2nd arc searched */
	movl %ecx,H_cache_ptr+4(%esi)		/* new 2nd arc to be searched */
	movl %edi,H_cache_ptr+8(%esi)		/* new 3rd arc to be searched */
	popl %edi
	popl %esi

#if DO_STATS
	MP_ENABLE_PREEMPTION(%ebx)
	popl %ebx
#endif

	LEAVE0
	ret					/* return to user */

	/* Allocate new arc */
	/* %eax = old arc, %ebx = vars address if DO_STATS, %ecx = caller's caller */
	/* %edx = hash bucket address, %esi = gfunc ptr, %edi = caller's addr */
	/* 2 old arcs, %edi, %esi, %ebx if DO_STATS pushed on stack */

	.align ALIGN
MARK(_gprof_mcount_hashnew)
LCL(ghashnew):

#if !DO_STATS
	pushl %ebx				/* load address of vars if we haven't */
	MP_DISABLE_PREEMPTION(%ebx)
	Vload					/* already done so */
#endif

	SLOCK incl V_gprof_records(%ebx)
	pushl %edx				/* preserve bucket addr across call */
	movl %ecx,%edi				/* save caller's caller */
	movl $(G_size),%eax			/* arc size */
	movl $(C_gprof),%ecx			/* gprof memory pool */
	call EXT(_profile_alloc_asm)		/* new arc -> %eax */
	popl %edx

	movl $1,G_count(%eax)			/* set call count */
	movl Estack+20(%esp),%ecx		/* caller's address */
	movl %edi,G_frompc(%eax)		/* caller's caller */
	movl %ecx,G_selfpc(%eax)

#if !DO_STATS
	popl %ebx				/* release %ebx if no stats */
#endif

	/* Lock-free list insert: xchgl swaps our arc into the bucket head */
	/* and hands back whatever head was there at that instant, so the   */
	/* chain stays intact even if another thread inserted concurrently. */
	movl (%edx),%ecx			/* first hash bucket */
	movl %ecx,G_next(%eax)			/* update link */
	movl %eax,%ecx				/* copy for xchgl */
	xchgl %ecx,(%edx)			/* add to hash linked list */
	movl %ecx,G_next(%eax)			/* update in case list changed */

	popl %ecx				/* previous 1st arc searched */
	popl %edi				/* previous 2nd arc searched */
	movl %eax,H_cache_ptr+0(%esi)		/* this element is now 1st arc */
	movl %ecx,H_cache_ptr+4(%esi)		/* new 2nd arc to be searched */
	movl %edi,H_cache_ptr+8(%esi)		/* new 3rd arc to be searched */

	popl %edi
	popl %esi

#if DO_STATS
	MP_ENABLE_PREEMPTION(%ebx)
	popl %ebx
#endif

	LEAVE0
	ret					/* return to user */

END(_gprof_mcount)
1295 | ||
1296 | \f | |
1297 | /* | |
 1298 | * This function assumes that neither the caller nor its caller | |
1299 | * has omitted the frame pointer, in order to get the caller's | |
1300 | * caller. The stack looks like the following at the time of the call: | |
1301 | * | |
1302 | * +-------------------------------+ | |
1303 | * | | | |
1304 | * | | | |
1305 | * | caller's caller stack, | | |
1306 | * | saved registers, params. | | |
1307 | * | | | |
1308 | * | | | |
1309 | * +-------------------------------+ | |
1310 | * | caller's caller return addr. | | |
1311 | * +-------------------------------+ | |
1312 | * fp --> | previous frame pointer | | |
1313 | * +-------------------------------+ | |
1314 | * | | | |
1315 | * | caller's stack, saved regs, | | |
1316 | * | params. | | |
1317 | * | | | |
1318 | * +-------------------------------+ | |
1319 | * sp --> | caller's return address | | |
1320 | * +-------------------------------+ | |
1321 | * | |
1322 | * Recent versions of the compiler put the address of the pointer | |
1323 | * sized word in %edx. Previous versions did not, but this code | |
1324 | * does not support them. | |
1325 | */ | |
1326 | ||
1327 | /* | |
1328 | * Note that OSF/rose blew defining _mcount, since it prepends leading | |
1329 | * underscores, and _mcount didn't have a second leading underscore. However, | |
1330 | * some of the kernel/server functions 'know' that mcount has a leading | |
1331 | * underscore, so we satisfy both camps. | |
1332 | */ | |
1333 | ||
#if OLD_MCOUNT
/*
 * Compatibility entry for the historical 'mcount'/'_mcount' names (see the
 * OSF/rose note above).  Requires the caller chain to keep frame pointers:
 * the caller's caller return address is fetched through 4(%ebp).  It rebuilds
 * the two-address argument layout expected by the real handler and tail-jumps
 * through *mcount_ptr, so the handler returns directly to the caller.
 */
	.globl mcount
	.globl _mcount
	ELF_FUNC(mcount)
	ELF_FUNC(_mcount)
	.align FALIGN
_mcount:
mcount:

	pushl %ebx
	MP_DISABLE_PREEMPTION(%ebx)
	Vload					/* %ebx = profile vars address */

#if DO_STATS
	SDINC(V_old_mcount(%ebx))		/* count old-style entries */
#endif

	/* In calling the functions, we will actually leave 1 extra word on the */
	/* top of the stack, but generated code will not notice, since the function */
	/* uses a frame pointer */

	movl V_mcount_ptr_ptr(%ebx),%ecx	/* address of mcount_ptr */
	MP_ENABLE_PREEMPTION(%ebx)
	popl %ebx
	movl 4(%ebp),%eax			/* caller's caller return address */
	xchgl %eax,(%esp)			/* push it & get caller's return addr */
	pushl %eax				/* push return address */
	jmp *(%ecx)				/* tail-jump to the real handler */

End(mcount)
End(_mcount)
#endif
1366 | ||
1367 | \f | |
1368 | #if !defined(KERNEL) && !defined(MACH_KERNEL) | |
1369 | ||
1370 | /* | |
1371 | * Convert a 64-bit integer to a string. | |
1372 | * Arg #1 is a pointer to a string (at least 24 bytes) or NULL | |
1373 | * Arg #2 is the low part of the 64-bit integer. | |
1374 | * Arg #3 is the high part of the 64-bit integer. | |
1375 | */ | |
1376 | ||
/*
 * Convert a 64-bit unsigned integer to a decimal string.
 *   Arg #1: pointer to a buffer (at least N_digit bytes) or NULL to use the
 *           per-vars scratch buffer V_num_buffer.
 *   Arg #2: low 32 bits of the value.
 *   Arg #3: high 32 bits (only consulted when OVERFLOW is configured).
 * Returns in %eax a pointer to the first digit of the NUL-terminated string,
 * built backwards from the end of the buffer by repeated division by 10.
 * NOTE(review): MP_ENABLE_PREEMPTION runs in the epilogue on every path, but
 * MP_DISABLE_PREEMPTION runs only on the NULL-buffer path — verify the
 * macros tolerate this imbalance (or that callers always pass NULL here).
 */
Entry(_profile_cnt_to_decimal)
	ENTER
	pushl %ebx
	pushl %esi
	pushl %edi
	movl Estack+16(%esp),%ebx		/* pointer or null */
	movl Estack+20(%esp),%edi		/* low part of number */
	movl $10,%ecx				/* divisor */
	cmpl $0,%ebx				/* skip if pointer ok */
	jne LCL(cvt_nonnull)

	MP_DISABLE_PREEMPTION(%ebx)
	Vload					/* get _profile_vars address */
	leal V_num_buffer(%ebx),%ebx		/* temp buffer to use */

	.align ALIGN
LCL(cvt_nonnull):
	addl $(N_digit-1),%ebx			/* point string at end */
	movb $0,0(%ebx)				/* null terminate string */

#if OVERFLOW
	movl Estack+24(%esp),%esi		/* high part of number */
	cmpl $0,%esi				/* anything left in high part? */
	je LCL(cvt_low)

	/* 64-bit step: divide high word, then divide (remainder:low) so that */
	/* %edx ends with (value % 10) and %esi:%edi with (value / 10).       */
	.align ALIGN
LCL(cvt_high):
	movl %esi,%eax				/* calculate high/10 & high%10 */
	xorl %edx,%edx
	divl %ecx
	movl %eax,%esi

	movl %edi,%eax				/* calculate (low + (high%10)*2^32) / 10 */
	divl %ecx
	movl %eax,%edi

	decl %ebx				/* decrement string pointer */
	addl $48,%edx				/* convert from 0..9 -> '0'..'9' */
	movb %dl,0(%ebx)			/* store digit in string */
	cmpl $0,%esi				/* anything left in high part? */
	jne LCL(cvt_high)

#endif	/* OVERFLOW */

	.align ALIGN
LCL(cvt_low):
	movl %edi,%eax				/* get low part into %eax */

	.align ALIGN
LCL(cvt_low2):
	xorl %edx,%edx				/* zero-extend dividend for divl */
	divl %ecx				/* calculate next digit */
	decl %ebx				/* decrement string pointer */
	addl $48,%edx				/* convert from 0..9 -> '0'..'9' */
	movb %dl,0(%ebx)			/* store digit in string */
	cmpl $0,%eax				/* any more digits to convert? */
	jne LCL(cvt_low2)

	movl %ebx,%eax				/* return value: first digit */
	popl %edi
	popl %esi
	MP_ENABLE_PREEMPTION(%ebx)
	popl %ebx
	LEAVE0
	ret

END(_profile_cnt_to_decimal)
1444 | ||
1445 | #endif |