/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#include <mach_prof.h>

#include <mach/task_server.h>
#include <mach/thread_act_server.h>

#if MACH_PROF
#include <cpus.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/queue.h>
#include <kern/profile.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <ipc/ipc_space.h>
#include <machine/machparam.h>
#include <mach/prof.h>

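/* profile_thread_id is THREAD_NULL whenever the profile support thread is not running. */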
thread_t	profile_thread_id = THREAD_NULL;
int		profile_sample_count = 0;	/* Provided for looking at from kdb. */
extern kern_return_t task_suspend(task_t task);	/* ack */

/* Forwards */
prof_data_t	pbuf_alloc(void);
void		pbuf_free(
			prof_data_t	pbuf);
void		profile_thread(void);
void		send_last_sample_buf(
			prof_data_t	pbuf);

/*
 *****************************************************************************
 * profile_thread is the profile/trace kernel support thread.  It is started
 * by a server/user request through task_sample or thread_sample.  The profile
 * thread dequeues messages and sends them to the receive_prof thread, in the
 * server, via the send_samples and send_notices MIG interface functions.  If
 * there are no messages in the queue, the profile thread blocks until it is
 * awakened by profile (called from mach_clock) or by send_last_sample_buf
 * (called by thread_sample/task_sample).
 *****************************************************************************
 */

void
profile_thread(void)
{
	spl_t		s;
	buffer_t	buf_entry;
	queue_entry_t	prof_queue_entry;
	prof_data_t	pbuf;
	kern_return_t	kr;
	int		j;

	thread_swappable(current_act(), FALSE);

	/* Initialise the queue header for the prof_queue */
	mpqueue_init(&prof_queue);

	while (TRUE) {

		/* Dequeue the first buffer. */
		s = splsched();
		mpdequeue_head(&prof_queue, &prof_queue_entry);
		splx(s);

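		/*
		 * Nothing queued: sleep until profile() or send_last_sample_buf()
		 * wakes us with new sample buffers to send.
		 */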
		if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) {
			assert_wait((event_t) profile_thread, THREAD_UNINT);
			thread_block(THREAD_CONTINUE_NULL);
			if (current_thread()->wait_result != THREAD_AWAKENED)
				break;
		} else {
			int	dropped;

			pbuf = buf_entry->p_prof;
			kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
					  (mach_msg_type_number_t)buf_entry->p_index);
			profile_sample_count += buf_entry->p_index;
			if (kr != KERN_SUCCESS)
				printf("send_samples(%x, %x, %d) error %x\n",
				       pbuf->prof_port, buf_entry->p_zone,
				       buf_entry->p_index, kr);
			dropped = buf_entry->p_dropped;
			if (dropped > 0) {
				printf("kernel: profile dropped %d sample%s\n", dropped,
				       dropped == 1 ? "" : "s");
				buf_entry->p_dropped = 0;
			}

			/* Indicate you've finished the dirty job */
			buf_entry->p_full = FALSE;
			if (buf_entry->p_wakeme)
				thread_wakeup((event_t) &buf_entry->p_wakeme);
		}

	}
	/*
	 * The profile thread has been signalled to exit.  Any threads waiting
	 * for the last buffer of samples to be acknowledged should be woken
	 * up now.
	 */
	profile_thread_id = THREAD_NULL;
	while (1) {
		s = splsched();
		mpdequeue_head(&prof_queue, &prof_queue_entry);
		splx(s);
		if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF)
			break;
		if (buf_entry->p_wakeme)
			thread_wakeup((event_t) &buf_entry->p_wakeme);
	}
#if 0	/* XXXXX */
	thread_halt_self();
#else
	panic("profile_thread(): halt_self");
#endif	/* XXXXX */
}

/*
 *****************************************************************************
 * send_last_sample_buf is the drain mechanism that allows partially filled
 * profile buffers to be sent to the receive_prof thread in the server.
 *****************************************************************************
 */

void
send_last_sample_buf(prof_data_t pbuf)
{
	spl_t		s;
	buffer_t	buf_entry;

	if (pbuf == NULLPROFDATA)
		return;

	/*
	 * Ask for the sending of the last PC buffer.
	 * Make a request to the profile_thread by inserting
	 * the buffer in the send queue, and wake it up.
	 * The last buffer must be inserted at the head of the
	 * send queue, so the profile_thread handles it immediately.
	 */
	buf_entry = pbuf->prof_area + pbuf->prof_index;
	buf_entry->p_prof = pbuf;

	/*
	 * Watch out in case the profile thread exits while we are about to
	 * queue data for it.
	 */
	s = splsched();
	if (profile_thread_id == THREAD_NULL)
		splx(s);
	else {
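		/*
		 * Queue the partial buffer, wake the profile thread, and wait
		 * (via p_wakeme) until it acknowledges that the buffer has
		 * been sent.
		 */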
		buf_entry->p_wakeme = 1;
		mpenqueue_tail(&prof_queue, &buf_entry->p_list);
		thread_wakeup((event_t) profile_thread);
		assert_wait((event_t) &buf_entry->p_wakeme, THREAD_ABORTSAFE);
		splx(s);
		thread_block(THREAD_CONTINUE_NULL);
	}
}

/*
 *****************************************************************************
 * Add clock tick parameters to the profile/trace buffers.  Called from the
 * mach_clock hertz_tick function.  The DCI version stores thread, sp, and pc
 * values into the profile/trace buffers; the MACH_PROF version stores only
 * pc values.
 *****************************************************************************
 */

void
profile(natural_t	pc,
	prof_data_t	pbuf)
{
	natural_t	inout_val = pc;
	buffer_t	buf_entry;

	if (pbuf == NULLPROFDATA)
		return;

	/* Inserts the PC value in the buffer of the thread */
	set_pbuf_value(pbuf, &inout_val);
	switch ((int)inout_val) {
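	/*
	 * set_pbuf_value() reports the outcome through inout_val: 0 means the
	 * sample could not be stored (the current buffer is still full), 1
	 * means it was stored, and 2 means it was stored and the buffer is
	 * now full and ready to be sent.
	 */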
	case 0:
		if (profile_thread_id == THREAD_NULL) {
			reset_pbuf_area(pbuf);
		}
		break;
	case 1:
		/* Normal case, value successfully inserted */
		break;
	case 2:
		/*
		 * The value we have just inserted caused the
		 * buffer to be full, and ready to be sent.
		 * If profile_thread_id is null, the profile
		 * thread has been killed.  Since this generally
		 * happens only when the O/S server task of which
		 * it is a part is killed, it is not a great loss
		 * to throw away the data.
		 */
		if (profile_thread_id == THREAD_NULL) {
			reset_pbuf_area(pbuf);
			break;
		}

		buf_entry = (buffer_t) &pbuf->prof_area[pbuf->prof_index];
		buf_entry->p_prof = pbuf;
		mpenqueue_tail(&prof_queue, &buf_entry->p_list);

		/* Switch to another buffer */
		reset_pbuf_area(pbuf);

		/* Wake up the profile thread */
		if (profile_thread_id != THREAD_NULL)
			thread_wakeup((event_t) profile_thread);
		break;

	default:
		printf("profile : unexpected case\n");
	}
}

/*
 *****************************************************************************
 * pbuf_alloc creates a profile/trace buffer and the associated zones for
 * storing profiled items.
 *****************************************************************************
 */

prof_data_t
pbuf_alloc(void)
{
	register prof_data_t	pbuf;
	register int		i;
	register natural_t	*zone;

	pbuf = (prof_data_t)kalloc(sizeof(struct prof_data));
	if (!pbuf)
		return(NULLPROFDATA);
	pbuf->prof_port = MACH_PORT_NULL;
	for (i = 0; i < NB_PROF_BUFFER; i++) {
		zone = (natural_t *)kalloc(SIZE_PROF_BUFFER*sizeof(natural_t));
		if (!zone) {
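			/*
			 * Allocation failed part way through: free every
			 * buffer allocated so far, then the header itself.
			 */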
			while (i--)
				kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
				      SIZE_PROF_BUFFER*sizeof(natural_t));
			kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
			return(NULLPROFDATA);
		}
		pbuf->prof_area[i].p_zone = zone;
		pbuf->prof_area[i].p_full = FALSE;
	}
	return(pbuf);
}

/*
 *****************************************************************************
 * pbuf_free frees the memory allocated for storing profile/trace items.
 * Called when a task is no longer profiled/traced.  pbuf_free tears down the
 * memory allocated in pbuf_alloc.  It does not check whether the structures
 * are valid, since it is only called by functions in this file.
 *****************************************************************************
 */
void
pbuf_free(
	prof_data_t	pbuf)
{
	register int	i;

	if (pbuf->prof_port)
		ipc_port_release_send(pbuf->prof_port);

	for (i = 0; i < NB_PROF_BUFFER; i++)
		kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
		      SIZE_PROF_BUFFER*sizeof(natural_t));
	kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
}

#endif	/* MACH_PROF */

/*
 *****************************************************************************
 * thread_sample is used by MACH_PROF to profile a single thread; it is only
 * a stub in DCI.
 *****************************************************************************
 */

kern_return_t
thread_sample(
	thread_act_t	thr_act,
	ipc_port_t	reply)
{
	/*
	 * This routine is called every time a new thread has made
	 * a request for the sampling service.  We must keep track of the
	 * correspondence between its identity (thread) and the port
	 * we are going to use as a reply port to send out the samples
	 * resulting from its execution.
	 */
#if !MACH_PROF
	return KERN_FAILURE;
#else
	prof_data_t	pbuf;
	vm_offset_t	vmpbuf;

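	/*
	 * A non-null reply port turns profiling on for this activation;
	 * a null reply port turns it off.
	 */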
	if (reply != MACH_PORT_NULL) {
		if (thr_act->act_profiled)	/* yuck! */
			return KERN_INVALID_ARGUMENT;
		/* Start profiling this activation; do the initialization. */
		pbuf = pbuf_alloc();
		if ((thr_act->profil_buffer = pbuf) == NULLPROFDATA) {
			printf("thread_sample: cannot allocate pbuf\n");
			return KERN_RESOURCE_SHORTAGE;
		} else {
			if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
				printf("thread_sample: cannot set pbuf_nb\n");
				return KERN_FAILURE;
			}
			reset_pbuf_area(pbuf);
		}
		pbuf->prof_port = reply;
		thr_act->act_profiled = TRUE;
		thr_act->act_profiled_own = TRUE;
		if (profile_thread_id == THREAD_NULL)
			profile_thread_id = kernel_thread(kernel_task, profile_thread);
	} else {
		if (!thr_act->act_profiled)
			return(KERN_INVALID_ARGUMENT);

		thr_act->act_profiled = FALSE;
		/* Do not stop sampling if the thread is not profiled on its own. */
		if (!thr_act->act_profiled_own)
			return KERN_SUCCESS;
		else
			thr_act->act_profiled_own = FALSE;

		send_last_sample_buf(thr_act->profil_buffer);
		pbuf_free(thr_act->profil_buffer);
		thr_act->profil_buffer = NULLPROFDATA;
	}
	return KERN_SUCCESS;
#endif	/* MACH_PROF */
}

/*
 *****************************************************************************
 * task_sample is used to profile/trace a task -- all threads within the task
 * share a common profile buffer that collects the items generated by
 * hertz_tick.  For each profiled task, a profile buffer is created that
 * associates a reply port (used to send the data to a server thread), the
 * task (used for throttling), and a zone area (used to store the
 * profiled/traced items).
 *****************************************************************************
 */

kern_return_t
task_sample(
	task_t		task,
	ipc_port_t	reply)
{
#if !MACH_PROF
	return KERN_FAILURE;
#else
	prof_data_t	pbuf;
	vm_offset_t	vmpbuf;
	boolean_t	turnon = (reply != MACH_PORT_NULL);

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	pbuf = task->profil_buffer;

	if (turnon) {			/* Do we want to profile this task? */
		pbuf = pbuf_alloc();	/* allocate a profile buffer */
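		/*
		 * Allocate before taking the task lock; if the task turns out
		 * to be profiled already, the buffer is simply freed below.
		 */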
		task_lock(task);
		if (task->task_profiled) {	/* if it is already profiled, return */
			task_unlock(task);
			if (pbuf != NULLPROFDATA)
				pbuf_free(pbuf);
			return(KERN_INVALID_ARGUMENT);
		}
		if (pbuf == NULLPROFDATA) {
			task_unlock(task);
			return KERN_RESOURCE_SHORTAGE;	/* can't allocate a buffer, quit */
		}
		task->profil_buffer = pbuf;

		if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
			pbuf_free(pbuf);
			task_unlock(task);
			return KERN_FAILURE;
		}
		reset_pbuf_area(pbuf);
		pbuf->prof_port = reply;	/* assoc. buffer with reply port */
	} else {				/* We want to stop profiling/tracing */
		task_lock(task);
		if (!task->task_profiled) {	/* but this task is not being profiled */
			task_unlock(task);
			return(KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * At this point either turnon == FALSE && task_profiled == TRUE
	 * (we are stopping) or turnon == TRUE && task_profiled == FALSE
	 * (we are starting).
	 */

	if (turnon != task->task_profiled) {
		int		actual, i;
		thread_act_t	thr_act;

		if (turnon && profile_thread_id == THREAD_NULL)	/* 1st time thru? */
			profile_thread_id =		/* then start profile thread. */
				kernel_thread(kernel_task, profile_thread);
		task->task_profiled = turnon;
		actual = task->thr_act_count;
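		/*
		 * Propagate the new profiling state to every activation in the
		 * task, skipping those that asked to be profiled on their own
		 * (act_profiled_own).
		 */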
		for (i = 0, thr_act = (thread_act_t)queue_first(&task->thr_acts);
		     i < actual;
		     i++, thr_act = (thread_act_t)queue_next(&thr_act->thr_acts)) {
			if (!thr_act->act_profiled_own) {
				thr_act->act_profiled = turnon;
				if (turnon) {
					thr_act->profil_buffer = task->profil_buffer;
					thr_act->act_profiled = TRUE;
				} else {
					thr_act->act_profiled = FALSE;
					thr_act->profil_buffer = NULLPROFDATA;
				}
			}
		}
		if (!turnon) {		/* drain buffers and clean-up */
			send_last_sample_buf(task->profil_buffer);
			pbuf_free(task->profil_buffer);
			task->profil_buffer = NULLPROFDATA;
		}
	}

	task_unlock(task);
	return KERN_SUCCESS;
#endif	/* MACH_PROF */
}