/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#include <mach_prof.h>

#include <mach/task_server.h>
#include <mach/thread_act_server.h>

#if	MACH_PROF
#include <cpus.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/queue.h>
#include <kern/profile.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <ipc/ipc_space.h>
#include <machine/machparam.h>
#include <mach/prof.h>

thread_t	profile_thread_id = THREAD_NULL;
int		profile_sample_count = 0;	/* Provided for looking at from kdb. */
extern kern_return_t task_suspend(task_t task);	/* ack */

/* Forwards */
prof_data_t	pbuf_alloc(void);
void		pbuf_free(
			prof_data_t	pbuf);
void		profile_thread(void);
void		send_last_sample_buf(
			prof_data_t	pbuf);

/*
 *****************************************************************************
 * profile_thread is the profile/trace kernel support thread.  It is started
 * by a server/user request through task_sample or thread_sample.  The profile
 * thread dequeues messages and sends them to the receive_prof thread in the
 * server via the send_samples and send_notices MIG interface functions.  If
 * there are no messages in the queue, the profile thread blocks until wakened
 * by profile (called from mach_clock) or last_sample (called by
 * thread_sample/task_sample).
 *****************************************************************************
 */

void
profile_thread(void)
{
	spl_t		s;
	buffer_t	buf_entry;
	queue_entry_t	prof_queue_entry;
	prof_data_t	pbuf;
	kern_return_t	kr;
	int		j;

	thread_swappable(current_act(), FALSE);

	/* Initialise the queue header for the prof_queue */
	mpqueue_init(&prof_queue);

	while (TRUE) {

		/* Dequeue the first buffer. */
		s = splsched();
		mpdequeue_head(&prof_queue, &prof_queue_entry);
		splx(s);

		if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) {
			assert_wait((event_t) profile_thread, THREAD_UNINT);
			thread_block(THREAD_CONTINUE_NULL);
			if (current_thread()->wait_result != THREAD_AWAKENED)
				break;
		} else {
			int	dropped;

			pbuf = buf_entry->p_prof;
			kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
				(mach_msg_type_number_t)buf_entry->p_index);
			profile_sample_count += buf_entry->p_index;
			if (kr != KERN_SUCCESS)
				printf("send_samples(%x, %x, %d) error %x\n",
					pbuf->prof_port, buf_entry->p_zone,
					buf_entry->p_index, kr);
			dropped = buf_entry->p_dropped;
			if (dropped > 0) {
				printf("kernel: profile dropped %d sample%s\n", dropped,
					dropped == 1 ? "" : "s");
				buf_entry->p_dropped = 0;
			}

			/* Indicate you've finished the dirty job */
			buf_entry->p_full = FALSE;
			if (buf_entry->p_wakeme)
				thread_wakeup((event_t) &buf_entry->p_wakeme);
		}

	}
	/* The profile thread has been signalled to exit.  Any threads waiting
	   for the last buffer of samples to be acknowledged should be woken
	   up now. */
	profile_thread_id = THREAD_NULL;
	while (1) {
		s = splsched();
		mpdequeue_head(&prof_queue, &prof_queue_entry);
		splx(s);
		if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF)
			break;
		if (buf_entry->p_wakeme)
			thread_wakeup((event_t) &buf_entry->p_wakeme);
	}
#if 0	/* XXXXX */
	thread_halt_self();
#else
	panic("profile_thread(): halt_self");
#endif	/* XXXXX */
}
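
/*
 * Minimal usage sketch (illustrative only, not compiled): this is roughly how
 * the rest of this file drives profile_thread.  thread_sample() and
 * task_sample() below start it on first use, and profile() feeds it by
 * queueing a full buffer on prof_queue and waking it:
 *
 *	if (profile_thread_id == THREAD_NULL)
 *		profile_thread_id = kernel_thread(kernel_task, profile_thread);
 *	...
 *	mpenqueue_tail(&prof_queue, &buf_entry->p_list);
 *	thread_wakeup((event_t) profile_thread);
 */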

/*
 *****************************************************************************
 * send_last_sample is the drain mechanism that allows partially filled
 * profile buffers to be sent to the receive_prof thread in the server.
 *****************************************************************************
 */

void
send_last_sample_buf(prof_data_t pbuf)
{
	spl_t		s;
	buffer_t	buf_entry;

	if (pbuf == NULLPROFDATA)
		return;

	/* Ask for the sending of the last PC buffer.
	 * Make a request to the profile_thread by inserting
	 * the buffer in the send queue, and wake it up.
	 * The last buffer must be inserted at the head of the
	 * send queue, so the profile_thread handles it immediately.
	 */
	buf_entry = pbuf->prof_area + pbuf->prof_index;
	buf_entry->p_prof = pbuf;

	/*
	   Watch out in case the profile thread exits while we are about to
	   queue data for it.
	 */
	s = splsched();
	if (profile_thread_id == THREAD_NULL)
		splx(s);
	else {
		buf_entry->p_wakeme = 1;
		mpenqueue_tail(&prof_queue, &buf_entry->p_list);
		thread_wakeup((event_t) profile_thread);
		assert_wait((event_t) &buf_entry->p_wakeme, THREAD_ABORTSAFE);
		splx(s);
		thread_block(THREAD_CONTINUE_NULL);
	}
}


/*
 *****************************************************************************
 * Add clock tick parameters to the profile/trace buffers.  Called from the
 * mach_clock hertz_tick function.  The DCI version stores thread, sp, and pc
 * values into the profile/trace buffers; the MACH_PROF version stores only
 * pc values.
 *****************************************************************************
 */

void
profile(natural_t	pc,
	prof_data_t	pbuf)
{
	natural_t	inout_val = pc;
	buffer_t	buf_entry;

	if (pbuf == NULLPROFDATA)
		return;

	/* Inserts the PC value in the buffer of the thread */
	set_pbuf_value(pbuf, &inout_val);
	switch((int)inout_val) {
	case 0:
		if (profile_thread_id == THREAD_NULL) {
			reset_pbuf_area(pbuf);
		}
		break;
	case 1:
		/* Normal case, value successfully inserted */
		break;
	case 2:
		/*
		 * The value we have just inserted caused the
		 * buffer to be full, and ready to be sent.
		 * If profile_thread_id is null, the profile
		 * thread has been killed.  Since this generally
		 * happens only when the O/S server task of which
		 * it is a part is killed, it is not a great loss
		 * to throw away the data.
		 */
		if (profile_thread_id == THREAD_NULL) {
			reset_pbuf_area(pbuf);
			break;
		}

		buf_entry = (buffer_t) &pbuf->prof_area[pbuf->prof_index];
		buf_entry->p_prof = pbuf;
		mpenqueue_tail(&prof_queue, &buf_entry->p_list);

		/* Switch to another buffer */
		reset_pbuf_area(pbuf);

		/* Wake up the profile thread */
		if (profile_thread_id != THREAD_NULL)
			thread_wakeup((event_t) profile_thread);
		break;

	default:
		printf("profile : unexpected case\n");
	}
}
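
/*
 * Illustrative caller sketch (hypothetical; the real call site is in the
 * clock tick path, not in this file).  Per the header comment above, the
 * hertz_tick code is assumed to do roughly the following for a profiled
 * activation on each tick, where pc is the interrupted program counter:
 *
 *	if (thr_act->act_profiled && thr_act->profil_buffer != NULLPROFDATA)
 *		profile(pc, thr_act->profil_buffer);
 */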

/*
 *****************************************************************************
 * pbuf_alloc creates a profile/trace buffer and the associated zones for
 * storing profiled items.
 *****************************************************************************
 */

prof_data_t
pbuf_alloc(void)
{
	register prof_data_t	pbuf;
	register int		i;
	register natural_t	*zone;

	pbuf = (prof_data_t)kalloc(sizeof(struct prof_data));
	if (!pbuf)
		return(NULLPROFDATA);
	pbuf->prof_port = MACH_PORT_NULL;
	for (i=0; i< NB_PROF_BUFFER; i++) {
		zone = (natural_t *)kalloc(SIZE_PROF_BUFFER*sizeof(natural_t));
		if (!zone) {
			/* Free every zone allocated so far, then the header. */
			while (i--)
				kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
					SIZE_PROF_BUFFER*sizeof(natural_t));
			kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
			return(NULLPROFDATA);
		}
		pbuf->prof_area[i].p_zone = zone;
		pbuf->prof_area[i].p_full = FALSE;
	}
	pbuf->prof_port = MACH_PORT_NULL;
	return(pbuf);
}

/*
 *****************************************************************************
 * pbuf_free frees the memory allocated for storing profile/trace items.
 * Called when a task is no longer profiled/traced.  Pbuf_free tears down the
 * memory allocated in pbuf_alloc.  It does not check whether the structures
 * are valid, since it is only called by functions in this file.
 *****************************************************************************
 */
void
pbuf_free(
	prof_data_t	pbuf)
{
	register int	i;

	if (pbuf->prof_port)
		ipc_port_release_send(pbuf->prof_port);

	for(i=0; i < NB_PROF_BUFFER ; i++)
		kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
			SIZE_PROF_BUFFER*sizeof(natural_t));
	kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
}
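
/*
 * Buffer lifecycle sketch (illustrative only; it mirrors what thread_sample()
 * and task_sample() below actually do with a pbuf):
 *
 *	pbuf = pbuf_alloc();			allocate header + NB_PROF_BUFFER zones
 *	set_pbuf_nb(pbuf, NB_PROF_BUFFER-1);
 *	reset_pbuf_area(pbuf);
 *	pbuf->prof_port = reply;		samples are sent to this port
 *	...					profile() fills the zones on clock ticks
 *	send_last_sample_buf(pbuf);		drain the final, partially filled buffer
 *	pbuf_free(pbuf);
 */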

#endif	/* MACH_PROF */

/*
 *****************************************************************************
 * Thread_sample is used by MACH_PROF to profile a single thread; it is only
 * a stub in DCI.
 *****************************************************************************
 */

kern_return_t
thread_sample(
	thread_act_t	thr_act,
	ipc_port_t	reply)
{
	/*
	 * This routine is called every time that a new thread has made
	 * a request for the sampling service.  We must keep track of the
	 * correspondence between its identity (thread) and the port we are
	 * going to use as a reply port to send out the samples resulting
	 * from its execution.
	 */
#if	!MACH_PROF
	return KERN_FAILURE;
#else
	prof_data_t	pbuf;
	vm_offset_t	vmpbuf;

	if (reply != MACH_PORT_NULL) {
		if (thr_act->act_profiled)	/* yuck! */
			return KERN_INVALID_ARGUMENT;
		/* Start profiling this activation; do the initialization. */
		pbuf = pbuf_alloc();
		if ((thr_act->profil_buffer = pbuf) == NULLPROFDATA) {
			printf("thread_sample: cannot allocate pbuf\n");
			return KERN_RESOURCE_SHORTAGE;
		}
		else {
			if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
				printf("mach_sample_thread: cannot set pbuf_nb\n");
				return KERN_FAILURE;
			}
			reset_pbuf_area(pbuf);
		}
		pbuf->prof_port = reply;
		thr_act->act_profiled = TRUE;
		thr_act->act_profiled_own = TRUE;
		if (profile_thread_id == THREAD_NULL)
			profile_thread_id = kernel_thread(kernel_task, profile_thread);
	} else {
		if (!thr_act->act_profiled)
			return(KERN_INVALID_ARGUMENT);

		thr_act->act_profiled = FALSE;
		/* Do not stop sampling if the thread is not being profiled on its own. */

		if (!thr_act->act_profiled_own)
			return KERN_SUCCESS;
		else
			thr_act->act_profiled_own = FALSE;

		send_last_sample_buf(thr_act->profil_buffer);
		pbuf_free(thr_act->profil_buffer);
		thr_act->profil_buffer = NULLPROFDATA;
	}
	return KERN_SUCCESS;
#endif	/* MACH_PROF */
}
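
/*
 * Usage sketch (illustrative, as seen at the kernel-server side of the MIG
 * interface; it assumes a MACH_PROF kernel and a server task that implements
 * receive_prof on the reply port):
 *
 *	kr = thread_sample(thr_act, reply_port);	start sampling this activation
 *	...						samples arrive via send_samples()
 *	kr = thread_sample(thr_act, MACH_PORT_NULL);	stop, drain, and free the buffer
 */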

/*
 *****************************************************************************
 * Task_sample is used to profile/trace a task - all threads within the task
 * use a common profile buffer to collect items generated by the hertz_tick.
 * For each profiled task a profile buffer is created that associates a reply
 * port (used to send the data to a server thread), a task (used for
 * throttling), and a zone area (used to store the profiled/traced items).
 *****************************************************************************
 */

kern_return_t
task_sample(
	task_t		task,
	ipc_port_t	reply)
{
#if	!MACH_PROF
	return KERN_FAILURE;
#else
	prof_data_t	pbuf;
	vm_offset_t	vmpbuf;
	boolean_t	turnon;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;
	turnon = (reply != MACH_PORT_NULL);

	if (turnon)	/* Do we want to profile this task? */
	{
		pbuf = pbuf_alloc();	/* allocate a profile buffer */
		task_lock(task);
		if (task->task_profiled) {	/* if it is already profiled, just return */
			task_unlock(task);
			if (pbuf != NULLPROFDATA)
				pbuf_free(pbuf);
			return(KERN_INVALID_ARGUMENT);
		}
		if (pbuf == NULLPROFDATA) {
			task_unlock(task);
			return KERN_RESOURCE_SHORTAGE;	/* can't allocate a buffer, quit */
		}
		task->profil_buffer = pbuf;

		if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
			pbuf_free(pbuf);
			task_unlock(task);
			return KERN_FAILURE;
		}
		reset_pbuf_area(pbuf);
		pbuf->prof_port = reply;	/* associate the buffer with the reply port */
	} else {	/* We want to stop profiling/tracing */
		task_lock(task);
		if (!task->task_profiled) {	/* but this task is not being profiled */
			task_unlock(task);
			return(KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * At this point either turnon == FALSE && task_profiled == TRUE, or
	 * turnon == TRUE && task_profiled == FALSE.
	 */

	if (turnon != task->task_profiled) {
		int		actual, i;
		thread_act_t	thr_act;

		if (turnon && profile_thread_id == THREAD_NULL)	/* 1st time thru? */
			profile_thread_id =	/* then start the profile thread. */
				kernel_thread(kernel_task, profile_thread);
		task->task_profiled = turnon;
		actual = task->thr_act_count;
		for (i = 0, thr_act = (thread_act_t)queue_first(&task->thr_acts);
		     i < actual;
		     i++, thr_act = (thread_act_t)queue_next(&thr_act->thr_acts)) {
			if (!thr_act->act_profiled_own) {
				thr_act->act_profiled = turnon;
				if (turnon) {
					thr_act->profil_buffer = task->profil_buffer;
					thr_act->act_profiled = TRUE;
				} else {
					thr_act->act_profiled = FALSE;
					thr_act->profil_buffer = NULLPROFDATA;
				}
			}
		}
		if (!turnon) {	/* drain buffers and clean up */
			send_last_sample_buf(task->profil_buffer);
			pbuf_free(task->profil_buffer);
			task->profil_buffer = NULLPROFDATA;
		}
	}

	task_unlock(task);
	return KERN_SUCCESS;
#endif	/* MACH_PROF */
}
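
/*
 * Usage sketch (illustrative, kernel-server side of the MIG interface):
 *
 *	kr = task_sample(task, reply_port);		profile every activation in the
 *	...						task that is not separately profiled
 *	kr = task_sample(task, MACH_PORT_NULL);		stop, drain, and free the buffer
 */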