/*
 * Copyright (c) 2012-2013, 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


/*
 * Corpses Overview
 * ================
 *
 * A corpse is the state of a process past the point of its death. This means the process has
 * completed all its termination operations, like releasing file descriptors, mach ports, sockets and
 * other constructs used to identify a process. To the rest of the system, this mimics the behavior of
 * a process that has died and is no longer available by any means.
 *
 * Why do we need Corpses?
 * -----------------------
 * For crash inspection we need to inspect the state and data associated with a process so that the
 * crash reporting infrastructure can build backtraces, find leaks etc. For example a crash
 *
 * Corpses functionality in kernel
 * ===============================
 * The corpse functionality is an extension of the existing exception reporting mechanisms. The
 * exception_triage calls try to deliver the first round of exceptions, allowing
 * task/debugger/ReportCrash/launchd level exception handlers to respond to the exception. If the
 * exception is still not handled after notification, the process begins its death operations and,
 * during proc_prepareexit, we decide whether to create a corpse for inspection. The following is a
 * sample run-through of the events and data shuffling that happen when corpses are enabled.
 *
 * * A process causes an exception during normal execution of its threads.
 * * The exception, generated on either the mach (e.g. GUARDED_MACHPORT) or bsd (e.g. SIGABRT,
 *   GUARDED_FD etc.) side, is passed through the exception_triage() function to follow the
 *   thread -> task -> host level exception handling system. This set of steps is the same as before
 *   and allows existing crash reporting systems (both internal and third party) to catch and create
 *   reports as required.
 * * If the above exception handling fails (nobody handles the notification), then the
 *   proc_prepareexit path has logic to decide whether to create a corpse.
 * * The task_mark_corpse function allocates userspace vm memory and attaches the kcdata_descriptor_t
 *   information to the task->corpse_info field of the task.
 *   - All the task's threads are marked with the "inspection" flag, which signals the termination
 *     daemon not to reap them but to hold them until they have been inspected.
 *   - The task's t_flags reflect the corpse bit and also a PENDING_CORPSE bit. PENDING_CORPSE
 *     prevents task_terminate from stripping important data from the task.
 *   - It marks all the threads to terminate and return to AST for termination.
 *   - The allocation logic takes into account the rate-limiting policy of allowing only
 *     TOTAL_CORPSES_ALLOWED corpses in flight.
 * * The proc exit thread continues and collects the required information in the allocated vm region.
 *   Once complete, it marks itself for termination.
 * * In thread_terminate_self(), the last thread to enter will make a call to proc_exit().
 *   Following this is a check to see if the task is marked for corpse notification and, if so, it
 *   will invoke task_deliver_crash_notification().
 * * Once EXC_CORPSE_NOTIFY is delivered, the PENDING_CORPSE flag is removed from the task (and the
 *   inspection flag from all its threads), allowing task_terminate to go ahead and continue
 *   the mach task termination process.
 * * ASIDE: The rest of the threads that reach thread_terminate_daemon() with the inspection flag
 *   set are just bounced to another holding queue (crashed_threads_queue). Only after the corpse
 *   notification are these pulled out of the holding queue and enqueued back onto the termination
 *   queue.
 *
 *
 * Corpse info format
 * ==================
 * The kernel (task_mark_corpse()) makes a vm allocation in the dead task's vm space (with tag
 * VM_MEMORY_CORPSEINFO (80)). Within this memory all corpse information is saved by various
 * subsystems, for example:
 * * the bsd proc exit path may write down the pid, parent pid, number of file descriptors etc.
 * * the mach side may append data regarding ledger usage, memory stats etc.
 * See detailed info about the memory structure and format in the kern_cdata.h documentation.
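 *
 * Purely as an illustration of how a consumer might walk that blob (this is a sketch, not code in
 * this file; it assumes the kcdata iterator helpers published through kcdata.h/libkdd, e.g.
 * kcdata_iter(), kcdata_iter_valid(), kcdata_iter_next(), kcdata_iter_type() and
 * kcdata_iter_payload(), whose exact signatures should be checked against that header):
 *
 *   kcdata_iter_t iter = kcdata_iter(mapped_corpse_info, mapped_size);
 *   for (; kcdata_iter_valid(iter); iter = kcdata_iter_next(iter)) {
 *       switch (kcdata_iter_type(iter)) {
 *       case TASK_CRASHINFO_PID:
 *           // pid payload lives at kcdata_iter_payload(iter)
 *           break;
 *       default:
 *           break;
 *       }
 *   }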
 *
 * Configuring Corpses functionality
 * =================================
 * boot-arg: -no_corpses disables corpse generation. This can be added/removed without affecting
 * any other subsystem.
 * TOTAL_CORPSES_ALLOWED: (recompilation required) - Changing this number controls how many corpse
 * instances can be held for inspection before allowing memory to be reclaimed by the system.
 * CORPSEINFO_ALLOCATION_SIZE: the default size of the vm allocation. If in the future there is much
 * more data to be put in, then please re-tune this parameter.
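 *
 * As an illustration (hypothetical values, not recommendations), the boot-args parsed by
 * corpses_init() below could be supplied as:
 *   -no_corpses                  (turn corpse generation off entirely)
 *   exc_via_corpse_forking=0     (disable corpse forking for EXC_RESOURCE)
 *   corpse_for_fatal_memkill=0   (disable corpses for fatal high-watermark kills)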
 *
 * Debugging/Visibility
 * ====================
 * * lldbmacros for thread and task summary are updated to show a "C" flag for corpse tasks/threads.
 * * There are macros to see the list of threads in the termination queue (dumpthread_terminate_queue)
 *   and the holding queue (dumpcrashed_thread_queue).
 * * If corpse creation is disabled or ignored, then the system log is updated via printf with the
 *   reason.
 *
 * Limitations of Corpses
 * ======================
 * Holding memory for inspection creates vm pressure, which might not be desirable on low-memory
 * devices. There is a limit on the number of corpses being inspected at a time, set by
 * TOTAL_CORPSES_ALLOWED.
 *
 */


#include <stdatomic.h>
#include <kern/assert.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <corpses/task_corpse.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <mach/mach_vm.h>
#include <kern/exc_guard.h>
#include <os/log.h>

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

/*
 * Exported interfaces
 */
#include <mach/task_server.h>

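/*
 * Gate that rate-limits corpse creation: the two counters are packed into a
 * single 32-bit value so that task_crashinfo_get_ref()/task_crashinfo_release_ref()
 * below can update the pair with one atomic compare-and-swap on 'value'.
 *   corpses     - corpses currently in flight (capped at TOTAL_CORPSES_ALLOWED)
 *   user_faults - in-flight user-fault guard corpses (capped at TOTAL_USER_FAULTS_ALLOWED)
 */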
union corpse_creation_gate {
	struct {
		uint16_t user_faults;
		uint16_t corpses;
	};
	uint32_t value;
};

static _Atomic uint32_t inflight_corpses;
unsigned long total_corpses_created = 0;
boolean_t corpse_enabled_config = TRUE;

/* bootarg to generate corpse with size up to max_footprint_mb */
boolean_t corpse_threshold_system_limit = FALSE;

/* bootarg to turn on corpse forking for EXC_RESOURCE */
int exc_via_corpse_forking = 1;

/* bootarg to generate corpse for fatal high memory watermark violation */
int corpse_for_fatal_memkill = 1;

#ifdef __arm__
static inline int
IS_64BIT_PROCESS(__unused void *p)
{
	return 0;
}
#else
extern int IS_64BIT_PROCESS(void *);
#endif /* __arm__ */
extern void gather_populate_corpse_crashinfo(void *p, task_t task,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, void *reason);
extern void *proc_find(int pid);
extern int proc_rele(void *p);


void
corpses_init()
{
	char temp_buf[20];
	int exc_corpse_forking;
	int fatal_memkill;
	if (PE_parse_boot_argn("-no_corpses", temp_buf, sizeof(temp_buf))) {
		corpse_enabled_config = FALSE;
	}
	if (PE_parse_boot_argn("exc_via_corpse_forking", &exc_corpse_forking, sizeof(exc_corpse_forking))) {
		exc_via_corpse_forking = exc_corpse_forking;
	}
	if (PE_parse_boot_argn("corpse_for_fatal_memkill", &fatal_memkill, sizeof(fatal_memkill))) {
		corpse_for_fatal_memkill = fatal_memkill;
	}
#if DEBUG || DEVELOPMENT
	if (PE_parse_boot_argn("-corpse_threshold_system_limit", &corpse_threshold_system_limit, sizeof(corpse_threshold_system_limit))) {
		corpse_threshold_system_limit = TRUE;
	}
#endif /* DEBUG || DEVELOPMENT */
}

/*
 * Routine: corpses_enabled
 * returns FALSE if not enabled
 */
boolean_t
corpses_enabled()
{
	return corpse_enabled_config;
}

unsigned long
total_corpses_count(void)
{
	union corpse_creation_gate gate;

	gate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	return gate.corpses;
}

extern char *proc_best_name(struct proc *);
extern int proc_pid(struct proc *);

/*
 * Routine: task_crashinfo_get_ref()
 *          Grab a slot for creating a corpse.
 * Returns: KERN_SUCCESS if the policy allows for creating a corpse.
 */
static kern_return_t
task_crashinfo_get_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;
	struct proc *p = (void *)current_proc();

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

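	/*
	 * Reserve a slot optimistically: compute the incremented counters in
	 * 'newgate' and publish them with a compare-and-swap on the packed value.
	 * If another thread raced us, the failed CAS reloads 'oldgate' and the loop
	 * retries; if either limit has already been reached, the reservation fails
	 * with KERN_RESOURCE_SHORTAGE.
	 */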
	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			if (newgate.user_faults++ >= TOTAL_USER_FAULTS_ALLOWED) {
				os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many faults %d\n",
				    proc_best_name(p), proc_pid(p), newgate.user_faults);
				return KERN_RESOURCE_SHORTAGE;
			}
		}
		if (newgate.corpses++ >= TOTAL_CORPSES_ALLOWED) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses);
			return KERN_RESOURCE_SHORTAGE;
		}

		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse allowed %d of %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses, TOTAL_CORPSES_ALLOWED);
			return KERN_SUCCESS;
		}
	}
}

/*
 * Routine: task_crashinfo_release_ref
 *          Release the slot used by the corpse.
 */
static kern_return_t
task_crashinfo_release_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			if (newgate.user_faults-- == 0) {
				panic("corpse in flight count over-release");
			}
		}
		if (newgate.corpses-- == 0) {
			panic("corpse in flight count over-release");
		}
		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "Corpse released, count at %d\n", newgate.corpses);
			return KERN_SUCCESS;
		}
	}
}


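/*
 * Routine: task_crashinfo_alloc_init
 *          Allocate and initialize a kcdata descriptor for corpse crash info at
 *          crash_data_p. If CORPSE_CRASHINFO_HAS_REF is set in kc_u_flags, a
 *          corpse-gate slot is reserved first (and released again on failure).
 * Returns: the kcdata descriptor, or NULL on failure.
 */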
kcdata_descriptor_t
task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size,
    corpse_flags_t kc_u_flags, unsigned kc_flags)
{
	kcdata_descriptor_t kcdata;

	if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
		if (KERN_SUCCESS != task_crashinfo_get_ref(kc_u_flags)) {
			return NULL;
		}
	}

	kcdata = kcdata_memory_alloc_init(crash_data_p, TASK_CRASHINFO_BEGIN, size,
	    kc_flags);
	if (kcdata) {
		kcdata->kcd_user_flags = kc_u_flags;
	} else if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
		task_crashinfo_release_ref(kc_u_flags);
	}
	return kcdata;
}


/*
 * Free up the memory associated with task_crashinfo_data
 */
kern_return_t
task_crashinfo_destroy(kcdata_descriptor_t data)
{
	if (!data) {
		return KERN_INVALID_ARGUMENT;
	}
	if (data->kcd_user_flags & CORPSE_CRASHINFO_HAS_REF) {
		task_crashinfo_release_ref(data->kcd_user_flags);
	}
	return kcdata_memory_destroy(data);
}

/*
 * Routine: task_get_corpseinfo
 * params: task - task which has corpse info setup.
 * returns: crash info data attached to task.
 *          NULL if task is null or has no corpse info
 */
kcdata_descriptor_t
task_get_corpseinfo(task_t task)
{
	kcdata_descriptor_t retval = NULL;
	if (task != NULL) {
		retval = task->corpse_info;
	}
	return retval;
}

/*
 * Routine: task_add_to_corpse_task_list
 * params: corpse_task - task to be added to corpse task list
 * returns: None.
 */
void
task_add_to_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_enter(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_remove_from_corpse_task_list
 * params: corpse_task - task to be removed from corpse task list
 * returns: None.
 */
void
task_remove_from_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_remove(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_purge_all_corpses
 * params: None.
 * returns: None.
 */
void
task_purge_all_corpses(void)
{
	task_t task;

	printf("Purging corpses......\n\n");

	lck_mtx_lock(&tasks_corpse_lock);
	/* Iterate through all the corpse tasks and clear all map entries */
	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		vm_map_remove(task->map,
		    task->map->min_offset,
		    task->map->max_offset,
		    /*
		     * Final cleanup:
		     * + no unnesting
		     * + remove immutable mappings
		     * + allow gaps in the range
		     */
		    (VM_MAP_REMOVE_NO_UNNESTING |
		    VM_MAP_REMOVE_IMMUTABLE |
		    VM_MAP_REMOVE_GAPS_OK));
	}

	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_generate_corpse
 * params: task - task to fork a corpse
 *         corpse_task - task port of the generated corpse
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED if corpses are disabled.
 *          KERN_RESOURCE_SHORTAGE on memory allocation failure or when the max corpse count is reached.
 */
kern_return_t
task_generate_corpse(
	task_t task,
	ipc_port_t *corpse_task_port)
{
	task_t new_task;
	kern_return_t kr;
	thread_t thread, th_iter;
	ipc_port_t corpse_port;
	ipc_port_t old_notify;

	if (task == kernel_task || task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);
	if (task_is_a_corpse_fork(task)) {
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	task_unlock(task);

	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread, 0, 0, 0, NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}

	/* wait for all the threads in the task to terminate */
	task_lock(new_task);
	task_wait_till_threads_terminate_locked(new_task);

	/* Reset thread ports of all the threads in task */
	queue_iterate(&new_task->threads, th_iter, thread_t, task_threads)
	{
		/* Do not reset the thread port for inactive threads */
		if (th_iter->corpse_dup == FALSE) {
			ipc_thread_reset(th_iter);
		}
	}
	task_unlock(new_task);

	/* transfer the task ref to port and arm the no-senders notification */
	corpse_port = convert_task_to_port(new_task);
	assert(IP_NULL != corpse_port);

	ip_lock(corpse_port);
	require_ip_active(corpse_port);
	ipc_port_nsrequest(corpse_port, corpse_port->ip_mscount, ipc_port_make_sonce_locked(corpse_port), &old_notify);
	/* port unlocked */

	assert(IP_NULL == old_notify);
	*corpse_task_port = corpse_port;
	return KERN_SUCCESS;
}
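
/*
 * Illustrative only: a userspace inspector would reach the routine above through
 * the MIG-generated task_generate_corpse() call and then map the corpse blob with
 * task_map_corpse_info_64() further below. Sketch under the assumption that the
 * generated prototypes match those in <mach/task.h> (check that header before use):
 *
 *   mach_port_t corpse = MACH_PORT_NULL;
 *   kern_return_t kr = task_generate_corpse(task, &corpse);
 *   if (kr == KERN_SUCCESS) {
 *       mach_vm_address_t addr = 0;
 *       mach_vm_size_t size = 0;
 *       kr = task_map_corpse_info_64(mach_task_self(), corpse, &addr, &size);
 *       // ... walk the kcdata blob at [addr, addr + size) ...
 *   }
 */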

/*
 * Routine: task_enqueue_exception_with_corpse
 * params: task - task to generate a corpse and enqueue it
 *         etype - EXC_RESOURCE or EXC_GUARD
 *         code - exception code to be enqueued
 *         codeCnt - code array count - code and subcode
 *
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments passed.
 *          KERN_NOT_SUPPORTED if corpses are disabled.
 *          KERN_RESOURCE_SHORTAGE on memory allocation failure or when the max corpse count is reached.
 */
kern_return_t
task_enqueue_exception_with_corpse(
	task_t task,
	exception_type_t etype,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	kern_return_t kr;

	if (codeCnt < 2) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread,
	    etype, code[0], code[1], reason);
	if (kr == KERN_SUCCESS) {
		if (thread == THREAD_NULL) {
			return KERN_FAILURE;
		}
		assert(new_task != TASK_NULL);
		assert(etype == EXC_RESOURCE || etype == EXC_GUARD);
		thread_exception_enqueue(new_task, thread, etype);
	}
	return kr;
}

/*
 * Routine: task_generate_corpse_internal
 * params: task - task to fork a corpse
 *         corpse_task - task of the generated corpse
 *         exc_thread - equivalent thread in corpse enqueuing exception
 *         etype - EXC_RESOURCE or EXC_GUARD or 0
 *         code - mach exception code to be passed in corpse blob
 *         subcode - mach exception subcode to be passed in corpse blob
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED if corpses are disabled.
 *          KERN_RESOURCE_SHORTAGE on memory allocation failure or when the max corpse count is reached.
 */
kern_return_t
task_generate_corpse_internal(
	task_t task,
	task_t *corpse_task,
	thread_t *exc_thread,
	exception_type_t etype,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	thread_t thread_next = THREAD_NULL;
	kern_return_t kr;
	struct proc *p = NULL;
	int is_64bit_addr;
	int is_64bit_data;
	int t_flags;
	uint64_t *udata_buffer = NULL;
	int size = 0;
	int num_udata = 0;
	corpse_flags_t kc_u_flags = CORPSE_CRASHINFO_HAS_REF;

#if CONFIG_MACF
	struct label *label = NULL;
#endif

	if (!corpses_enabled()) {
		return KERN_NOT_SUPPORTED;
	}

	if (etype == EXC_GUARD && EXC_GUARD_DECODE_GUARD_TYPE(code) == GUARD_TYPE_USER) {
		kc_u_flags |= CORPSE_CRASHINFO_USER_FAULT;
	}

	kr = task_crashinfo_get_ref(kc_u_flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Having a task reference does not guarantee a proc reference */
	p = proc_find(task_pid(task));
	if (p == NULL) {
		kr = KERN_INVALID_TASK;
		goto error_task_generate_corpse;
	}

	is_64bit_addr = IS_64BIT_PROCESS(p);
	is_64bit_data = (task == TASK_NULL) ? is_64bit_addr : task_get_64bit_data(task);
	t_flags = TF_CORPSE_FORK |
	    TF_PENDING_CORPSE |
	    TF_CORPSE |
	    (is_64bit_addr ? TF_64B_ADDR : TF_NONE) |
	    (is_64bit_data ? TF_64B_DATA : TF_NONE);

#if CONFIG_MACF
	/* Create the corpse label credentials from the process. */
	label = mac_exc_create_label_for_proc(p);
#endif

	/* Create a task for corpse */
	kr = task_create_internal(task,
	    NULL,
	    TRUE,
	    is_64bit_addr,
	    is_64bit_data,
	    t_flags,
	    TPF_NONE,
	    TWF_NONE,
	    &new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* Create and copy threads from task, returns a ref to thread */
	kr = task_duplicate_map_and_threads(task, p, new_task, &thread,
	    &udata_buffer, &size, &num_udata);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	kr = task_collect_crash_info(new_task,
#if CONFIG_MACF
	    label,
#endif
	    TRUE);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* transfer our references to the corpse info */
	assert(new_task->corpse_info->kcd_user_flags == 0);
	new_task->corpse_info->kcd_user_flags = kc_u_flags;
	kc_u_flags = 0;

	kr = task_start_halt(new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* terminate the ipc space */
	ipc_space_terminate(new_task->itk_space);

	/* Populate the corpse blob, use the proc struct of task instead of corpse task */
	gather_populate_corpse_crashinfo(p, new_task,
	    code, subcode, udata_buffer, num_udata, reason);

	/* Add it to global corpse task list */
	task_add_to_corpse_task_list(new_task);

	*corpse_task = new_task;
	*exc_thread = thread;

error_task_generate_corpse:
#if CONFIG_MACF
	if (label) {
		mac_exc_free_label(label);
	}
#endif

	/* Release the proc reference */
	if (p != NULL) {
		proc_rele(p);
	}

	if (kr != KERN_SUCCESS) {
		if (thread != THREAD_NULL) {
			thread_deallocate(thread);
		}
		if (new_task != TASK_NULL) {
			task_lock(new_task);
			/* Terminate all the other threads in the task. */
			queue_iterate(&new_task->threads, thread_next, thread_t, task_threads)
			{
				thread_terminate_internal(thread_next, TH_TERMINATE_OPTION_NONE);
			}
			/* wait for all the threads in the task to terminate */
			task_wait_till_threads_terminate_locked(new_task);
			task_unlock(new_task);

			task_clear_corpse(new_task);
			task_terminate_internal(new_task);
			task_deallocate(new_task);
		}
		if (kc_u_flags) {
			task_crashinfo_release_ref(kc_u_flags);
		}
	}
	/* Free the udata buffer allocated in task_duplicate_map_and_threads */
	if (udata_buffer != NULL) {
		kheap_free(KHEAP_DATA_BUFFERS, udata_buffer, size);
	}

	return kr;
}

/*
 * Routine: task_map_corpse_info
 * params: task - task into whose address space the corpse info is mapped
 *         corpse_task - task port of the corpse
 *         kcd_addr_begin - address of the mapped corpse info
 *         kcd_size - size of the mapped corpse info
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 * Note: Temporary function, will be deleted soon.
 */
kern_return_t
task_map_corpse_info(
	task_t task,
	task_t corpse_task,
	vm_address_t *kcd_addr_begin,
	uint32_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_address_t kcd_addr_begin_64;
	mach_vm_size_t size_64;

	kr = task_map_corpse_info_64(task, corpse_task, &kcd_addr_begin_64, &size_64);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*kcd_addr_begin = (vm_address_t)kcd_addr_begin_64;
	*kcd_size = (uint32_t) size_64;
	return KERN_SUCCESS;
}

/*
 * Routine: task_map_corpse_info_64
 * params: task - task into whose address space the corpse info is mapped
 *         corpse_task - task port of the corpse
 *         kcd_addr_begin - address of the mapped corpse info (takes mach_vm_address_t *)
 *         kcd_size - size of the mapped corpse info (takes mach_vm_size_t *)
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 */
kern_return_t
task_map_corpse_info_64(
	task_t task,
	task_t corpse_task,
	mach_vm_address_t *kcd_addr_begin,
	mach_vm_size_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_offset_t crash_data_ptr = 0;
	const mach_vm_size_t size = CORPSEINFO_ALLOCATION_SIZE;
	void *corpse_info_kernel = NULL;

	if (task == TASK_NULL || task_is_a_corpse_fork(task)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (corpse_task == TASK_NULL || !task_is_a_corpse(corpse_task) ||
	    kcdata_memory_get_begin_addr(corpse_task->corpse_info) == NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	corpse_info_kernel = kcdata_memory_get_begin_addr(corpse_task->corpse_info);
	kr = mach_vm_allocate_kernel(task->map, &crash_data_ptr, size,
	    VM_FLAGS_ANYWHERE, VM_MEMORY_CORPSEINFO);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	copyout(corpse_info_kernel, (user_addr_t)crash_data_ptr, (size_t)size);
	*kcd_addr_begin = crash_data_ptr;
	*kcd_size = size;

	return KERN_SUCCESS;
}

uint64_t
task_corpse_get_crashed_thread_id(task_t corpse_task)
{
	return corpse_task->crashed_thread_id;
}