]> git.saurik.com Git - apple/libpthread.git/blob - src/pthread_cancelable.c
libpthread-454.100.8.tar.gz
[apple/libpthread.git] / src / pthread_cancelable.c
1 /*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44 /*
45 * MkLinux
46 */
47
48 /*
49 * POSIX Pthread Library
50 */
51
52 #include "resolver.h"
53 #include "internal.h"
54
55 #include <stdio.h> /* For printf(). */
56 #include <stdlib.h>
57 #include <errno.h> /* For __mach_errno_addr() prototype. */
58 #include <signal.h>
59 #include <sys/time.h>
60 #include <sys/resource.h>
61 #include <sys/sysctl.h>
62 #include <sys/queue.h>
63 #include <sys/ulock.h>
64 #include <machine/vmparam.h>
65 #include <mach/vm_statistics.h>
66
67 #ifndef BUILDING_VARIANT /* [ */
68
/*
 * Atomically replace the bits of thread->cancel_state selected by `mask`
 * with `state`, leaving all other bits untouched.
 *
 * Returns the full cancel_state value as it was before the update, so
 * callers can extract the previous state/type bits.
 *
 * Relaxed ordering suffices here: callers only need atomicity of the
 * read-modify-write, not ordering against other memory.
 */
OS_ALWAYS_INLINE
static inline int
_pthread_update_cancel_state(pthread_t thread, int mask, int state)
{
	uint16_t oldstate, newstate;
	os_atomic_rmw_loop(&thread->cancel_state, oldstate, newstate, relaxed, {
		newstate = oldstate;
		newstate &= ~mask;
		newstate |= state;
	});
	return oldstate;
}
81
/*
 * When a thread exits set the cancellation state to DISABLE and DEFERRED,
 * and mark it as exiting, so that late pthread_cancel() calls and pending
 * cancellation requests can no longer take effect on this thread.
 *
 * NOTE(review): value_ptr is currently unused here — presumably kept for
 * interface stability with the exit path; confirm against callers.
 */
void
_pthread_setcancelstate_exit(pthread_t thread, void *value_ptr)
{
	_pthread_update_cancel_state(thread,
	    _PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK,
	    PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED |
	    _PTHREAD_CANCEL_EXITING);
}
91
/*
 * Cancel a thread
 *
 * Returns ESRCH when `thread` is not a live pthread, ENOTSUP for
 * workqueue (dispatch worker) threads, 0 otherwise.  Actual delivery of
 * the cancellation is asynchronous and depends on the target's
 * cancelability state and type.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_cancel(pthread_t thread)
{
	if (!_pthread_is_valid(thread, NULL)) {
		return(ESRCH);
	}

	/* if the thread is a workqueue thread, then return error */
	if (thread->wqthread != 0) {
		return(ENOTSUP);
	}
	/*
	 * Mark the cancellation as pending first; the returned prior state
	 * tells us whether the target currently has cancellation enabled.
	 */
	int state = os_atomic_or(&thread->cancel_state, _PTHREAD_CANCEL_PENDING, relaxed);
	if (state & PTHREAD_CANCEL_ENABLE) {
		/*
		 * Ask the kernel to interrupt cancelable waits in the target.
		 * A null kport means the thread has no kernel port we can
		 * signal (nothing more to do; the pending bit is already set).
		 */
		mach_port_t kport = _pthread_tsd_slot(thread, MACH_THREAD_SELF);
		if (kport) __pthread_markcancel(kport);
	}
	return (0);
}
114
/*
 * Query/update the cancelability 'state' of a thread
 *
 * Sets the calling thread's cancel state to PTHREAD_CANCEL_ENABLE or
 * PTHREAD_CANCEL_DISABLE; optionally reports the previous state through
 * `oldstateptr`.  Returns EINVAL for any other `state` value.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_setcancelstate(int state, int *oldstateptr)
{
	pthread_t self = pthread_self();

	_pthread_validate_signature(self);

	/*
	 * Notify the kernel of the new state before publishing it in
	 * userspace.  NOTE(review): the arguments 1/2 appear to select
	 * enable/disable respectively — confirm against the syscall stub.
	 */
	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		__pthread_canceled(1);
		break;
	case PTHREAD_CANCEL_DISABLE:
		__pthread_canceled(2);
		break;
	default:
		/* only the two POSIX-defined states are valid */
		return EINVAL;
	}

	/* Atomically swap in the new state bits; report the old ones. */
	int oldstate = _pthread_update_cancel_state(self, _PTHREAD_CANCEL_STATE_MASK, state);
	if (oldstateptr) {
		*oldstateptr = oldstate & _PTHREAD_CANCEL_STATE_MASK;
	}
	return 0;
}
143
144 /*
145 * Query/update the cancelability 'type' of a thread
146 */
147 PTHREAD_NOEXPORT_VARIANT
148 int
149 pthread_setcanceltype(int type, int *oldtype)
150 {
151 pthread_t self = pthread_self();
152
153 _pthread_validate_signature(self);
154
155 if ((type != PTHREAD_CANCEL_DEFERRED) &&
156 (type != PTHREAD_CANCEL_ASYNCHRONOUS))
157 return EINVAL;
158 int oldstate = _pthread_update_cancel_state(self, _PTHREAD_CANCEL_TYPE_MASK, type);
159 if (oldtype) {
160 *oldtype = oldstate & _PTHREAD_CANCEL_TYPE_MASK;
161 }
162 return (0);
163 }
164
165
/*
 * Returns true when `thread` both has cancellation enabled and has a
 * cancellation request pending — i.e. the thread should act on the
 * cancellation at its next cancellation point.
 *
 * NOTE(review): the seq_cst load looks deliberate (this is read from
 * other threads, e.g. the joiner path) — confirm before relaxing.
 */
OS_ALWAYS_INLINE
static inline bool
_pthread_is_canceled(pthread_t thread)
{
	const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
	int state = os_atomic_load(&thread->cancel_state, seq_cst);
	return (state & flags) == flags;
}
174
175 OS_ALWAYS_INLINE
176 static inline void *
177 _pthread_get_exit_value(pthread_t thread)
178 {
179 if (os_unlikely(_pthread_is_canceled(thread))) {
180 return PTHREAD_CANCELED;
181 }
182 return thread->tl_exit_value;
183 }
184
185 void
186 pthread_testcancel(void)
187 {
188 pthread_t self = pthread_self();
189 if (os_unlikely(_pthread_is_canceled(self))) {
190 _pthread_validate_signature(self);
191 // 4597450: begin
192 self->canceled = true;
193 // 4597450: end
194 pthread_exit(PTHREAD_CANCELED);
195 }
196 }
197
198 void
199 _pthread_markcancel_if_canceled(pthread_t thread, mach_port_t kport)
200 {
201 if (os_unlikely(_pthread_is_canceled(thread))) {
202 __pthread_markcancel(kport);
203 }
204 }
205
206 void
207 _pthread_exit_if_canceled(int error)
208 {
209 if ((error & 0xff) == EINTR && __pthread_canceled(0) == 0) {
210 pthread_t self = pthread_self();
211
212 _pthread_validate_signature(self);
213 self->cancel_error = error;
214 self->canceled = true;
215 pthread_exit(PTHREAD_CANCELED);
216 }
217 }
218
219 int
220 pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
221 {
222 int err = 0;
223
224 if (__pthread_sigmask(how, set, oset) == -1) {
225 err = errno;
226 }
227 return(err);
228 }
229
/*
 * Called by the exiting thread's termination path to commit a pending
 * join.  Publishes the exit value to the joiner and either hands back
 * the custom-stack semaphore the joiner must be woken on, or — if the
 * thread is no longer joinable — detaches the join context.
 *
 * Returns MACH_PORT_NULL when no semaphore wake is needed.
 */
// called with _pthread_list_lock held
semaphore_t
_pthread_joiner_prepost_wake(pthread_t thread)
{
	pthread_join_context_t ctx = thread->tl_join_ctx;
	semaphore_t sema = MACH_PORT_NULL;

	if (thread->tl_joinable) {
		/* consume joinability; the join is now committed to succeed */
		sema = ctx->custom_stack_sema;
		thread->tl_joinable = false;
	} else {
		/* no longer joinable: sever the context so the joiner backs off */
		ctx->detached = true;
		thread->tl_join_ctx = NULL;
	}
	if (ctx->value_ptr) *ctx->value_ptr = _pthread_get_exit_value(thread);
	return sema;
}
247
/*
 * Attempt to tear down a join-in-progress because the joiner was
 * canceled.  Returns true if the wait was successfully aborted (the
 * thread becomes joinable by someone else and the joiner may act on its
 * cancellation); returns false when the target already committed the
 * join (prepost happened or the exit gate is dead), in which case the
 * join must be allowed to complete.
 */
static inline bool
_pthread_joiner_abort_wait(pthread_t thread, pthread_join_context_t ctx)
{
	bool aborted = false;

	_pthread_lock_lock(&_pthread_list_lock);
	if (!ctx->detached && thread->tl_exit_gate != MACH_PORT_DEAD) {
		/*
		 * _pthread_joiner_prepost_wake() didn't happen
		 * allow another thread to join
		 */
		PTHREAD_DEBUG_ASSERT(thread->tl_join_ctx == ctx);
		thread->tl_join_ctx = NULL;
		thread->tl_exit_gate = MACH_PORT_NULL;
		aborted = true;
	}
	_pthread_lock_unlock(&_pthread_list_lock);
	return aborted;
}
267
/*
 * Block until `thread` exits (its tl_exit_gate becomes MACH_PORT_DEAD),
 * then tear down the join context and, if this joiner is responsible,
 * deallocate the thread.  `conforming` selects whether the wait itself
 * is a cancellation point.
 */
static int
_pthread_joiner_wait(pthread_t thread, pthread_join_context_t ctx,
		pthread_conformance_t conforming)
{
	uint32_t *exit_gate = &thread->tl_exit_gate;
	int ulock_op = UL_UNFAIR_LOCK | ULF_NO_ERRNO;

	if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
		/* make the ulock wait interruptible by pthread_cancel() */
		ulock_op |= ULF_WAIT_CANCEL_POINT;
	}

	for (;;) {
		/* gate holds the target's kport while alive, MACH_PORT_DEAD once exited */
		uint32_t cur = os_atomic_load(exit_gate, acquire);
		if (cur == MACH_PORT_DEAD) {
			break;
		}
		if (os_unlikely(cur != ctx->kport)) {
			PTHREAD_CLIENT_CRASH(cur, "pthread_join() state corruption");
		}
		int ret = __ulock_wait(ulock_op, exit_gate, ctx->kport, 0);
		switch (-ret) {
		case 0:
		case EFAULT:
			/* woken (or spurious/fault): loop and re-check the gate */
			break;
		case EINTR:
			/*
			 * POSIX says:
			 *
			 * As specified, either the pthread_join() call is canceled, or it
			 * succeeds, but not both. The difference is obvious to the
			 * application, since either a cancellation handler is run or
			 * pthread_join() returns.
			 *
			 * When __ulock_wait() returns EINTR, we check if we have been
			 * canceled, and if we have, we try to abort the wait.
			 *
			 * If we can't, it means the other thread finished the join while we
			 * were being canceled and committed the waiter to return from
			 * pthread_join(). Returning from the join then takes precedence
			 * over the cancelation which will be acted upon at the next
			 * cancelation point.
			 */
			if (os_unlikely(conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE &&
					_pthread_is_canceled(ctx->waiter))) {
				if (_pthread_joiner_abort_wait(thread, ctx)) {
					ctx->waiter->canceled = true;
					pthread_exit(PTHREAD_CANCELED);
				}
			}
			break;
		}
	}

	bool cleanup = false;

	_pthread_lock_lock(&_pthread_list_lock);
	// If pthread_detach() was called, we can't safely dereference the thread,
	// else, decide who gets to deallocate the thread (see _pthread_terminate).
	if (!ctx->detached) {
		PTHREAD_DEBUG_ASSERT(thread->tl_join_ctx == ctx);
		thread->tl_join_ctx = NULL;
		cleanup = thread->tl_joiner_cleans_up;
	}
	_pthread_lock_unlock(&_pthread_list_lock);

	if (cleanup) {
		/* the exiting thread delegated its own deallocation to us */
		_pthread_deallocate(thread, false);
	}
	return 0;
}
338
/*
 * Common implementation of pthread_join().  Validates the target, sets
 * up a join context under _pthread_list_lock, then (if the target has
 * not already exited) waits for it via _pthread_joiner_wait().
 *
 * Returns 0 on success (with *value_ptr set if requested), ESRCH for an
 * invalid thread, EINVAL for non-joinable / already-joined targets, and
 * EDEADLK for self-join or mutual-join cycles.
 */
OS_NOINLINE
int
_pthread_join(pthread_t thread, void **value_ptr, pthread_conformance_t conforming)
{
	pthread_t self = pthread_self();
	pthread_join_context_s ctx = {
		.waiter = self,
		.value_ptr = value_ptr,
		.kport = MACH_PORT_NULL,
		.custom_stack_sema = MACH_PORT_NULL,
	};
	int res = 0;
	kern_return_t kr;

	/* validates `thread` and returns with _pthread_list_lock held on success */
	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	_pthread_validate_signature(self);

	if (!thread->tl_joinable || (thread->tl_join_ctx != NULL)) {
		/* not joinable, or someone is already joining it */
		res = EINVAL;
	} else if (thread == self ||
			(self->tl_join_ctx && self->tl_join_ctx->waiter == thread)) {
		/* joining ourselves, or two threads joining each other */
		res = EDEADLK;
	} else if (thread->tl_exit_gate == MACH_PORT_DEAD) {
		/* target already exited: reap it immediately, no wait needed */
		TAILQ_REMOVE(&__pthread_head, thread, tl_plist);
		PTHREAD_DEBUG_ASSERT(thread->tl_joiner_cleans_up);
		thread->tl_joinable = false;
		if (value_ptr) *value_ptr = _pthread_get_exit_value(thread);
	} else {
		/* publish our join context; the exiting thread will wake us */
		ctx.kport = _pthread_tsd_slot(thread, MACH_THREAD_SELF);
		thread->tl_exit_gate = ctx.kport;
		thread->tl_join_ctx = &ctx;
		if (thread->tl_has_custom_stack) {
			ctx.custom_stack_sema = (semaphore_t)os_get_cached_semaphore();
		}
	}
	_pthread_lock_unlock(&_pthread_list_lock);

	if (res == 0) {
		if (ctx.kport == MACH_PORT_NULL) {
			/* already-exited path above: just free the thread */
			_pthread_deallocate(thread, false);
		} else {
			res = _pthread_joiner_wait(thread, &ctx, conforming);
		}
	}
	if (res == 0 && ctx.custom_stack_sema && !ctx.detached) {
		// threads with a custom stack need to make sure _pthread_terminate
		// returned before the joiner is unblocked, the joiner may quickly
		// deallocate the stack with rather dire consequences.
		//
		// When we reach this point we know the pthread_join has to succeed
		// so this can't be a cancelation point.
		do {
			kr = __semwait_signal_nocancel(ctx.custom_stack_sema, 0, 0, 0, 0, 0);
		} while (kr != KERN_SUCCESS);
	}
	if (ctx.custom_stack_sema) {
		/* return the semaphore to the process-wide cache for reuse */
		os_put_cached_semaphore(ctx.custom_stack_sema);
	}
	return res;
}
402
403 #endif /* !BUILDING_VARIANT ] */
404
405 static inline pthread_conformance_t
406 _pthread_conformance(void)
407 {
408 #ifdef VARIANT_CANCELABLE
409 return PTHREAD_CONFORM_UNIX03_CANCELABLE;
410 #else /* !VARIANT_CANCELABLE */
411 return PTHREAD_CONFORM_UNIX03_NOCANCEL;
412 #endif
413 }
414
/*
 * Act as a cancellation point only when building the cancelable
 * variant; compiles to nothing otherwise.
 */
static inline void
_pthread_testcancel_if_cancelable_variant(void)
{
#if defined(VARIANT_CANCELABLE)
	pthread_testcancel();
#endif
}
422
423 int
424 pthread_join(pthread_t thread, void **value_ptr)
425 {
426 _pthread_testcancel_if_cancelable_variant();
427 return _pthread_join(thread, value_ptr, _pthread_conformance());
428 }
429
430 int
431 pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
432 {
433 return _pthread_cond_wait(cond, mutex, NULL, 0, _pthread_conformance());
434 }
435
436 int
437 pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
438 const struct timespec *abstime)
439 {
440 return _pthread_cond_wait(cond, mutex, abstime, 0, _pthread_conformance());
441 }
442
443 int
444 sigwait(const sigset_t * set, int * sig)
445 {
446 int err = 0;
447
448 _pthread_testcancel_if_cancelable_variant();
449
450 if (__sigwait(set, sig) == -1) {
451 err = errno;
452
453 _pthread_testcancel_if_cancelable_variant();
454
455 /*
456 * EINTR that isn't a result of pthread_cancel()
457 * is translated to 0.
458 */
459 if (err == EINTR) {
460 err = 0;
461 }
462 }
463 return(err);
464 }
465