]> git.saurik.com Git - apple/xnu.git/blob - bsd/dev/i386/unix_signal.c
c8209bd942741b17433fef2f7f0d015db8acfaab
[apple/xnu.git] / bsd / dev / i386 / unix_signal.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * Copyright (c) 1992 NeXT, Inc.
32 *
33 * HISTORY
34 * 13 May 1992 ? at NeXT
35 * Created.
36 */
37
38 #include <mach/mach_types.h>
39 #include <mach/exception.h>
40
41 #include <kern/thread.h>
42
43 #include <sys/systm.h>
44 #include <sys/param.h>
45 #include <sys/proc_internal.h>
46 #include <sys/user.h>
47 #include <sys/sysproto.h>
48 #include <sys/sysent.h>
49 #include <sys/ucontext.h>
50 #include <sys/wait.h>
51 #include <mach/thread_act.h> /* for thread_abort_safely */
52 #include <mach/thread_status.h>
53 #include <i386/machine_routines.h>
54
55 #include <i386/eflags.h>
56 #include <i386/psl.h>
57 #include <i386/seg.h>
58
59 #include <sys/kdebug.h>
60
/* Forward: */
extern boolean_t machine_exception(int, int, int, int *, int *);
extern kern_return_t thread_getstatus(register thread_t act, int flavor,
			thread_state_t tstate, mach_msg_type_number_t *count);
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
			thread_state_t tstate, mach_msg_type_number_t count);

/* Signal handler flavors supported */
/* These defns should match the Libc implmn */
#define UC_TRAD			1	/* traditional handler: (sig) style, no siginfo */
#define UC_FLAVOR		30	/* SA_SIGINFO handler: full siginfo/ucontext */

#define	C_32_STK_ALIGN		16	/* i386 stack alignment for SSE */
#define	C_64_STK_ALIGN		16	/* x86_64 ABI stack alignment */
#define	C_64_REDZONE_LEN	128	/* x86_64 ABI red zone below %rsp */
/*
 * Round 'a' down to a 'c'-byte boundary, always moving down by at
 * least 'c' bytes ('c' must be a power of two).
 */
#define TRUNC_DOWN32(a,c)	((((uint32_t)a)-(c)) & ((uint32_t)(-(c))))
#define TRUNC_DOWN64(a,c)	((((uint64_t)a)-(c)) & ((uint64_t)(-(c))))
78
79 /*
80 * Send an interrupt to process.
81 *
82 * Stack is set up to allow sigcode stored
83 * in u. to call routine, followed by chmk
84 * to sigreturn routine below. After sigreturn
85 * resets the signal mask, the stack, the frame
86 * pointer, and the argument pointer, it returns
87 * to the user specified pc, psl.
88 */
/*
 * Frame laid down on a 32-bit user stack by sendsig() below.  The
 * layout must match what the 32-bit Libc signal trampoline (_sigtramp)
 * expects to find at the stack pointer on entry.
 */
struct sigframe32 {
	int		retaddr;	/* fake return address (-1); trampoline never returns */
	sig_t		catcher;	/* user handler address (32-bit pointer) */
	int		sigstyle;	/* UC_TRAD or UC_FLAVOR */
	int		sig;		/* signal number being delivered */
	siginfo_t	*sinfo;		/* user address of the siginfo copy */
	struct ucontext	*uctx;		/* user address of the ucontext copy */
};
97
98
99
/*
 * sendsig -- deliver signal 'sig' to the current thread of process 'p'.
 *
 * Builds a signal-delivery frame on the user stack: a snapshot of the
 * interrupted machine context (mcontext), a siginfo record, and a
 * ucontext that sigreturn() later uses to restore the thread.  If an
 * alternate signal stack is configured, requested for this signal, and
 * not already active, the frame is built there instead.  Finally the
 * thread's user register state is rewritten so it resumes in the Libc
 * signal trampoline (ps->ps_trampact[sig]) with the handler arguments
 * in place.
 *
 * Handles both 32- and 64-bit user processes: the 64-bit path passes
 * handler arguments in registers per the x86_64 ABI; the 32-bit path
 * pushes a struct sigframe32.
 *
 *   p          - process being signalled
 *   ua_catcher - user-space address of the signal handler
 *   sig        - signal number being delivered
 *   mask       - signal mask for sigreturn() to restore
 *   code       - unused
 *
 * On any failure (thread-state fetch or copyout) control goes to
 * 'bad:', which resets SIGILL to its default action and posts it.
 */
void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused u_long code)
{
	union {
		struct mcontext32 mctx32;
		struct mcontext64 mctx64;
	} mctx;
	user_addr_t ua_sp;		/* working user stack pointer */
	user_addr_t ua_fp;		/* aligned frame base handed to the handler */
	user_addr_t ua_cr2;		/* fault address from the exception state */
	user_addr_t ua_sip;		/* user address of the siginfo copy */
	user_addr_t ua_uctxp;		/* user address of the ucontext copy */
	user_addr_t ua_mctxp;		/* user address of the mcontext copy */
	user_siginfo_t sinfo64;		/* built wide; narrowed later for 32-bit targets */

	struct sigacts *ps = p->p_sigacts;
	int oonstack, flavor;
	void * state;
	mach_msg_type_number_t state_count;
	int uthsigaltstack = 0;		/* nonzero: altstack state is per-thread */
	int altstack = 0;		/* nonzero: an alternate stack is configured */

	thread_t thread = current_thread();
	struct uthread * ut;
	int stack_size = 0;
	int infostyle = UC_TRAD;

	/* SA_SIGINFO handlers get the UC_FLAVOR (full siginfo) call style. */
	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;

	ut = get_bsdthread_info(thread);

	/*
	 * Alternate-stack bookkeeping lives per-thread (uthread) or
	 * per-process (sigacts) depending on P_LTHSIGSTACK.
	 */
	uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;

	if (uthsigaltstack != 0 ) {
		oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
		altstack = ut->uu_flag & UT_ALTSTACK;
	} else {
		oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;
		altstack = ps->ps_flags & SAS_ALTSTACK;
	}
	/*
	 * init siginfo
	 */
	bzero((caddr_t)&sinfo64, sizeof(user_siginfo_t));
	sinfo64.si_signo = sig;

	if (proc_is64bit(p)) {
		x86_thread_state64_t *tstate64;
		struct user_ucontext64 uctx64;

		/* Snapshot the interrupted thread, float, and exception state. */
		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctx.mctx64.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_FLOAT_STATE64;
		state_count = x86_FLOAT_STATE64_COUNT;
		state = (void *)&mctx.mctx64.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE64;
		state_count = x86_EXCEPTION_STATE64_COUNT;
		state = (void *)&mctx.mctx64.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate64 = &mctx.mctx64.ss;

		/*
		 * Switch to the alternate stack (its top) if one is set up,
		 * wanted for this signal, and we're not already on it;
		 * otherwise build the frame below the interrupted %rsp.
		 */
		if (altstack && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
			if (uthsigaltstack != 0) {
				ua_sp = ut->uu_sigstk.ss_sp;
				stack_size = ut->uu_sigstk.ss_size;
				ua_sp += stack_size;
				ut->uu_sigstk.ss_flags |= SA_ONSTACK;
			} else {
				ua_sp = ps->ps_sigstk.ss_sp;
				stack_size = ps->ps_sigstk.ss_size;
				ua_sp += stack_size;
				ps->ps_sigstk.ss_flags |= SA_ONSTACK;
			}
		} else
			ua_sp = tstate64->rsp;
		ua_cr2 = mctx.mctx64.es.faultvaddr;

		/* The x86_64 ABI defines a 128-byte red zone. */
		ua_sp -= C_64_REDZONE_LEN;

		/* Carve out ucontext, siginfo and mcontext, top-down. */
		ua_sp -= sizeof (struct user_ucontext64);
		ua_uctxp = ua_sp;		// someone tramples the first word!

		ua_sp -= sizeof (user_siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext64);
		ua_mctxp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'ua_fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN64(ua_sp, C_64_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(user_addr_t);

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx64, sizeof(uctx64));

		uctx64.uc_onstack = oonstack;
		uctx64.uc_sigmask = mask;
		uctx64.uc_stack.ss_sp = ua_fp;
		uctx64.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx64.uc_stack.ss_flags |= SS_ONSTACK;
		uctx64.uc_link = 0;

		uctx64.uc_mcsize = sizeof(struct mcontext64);
		uctx64.uc_mcontext64 = ua_mctxp;

		/* Copy ucontext and saved machine context out to user space. */
		if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64)))
			goto bad;

		if (copyout((caddr_t)&mctx.mctx64, ua_mctxp, sizeof (struct mcontext64)))
			goto bad;

		/* Stash the interrupted stack pointer and pc in the siginfo. */
		sinfo64.pad[0] = tstate64->rsp;
		sinfo64.si_addr = tstate64->rip;

		/* Redirect the thread to the Libc trampoline for this signal. */
		tstate64->rip = ps->ps_trampact[sig];
		tstate64->rsp = ua_fp;
		tstate64->rflags = get_eflags_exportmask();
		/*
		 * JOE - might not need to set these
		 */
		tstate64->cs = USER64_CS;
		tstate64->fs = NULL_SEG;
		tstate64->gs = USER_CTHREAD;

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 */
		tstate64->rdi = ua_catcher;
		tstate64->rsi = infostyle;
		tstate64->rdx = sig;
		tstate64->rcx = ua_sip;
		tstate64->r8 = ua_uctxp;

	} else {
		x86_thread_state32_t *tstate32;
		struct ucontext uctx32;
		struct sigframe32 frame32;

		/* Snapshot the interrupted thread, float, and exception state. */
		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)&mctx.mctx32.ss;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_FLOAT_STATE32;
		state_count = x86_FLOAT_STATE32_COUNT;
		state = (void *)&mctx.mctx32.fs;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		flavor = x86_EXCEPTION_STATE32;
		state_count = x86_EXCEPTION_STATE32_COUNT;
		state = (void *)&mctx.mctx32.es;
		if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
			goto bad;

		tstate32 = &mctx.mctx32.ss;

		/* Same alternate-stack selection as the 64-bit path above. */
		if (altstack && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
			if (uthsigaltstack != 0) {
				ua_sp = ut->uu_sigstk.ss_sp;
				stack_size = ut->uu_sigstk.ss_size;
				ua_sp += stack_size;
				ut->uu_sigstk.ss_flags |= SA_ONSTACK;
			} else {
				ua_sp = ps->ps_sigstk.ss_sp;
				stack_size = ps->ps_sigstk.ss_size;
				ua_sp += stack_size;
				ps->ps_sigstk.ss_flags |= SA_ONSTACK;
			}
		} else
			ua_sp = tstate32->esp;
		ua_cr2 = mctx.mctx32.es.faultvaddr;

		/* Carve out ucontext, siginfo, mcontext and the arg frame. */
		ua_sp -= sizeof (struct ucontext);
		ua_uctxp = ua_sp;		// someone tramples the first word!

		ua_sp -= sizeof (siginfo_t);
		ua_sip = ua_sp;

		ua_sp -= sizeof (struct mcontext32);
		ua_mctxp = ua_sp;

		ua_sp -= sizeof (struct sigframe32);
		ua_fp = ua_sp;

		/*
		 * Align the frame and stack pointers to 16 bytes for SSE.
		 * (Note that we use 'fp' as the base of the stack going forward)
		 */
		ua_fp = TRUNC_DOWN32(ua_fp, C_32_STK_ALIGN);

		/*
		 * But we need to account for the return address so the alignment is
		 * truly "correct" at _sigtramp
		 */
		ua_fp -= sizeof(frame32.retaddr);

		/*
		 * Build the argument list for the signal handler.
		 * Handler should call sigreturn to get out of it
		 */
		frame32.retaddr = -1;	/* fake return address; trampoline never returns */
		frame32.sigstyle = infostyle;
		frame32.sig = sig;
		frame32.catcher = CAST_DOWN(sig_t, ua_catcher);
		frame32.sinfo = CAST_DOWN(siginfo_t *, ua_sip);
		frame32.uctx = CAST_DOWN(struct ucontext *, ua_uctxp);

		if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32)))
			goto bad;

		/*
		 * Build the signal context to be used by sigreturn.
		 */
		bzero(&uctx32, sizeof(uctx32));

		uctx32.uc_onstack = oonstack;
		uctx32.uc_sigmask = mask;
		uctx32.uc_stack.ss_sp = CAST_DOWN(char *, ua_fp);
		uctx32.uc_stack.ss_size = stack_size;

		if (oonstack)
			uctx32.uc_stack.ss_flags |= SS_ONSTACK;
		uctx32.uc_link = 0;

		uctx32.uc_mcsize = sizeof(struct mcontext32);

		uctx32.uc_mcontext = CAST_DOWN(struct mcontext *, ua_mctxp);

		/* Copy ucontext and saved machine context out to user space. */
		if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32)))
			goto bad;

		if (copyout((caddr_t)&mctx.mctx32, ua_mctxp, sizeof (struct mcontext32)))
			goto bad;

		/* Stash the interrupted stack pointer and pc in the siginfo. */
		sinfo64.pad[0] = tstate32->esp;
		sinfo64.si_addr = tstate32->eip;
	}

	/* Fill in the signal-specific siginfo fields. */
	switch (sig) {
		case SIGCHLD:
			/* Consume (and clear) the child-status info cached on the proc. */
			sinfo64.si_pid = p->si_pid;
			p->si_pid =0;
			sinfo64.si_status = p->si_status;
			p->si_status = 0;
			sinfo64.si_uid = p->si_uid;
			p->si_uid =0;
			sinfo64.si_code = p->si_code;
			p->si_code = 0;
			if (sinfo64.si_code == CLD_EXITED) {
				/* Refine the code from the wait(2)-style status. */
				if (WIFEXITED(sinfo64.si_status))
					sinfo64.si_code = CLD_EXITED;
				else if (WIFSIGNALED(sinfo64.si_status)) {
					if (WCOREDUMP(sinfo64.si_status))
						sinfo64.si_code = CLD_DUMPED;
					else
						sinfo64.si_code = CLD_KILLED;
				}
			}
			break;
		case SIGILL:
			/* Map the Mach exception code to an si_code. */
			switch (ut->uu_code) {
				case EXC_I386_INVOP:
					sinfo64.si_code = ILL_ILLOPC;
					break;
				case EXC_I386_GPFLT:
					sinfo64.si_code = ILL_PRVOPC;
					break;
				default:
					printf("unknown SIGILL code %d\n", ut->uu_code);
					sinfo64.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
			/* Bit positions in the x87 FPU status word exception flags. */
#define FP_IE 0 /* Invalid operation */
#define FP_DE 1 /* Denormalized operand */
#define FP_ZE 2 /* Zero divide */
#define FP_OE 3 /* overflow */
#define FP_UE 4 /* underflow */
#define FP_PE 5 /* precision */
			if (ut->uu_subcode & (1 << FP_ZE)) {
				sinfo64.si_code = FPE_FLTDIV;
			} else if (ut->uu_subcode & (1 << FP_OE)) {
				sinfo64.si_code = FPE_FLTOVF;
			} else if (ut->uu_subcode & (1 << FP_UE)) {
				sinfo64.si_code = FPE_FLTUND;
			} else if (ut->uu_subcode & (1 << FP_PE)) {
				sinfo64.si_code = FPE_FLTRES;
			} else if (ut->uu_subcode & (1 << FP_IE)) {
				sinfo64.si_code = FPE_FLTINV;
			} else {
				printf("unknown SIGFPE code %d, subcode %x\n",
					ut->uu_code, ut->uu_subcode);
				sinfo64.si_code = FPE_NOOP;
			}
			break;
		case SIGBUS:
			sinfo64.si_code = BUS_ADRERR;
			sinfo64.si_addr = ua_cr2;	/* the faulting address */
			break;
		case SIGTRAP:
			sinfo64.si_code = TRAP_BRKPT;
			break;
		case SIGSEGV:
			sinfo64.si_addr = ua_cr2;	/* the faulting address */

			switch (ut->uu_code) {
				case KERN_PROTECTION_FAILURE:
					sinfo64.si_code = SEGV_ACCERR;
					break;
				case KERN_INVALID_ADDRESS:
					sinfo64.si_code = SEGV_MAPERR;
					break;
				default:
					printf("unknown SIGSEGV code %d\n", ut->uu_code);
					/*
					 * NOTE(review): FPE_NOOP looks like a copy/paste
					 * slip -- SEGV_NOOP would be the matching "no-op"
					 * code here; confirm they share the same value.
					 */
					sinfo64.si_code = FPE_NOOP;
			}
			break;
		default:
			break;
	}
	/* Copy the siginfo out (narrowed for 32-bit) and aim the thread at _sigtramp. */
	if (proc_is64bit(p)) {
		if (copyout((caddr_t)&sinfo64, ua_sip, sizeof (sinfo64)))
			goto bad;

		/* 64-bit registers were already redirected above. */
		flavor = x86_THREAD_STATE64;
		state_count = x86_THREAD_STATE64_COUNT;
		state = (void *)&mctx.mctx64.ss;
	} else {
		x86_thread_state32_t *tstate32;
		siginfo_t sinfo32;

		/* Narrow the wide siginfo to the 32-bit user layout. */
		bzero((caddr_t)&sinfo32, sizeof(siginfo_t));

		sinfo32.si_signo = sinfo64.si_signo;
		sinfo32.si_code = sinfo64.si_code;
		sinfo32.si_pid = sinfo64.si_pid;
		sinfo32.si_uid = sinfo64.si_uid;
		sinfo32.si_status = sinfo64.si_status;
		sinfo32.si_addr = CAST_DOWN(void *, sinfo64.si_addr);
		sinfo32.pad[0] = sinfo64.pad[0];

		if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32)))
			goto bad;

		/* Redirect the thread to the Libc trampoline for this signal. */
		tstate32 = &mctx.mctx32.ss;
		tstate32->eip = CAST_DOWN(unsigned int, ps->ps_trampact[sig]);
		tstate32->esp = CAST_DOWN(unsigned int, ua_fp);

		tstate32->eflags = get_eflags_exportmask();

		tstate32->cs = USER_CS;
		tstate32->ss = USER_DS;
		tstate32->ds = USER_DS;
		tstate32->es = USER_DS;
		tstate32->fs = NULL_SEG;
		tstate32->gs = USER_CTHREAD;

		flavor = x86_THREAD_STATE32;
		state_count = x86_THREAD_STATE32_COUNT;
		state = (void *)tstate32;
	}
	if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
		goto bad;
	/* FP state was captured into the frame; live FPU contents are stale now. */
	ml_fp_setvalid(FALSE);

	return;

bad:
	/* Delivery failed: reset SIGILL to default action and post it. */
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);		/* note: 'sig' reused as a mask from here on */
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	psignal_lock(p, SIGILL, 0);
	return;
}
503
504 /*
505 * System call to cleanup state after a signal
506 * has been taken. Reset signal mask and
507 * stack state from context left by sendsig (above).
508 * Return to previous pc and psl as specified by
509 * context left by sendsig. Check carefully to
510 * make sure that the user has not modified the
511 * psl to gain improper priviledges or to cause
512 * a machine fault.
513 */
514
/*
 * sigreturn(p, uap, retval) -- see the block comment above for the
 * overall contract.  uap->uctx is the user address of the ucontext that
 * sendsig() pushed; uc_mcontext{,64} inside it points at the saved
 * machine context.  Returns EJUSTRETURN on success so the trap code
 * does not overwrite the freshly restored registers, or an errno on a
 * bad copyin / rejected thread state.
 */
int
sigreturn(
	struct proc *p,
	struct sigreturn_args *uap,
	__unused int *retval)
{
	union {
		struct mcontext32 mctx32;
		struct mcontext64 mctx64;
	} mctx;
	thread_t thread = current_thread();
	struct uthread * ut;
	int	error;
	int uthsigaltstack = 0;		/* nonzero: altstack state is per-thread */
	int onstack = 0;		/* was the handler running on the alternate stack? */

	mach_msg_type_number_t ts_count;
	unsigned int ts_flavor;
	void * ts;			/* thread (integer) state to restore */
	mach_msg_type_number_t fs_count;
	unsigned int fs_flavor;
	void * fs;			/* float state to restore */

	ut = (struct uthread *)get_bsdthread_info(thread);
	uthsigaltstack = p->p_lflag & P_LTHSIGSTACK;

	if (proc_is64bit(p)) {
		struct user_ucontext64 uctx64;

		/* Pull in the ucontext, then the machine context it points at. */
		if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64))))
			return(error);

		if ((error = copyin(uctx64.uc_mcontext64, (void *)&mctx.mctx64, sizeof (struct mcontext64))))
			return(error);

		onstack = uctx64.uc_onstack & 01;
		/* Restore the blocked mask, never allowing SIGKILL/SIGSTOP in. */
		ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE64;
		ts_count  = x86_THREAD_STATE64_COUNT;
		ts = (void *)&mctx.mctx64.ss;

		fs_flavor = x86_FLOAT_STATE64;
		fs_count  = x86_FLOAT_STATE64_COUNT;
		fs = (void *)&mctx.mctx64.fs;

	} else {
		struct ucontext uctx32;

		/* Pull in the ucontext, then the machine context it points at. */
		if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32))))
			return(error);

		if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)&mctx.mctx32, sizeof (struct mcontext32))))
			return(error);

		onstack = uctx32.uc_onstack & 01;
		/* Restore the blocked mask, never allowing SIGKILL/SIGSTOP in. */
		ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

		ts_flavor = x86_THREAD_STATE32;
		ts_count  = x86_THREAD_STATE32_COUNT;
		ts = (void *)&mctx.mctx32.ss;

		fs_flavor = x86_FLOAT_STATE32;
		fs_count  = x86_FLOAT_STATE32_COUNT;
		fs = (void *)&mctx.mctx32.fs;
	}
	/* Update SA_ONSTACK bookkeeping to match the restored context. */
	if (onstack) {
		if (uthsigaltstack != 0)
			ut->uu_sigstk.ss_flags |= SA_ONSTACK;
		else
			p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		if (uthsigaltstack != 0)
			ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;
		else
			p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	}
	/* The restored mask may have unblocked pending signals. */
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(thread);

	/*
	 * thread_set_state() does all the needed checks for the passed in content
	 */
	if (thread_setstatus(thread, ts_flavor, ts, ts_count) != KERN_SUCCESS)
		return(EINVAL);

	ml_fp_setvalid(TRUE);

	if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS)
		return(EINVAL);

	return (EJUSTRETURN);
}
608
609
610 /*
611 * machine_exception() performs MD translation
612 * of a mach exception to a unix signal and code.
613 */
614
615 boolean_t
616 machine_exception(
617 int exception,
618 int code,
619 __unused int subcode,
620 int *unix_signal,
621 int *unix_code
622 )
623 {
624
625 switch(exception) {
626
627 case EXC_BAD_INSTRUCTION:
628 *unix_signal = SIGILL;
629 *unix_code = code;
630 break;
631
632 case EXC_ARITHMETIC:
633 *unix_signal = SIGFPE;
634 *unix_code = code;
635 break;
636
637 default:
638 return(FALSE);
639 }
640
641 return(TRUE);
642 }
643
/*
 * NOTE(review): <sys/systm.h> and <sys/sysent.h> are already included
 * at the top of this file; these duplicates are harmless (the headers
 * are guarded) but could be dropped.
 */
#include <sys/systm.h>
#include <sys/sysent.h>

/* Prototypes for the pthread cancellation-point helpers below. */
int __pthread_cset(struct sysent *);
void __pthread_creset(struct sysent *);
649
650 int
651 __pthread_cset(struct sysent *callp)
652 {
653 unsigned int cancel_enable;
654 thread_t thread;
655 struct uthread * uthread;
656
657 thread = current_thread();
658 uthread = get_bsdthread_info(thread);
659
660 cancel_enable = callp->sy_cancel;
661 if (cancel_enable == _SYSCALL_CANCEL_NONE) {
662 uthread->uu_flag |= UT_NOTCANCELPT;
663 } else {
664 if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
665 if (cancel_enable == _SYSCALL_CANCEL_PRE)
666 return(EINTR);
667 else
668 thread_abort_safely(thread);
669 }
670 }
671 return(0);
672 }
673
674
675 void
676 __pthread_creset(struct sysent *callp)
677 {
678
679 unsigned int cancel_enable;
680 thread_t thread;
681 struct uthread * uthread;
682
683 thread = current_thread();
684 uthread = get_bsdthread_info(thread);
685
686 cancel_enable = callp->sy_cancel;
687 if (!cancel_enable)
688 uthread->uu_flag &= ~UT_NOTCANCELPT;
689
690 }
691