/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "pthread_internals.h"

#include <assert.h>
#include <stdio.h>      /* For printf(). */
#include <stdlib.h>
#include <errno.h>      /* For __mach_errno_addr() prototype. */
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/syscall.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>

__private_extern__ struct __pthread_list __pthread_head = LIST_HEAD_INITIALIZER(&__pthread_head);

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);

/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;

__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;

/* Same implementation as LOCK, but without the __is_threaded check */
int _spin_tries = 0;
__private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
{
    int tries = _spin_tries;
    do {
        if (tries-- > 0)
            continue;
        syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
        tries = _spin_tries;
    } while (!_spin_lock_try(lock));
}
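
/* Note (added commentary): this is the spin lock slow path. The thread
 * spins _spin_tries times (nonzero only on multiprocessors; see
 * pthread_init() below), then uses syscall_thread_switch() with
 * SWITCH_OPTION_DEPRESS to briefly yield the processor before retrying
 * _spin_lock_try(), e.g.:
 *
 *      pthread_lock_t l = LOCK_INITIALIZER;
 *      if (!_spin_lock_try(&l))        // fast path failed...
 *          _spin_lock_retry(&l);       // ...spin/yield until acquired
 */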

extern mach_port_t thread_recycle_port;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/

static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

/*
 * [Internal] stack support
 */
size_t _pthread_stack_size = 0;
#define STACK_LOWEST(sp)        ((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED          (sizeof (struct _pthread))


/* The stack grows towards lower addresses:
   |<----------------user stack|struct _pthread|
   ^STACK_LOWEST               ^STACK_START    ^STACK_BASE
                               ^STACK_SELF */

#define STACK_BASE(sp)          (((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)          STACK_START(sp)

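/* Illustrative arithmetic (not part of the original source): assuming a
 * hypothetical __pthread_stack_mask of 0x7FFFF (512KB-aligned stacks), a
 * stack pointer sp = 0xB0040000 gives:
 *
 *      STACK_LOWEST(sp) = 0xB0000000   // sp & ~mask, lowest address
 *      STACK_BASE(sp)   = 0xB0080000   // (sp | mask) + 1, highest address
 *      STACK_START(sp)  = STACK_BASE(sp) - sizeof (struct _pthread)
 *
 * i.e. each thread's struct _pthread lives at the top of its own stack.
 */
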
#if defined(__ppc__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error Need to define a stack address hint for this architecture
#endif

/* Set the base address to use as the stack pointer, before adjusting for the ABI.
 * The guard pages for stack-overflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr), then no guard pages
 * are set up for the thread.
 */

static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
    kern_return_t kr;
    size_t guardsize;
#if 1
    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        /* No guard pages setup in this case */
        assert(((vm_address_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
        *stack = attrs->stackaddr;
        return 0;
    }

    guardsize = attrs->guardsize;
    *((vm_address_t *)stack) = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), (vm_address_t *)stack,
                attrs->stacksize + guardsize,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         (vm_address_t *)stack, attrs->stacksize + guardsize,
                         VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    if (guardsize)
        kr = vm_protect(mach_task_self(), (vm_address_t)*stack, guardsize, FALSE, VM_PROT_NONE);
    *stack += attrs->stacksize + guardsize;

#else
    vm_address_t cur_stack = (vm_address_t)0;
    if (free_stacks == 0)
    {
        /* Allocating guard pages is done by doubling
         * the actual stack size, since STACK_BASE() needs
         * to have stacks aligned on stack_size. Allocating just
         * one page takes as much memory as allocating more pages
         * since it will remain one entry in the vm map.
         * Besides, allocating more than one page allows tracking the
         * overflow pattern when the overflow is bigger than one page.
         */
#ifndef NO_GUARD_PAGES
# define GUARD_SIZE(a)  (2*(a))
# define GUARD_MASK(a)  (((a)<<1) | 1)
#else
# define GUARD_SIZE(a)  (a)
# define GUARD_MASK(a)  (a)
#endif
        while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
        {
            lowest_stack -= GUARD_SIZE(__pthread_stack_size);
            /* Ensure stack is there */
            kr = vm_allocate(mach_task_self(),
                             &lowest_stack,
                             GUARD_SIZE(__pthread_stack_size),
                             FALSE);
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
                if (kr == KERN_SUCCESS)
                    break;
            }
#else
            if (kr == KERN_SUCCESS)
                break;
#endif
        }
        if (lowest_stack > 0)
            free_stacks = (vm_address_t *)lowest_stack;
        else
        {
            /* Too bad. We'll just have to take what comes.
               Use vm_map instead of vm_allocate so we can
               specify alignment. */
            kr = vm_map(mach_task_self(), &lowest_stack,
                        GUARD_SIZE(__pthread_stack_size),
                        GUARD_MASK(__pthread_stack_mask),
                        TRUE /* anywhere */, MEMORY_OBJECT_NULL,
                        0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                        VM_INHERIT_DEFAULT);
            /* This really shouldn't fail and if it does I don't
               know what to do. */
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
            }
#endif
            free_stacks = (vm_address_t *)lowest_stack;
            lowest_stack = 0;
        }
        *free_stacks = 0; /* No other free stacks */
    }
    cur_stack = STACK_START((vm_address_t) free_stacks);
    free_stacks = (vm_address_t *)*free_stacks;
    cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
#endif
    return 0;
}

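/* Note (added commentary): on the live (#if 1) path above, *stack is left
 * pointing one byte past the highest address of the allocation -- the guard
 * page (if any) sits at the lowest address and is marked VM_PROT_NONE, so a
 * stack overflow faults instead of silently corrupting adjacent memory. */
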
static pthread_attr_t _pthread_attr_default = {0};

/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/* Retain the existing stack size of 512K, rather than depending on the main
 * thread's default stack size */
static const size_t DEFAULT_STACK_SIZE = (512*1024);
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->freeStackOnExit = TRUE;
    attr->guardsize = vm_page_size;
    return (ESUCCESS);
}

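/* Illustrative usage (not part of the original source): a typical caller
 * initializes an attribute block, overrides what it needs, creates the
 * thread, then destroys the attribute ('worker' is a hypothetical routine):
 *
 *      pthread_attr_t attr;
 *      pthread_t tid;
 *      pthread_attr_init(&attr);
 *      pthread_attr_setstacksize(&attr, 1024 * 1024);  // multiple of vm_page_size
 *      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *      pthread_create(&tid, &attr, worker, NULL);
 *      pthread_attr_destroy(&attr);
 */
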
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (ESUCCESS);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        u_int32_t addr = (u_int32_t)attr->stackaddr;

        addr -= attr->stacksize;
        *stackaddr = (void *)addr;
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/* By the SUS spec, stackaddr is the base address, i.e. the lowest addressable
 * byte of the stack. This is not the same as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0) &&
        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        u_int32_t addr = (u_int32_t)stackaddr;

        addr += stacksize;
        attr->stackaddr = (void *)addr;
        attr->stacksize = stacksize;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}


/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr,
                          size_t guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* A guardsize of 0 is valid; it means no guard */
        if ((guardsize % vm_page_size) == 0) {
            attr->guardsize = guardsize;
            return (ESUCCESS);
        } else
            return (EINVAL);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr,
                          size_t *guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *guardsize = attr->guardsize;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}


/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
    _pthread_set_self(self);
    pthread_exit((self->fun)(self->arg));
}

int
_pthread_create(pthread_t t,
                const pthread_attr_t *attrs,
                void *stack,
                const mach_port_t kernel_thread)
{
    int res;
    res = ESUCCESS;

    do
    {
        memset(t, 0, sizeof(*t));
        t->tsd[0] = t;
        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
        t->guardsize = attrs->guardsize;
        t->kernel_thread = kernel_thread;
        t->detached = attrs->detached;
        t->inherit = attrs->inherit;
        t->policy = attrs->policy;
        t->param = attrs->param;
        t->freeStackOnExit = attrs->freeStackOnExit;
        t->mutexes = (struct _pthread_mutex *)NULL;
        t->sig = _PTHREAD_SIG;
        t->reply_port = MACH_PORT_NULL;
        t->cthread_self = NULL;
        LOCK_INIT(t->lock);
        t->plist.le_next = (struct _pthread *)0;
        t->plist.le_prev = (struct _pthread **)0;
        t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
        t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
        t->death = SEMAPHORE_NULL;

        if (kernel_thread != MACH_PORT_NULL)
            pthread_setschedparam(t, t->policy, &t->param);
    } while (0);
    return (res);
}

/* Need to deprecate this in the future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to learn whether this process has (or has had) at
 * least one thread apart from the main thread. There can be a race if a
 * thread is in the process of being created at the time of the call. It does
 * not tell whether more than one thread exists at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return (__is_threaded);
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    thread_t kernel_thread;

    /* Wait for the creator to initialize it */
    while ((kernel_thread = t->kernel_thread) == MACH_PORT_NULL)
        sched_yield();

    return kernel_thread;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    return t->stacksize;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    return t->stackaddr;
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}


/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}

static int
_pthread_create_suspended(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *),
                          void *arg,
                          int suspended)
{
    pthread_attr_t *attrs;
    void *stack;
    int res;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;
    int needresume;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = ESUCCESS;

    /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used.
     * Check here whether any change in priority or policy is needed.
     */
    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
        (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
        needresume = 1;
        suspended = 1;
    } else
        needresume = 0;

    do
    {
        /* Allocate a stack for the thread */
        if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
            break;
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;
        if (suspended) {
            /* Create the Mach thread for this thread */
            PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create thread: %d\n", kern_res);
                res = EINVAL; /* Need better error here? */
                break;
            }
        }
        if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            break;
        }
        set_malloc_singlethreaded(0);
        __is_threaded = 1;

        /* Send it on its way */
        t->arg = arg;
        t->fun = start_routine;
        /* Now set it up to execute */
        LOCK(_pthread_list_lock);
        LIST_INSERT_HEAD(&__pthread_head, t, plist);
        _pthread_count++;
        UNLOCK(_pthread_list_lock);
        _pthread_setup(t, _pthread_body, stack, suspended, needresume);
    } while (0);
    return (res);
}

int
pthread_create(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
                            const pthread_attr_t *attr,
                            void *(*start_routine)(void *),
                            void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}

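/* Illustrative usage (not part of the original source): a thread created
 * with pthread_create_suspended_np() does not start running until it is
 * resumed through its Mach port ('worker' is a hypothetical routine):
 *
 *      pthread_t tid;
 *      pthread_create_suspended_np(&tid, NULL, worker, NULL);
 *      thread_resume(pthread_mach_thread_np(tid));     // let it run
 */
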
/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached & PTHREAD_CREATE_JOINABLE)
        {
            if (thread->detached & _PTHREAD_EXITED) {
                UNLOCK(thread->lock);
                pthread_join(thread, NULL);
                return ESUCCESS;
            } else {
                semaphore_t death = thread->death;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;
                UNLOCK(thread->lock);
                if (death)
                    (void) semaphore_signal(death);
                return (ESUCCESS);
            }
        } else {
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else {
        return (ESRCH); /* Not a valid thread */
    }
}

/*
 * pthread_kill calls through to the __pthread_kill system call.
 */

extern int __pthread_kill(mach_port_t, int);

int
pthread_kill (
    pthread_t th,
    int sig)
{
    int error = 0;

    if ((sig < 0) || (sig > NSIG))
        return (EINVAL);

    if (th && (th->sig == _PTHREAD_SIG)) {
        error = __pthread_kill(pthread_mach_thread_np(th), sig);
        if (error == -1)
            error = errno;
        return (error);
    }
    else
        return (ESRCH);
}

/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
                                          MACH_MSG_TYPE_MOVE_SEND);
    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = kernel_thread;
    msg.header.msgh_id = (int)thread;
    ret = mach_msg_send(&msg.header);
    assert(ret == MACH_MSG_SUCCESS);
}

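/* Note (added commentary): the recycle message above carries the pthread_t
 * in msgh_id and moves the kernel thread's send right in msgh_local_port,
 * so _pthread_reap_threads() below can recover both when it drains
 * thread_recycle_port. */
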
/* Reap the resources for available threads */
static
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) {
    mach_port_type_t ptype;
    kern_return_t ret;
    task_t self;

    self = mach_task_self();
    if (kernel_thread != MACH_PORT_DEAD) {
        ret = mach_port_type(self, kernel_thread, &ptype);
        if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
            /* not quite dead yet... */
            return EAGAIN;
        }
        ret = mach_port_deallocate(self, kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_deallocate(kernel_thread) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->reply_port != MACH_PORT_NULL) {
        ret = mach_port_mod_refs(self, th->reply_port,
                                 MACH_PORT_RIGHT_RECEIVE, -1);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_mod_refs(reply_port) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->freeStackOnExit) {
        vm_address_t addr = (vm_address_t)th->stackaddr;
        vm_size_t size;

        size = (vm_size_t)th->stacksize + th->guardsize;

        addr -= size;
        ret = vm_deallocate(self, addr, size);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "vm_deallocate(stack) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (value_ptr)
        *value_ptr = th->exit_value;

    if (th != &_thread)
        free(th);

    return ESUCCESS;
}

static
void _pthread_reap_threads(void)
{
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                   sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                   MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    while (ret == MACH_MSG_SUCCESS) {
        mach_port_t kernel_thread = msg.header.msgh_remote_port;
        pthread_t thread = (pthread_t)msg.header.msgh_id;

        if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN)
        {
            /* not dead yet, put it back for someone else to reap, stop here */
            _pthread_become_available(thread, kernel_thread);
            return;
        }
        ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                       sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }
}

/* For compatibility... */

pthread_t
_pthread_self() {
    return pthread_self();
}

/*
 * Terminate a thread.
 */
void
pthread_exit(void *value_ptr)
{
    struct _pthread_handler_rec *handler;
    pthread_t self = pthread_self();
    kern_return_t kern_res;
    int thread_count;

    /* Make this thread stop receiving any signals */
    syscall(331, 1);

    while ((handler = self->cleanup_stack) != 0)
    {
        (handler->routine)(handler->arg);
        self->cleanup_stack = handler->next;
    }
    _pthread_tsd_cleanup(self);

    _pthread_reap_threads();

    LOCK(self->lock);
    self->detached |= _PTHREAD_EXITED;

    if (self->detached & PTHREAD_CREATE_JOINABLE) {
        mach_port_t death = self->death;
        self->exit_value = value_ptr;
        UNLOCK(self->lock);
        /* the joiner will need a kernel thread reference, leave ours for it */
        if (death) {
            PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
            if (kern_res != KERN_SUCCESS)
                fprintf(stderr,
                        "semaphore_signal(death) failed: %s\n",
                        mach_error_string(kern_res));
        }
        LOCK(_pthread_list_lock);
        thread_count = --_pthread_count;
        UNLOCK(_pthread_list_lock);
    } else {
        UNLOCK(self->lock);
        LOCK(_pthread_list_lock);
        LIST_REMOVE(self, plist);
        thread_count = --_pthread_count;
        UNLOCK(_pthread_list_lock);
        /* with no joiner, we let become available consume our cached ref */
        _pthread_become_available(self, pthread_mach_thread_np(self));
    }

    if (thread_count <= 0)
        exit(0);

    /* Use a new reference to terminate ourselves. Should never return. */
    PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
    fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
            mach_error_string(kern_res));
    abort();
}

/*
 * Wait for a thread to terminate and obtain its exit value.
 */
int
pthread_join(pthread_t thread,
             void **value_ptr)
{
    kern_return_t kern_res;
    int res = ESUCCESS;

    if (thread->sig == _PTHREAD_SIG)
    {
        semaphore_t death = new_sem_from_pool(); /* in case we need it */

        LOCK(thread->lock);
        if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
            thread->death == SEMAPHORE_NULL)
        {
            pthread_t self = pthread_self();

            assert(thread->joiner == NULL);
            if (thread != self && (self == NULL || self->joiner != thread))
            {
                int already_exited = (thread->detached & _PTHREAD_EXITED);

                thread->death = death;
                thread->joiner = self;
                UNLOCK(thread->lock);

                if (!already_exited)
                {
                    /* Wait for it to signal... */
                    do {
                        PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
                    } while (kern_res != KERN_SUCCESS);
                }

                LOCK(_pthread_list_lock);
                LIST_REMOVE(thread, plist);
                UNLOCK(_pthread_list_lock);
                /* ... and wait for it to really be dead */
                while ((res = _pthread_reap_thread(thread,
                                                   thread->kernel_thread,
                                                   value_ptr)) == EAGAIN)
                {
                    sched_yield();
                }
            } else {
                UNLOCK(thread->lock);
                res = EDEADLK;
            }
        } else {
            UNLOCK(thread->lock);
            res = EINVAL;
        }
        restore_sem_to_pool(death);
        return res;
    }
    return ESRCH;
}

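/* Note (added commentary): the join/exit handshake works in two phases --
 * pthread_exit() signals the 'death' semaphore installed above, then the
 * joiner polls _pthread_reap_thread() until the Mach thread's port has
 * actually become a dead name, at which point the stack and the struct
 * _pthread itself are reclaimed. */
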
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        *policy = thread->policy;
        *param = thread->param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
                      int policy,
                      const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    if (thread->sig == _PTHREAD_SIG)
    {
        switch (policy)
        {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return (EINVAL);
        }
        ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
        if (ret != KERN_SUCCESS)
        {
            return (EINVAL);
        }
        thread->policy = policy;
        thread->param = *param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}

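/* Illustrative usage (not part of the original source): move the calling
 * thread to the fixed-priority FIFO policy using the functions above:
 *
 *      struct sched_param sp = { 0 };
 *      sp.sched_priority = sched_get_priority_max(SCHED_FIFO);
 *      pthread_setschedparam(pthread_self(), SCHED_FIFO, &sp);
 */
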
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
              pthread_t t2)
{
    return (t1 == t2);
}

__private_extern__ void
_pthread_set_self(pthread_t p)
{
    extern void __pthread_set_self(pthread_t);
    if (p == 0) {
        bzero(&_thread, sizeof(struct _pthread));
        p = &_thread;
    }
    p->tsd[0] = p;
    __pthread_set_self(p);
}

void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        _pthread_set_self(cself);
        return;
    }
    self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        return (void *)self;
    }
    return self->cthread_self;
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
             void (*init_routine)(void))
{
    _spin_lock(&once_control->lock);
    if (once_control->sig == _PTHREAD_ONCE_SIG_init)
    {
        (*init_routine)();
        once_control->sig = _PTHREAD_ONCE_SIG;
    }
    _spin_unlock(&once_control->lock);
    return (ESUCCESS); /* Spec defines no possible errors! */
}

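/* Illustrative usage (not part of the original source); 'init_tables' is a
 * hypothetical one-time initializer:
 *
 *      static pthread_once_t once = PTHREAD_ONCE_INIT;
 *      static void init_tables(void) { ... }
 *
 *      pthread_once(&once, init_tables);       // runs init_tables() exactly once
 */
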
/*
 * Cancel a thread
 */
int
pthread_cancel(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
        return (ESUCCESS);
    } else
    {
        return (ESRCH);
    }
}

/*
 * Insert a cancellation point in a thread.
 */
static void
_pthread_testcancel(pthread_t thread)
{
    LOCK(thread->lock);
    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
        (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
    {
        UNLOCK(thread->lock);
        pthread_exit(0);
    }
    UNLOCK(thread->lock);
}

void
pthread_testcancel(void)
{
    pthread_t self = pthread_self();
    _pthread_testcancel(self);
}

/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    if (oldstate)
        *oldstate = self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK;
    if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_STATE_MASK) | state;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return (err);
}

/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    if (oldtype)
        *oldtype = self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK;
    if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK) | type;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return (err);
}

int
pthread_getconcurrency(void)
{
    return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
    pthread_concurrency = new_level;
    return (ESUCCESS);
}

/*
 * Perform package initialization - called automatically when the application starts
 */

static int
pthread_init(void)
{
    pthread_attr_t *attrs;
    pthread_t thread;
    kern_return_t kr;
    host_basic_info_data_t basic_info;
    host_priority_info_data_t priority_info;
    host_info_t info;
    host_flavor_t flavor;
    host_t host;
    mach_msg_type_number_t count;
    int mib[2];
    size_t len;
    int numcpus;

    count = HOST_PRIORITY_INFO_COUNT;
    info = (host_info_t)&priority_info;
    flavor = HOST_PRIORITY_INFO;
    host = mach_host_self();
    kr = host_info(host, flavor, info, &count);
    if (kr != KERN_SUCCESS)
        printf("host_info failed (%d); probably need privilege.\n", kr);
    else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    attrs = &_pthread_attr_default;
    pthread_attr_init(attrs);

    LIST_INIT(&__pthread_head);
    LOCK_INIT(_pthread_list_lock);
    thread = &_thread;
    LIST_INSERT_HEAD(&__pthread_head, thread, plist);
    _pthread_set_self(thread);
    _pthread_create(thread, attrs, (void *)USRSTACK, mach_thread_self());
    thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

    /* See if we're on a multiprocessor and set _spin_tries if so. */
    mib[0] = CTL_HW;
    mib[1] = HW_NCPU;
    len = sizeof(numcpus);
    if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
        if (numcpus > 1) {
            _spin_tries = MP_SPIN_TRIES;
        }
    } else {
        count = HOST_BASIC_INFO_COUNT;
        info = (host_info_t)&basic_info;
        flavor = HOST_BASIC_INFO;
        kr = host_info(host, flavor, info, &count);
        if (kr != KERN_SUCCESS)
            printf("host_info failed (%d)\n", kr);
        else {
            if (basic_info.avail_cpus > 1)
                _spin_tries = MP_SPIN_TRIES;
        }
    }

    mach_port_deallocate(mach_task_self(), host);

    _init_cpu_capabilities();

#if defined(__ppc__)

    /* Use fsqrt instruction in sqrt() if available. */
    if (_cpu_capabilities & kHasFsqrt) {
        extern size_t hw_sqrt_len;
        extern double sqrt( double );
        extern double hw_sqrt( double );
        extern void sys_icache_invalidate(void *, size_t);

        memcpy ( (void *)sqrt, (void *)hw_sqrt, hw_sqrt_len );
        sys_icache_invalidate((void *)sqrt, hw_sqrt_len);
    }
#endif

    mig_init(1); /* enable multi-threaded mig interfaces */
    return 0;
}

int sched_yield(void)
{
    swtch_pri(0);
    return 0;
}

/* This is the "magic" that gets the initialization routine called when the application starts */
int (*_cthread_init_routine)(void) = pthread_init;

1453
1454__private_extern__ semaphore_t new_sem_from_pool(void) {
1455 kern_return_t res;
1456 semaphore_t sem;
1457 int i;
1458
1459 LOCK(sem_pool_lock);
1460 if (sem_pool_current == sem_pool_count) {
1461 sem_pool_count += 16;
1462 sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
1463 for (i = sem_pool_current; i < sem_pool_count; i++) {
1464 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
1465 }
1466 }
1467 sem = sem_pool[sem_pool_current++];
1468 UNLOCK(sem_pool_lock);
1469 return sem;
1470}
1471
/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    sem_pool[--sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}

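/* Note (added commentary): the pool behaves like a stack -- pthread_join()
 * takes a semaphore with new_sem_from_pool() before it knows whether it will
 * block, and always hands it back via restore_sem_to_pool(), so semaphores
 * are recycled rather than created per join. */
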
static void sem_pool_reset(void) {
    LOCK(sem_pool_lock);
    sem_pool_count = 0;
    sem_pool_current = 0;
    sem_pool = NULL;
    UNLOCK(sem_pool_lock);
}

__private_extern__ void _pthread_fork_child(pthread_t p) {
    /* Just in case somebody had it locked... */
    UNLOCK(sem_pool_lock);
    sem_pool_reset();
    /* No need to hold the pthread_list_lock as no one other than this
     * thread is present at this time
     */
    LIST_INIT(&__pthread_head);
    LOCK_INIT(_pthread_list_lock);
    LIST_INSERT_HEAD(&__pthread_head, p, plist);
    _pthread_count = 1;
}