X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/0c530ab8987f0ae6a1a3d9284f40182b88852816..c6bf4f310a33a9262d455ea4d3f0630b1255e3fe:/bsd/kern/bsd_stubs.c

diff --git a/bsd/kern/bsd_stubs.c b/bsd/kern/bsd_stubs.c
index 0e885c2ae..f73834598 100644
--- a/bsd/kern/bsd_stubs.c
+++ b/bsd/kern/bsd_stubs.c
@@ -1,23 +1,29 @@
 /*
  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #include
 #include
@@ -25,29 +31,52 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
 #include
 #include
-#include    /* for SET */
+#include /* for SET */
+#include
 #include
 #include
 #include
 
+/* XXX these should be in a common header somwhere, but aren't */
+extern int chrtoblk_set(int, int);
+extern vm_offset_t kmem_mb_alloc(vm_map_t, int, int, kern_return_t *);
+
+/* XXX most of these just exist to export; there's no good header for them*/
+void pcb_synch(void);
+
+TAILQ_HEAD(, devsw_lock) devsw_locks;
+lck_mtx_t devsw_lock_list_mtx;
+lck_grp_t * devsw_lock_grp;
+
 /* Just to satisfy pstat command */
-int dmmin, dmmax, dmtext;
+int dmmin, dmmax, dmtext;
 
 vm_offset_t
-kmem_mb_alloc(vm_map_t mbmap, int size)
+kmem_mb_alloc(vm_map_t mbmap, int size, int physContig, kern_return_t *err)
 {
-    vm_offset_t addr;
-    if (kernel_memory_allocate(mbmap, &addr, size,
-        0,
-        KMA_NOPAGEWAIT|KMA_KOBJECT|KMA_LOMEM) == KERN_SUCCESS)
-        return(addr);
-    else
-        return(0);
-
+    vm_offset_t addr = 0;
+    kern_return_t kr = KERN_SUCCESS;
+
+    if (!physContig) {
+        kr = kernel_memory_allocate(mbmap, &addr, size, 0, KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
+    } else {
+        kr = kmem_alloc_contig(mbmap, &addr, size, PAGE_MASK, 0xfffff, 0, KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
+    }
+
+    if (kr != KERN_SUCCESS) {
+        addr = 0;
+    }
+    if (err) {
+        *err = kr;
+    }
+
+    return addr;
 }
 /*
  *
@@ -63,297 +92,345 @@ current_proc(void)
 {
     /* Never returns a NULL */
     struct uthread * ut;
-    struct proc *p;
-    thread_t thr_act = current_thread();
+    struct proc * p;
+    thread_t thread = current_thread();
 
-    ut = (struct uthread *)get_bsdthread_info(thr_act);
-    if (ut && (ut->uu_flag & UT_VFORK) && ut->uu_proc) {
+    ut = (struct uthread *)get_bsdthread_info(thread);
+    if (ut && (ut->uu_flag & UT_VFORK) && ut->uu_proc) {
        p = ut->uu_proc;
-       if ((p->p_flag & P_INVFORK) == 0)
+       if ((p->p_lflag & P_LINVFORK) == 0) {
            panic("returning child proc not under vfork");
-       if (p->p_vforkact != (void *)thr_act)
+       }
+       if (p->p_vforkact != (void *)thread) {
            panic("returning child proc which is not cur_act");
-       return(p);
+       }
+       return p;
     }
 
     p = (struct proc *)get_bsdtask_info(current_task());
-    if (p == NULL)
-        return (kernproc);
+    if (p == NULL) {
+        return kernproc;
+    }
 
-    return (p);
+    return p;
 }
 
 /* Device switch add delete routines */
 
-extern int nblkdev, nchrdev;
-
 struct bdevsw nobdev = NO_BDEVICE;
 struct cdevsw nocdev = NO_CDEVICE;
-/*
+/*
  * if index is -1, return a free slot if avaliable
  * else see whether the index is free
  * return the major number that is free else -1
  *
+ * if index is negative, we start
+ * looking for a free slot at the absolute value of index,
+ * instead of starting at 0
  */
 int
 bdevsw_isfree(int index)
 {
-    struct bdevsw *devsw;
-    if (index == -1) {
-        devsw = bdevsw;
-        for(index=0; index < nblkdev; index++, devsw++) {
-            if(memcmp((char *)devsw,
-                (char *)&nobdev,
-                sizeof(struct bdevsw)) == 0)
-                break;
-        }
-    } else {
-        /* NB: Not used below unless index is in range */
+    struct bdevsw * devsw;
+
+    if (index < 0) {
+        if (index == -1) {
+            index = 1; /* start at 1 to avoid collision with volfs (Radar 2842228) */
+        } else {
+            index = -index; /* start at least this far up in the table */
+        }
         devsw = &bdevsw[index];
+        for (; index < nblkdev; index++, devsw++) {
+            if (memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) == 0) {
+                break;
+            }
+        }
     }
-    if ((index < 0) || (index >= nblkdev) ||
-        (memcmp((char *)devsw,
-        (char *)&nobdev,
-        sizeof(struct bdevsw)) != 0)) {
-        return(-1);
+    if (index < 0 || index >= nblkdev) {
+        return -1;
     }
-    return(index);
+
+    devsw = &bdevsw[index];
+    if ((memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) != 0)) {
+        return -1;
+    }
+    return index;
 }
-/*
+/*
  * if index is -1, find a free slot to add
  * else see whether the slot is free
  * return the major number that is used else -1
+ *
+ * if index is negative, we start
+ * looking for a free slot at the absolute value of index,
+ * instead of starting at 0
  */
 int
-bdevsw_add(int index, struct bdevsw * bsw)
+bdevsw_add(int index, struct bdevsw * bsw)
 {
-    struct bdevsw *devsw;
-
-    if (index == -1) {
-        devsw = &bdevsw[1]; /* Start at slot 1 - this is a hack to fix the index=1 hack */
-        /* yes, start at 1 to avoid collision with volfs (Radar 2842228) */
-        for(index=1; index < nblkdev; index++, devsw++) {
-            if(memcmp((char *)devsw,
-                (char *)&nobdev,
-                sizeof(struct bdevsw)) == 0)
-                break;
-        }
-    }
-    devsw = &bdevsw[index];
-    if ((index < 0) || (index >= nblkdev) ||
-        (memcmp((char *)devsw,
-        (char *)&nobdev,
-        sizeof(struct bdevsw)) != 0)) {
-        return(-1);
+    lck_mtx_lock_spin(&devsw_lock_list_mtx);
+    index = bdevsw_isfree(index);
+    if (index < 0) {
+        index = -1;
+    } else {
+        bdevsw[index] = *bsw;
     }
-    bdevsw[index] = *bsw;
-    return(index);
+    lck_mtx_unlock(&devsw_lock_list_mtx);
+    return index;
 }
-/*
+/*
  * if the slot has the same bsw, then remove
  * else -1
  */
 int
-bdevsw_remove(int index, struct bdevsw * bsw)
+bdevsw_remove(int index, struct bdevsw * bsw)
 {
-    struct bdevsw *devsw;
+    struct bdevsw * devsw;
+
+    if (index < 0 || index >= nblkdev) {
+        return -1;
+    }
 
     devsw = &bdevsw[index];
-    if ((index < 0) || (index >= nblkdev) ||
-        (memcmp((char *)devsw,
-        (char *)bsw,
-        sizeof(struct bdevsw)) != 0)) {
-        return(-1);
+    lck_mtx_lock_spin(&devsw_lock_list_mtx);
+    if ((memcmp((char *)devsw, (char *)bsw, sizeof(struct bdevsw)) != 0)) {
+        index = -1;
+    } else {
+        bdevsw[index] = nobdev;
     }
-    bdevsw[index] = nobdev;
-    return(index);
+    lck_mtx_unlock(&devsw_lock_list_mtx);
+    return index;
 }
-/*
+/*
  * if index is -1, return a free slot if avaliable
  * else see whether the index is free
  * return the major number that is free else -1
+ *
+ * if index is negative, we start
+ * looking for a free slot at the absolute value of index,
+ * instead of starting at 0
  */
 int
 cdevsw_isfree(int index)
 {
-    struct cdevsw *devsw;
-
-    if (index == -1) {
-        devsw = cdevsw;
-        for(index=0; index < nchrdev; index++, devsw++) {
-            if(memcmp((char *)devsw,
-                (char *)&nocdev,
-                sizeof(struct cdevsw)) == 0)
-                break;
-        }
+    struct cdevsw * devsw;
+
+    if (index < 0) {
+        if (index == -1) {
+            index = 0;
+        } else {
+            index = -index; /* start at least this far up in the table */
+        }
+        devsw = &cdevsw[index];
+        for (; index < nchrdev; index++, devsw++) {
+            if (memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) == 0) {
+                break;
+            }
+        }
     }
+
+    if (index < 0 || index >= nchrdev) {
+        return -1;
+    }
+
     devsw = &cdevsw[index];
-    if ((index < 0) || (index >= nchrdev) ||
-        (memcmp((char *)devsw,
-        (char *)&nocdev,
-        sizeof(struct cdevsw)) != 0)) {
-        return(-1);
+    if ((memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) != 0)) {
+        return -1;
     }
-    return(index);
+    return index;
 }
-/*
+/*
  * if index is -1, find a free slot to add
  * else see whether the slot is free
  * return the major number that is used else -1
+ *
+ * if index is negative, we start
+ * looking for a free slot at the absolute value of index,
+ * instead of starting at 0
+ *
+ * NOTE: In practice, -1 is unusable, since there are kernel internal
+ * devices that call this function with absolute index values,
+ * which will stomp on free-slot based assignments that happen
+ * before them. -24 is currently a safe starting point.
  */
 int
-cdevsw_add(int index, struct cdevsw * csw)
+cdevsw_add(int index, struct cdevsw * csw)
 {
-    struct cdevsw *devsw;
-
-    if (index == -1) {
-        devsw = cdevsw;
-        for(index=0; index < nchrdev; index++, devsw++) {
-            if(memcmp((char *)devsw,
-                (char *)&nocdev,
-                sizeof(struct cdevsw)) == 0)
-                break;
-        }
-    }
-    devsw = &cdevsw[index];
-    if ((index < 0) || (index >= nchrdev) ||
-        (memcmp((char *)devsw,
-        (char *)&nocdev,
-        sizeof(struct cdevsw)) != 0)) {
-        return(-1);
+    lck_mtx_lock_spin(&devsw_lock_list_mtx);
+    index = cdevsw_isfree(index);
+    if (index < 0) {
+        index = -1;
+    } else {
+        cdevsw[index] = *csw;
     }
-    cdevsw[index] = *csw;
-    return(index);
+    lck_mtx_unlock(&devsw_lock_list_mtx);
+    return index;
 }
 
 /*
- * if the index has the same bsw, then remove
+ * if the slot has the same csw, then remove
  * else -1
  */
 int
-cdevsw_remove(int index, struct cdevsw * csw)
+cdevsw_remove(int index, struct cdevsw * csw)
 {
-    struct cdevsw *devsw;
+    struct cdevsw * devsw;
+
+    if (index < 0 || index >= nchrdev) {
+        return -1;
+    }
 
     devsw = &cdevsw[index];
-    if ((index < 0) || (index >= nchrdev) ||
-        (memcmp((char *)devsw,
-        (char *)csw,
-        sizeof(struct cdevsw)) != 0)) {
-        return(-1);
+    lck_mtx_lock_spin(&devsw_lock_list_mtx);
+    if ((memcmp((char *)devsw, (char *)csw, sizeof(struct cdevsw)) != 0)) {
+        index = -1;
+    } else {
+        cdevsw[index] = nocdev;
+        cdevsw_flags[index] = 0;
     }
-    cdevsw[index] = nocdev;
-    return(index);
+    lck_mtx_unlock(&devsw_lock_list_mtx);
+    return index;
 }
 
 static int
 cdev_set_bdev(int cdev, int bdev)
 {
-    extern int chrtoblk_add(int cdev, int bdev);
-
-    return (chrtoblk_set(cdev, bdev));
+    return chrtoblk_set(cdev, bdev);
 }
 
-int
+int
 cdevsw_add_with_bdev(int index, struct cdevsw * csw, int bdev)
 {
     index = cdevsw_add(index, csw);
     if (index < 0) {
-        return (index);
+        return index;
     }
     if (cdev_set_bdev(index, bdev) < 0) {
         cdevsw_remove(index, csw);
-        return (-1);
+        return -1;
     }
-    return (index);
+    return index;
 }
 
-issingleuser(void)
+int
+cdevsw_setkqueueok(int maj, struct cdevsw * csw, int extra_flags)
 {
-    char namep[16];
+    struct cdevsw * devsw;
+    uint64_t flags = CDEVSW_SELECT_KQUEUE;
+
+    if (maj < 0 || maj >= nchrdev) {
+        return -1;
+    }
+
+    devsw = &cdevsw[maj];
+    if ((memcmp((char *)devsw, (char *)csw, sizeof(struct cdevsw)) != 0)) {
+        return -1;
+    }
+
+    flags |= extra_flags;
+
+    cdevsw_flags[maj] = flags;
+    return 0;
+}
+
+#include /* for PE_parse_boot_arg */
 
-    if (PE_parse_boot_arg("-s", namep)) {
-        return(1);
+/*
+ * Copy the "hostname" variable into a caller-provided buffer
+ * Returns: 0 for success, ENAMETOOLONG for insufficient buffer space.
+ * On success, "len" will be set to the number of characters preceding
+ * the NULL character in the hostname.
+ */
+int
+bsd_hostname(char * buf, int bufsize, int * len)
+{
+    int ret, hnlen;
+    /*
+     * "hostname" is null-terminated
+     */
+    lck_mtx_lock(&hostname_lock);
+    hnlen = strlen(hostname);
+    if (hnlen < bufsize) {
+        strlcpy(buf, hostname, bufsize);
+        *len = hnlen;
+        ret = 0;
     } else {
-        return(0);
+        ret = ENAMETOOLONG;
     }
+    lck_mtx_unlock(&hostname_lock);
+    return ret;
 }
 
-void *
-tbeproc(void *procp)
+void
+devsw_lock(dev_t dev, int mode)
 {
-    struct proc *p = procp;
+    devsw_lock_t newlock, tmplock;
+    int res;
+
+    assert(0 <= major(dev) && major(dev) < nchrdev);
+    assert(mode == S_IFCHR || mode == S_IFBLK);
+
+    MALLOC(newlock, devsw_lock_t, sizeof(struct devsw_lock), M_TEMP, M_WAITOK | M_ZERO);
+    newlock->dl_dev = dev;
+    newlock->dl_thread = current_thread();
+    newlock->dl_mode = mode;
+
+    lck_mtx_lock_spin(&devsw_lock_list_mtx);
+retry:
+    TAILQ_FOREACH(tmplock, &devsw_locks, dl_list)
+    {
+        if (tmplock->dl_dev == dev && tmplock->dl_mode == mode) {
+            res = msleep(tmplock, &devsw_lock_list_mtx, PVFS, "devsw_lock", NULL);
+            assert(res == 0);
+            goto retry;
+        }
+    }
 
-    if (p)
-        SET(p->p_flag, P_TBE);
-    return;
+    TAILQ_INSERT_TAIL(&devsw_locks, newlock, dl_list);
+    lck_mtx_unlock(&devsw_lock_list_mtx);
 }
 
+void
+devsw_unlock(dev_t dev, int mode)
+{
+    devsw_lock_t tmplock;
+
+    assert(0 <= major(dev) && major(dev) < nchrdev);
 
-/*
- * WARNING - this is a temporary workaround for binary compatibility issues
- * with anti-piracy software that relies on patching ptrace (3928003).
- * This KPI will be removed in the system release after Tiger.
- */
-uintptr_t temp_patch_ptrace(uintptr_t new_ptrace)
-{
-    struct sysent * callp;
-    sy_call_t * old_ptrace;
-#ifndef __ppc__
-    boolean_t funnel_state;
-#endif
-
-    if (new_ptrace == 0)
-        return(0);
-
-#ifdef __ppc__
-    enter_funnel_section(kernel_flock);
-#else
-    funnel_state = thread_funnel_set(kernel_flock, TRUE);
-#endif
-    callp = &sysent[26];
-    old_ptrace = callp->sy_call;
-
-    /* only allow one patcher of ptrace */
-    if (old_ptrace == (sy_call_t *) ptrace) {
-        callp->sy_call = (sy_call_t *) new_ptrace;
+    lck_mtx_lock_spin(&devsw_lock_list_mtx);
+
+    TAILQ_FOREACH(tmplock, &devsw_locks, dl_list)
+    {
+        if (tmplock->dl_dev == dev && tmplock->dl_mode == mode) {
+            break;
+        }
     }
-    else {
-        old_ptrace = NULL;
+
+    if (tmplock == NULL) {
+        panic("Trying to unlock, and couldn't find lock.");
     }
-#ifdef __ppc__
-    exit_funnel_section( );
-#else
-    (void)thread_funnel_set(kernel_flock, funnel_state);
-#endif
-
-    return((uintptr_t)old_ptrace);
+
+    if (tmplock->dl_thread != current_thread()) {
+        panic("Trying to unlock, but I don't hold the lock.");
+    }
+
+    wakeup(tmplock);
+    TAILQ_REMOVE(&devsw_locks, tmplock, dl_list);
+
+    lck_mtx_unlock(&devsw_lock_list_mtx);
+
+    FREE(tmplock, M_TEMP);
 }
 
-void temp_unpatch_ptrace(void)
+void
+devsw_init()
 {
-    struct sysent * callp;
-#ifndef __ppc__
-    boolean_t funnel_state;
-#endif
-
-#ifdef __ppc__
-    enter_funnel_section(kernel_flock);
-#else
-    funnel_state = thread_funnel_set(kernel_flock, TRUE);
-#endif
-    callp = &sysent[26];
-    callp->sy_call = (sy_call_t *) ptrace;
-#ifdef __ppc__
-    exit_funnel_section( );
-#else
-    (void)thread_funnel_set(kernel_flock, funnel_state);
-#endif
-
-    return;
+    devsw_lock_grp = lck_grp_alloc_init("devsw", NULL);
+    assert(devsw_lock_grp != NULL);
+
+    lck_mtx_init(&devsw_lock_list_mtx, devsw_lock_grp, NULL);
+    TAILQ_INIT(&devsw_locks);
 }
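
Usage note (not part of the diff): the NOTE added above cdevsw_add() documents the negative-index convention, where a negative argument means "search for a free slot starting at the absolute value of the index", and points out that -24 is currently a safe starting point. The sketch below shows how a hypothetical in-kernel driver might register and tear down a character-device major number with the routines changed in this diff; my_cdevsw, my_major, my_driver_attach, and my_driver_detach are illustrative names and are not symbols from the diff.

/*
 * Minimal sketch, assuming a fully populated switch table named my_cdevsw
 * is defined elsewhere in the driver.
 */
#include <sys/conf.h>    /* cdevsw_add(), cdevsw_remove(), struct cdevsw */
#include <sys/errno.h>   /* ENXIO */

extern struct cdevsw my_cdevsw;  /* assumed: the driver's entry points */

static int my_major = -1;

static int
my_driver_attach(void)
{
	/*
	 * Per the NOTE in the diff, -1 is effectively unusable because
	 * fixed-index kernel devices may later claim low slots; -24 asks
	 * for the first free slot at or above major 24.
	 */
	my_major = cdevsw_add(-24, &my_cdevsw);
	if (my_major < 0) {
		return ENXIO;  /* table full, or the requested slot was taken */
	}
	return 0;
}

static void
my_driver_detach(void)
{
	if (my_major >= 0) {
		/* Removal only succeeds if the slot still holds this same switch. */
		(void)cdevsw_remove(my_major, &my_cdevsw);
		my_major = -1;
	}
}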