/* apple/xnu (xnu-7195.101.1): bsd/kern/bsd_stubs.c */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/time.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <vm/vm_kern.h>
#include <sys/stat.h>
#include <vm/vm_map.h>
#include <sys/systm.h>
#include <kern/assert.h>
#include <sys/conf.h>
#include <sys/proc_internal.h>
#include <sys/buf.h> /* for SET */
#include <sys/kernel.h>
#include <sys/user.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

/* XXX these should be in a common header somewhere, but aren't */
extern int chrtoblk_set(int, int);

/* XXX most of these just exist to export; there's no good header for them */
void pcb_synch(void);

typedef struct devsw_lock {
        TAILQ_ENTRY(devsw_lock) dl_list;
        thread_t dl_thread;
        dev_t dl_dev;
        int dl_mode;
        int dl_waiters;
} *devsw_lock_t;

static LCK_GRP_DECLARE(devsw_lock_grp, "devsw");
static LCK_MTX_DECLARE(devsw_lock_list_mtx, &devsw_lock_grp);
static TAILQ_HEAD(, devsw_lock) devsw_locks = TAILQ_HEAD_INITIALIZER(devsw_locks);

/* Just to satisfy pstat command */
int dmmin, dmmax, dmtext;

/*
 * XXX this function only exists to be exported and do nothing.
 */
void
pcb_synch(void)
{
}

struct proc *
current_proc(void)
{
        /* Never returns NULL */
        struct uthread * ut;
        struct proc * p;
        thread_t thread = current_thread();

        ut = (struct uthread *)get_bsdthread_info(thread);
        if (ut && (ut->uu_flag & UT_VFORK) && ut->uu_proc) {
                p = ut->uu_proc;
                if ((p->p_lflag & P_LINVFORK) == 0) {
                        panic("returning child proc not under vfork");
                }
                if (p->p_vforkact != (void *)thread) {
                        panic("returning child proc which is not cur_act");
                }
                return p;
        }

        p = (struct proc *)get_bsdtask_info(current_task());

        if (p == NULL) {
                return kernproc;
        }

        return p;
}

/* Device switch add/delete routines */

const struct bdevsw nobdev = NO_BDEVICE;
const struct cdevsw nocdev = NO_CDEVICE;
/*
 * If index is -1, return a free slot if available,
 * else see whether the index is free.
 * Return the major number that is free, else -1.
 *
 * If index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0.
 */
int
bdevsw_isfree(int index)
{
        struct bdevsw * devsw;

        if (index < 0) {
                if (index == -1) {
                        index = 1; /* start at 1 to avoid collision with volfs (Radar 2842228) */
                } else {
                        index = -index; /* start at least this far up in the table */
                }
                devsw = &bdevsw[index];
                for (; index < nblkdev; index++, devsw++) {
                        if (memcmp((const char *)devsw, (const char *)&nobdev, sizeof(struct bdevsw)) == 0) {
                                break;
                        }
                }
        }

        if (index < 0 || index >= nblkdev) {
                return -1;
        }

        devsw = &bdevsw[index];
        if ((memcmp((const char *)devsw, (const char *)&nobdev, sizeof(struct bdevsw)) != 0)) {
                return -1;
        }
        return index;
}

/*
 * If index is -1, find a free slot to add,
 * else see whether the slot is free.
 * Return the major number that is used, else -1.
 *
 * If index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0.
 */
int
bdevsw_add(int index, const struct bdevsw * bsw)
{
        lck_mtx_lock_spin(&devsw_lock_list_mtx);
        index = bdevsw_isfree(index);
        if (index < 0) {
                index = -1;
        } else {
                bdevsw[index] = *bsw;
        }
        lck_mtx_unlock(&devsw_lock_list_mtx);
        return index;
}
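
/*
 * A minimal usage sketch (not part of this file): a hypothetical block
 * driver registers its entry points and gives up if no major number is
 * free. The my_* names are assumed for illustration only.
 *
 *	static const struct bdevsw my_bdevsw = {
 *		.d_open = my_open,
 *		.d_close = my_close,
 *		.d_strategy = my_strategy,
 *		.d_ioctl = my_ioctl,
 *		.d_dump = eno_dump,
 *		.d_psize = my_psize,
 *		.d_type = D_DISK,
 *	};
 *
 *	int major = bdevsw_add(-1, &my_bdevsw);
 *	if (major < 0) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 */
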
/*
 * If the slot has the same bsw, remove it;
 * else return -1.
 */
int
bdevsw_remove(int index, const struct bdevsw * bsw)
{
        struct bdevsw * devsw;

        if (index < 0 || index >= nblkdev) {
                return -1;
        }

        devsw = &bdevsw[index];
        lck_mtx_lock_spin(&devsw_lock_list_mtx);
        if ((memcmp((const char *)devsw, (const char *)bsw, sizeof(struct bdevsw)) != 0)) {
                index = -1;
        } else {
                bdevsw[index] = nobdev;
        }
        lck_mtx_unlock(&devsw_lock_list_mtx);
        return index;
}

/*
 * If index is -1, return a free slot if available,
 * else see whether the index is free.
 * Return the major number that is free, else -1.
 *
 * If index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0.
 */
int
cdevsw_isfree(int index)
{
        struct cdevsw * devsw;

        if (index < 0) {
                if (index == -1) {
                        index = 0;
                } else {
                        index = -index; /* start at least this far up in the table */
                }
                devsw = &cdevsw[index];
                for (; index < nchrdev; index++, devsw++) {
                        if (memcmp((const char *)devsw, (const char *)&nocdev, sizeof(struct cdevsw)) == 0) {
                                break;
                        }
                }
        }

        if (index < 0 || index >= nchrdev) {
                return -1;
        }

        devsw = &cdevsw[index];
        if ((memcmp((const char *)devsw, (const char *)&nocdev, sizeof(struct cdevsw)) != 0)) {
                return -1;
        }
        return index;
}

/*
 * If index is -1, find a free slot to add,
 * else see whether the slot is free.
 * Return the major number that is used, else -1.
 *
 * If index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0.
 *
 * NOTE: In practice, -1 is unusable, since there are kernel internal
 * devices that call this function with absolute index values,
 * which will stomp on free-slot based assignments that happen
 * before them. -24 is currently a safe starting point.
 */
int
cdevsw_add(int index, const struct cdevsw * csw)
{
        lck_mtx_lock_spin(&devsw_lock_list_mtx);
        index = cdevsw_isfree(index);
        if (index < 0) {
                index = -1;
        } else {
                cdevsw[index] = *csw;
        }
        lck_mtx_unlock(&devsw_lock_list_mtx);
        return index;
}
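
/*
 * A minimal usage sketch illustrating the NOTE above (not part of this
 * file): a hypothetical character driver asks for the first free slot at
 * or above major 24 by passing -24. my_cdevsw is an assumed name and
 * error handling is abbreviated.
 *
 *	int major = cdevsw_add(-24, &my_cdevsw);
 *	if (major < 0) {
 *		printf("my_driver: no free cdevsw slot\n");
 *	} else {
 *		(void)devfs_make_node(makedev(major, 0), DEVFS_CHAR,
 *		    UID_ROOT, GID_WHEEL, 0666, "mydev");
 *	}
 */
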
/*
 * If the slot has the same csw, remove it;
 * else return -1.
 */
int
cdevsw_remove(int index, const struct cdevsw * csw)
{
        struct cdevsw * devsw;

        if (index < 0 || index >= nchrdev) {
                return -1;
        }

        devsw = &cdevsw[index];
        lck_mtx_lock_spin(&devsw_lock_list_mtx);
        if ((memcmp((const char *)devsw, (const char *)csw, sizeof(struct cdevsw)) != 0)) {
                index = -1;
        } else {
                cdevsw[index] = nocdev;
                cdevsw_flags[index] = 0;
        }
        lck_mtx_unlock(&devsw_lock_list_mtx);
        return index;
}

static int
cdev_set_bdev(int cdev, int bdev)
{
        return chrtoblk_set(cdev, bdev);
}

int
cdevsw_add_with_bdev(int index, const struct cdevsw * csw, int bdev)
{
        index = cdevsw_add(index, csw);
        if (index < 0) {
                return index;
        }
        if (cdev_set_bdev(index, bdev) < 0) {
                cdevsw_remove(index, csw);
                return -1;
        }
        return index;
}
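
/*
 * Sketch of how a disk-style driver might use the variant above (assumed
 * names, not from this file): register the block side first, then add the
 * character side and tie it to the block major so chrtoblk() can resolve
 * the pairing.
 *
 *	int bmaj = bdevsw_add(-1, &my_bdevsw);
 *	int cmaj = -1;
 *	if (bmaj >= 0) {
 *		cmaj = cdevsw_add_with_bdev(-24, &my_cdevsw, bmaj);
 *	}
 *	if (cmaj < 0) {
 *		...handle failure, e.g. bdevsw_remove(bmaj, &my_bdevsw)...
 *	}
 */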

int
cdevsw_setkqueueok(int maj, const struct cdevsw * csw, int extra_flags)
{
        struct cdevsw * devsw;
        uint64_t flags = CDEVSW_SELECT_KQUEUE;

        if (maj < 0 || maj >= nchrdev) {
                return -1;
        }

        devsw = &cdevsw[maj];
        if ((memcmp((const char *)devsw, (const char *)csw, sizeof(struct cdevsw)) != 0)) {
                return -1;
        }

        flags |= extra_flags;

        cdevsw_flags[maj] = flags;
        return 0;
}
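
/*
 * Typically called right after a successful cdevsw_add() to mark the new
 * entry as safe for kqueue-backed select. A hedged sketch, reusing the
 * assumed names from the earlier examples:
 *
 *	if (cdevsw_setkqueueok(major, &my_cdevsw, 0) != 0) {
 *		...the table entry no longer matches what we installed...
 *	}
 */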

#include <pexpert/pexpert.h> /* for PE_parse_boot_arg */

/*
 * Copy the "hostname" variable into a caller-provided buffer.
 * Returns: 0 for success, ENAMETOOLONG for insufficient buffer space.
 * On success, "len" will be set to the number of characters preceding
 * the NUL character in the hostname.
 */
int
bsd_hostname(char *buf, size_t bufsize, size_t *len)
{
        int ret;
        size_t hnlen;
        /*
         * "hostname" is null-terminated
         */
        lck_mtx_lock(&hostname_lock);
        hnlen = strlen(hostname);
        if (hnlen < bufsize) {
                strlcpy(buf, hostname, bufsize);
                *len = hnlen;
                ret = 0;
        } else {
                ret = ENAMETOOLONG;
        }
        lck_mtx_unlock(&hostname_lock);
        return ret;
}
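
/*
 * A small caller sketch (assumed context, not from this file): copy the
 * hostname into a stack buffer and fall back to a placeholder when the
 * buffer is too small.
 *
 *	char name[MAXHOSTNAMELEN];
 *	size_t namelen = 0;
 *
 *	if (bsd_hostname(name, sizeof(name), &namelen) != 0) {
 *		strlcpy(name, "localhost", sizeof(name));
 *		namelen = strlen(name);
 *	}
 */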

static devsw_lock_t
devsw_lock_find_locked(dev_t dev, int mode)
{
        devsw_lock_t lock;

        TAILQ_FOREACH(lock, &devsw_locks, dl_list) {
                if (lock->dl_dev == dev && lock->dl_mode == mode) {
                        return lock;
                }
        }

        return NULL;
}

void
devsw_lock(dev_t dev, int mode)
{
        devsw_lock_t newlock, curlock;

        assert(0 <= major(dev) && major(dev) < nchrdev);
        assert(mode == S_IFCHR || mode == S_IFBLK);

        newlock = kalloc_flags(sizeof(struct devsw_lock), Z_WAITOK | Z_ZERO);
        newlock->dl_dev = dev;
        newlock->dl_thread = current_thread();
        newlock->dl_mode = mode;

        lck_mtx_lock_spin(&devsw_lock_list_mtx);

        curlock = devsw_lock_find_locked(dev, mode);
        if (curlock == NULL) {
                TAILQ_INSERT_TAIL(&devsw_locks, newlock, dl_list);
        } else {
                curlock->dl_waiters++;
                lck_mtx_sleep_with_inheritor(&devsw_lock_list_mtx,
                    LCK_SLEEP_SPIN, curlock, curlock->dl_thread,
                    THREAD_UNINT | THREAD_WAIT_NOREPORT,
                    TIMEOUT_WAIT_FOREVER);
                assert(curlock->dl_thread == current_thread());
                curlock->dl_waiters--;
        }

        lck_mtx_unlock(&devsw_lock_list_mtx);

        if (curlock != NULL) {
                kfree(newlock, sizeof(struct devsw_lock));
        }
}

void
devsw_unlock(dev_t dev, int mode)
{
        devsw_lock_t lock;
        thread_t inheritor_thread = NULL;

        assert(0 <= major(dev) && major(dev) < nchrdev);

        lck_mtx_lock_spin(&devsw_lock_list_mtx);

        lock = devsw_lock_find_locked(dev, mode);

        if (lock == NULL || lock->dl_thread != current_thread()) {
                panic("current thread doesn't own the lock (%p)", lock);
        }

        if (lock->dl_waiters) {
                wakeup_one_with_inheritor(lock, THREAD_AWAKENED,
                    LCK_WAKE_DEFAULT, &lock->dl_thread);
                inheritor_thread = lock->dl_thread;
                lock = NULL;
        } else {
                TAILQ_REMOVE(&devsw_locks, lock, dl_list);
        }

        lck_mtx_unlock(&devsw_lock_list_mtx);

        if (inheritor_thread) {
                thread_deallocate(inheritor_thread);
        }
        kfree(lock, sizeof(struct devsw_lock));
}
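
/*
 * devsw_lock()/devsw_unlock() serialize open and close against a given
 * dev_t and mode. A hedged sketch of the calling pattern, modeled on the
 * specfs character-device open path (details simplified, not taken from
 * this file):
 *
 *	devsw_lock(dev, S_IFCHR);
 *	error = (*cdevsw[major(dev)].d_open)(dev, flags, S_IFCHR, p);
 *	devsw_unlock(dev, S_IFCHR);
 */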