/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <vm/vm_kern.h>

#include <vm/vm_map.h>
#include <sys/systm.h>
#include <kern/assert.h>

#include <sys/proc_internal.h>
#include <sys/buf.h> /* for SET */
#include <sys/kernel.h>

#include <sys/sysent.h>
#include <sys/sysproto.h>
/* XXX these should be in a common header somewhere, but aren't */
extern int chrtoblk_set(int, int);
extern vm_offset_t kmem_mb_alloc(vm_map_t, int, int, kern_return_t *);
/* XXX most of these just exist to export; there's no good header for them */

TAILQ_HEAD(, devsw_lock) devsw_locks;
lck_mtx_t devsw_lock_list_mtx;
lck_grp_t * devsw_lock_grp;

/* Just to satisfy pstat command */
int dmmin, dmmax, dmtext;
vm_offset_t
kmem_mb_alloc(vm_map_t mbmap, int size, int physContig, kern_return_t *err)
{
	vm_offset_t addr = 0;
	kern_return_t kr = KERN_SUCCESS;

	if (!physContig)
		kr = kernel_memory_allocate(mbmap, &addr, size, 0, KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
	else
		kr = kmem_alloc_contig(mbmap, &addr, size, PAGE_MASK, 0xfffff, 0, KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);

	if (kr != KERN_SUCCESS)
		addr = 0;

	if (err)
		*err = kr;

	return (addr);
}
/*
 * XXX this function only exists to be exported and do nothing.
 */
void
pcb_synch(void)
{
}
struct proc *
current_proc(void)
{
	/* Never returns a NULL */
	struct uthread * ut;
	struct proc * p;
	thread_t thread = current_thread();

	ut = (struct uthread *)get_bsdthread_info(thread);
	if (ut && (ut->uu_flag & UT_VFORK) && ut->uu_proc) {
		p = ut->uu_proc;
		if ((p->p_lflag & P_LINVFORK) == 0)
			panic("returning child proc not under vfork");
		if (p->p_vforkact != (void *)thread)
			panic("returning child proc which is not cur_act");
		return (p);
	}

	p = (struct proc *)get_bsdtask_info(current_task());

	return (p);
}
/* Device switch add delete routines */

struct bdevsw nobdev = NO_BDEVICE;
struct cdevsw nocdev = NO_CDEVICE;
/*
 * if index is -1, return a free slot if available
 *   else see whether the index is free
 * return the major number that is free else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 */
int
bdevsw_isfree(int index)
{
	struct bdevsw * devsw;

	if (index < 0) {
		if (index == -1)
			index = 1; /* start at 1 to avoid collision with volfs (Radar 2842228) */
		else
			index = -index; /* start at least this far up in the table */
		devsw = &bdevsw[index];
		for (; index < nblkdev; index++, devsw++) {
			if (memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) == 0)
				break;
		}
	}

	if (index < 0 || index >= nblkdev)
		return (-1);

	devsw = &bdevsw[index];
	if ((memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) != 0)) {
		return (-1);
	}
	return (index);
}
/*
 * if index is -1, find a free slot to add
 *   else see whether the slot is free
 * return the major number that is used else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 */
int
bdevsw_add(int index, struct bdevsw * bsw)
{
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	index = bdevsw_isfree(index);
	if (index < 0) {
		index = -1;
	} else {
		bdevsw[index] = *bsw;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);

	return (index);
}
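
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * block-device driver might claim a major number.  A negative index asks
 * the lookup above to start scanning at its absolute value; the
 * "example_" names and the -8 hint are assumptions.
 */
#if 0
static int
example_register_bdev(struct bdevsw * example_bsw)
{
	/* First free slot at or above major 8; -1 means no usable slot. */
	int bmajor = bdevsw_add(-8, example_bsw);

	if (bmajor < 0)
		return (ENXIO);

	/* ... typically followed by devfs_make_node() for each unit ... */
	return (0);
}
#endif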
/*
 * if the slot has the same bsw, then remove
 * else -1
 */
int
bdevsw_remove(int index, struct bdevsw * bsw)
{
	struct bdevsw * devsw;

	if (index < 0 || index >= nblkdev)
		return (-1);

	devsw = &bdevsw[index];
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	if ((memcmp((char *)devsw, (char *)bsw, sizeof(struct bdevsw)) != 0)) {
		index = -1;
	} else {
		bdevsw[index] = nobdev;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);

	return (index);
}
/*
 * if index is -1, return a free slot if available
 *   else see whether the index is free
 * return the major number that is free else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 */
int
cdevsw_isfree(int index)
{
	struct cdevsw * devsw;

	if (index < 0) {
		if (index == -1)
			index = 0;
		else
			index = -index; /* start at least this far up in the table */
		devsw = &cdevsw[index];
		for (; index < nchrdev; index++, devsw++) {
			if (memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) == 0)
				break;
		}
	}

	if (index < 0 || index >= nchrdev)
		return (-1);

	devsw = &cdevsw[index];
	if ((memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) != 0)) {
		return (-1);
	}
	return (index);
}
/*
 * if index is -1, find a free slot to add
 *   else see whether the slot is free
 * return the major number that is used else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 *
 * NOTE: In practice, -1 is unusable, since there are kernel internal
 *       devices that call this function with absolute index values,
 *       which will stomp on free-slot based assignments that happen
 *       before them.  -24 is currently a safe starting point.
 */
int
cdevsw_add(int index, struct cdevsw * csw)
{
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	index = cdevsw_isfree(index);
	if (index < 0) {
		index = -1;
	} else {
		cdevsw[index] = *csw;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);

	return (index);
}
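
/*
 * Illustrative sketch, not part of the original file: claiming a character
 * major with the -24 starting hint recommended in the NOTE above, plus the
 * matching removal path.  The "example_" names are assumptions.
 */
#if 0
static int example_cmajor = -1;

static int
example_register_cdev(struct cdevsw * example_csw)
{
	example_cmajor = cdevsw_add(-24, example_csw);
	return (example_cmajor < 0) ? ENXIO : 0;
}

static void
example_unregister_cdev(struct cdevsw * example_csw)
{
	/* cdevsw_remove() only clears the slot if it still holds example_csw. */
	if (example_cmajor >= 0) {
		(void) cdevsw_remove(example_cmajor, example_csw);
		example_cmajor = -1;
	}
}
#endif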
/*
 * if the slot has the same csw, then remove
 * else -1
 */
int
cdevsw_remove(int index, struct cdevsw * csw)
{
	struct cdevsw * devsw;

	if (index < 0 || index >= nchrdev)
		return (-1);

	devsw = &cdevsw[index];
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	if ((memcmp((char *)devsw, (char *)csw, sizeof(struct cdevsw)) != 0)) {
		index = -1;
	} else {
		cdevsw[index] = nocdev;
		cdevsw_flags[index] = 0;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);

	return (index);
}
static int
cdev_set_bdev(int cdev, int bdev)
{
	return (chrtoblk_set(cdev, bdev));
}
int
cdevsw_add_with_bdev(int index, struct cdevsw * csw, int bdev)
{
	index = cdevsw_add(index, csw);
	if (index < 0) {
		return (index);
	}
	if (cdev_set_bdev(index, bdev) < 0) {
		cdevsw_remove(index, csw);
		return (-1);
	}
	return (index);
}
int
cdevsw_setkqueueok(int maj, struct cdevsw * csw, int extra_flags)
{
	struct cdevsw * devsw;
	uint64_t flags = CDEVSW_SELECT_KQUEUE;

	if (maj < 0 || maj >= nchrdev) {
		return (-1);
	}

	devsw = &cdevsw[maj];
	if ((memcmp((char *)devsw, (char *)csw, sizeof(struct cdevsw)) != 0)) {
		return (-1);
	}

	flags |= extra_flags;

	cdevsw_flags[maj] = flags;
	return 0;
}
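
/*
 * Illustrative sketch, not part of the original file: registering a
 * character device paired with an already-registered block major, then
 * advertising kqueue support.  The "example_" names, the -24 hint, and
 * the extra_flags of 0 are assumptions.
 */
#if 0
static int
example_register_cdev_with_bdev(struct cdevsw * example_csw, int paired_bmajor)
{
	int cmajor = cdevsw_add_with_bdev(-24, example_csw, paired_bmajor);

	if (cmajor < 0)
		return (-1);

	/* Mark the slot as usable from kqueue-based select/poll. */
	if (cdevsw_setkqueueok(cmajor, example_csw, 0) < 0) {
		(void) cdevsw_remove(cmajor, example_csw);
		return (-1);
	}
	return (cmajor);
}
#endif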
#include <pexpert/pexpert.h> /* for PE_parse_boot_arg */
/*
 * Copy the "hostname" variable into a caller-provided buffer
 * Returns: 0 for success, ENAMETOOLONG for insufficient buffer space.
 * On success, "len" will be set to the number of characters preceding
 * the NULL character in the hostname.
 */
int
bsd_hostname(char * buf, int bufsize, int * len)
{
	/*
	 * "hostname" is null-terminated, and "hostnamelen" is equivalent to strlen(hostname).
	 */
	if (hostnamelen < bufsize) {
		strlcpy(buf, hostname, bufsize);
		*len = hostnamelen;
		return 0;
	} else {
		return ENAMETOOLONG;
	}
}
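
/*
 * Illustrative sketch, not part of the original file: a caller of
 * bsd_hostname() following the contract described above.  The buffer size
 * (MAXHOSTNAMELEN) and the printf() reporting are assumptions.
 */
#if 0
static void
example_log_hostname(void)
{
	char name[MAXHOSTNAMELEN];
	int namelen = 0;

	if (bsd_hostname(name, (int)sizeof(name), &namelen) == 0)
		printf("hostname: %s (%d bytes)\n", name, namelen);
	else
		printf("hostname does not fit in %d bytes\n", (int)sizeof(name));
}
#endif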
void
devsw_lock(dev_t dev, int mode)
{
	devsw_lock_t newlock, tmplock;
	int res;

	assert(0 <= major(dev) && major(dev) < nchrdev);
	assert(mode == S_IFCHR || mode == S_IFBLK);

	MALLOC(newlock, devsw_lock_t, sizeof(struct devsw_lock), M_TEMP, M_WAITOK | M_ZERO);
	newlock->dl_dev = dev;
	newlock->dl_thread = current_thread();
	newlock->dl_mode = mode;

	lck_mtx_lock_spin(&devsw_lock_list_mtx);
retry:
	TAILQ_FOREACH(tmplock, &devsw_locks, dl_list)
	{
		if (tmplock->dl_dev == dev && tmplock->dl_mode == mode) {
			res = msleep(tmplock, &devsw_lock_list_mtx, PVFS, "devsw_lock", NULL);
			assert(res == 0);
			goto retry;
		}
	}

	TAILQ_INSERT_TAIL(&devsw_locks, newlock, dl_list);
	lck_mtx_unlock(&devsw_lock_list_mtx);
}
void
devsw_unlock(dev_t dev, int mode)
{
	devsw_lock_t tmplock;

	assert(0 <= major(dev) && major(dev) < nchrdev);

	lck_mtx_lock_spin(&devsw_lock_list_mtx);

	TAILQ_FOREACH(tmplock, &devsw_locks, dl_list)
	{
		if (tmplock->dl_dev == dev && tmplock->dl_mode == mode) {
			break;
		}
	}

	if (tmplock == NULL) {
		panic("Trying to unlock, and couldn't find lock.");
	}

	if (tmplock->dl_thread != current_thread()) {
		panic("Trying to unlock, but I don't hold the lock.");
	}

	wakeup(tmplock);
	TAILQ_REMOVE(&devsw_locks, tmplock, dl_list);

	lck_mtx_unlock(&devsw_lock_list_mtx);

	FREE(tmplock, M_TEMP);
}
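
/*
 * Illustrative sketch, not part of the original file: the bracketing
 * pattern devsw_lock()/devsw_unlock() is meant for, serializing access to
 * one (dev, mode) pair.  example_do_open() stands in for a driver's real
 * open routine and is a hypothetical name.
 */
#if 0
static int
example_serialized_open(dev_t dev, int flags, int devtype, struct proc * p)
{
	int error;

	/* Sleeps while another thread holds the (dev, S_IFCHR) entry. */
	devsw_lock(dev, S_IFCHR);
	error = example_do_open(dev, flags, devtype, p);	/* hypothetical driver open */
	/* Removes our entry and wakes any thread sleeping in devsw_lock(). */
	devsw_unlock(dev, S_IFCHR);

	return (error);
}
#endif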
void
devsw_init(void)
{
	devsw_lock_grp = lck_grp_alloc_init("devsw", NULL);
	assert(devsw_lock_grp != NULL);

	lck_mtx_init(&devsw_lock_list_mtx, devsw_lock_grp, NULL);
	TAILQ_INIT(&devsw_locks);
}