/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <vm/vm_kern.h>
#include <sys/stat.h>   /* for S_IFCHR / S_IFBLK */
#include <vm/vm_map.h>
#include <sys/systm.h>
#include <kern/assert.h>
#include <sys/conf.h>   /* for bdevsw/cdevsw and struct devsw_lock */
#include <sys/proc_internal.h>
#include <sys/buf.h> /* for SET */
#include <sys/kernel.h>
#include <sys/user.h>   /* for struct uthread */
#include <sys/malloc.h> /* for MALLOC/FREE */
#include <sys/sysent.h>
#include <sys/sysproto.h>
/* XXX these should be in a common header somewhere, but aren't */
extern int chrtoblk_set(int, int);
extern vm_offset_t kmem_mb_alloc(vm_map_t, int, int, kern_return_t *);
/* XXX most of these just exist to export; there's no good header for them */
void pcb_synch(void);

TAILQ_HEAD(, devsw_lock) devsw_locks;
lck_mtx_t devsw_lock_list_mtx;
lck_grp_t * devsw_lock_grp;

/* Just to satisfy pstat command */
int dmmin, dmmax, dmtext;
vm_offset_t
kmem_mb_alloc(vm_map_t mbmap, int size, int physContig, kern_return_t *err)
{
	vm_offset_t addr = 0;
	kern_return_t kr = KERN_SUCCESS;

	if (!physContig) {
		kr = kernel_memory_allocate(mbmap, &addr, size, 0,
		    KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
	} else {
		kr = kmem_alloc_contig(mbmap, &addr, size, PAGE_MASK, 0xfffff, 0,
		    KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
	}

	if (kr != KERN_SUCCESS) {
		addr = 0;
	}
	if (err) {
		*err = kr;
	}

	return addr;
}
/*
 * XXX this function only exists to be exported and do nothing.
 */
void
pcb_synch(void)
{
}
struct proc *
current_proc(void)
{
	/* Never returns a NULL */
	struct uthread * ut;
	struct proc * p;
	thread_t thread = current_thread();

	ut = (struct uthread *)get_bsdthread_info(thread);
	if (ut && (ut->uu_flag & UT_VFORK) && ut->uu_proc) {
		p = ut->uu_proc;
		if ((p->p_lflag & P_LINVFORK) == 0) {
			panic("returning child proc not under vfork");
		}
		if (p->p_vforkact != (void *)thread) {
			panic("returning child proc which is not cur_act");
		}
		return p;
	}

	p = (struct proc *)get_bsdtask_info(current_task());

	return p;
}
/* Device switch add delete routines */

struct bdevsw nobdev = NO_BDEVICE;
struct cdevsw nocdev = NO_CDEVICE;
/*
 * if index is -1, return a free slot if available
 * else see whether the index is free
 * return the major number that is free else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 */
int
bdevsw_isfree(int index)
{
	struct bdevsw * devsw;

	if (index < 0) {
		if (index == -1) {
			index = 1; /* start at 1 to avoid collision with volfs (Radar 2842228) */
		} else {
			index = -index; /* start at least this far up in the table */
		}
		devsw = &bdevsw[index];
		for (; index < nblkdev; index++, devsw++) {
			if (memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) == 0) {
				break;
			}
		}
	}

	if (index < 0 || index >= nblkdev) {
		return -1;
	}

	devsw = &bdevsw[index];
	if ((memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) != 0)) {
		return -1;
	}

	return index;
}
/*
 * if index is -1, find a free slot to add
 * else see whether the slot is free
 * return the major number that is used else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 */
int
bdevsw_add(int index, struct bdevsw * bsw)
{
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	index = bdevsw_isfree(index);
	if (index < 0) {
		index = -1;
	} else {
		bdevsw[index] = *bsw;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);
	return index;
}
/*
 * if the slot has the same bsw, then remove
 * else -1
 */
int
bdevsw_remove(int index, struct bdevsw * bsw)
{
	struct bdevsw * devsw;

	if (index < 0 || index >= nblkdev) {
		return -1;
	}

	devsw = &bdevsw[index];
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	if ((memcmp((char *)devsw, (char *)bsw, sizeof(struct bdevsw)) != 0)) {
		index = -1;
	} else {
		bdevsw[index] = nobdev;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);
	return index;
}
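
/*
 * Illustrative sketch (added commentary, not from the original source):
 * how a block-device driver might claim and later release a major number
 * with the routines above.  "my_bdevsw" is a hypothetical switch table;
 * per the comment on bdevsw_isfree(), an index of -1 scans for the first
 * free slot starting at 1, and any other negative value starts the scan
 * at its absolute value.
 *
 *	static struct bdevsw my_bdevsw;         // hypothetical, filled in by the driver
 *
 *	int major = bdevsw_add(-1, &my_bdevsw); // first free slot, or -1 on failure
 *	if (major < 0) {
 *		// table full, or the requested slot was already occupied
 *	}
 *	(void)bdevsw_remove(major, &my_bdevsw); // succeeds only while the slot
 *	                                        // still holds my_bdevsw
 */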
/*
 * if index is -1, return a free slot if available
 * else see whether the index is free
 * return the major number that is free else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 */
int
cdevsw_isfree(int index)
{
	struct cdevsw * devsw;

	if (index < 0) {
		if (index == -1) {
			index = 0;
		} else {
			index = -index; /* start at least this far up in the table */
		}
		devsw = &cdevsw[index];
		for (; index < nchrdev; index++, devsw++) {
			if (memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) == 0) {
				break;
			}
		}
	}

	if (index < 0 || index >= nchrdev) {
		return -1;
	}

	devsw = &cdevsw[index];
	if ((memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) != 0)) {
		return -1;
	}
	return index;
}
/*
 * if index is -1, find a free slot to add
 * else see whether the slot is free
 * return the major number that is used else -1
 *
 * if index is negative, we start
 * looking for a free slot at the absolute value of index,
 * instead of starting at 0
 *
 * NOTE: In practice, -1 is unusable, since there are kernel internal
 * devices that call this function with absolute index values,
 * which will stomp on free-slot based assignments that happen
 * before them. -24 is currently a safe starting point.
 */
int
cdevsw_add(int index, struct cdevsw * csw)
{
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	index = cdevsw_isfree(index);
	if (index < 0) {
		index = -1;
	} else {
		cdevsw[index] = *csw;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);
	return index;
}
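
/*
 * Illustrative sketch (added commentary, not from the original source):
 * a character-device registration following the NOTE above, passing -24
 * as the starting hint instead of -1.  "my_cdevsw" is a hypothetical
 * switch table.
 *
 *	static struct cdevsw my_cdevsw;          // hypothetical, filled in by the driver
 *
 *	int major = cdevsw_add(-24, &my_cdevsw); // first free slot at or above 24
 *	if (major < 0) {
 *		// no free slot at or above 24
 *	}
 */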
/*
 * if the slot has the same csw, then remove
 * else -1
 */
int
cdevsw_remove(int index, struct cdevsw * csw)
{
	struct cdevsw * devsw;

	if (index < 0 || index >= nchrdev) {
		return -1;
	}

	devsw = &cdevsw[index];
	lck_mtx_lock_spin(&devsw_lock_list_mtx);
	if ((memcmp((char *)devsw, (char *)csw, sizeof(struct cdevsw)) != 0)) {
		index = -1;
	} else {
		cdevsw[index] = nocdev;
		cdevsw_flags[index] = 0;
	}
	lck_mtx_unlock(&devsw_lock_list_mtx);
	return index;
}
static int
cdev_set_bdev(int cdev, int bdev)
{
	return chrtoblk_set(cdev, bdev);
}
int
cdevsw_add_with_bdev(int index, struct cdevsw * csw, int bdev)
{
	index = cdevsw_add(index, csw);
	if (index < 0) {
		return index;
	}
	if (cdev_set_bdev(index, bdev) < 0) {
		cdevsw_remove(index, csw);
		return -1;
	}
	return index;
}
int
cdevsw_setkqueueok(int maj, struct cdevsw * csw, int extra_flags)
{
	struct cdevsw * devsw;
	uint64_t flags = CDEVSW_SELECT_KQUEUE;

	if (maj < 0 || maj >= nchrdev) {
		return -1;
	}

	devsw = &cdevsw[maj];
	if ((memcmp((char *)devsw, (char *)csw, sizeof(struct cdevsw)) != 0)) {
		return -1;
	}

	flags |= extra_flags;

	cdevsw_flags[maj] = flags;
	return 0;
}
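
/*
 * Illustrative sketch (added commentary, not from the original source):
 * a driver whose select entry point is kqueue-aware would typically mark
 * its slot right after registering it.  extra_flags is 0 here, so only
 * CDEVSW_SELECT_KQUEUE ends up recorded in cdevsw_flags[].
 *
 *	int major = cdevsw_add(-24, &my_cdevsw);  // my_cdevsw is hypothetical
 *	if (major >= 0) {
 *		(void)cdevsw_setkqueueok(major, &my_cdevsw, 0);
 *	}
 */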
#include <pexpert/pexpert.h> /* for PE_parse_boot_arg */

/*
 * Copy the "hostname" variable into a caller-provided buffer
 * Returns: 0 for success, ENAMETOOLONG for insufficient buffer space.
 * On success, "len" will be set to the number of characters preceding
 * the NULL character in the hostname.
 */
int
bsd_hostname(char * buf, int bufsize, int * len)
{
	/*
	 * "hostname" is null-terminated, and "hostnamelen" is equivalent to strlen(hostname).
	 */
	if (hostnamelen < bufsize) {
		strlcpy(buf, hostname, bufsize);
		*len = hostnamelen;
		return 0;
	} else {
		return ENAMETOOLONG;
	}
}
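
/*
 * Illustrative sketch (added commentary, not from the original source):
 * a caller supplies its own buffer and checks the return value; on
 * success "len" holds strlen(hostname), otherwise the buffer was too
 * small and ENAMETOOLONG is returned.
 *
 *	char name[MAXHOSTNAMELEN];
 *	int namelen;
 *
 *	if (bsd_hostname(name, sizeof(name), &namelen) == 0) {
 *		// name[] now holds the NUL-terminated hostname
 *	}
 */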
void
devsw_lock(dev_t dev, int mode)
{
	devsw_lock_t newlock, tmplock;
	int res;

	assert(0 <= major(dev) && major(dev) < nchrdev);
	assert(mode == S_IFCHR || mode == S_IFBLK);

	MALLOC(newlock, devsw_lock_t, sizeof(struct devsw_lock), M_TEMP, M_WAITOK | M_ZERO);
	newlock->dl_dev = dev;
	newlock->dl_thread = current_thread();
	newlock->dl_mode = mode;

	lck_mtx_lock_spin(&devsw_lock_list_mtx);
retry:
	TAILQ_FOREACH(tmplock, &devsw_locks, dl_list)
	{
		if (tmplock->dl_dev == dev && tmplock->dl_mode == mode) {
			res = msleep(tmplock, &devsw_lock_list_mtx, PVFS, "devsw_lock", NULL);
			assert(res == 0);
			goto retry;
		}
	}

	TAILQ_INSERT_TAIL(&devsw_locks, newlock, dl_list);
	lck_mtx_unlock(&devsw_lock_list_mtx);
}
void
devsw_unlock(dev_t dev, int mode)
{
	devsw_lock_t tmplock;

	assert(0 <= major(dev) && major(dev) < nchrdev);

	lck_mtx_lock_spin(&devsw_lock_list_mtx);

	TAILQ_FOREACH(tmplock, &devsw_locks, dl_list)
	{
		if (tmplock->dl_dev == dev && tmplock->dl_mode == mode) {
			break;
		}
	}

	if (tmplock == NULL) {
		panic("Trying to unlock, and couldn't find lock.");
	}

	if (tmplock->dl_thread != current_thread()) {
		panic("Trying to unlock, but I don't hold the lock.");
	}

	wakeup(tmplock);
	TAILQ_REMOVE(&devsw_locks, tmplock, dl_list);

	lck_mtx_unlock(&devsw_lock_list_mtx);

	FREE(tmplock, M_TEMP);
}
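
/*
 * Illustrative sketch (added commentary, not from the original source):
 * callers such as the specfs open/close paths bracket a driver entry
 * point with a per-(dev, mode) lock so that opens and closes of the same
 * device node are serialized.  "my_open_routine" is hypothetical.
 *
 *	devsw_lock(dev, S_IFCHR);
 *	error = my_open_routine(dev, flags, S_IFCHR, p);
 *	devsw_unlock(dev, S_IFCHR);
 */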
void
devsw_init(void)
{
	devsw_lock_grp = lck_grp_alloc_init("devsw", NULL);
	assert(devsw_lock_grp != NULL);

	lck_mtx_init(&devsw_lock_list_mtx, devsw_lock_grp, NULL);
	TAILQ_INIT(&devsw_locks);
}