/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <vm/vm_kern.h>
#include <sys/stat.h>		/* for S_IFCHR, S_IFBLK */
#include <vm/vm_map.h>
#include <sys/systm.h>
#include <kern/assert.h>
#include <sys/conf.h>		/* for bdevsw, cdevsw, chrtoblk_set */
#include <sys/proc_internal.h>
#include <sys/buf.h>		/* for SET */
#include <sys/kernel.h>
#include <sys/user.h>		/* for struct uthread */
#include <sys/sysent.h>
#include <sys/sysproto.h>
/* XXX these should be in a common header somewhere, but aren't */
extern int chrtoblk_set(int, int);
extern vm_offset_t kmem_mb_alloc(vm_map_t, int, int);
/* XXX most of these just exist to export; there's no good header for them */
void pcb_synch(void);

TAILQ_HEAD(, devsw_lock) devsw_locks;
lck_mtx_t devsw_lock_list_mtx;
lck_grp_t *devsw_lock_grp;

/* Just to satisfy the pstat command */
int dmmin, dmmax, dmtext;
/*
 * Allocate wired kernel memory from the given map; a non-zero physContig
 * requests a physically contiguous allocation.  Returns 0 on failure.
 */
vm_offset_t
kmem_mb_alloc(vm_map_t mbmap, int size, int physContig)
{
	vm_offset_t addr = 0;
	kern_return_t kr = KERN_SUCCESS;

	if (!physContig)
		kr = kernel_memory_allocate(mbmap, &addr, size,
		    0, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_LOMEM);
	else
		kr = kmem_alloc_contig(mbmap, &addr, size, PAGE_MASK,
		    0xfffff, 0, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_LOMEM);

	if (kr != KERN_SUCCESS)
		addr = 0;

	return addr;
}
/*
 * XXX this function only exists to be exported and do nothing.
 */
void
pcb_synch(void)
{
}
/* Never returns a NULL */
struct proc *
current_proc(void)
{
	struct uthread *ut;
	struct proc *p;
	thread_t thread = current_thread();

	ut = (struct uthread *)get_bsdthread_info(thread);
	if (ut && (ut->uu_flag & UT_VFORK) && ut->uu_proc) {
		/* While in vfork, act on behalf of the child proc. */
		p = ut->uu_proc;
		if ((p->p_lflag & P_LINVFORK) == 0)
			panic("returning child proc not under vfork");
		if (p->p_vforkact != (void *)thread)
			panic("returning child proc which is not cur_act");
		return (p);
	}

	p = (struct proc *)get_bsdtask_info(current_task());

	return (p);
}
/* Device switch add/delete routines */

struct bdevsw nobdev = NO_BDEVICE;
struct cdevsw nocdev = NO_CDEVICE;
/*
 * if index is -1, return a free slot if available,
 * else check whether the given index is free;
 * return the major number that is free, else -1.
 *
 * if index is negative, we start looking for a free slot
 * at the absolute value of index, instead of starting at 0.
 */
int
bdevsw_isfree(int index)
{
	struct bdevsw *devsw;

	if (index < 0) {
		if (index == -1)
			index = 1;	/* start at 1 to avoid collision with volfs (Radar 2842228) */
		else
			index = -index;	/* start at least this far up in the table */
		devsw = &bdevsw[index];
		for (; index < nblkdev; index++, devsw++) {
			if (memcmp((char *)devsw, (char *)&nobdev,
			    sizeof(struct bdevsw)) == 0)
				break;
		}
	}
	devsw = &bdevsw[index];
	if ((index < 0) || (index >= nblkdev) ||
	    (memcmp((char *)devsw, (char *)&nobdev,
	    sizeof(struct bdevsw)) != 0)) {
		return (-1);
	}
	return (index);
}
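
/*
 * Example (illustrative sketch; MY_BDEV_MAJOR is a hypothetical major number
 * chosen by the caller): probing the block device switch before claiming a
 * slot, or letting the kernel search upward from that hint.
 *
 *	if (bdevsw_isfree(MY_BDEV_MAJOR) == MY_BDEV_MAJOR) {
 *		// the exact slot is unclaimed and can be passed to bdevsw_add()
 *	}
 *	int free_major = bdevsw_isfree(-MY_BDEV_MAJOR);	// first free slot >= MY_BDEV_MAJOR
 */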
/*
 * if index is -1, find a free slot to add,
 * else check whether the slot is free;
 * return the major number that is used, else -1.
 *
 * if index is negative, we start looking for a free slot
 * at the absolute value of index, instead of starting at 0.
 */
int
bdevsw_add(int index, struct bdevsw *bsw)
{
	index = bdevsw_isfree(index);
	if (index < 0) {
		return (-1);
	}
	bdevsw[index] = *bsw;
	return (index);
}
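
/*
 * Example (illustrative sketch; "my_bdevsw" is a hypothetical, fully
 * populated struct bdevsw owned by the caller): registering a block device
 * and letting the kernel pick the major number.
 *
 *	int bmaj = bdevsw_add(-1, &my_bdevsw);
 *	if (bmaj < 0)
 *		printf("bdevsw_add: no free slot in bdevsw\n");
 */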
/*
 * if the slot holds the same bsw, then remove it and return the index,
 * else return -1.
 */
int
bdevsw_remove(int index, struct bdevsw *bsw)
{
	struct bdevsw *devsw;

	devsw = &bdevsw[index];
	if ((index < 0) || (index >= nblkdev) ||
	    (memcmp((char *)devsw, (char *)bsw,
	    sizeof(struct bdevsw)) != 0)) {
		return (-1);
	}
	bdevsw[index] = nobdev;
	return (index);
}
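
/*
 * Example (illustrative sketch, continuing the hypothetical "my_bdevsw"
 * registration above): the bsw pointer must match the entry that was
 * installed, otherwise the slot is left untouched and -1 is returned.
 *
 *	if (bdevsw_remove(bmaj, &my_bdevsw) < 0)
 *		printf("bdevsw_remove: slot %d does not match my_bdevsw\n", bmaj);
 */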
/*
 * if index is -1, return a free slot if available,
 * else check whether the given index is free;
 * return the major number that is free, else -1.
 *
 * if index is negative, we start looking for a free slot
 * at the absolute value of index, instead of starting at 0.
 */
int
cdevsw_isfree(int index)
{
	struct cdevsw *devsw;

	if (index < 0) {
		if (index == -1)
			index = 0;
		else
			index = -index;	/* start at least this far up in the table */
		devsw = &cdevsw[index];
		for (; index < nchrdev; index++, devsw++) {
			if (memcmp((char *)devsw, (char *)&nocdev,
			    sizeof(struct cdevsw)) == 0)
				break;
		}
	}
	devsw = &cdevsw[index];
	if ((index < 0) || (index >= nchrdev) ||
	    (memcmp((char *)devsw, (char *)&nocdev,
	    sizeof(struct cdevsw)) != 0)) {
		return (-1);
	}
	return (index);
}
/*
 * if index is -1, find a free slot to add,
 * else check whether the slot is free;
 * return the major number that is used, else -1.
 *
 * if index is negative, we start looking for a free slot
 * at the absolute value of index, instead of starting at 0.
 *
 * NOTE: In practice, -1 is unusable, since there are kernel internal
 * devices that call this function with absolute index values,
 * which will stomp on free-slot based assignments that happen
 * before them.  -24 is currently a safe starting point.
 */
int
cdevsw_add(int index, struct cdevsw *csw)
{
	index = cdevsw_isfree(index);
	if (index < 0) {
		return (-1);
	}
	cdevsw[index] = *csw;
	return (index);
}
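
/*
 * Example (illustrative sketch; "my_cdevsw" is a hypothetical, fully
 * populated struct cdevsw): per the NOTE above, a dynamically loaded driver
 * typically passes -24 rather than -1 so the search starts above the slots
 * claimed with absolute indices.
 *
 *	int cmaj = cdevsw_add(-24, &my_cdevsw);
 *	if (cmaj < 0)
 *		printf("cdevsw_add: character device table is full\n");
 */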
/*
 * if the slot holds the same csw, then remove it and return the index,
 * else return -1.
 */
int
cdevsw_remove(int index, struct cdevsw *csw)
{
	struct cdevsw *devsw;

	devsw = &cdevsw[index];
	if ((index < 0) || (index >= nchrdev) ||
	    (memcmp((char *)devsw, (char *)csw,
	    sizeof(struct cdevsw)) != 0)) {
		return (-1);
	}
	cdevsw[index] = nocdev;
	cdevsw_flags[index] = 0;
	return (index);
}
static int
cdev_set_bdev(int cdev, int bdev)
{
	return (chrtoblk_set(cdev, bdev));
}
int
cdevsw_add_with_bdev(int index, struct cdevsw *csw, int bdev)
{
	index = cdevsw_add(index, csw);
	if (index < 0) {
		return (index);
	}
	if (cdev_set_bdev(index, bdev) < 0) {
		cdevsw_remove(index, csw);
		return (-1);
	}
	return (index);
}
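
/*
 * Example (illustrative sketch; "my_cdevsw" and "bmaj" are hypothetical, with
 * bmaj a previously registered block major): registering a character device
 * and recording its character-to-block mapping in one call.
 *
 *	int cmaj = cdevsw_add_with_bdev(-24, &my_cdevsw, bmaj);
 *	if (cmaj < 0)
 *		printf("cdevsw_add_with_bdev: registration failed\n");
 */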
int
cdevsw_setkqueueok(int index, struct cdevsw *csw, int use_offset)
{
	struct cdevsw *devsw;
	uint64_t flags = CDEVSW_SELECT_KQUEUE;

	devsw = &cdevsw[index];
	if ((index < 0) || (index >= nchrdev) ||
	    (memcmp((char *)devsw, (char *)csw,
	    sizeof(struct cdevsw)) != 0)) {
		return (-1);
	}

	if (use_offset) {
		flags |= CDEVSW_USE_OFFSET;
	}

	cdevsw_flags[index] = flags;
	return 0;
}
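
/*
 * Example (illustrative sketch, continuing the hypothetical "my_cdevsw"
 * registration): after a successful cdevsw_add(), a driver whose select
 * entry point is kqueue-aware marks its slot; passing a non-zero use_offset
 * also sets CDEVSW_USE_OFFSET for the slot.
 *
 *	(void)cdevsw_setkqueueok(cmaj, &my_cdevsw, 0);
 */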
#include <pexpert/pexpert.h>	/* for PE_parse_boot_arg */
/*
 * Copy the "hostname" variable into a caller-provided buffer.
 * Returns: 0 for success, ENAMETOOLONG for insufficient buffer space.
 * On success, "len" will be set to the number of characters preceding
 * the NUL character in the hostname.
 */
int
bsd_hostname(char *buf, int bufsize, int *len)
{
	/*
	 * "hostname" is null-terminated, and "hostnamelen" is equivalent to
	 * strlen(hostname).
	 */
	if (hostnamelen < bufsize) {
		strlcpy(buf, hostname, bufsize);
		*len = hostnamelen;
		return 0;
	} else {
		return ENAMETOOLONG;
	}
}
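
/*
 * Example (illustrative sketch): a kernel-side caller retrieving the hostname
 * into a fixed-size buffer; MAXHOSTNAMELEN is assumed to come from
 * <sys/param.h>.
 *
 *	char hbuf[MAXHOSTNAMELEN];
 *	int hlen;
 *
 *	if (bsd_hostname(hbuf, sizeof(hbuf), &hlen) == 0)
 *		printf("hostname is %s (%d chars)\n", hbuf, hlen);
 */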
/*
 * Serialize device switch access per (dev, mode) pair.  If another thread
 * already holds the lock for this pair, sleep until it is released.
 */
void
devsw_lock(dev_t dev, int mode)
{
	devsw_lock_t newlock, tmplock;
	int res;

	assert(0 <= major(dev) && major(dev) < nchrdev);
	assert(mode == S_IFCHR || mode == S_IFBLK);

	MALLOC(newlock, devsw_lock_t, sizeof(struct devsw_lock), M_TEMP, M_WAITOK | M_ZERO);

	newlock->dl_dev = dev;
	newlock->dl_thread = current_thread();
	newlock->dl_mode = mode;

	lck_mtx_lock_spin(&devsw_lock_list_mtx);
retry:
	TAILQ_FOREACH(tmplock, &devsw_locks, dl_list) {
		if (tmplock->dl_dev == dev && tmplock->dl_mode == mode) {
			/* Another thread holds this pair; wait and rescan the list. */
			res = msleep(tmplock, &devsw_lock_list_mtx, PVFS, "devsw_lock", NULL);
			assert(res == 0);
			goto retry;
		}
	}

	TAILQ_INSERT_TAIL(&devsw_locks, newlock, dl_list);

	lck_mtx_unlock(&devsw_lock_list_mtx);
}
void
devsw_unlock(dev_t dev, int mode)
{
	devsw_lock_t tmplock;

	assert(0 <= major(dev) && major(dev) < nchrdev);

	lck_mtx_lock_spin(&devsw_lock_list_mtx);

	TAILQ_FOREACH(tmplock, &devsw_locks, dl_list) {
		if (tmplock->dl_dev == dev && tmplock->dl_mode == mode) {
			break;
		}
	}

	if (tmplock == NULL) {
		panic("Trying to unlock, and couldn't find lock.");
	}

	if (tmplock->dl_thread != current_thread()) {
		panic("Trying to unlock, but I don't hold the lock.");
	}

	/* Wake any threads sleeping on this entry in devsw_lock(). */
	wakeup(tmplock);
	TAILQ_REMOVE(&devsw_locks, tmplock, dl_list);

	lck_mtx_unlock(&devsw_lock_list_mtx);

	FREE(tmplock, M_TEMP);
}
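
/*
 * Example (illustrative sketch; "dev" is a hypothetical dev_t for an open
 * character device): callers bracket a device-switch operation so that only
 * one thread at a time acts on a given (dev, mode) pair.
 *
 *	devsw_lock(dev, S_IFCHR);
 *	// ... call the cdevsw entry point for major(dev) ...
 *	devsw_unlock(dev, S_IFCHR);
 */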
void
devsw_init(void)
{
	devsw_lock_grp = lck_grp_alloc_init("devsw", NULL);
	assert(devsw_lock_grp != NULL);

	lck_mtx_init(&devsw_lock_list_mtx, devsw_lock_grp, NULL);
	TAILQ_INIT(&devsw_locks);
}