/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/uio_internal.h>
#include <kern/kalloc.h>

#include <sys/kdebug.h>
#define DBG_UIO_COPYOUT 16
#define DBG_UIO_COPYIN  17

#include <kern/simple_lock.h>

static int uio_t_count = 0;
/*
 * uiomove - classic entry point; forwards to the 64-bit variant.
 */
int
uiomove(caddr_t cp, int n, struct uio *uio)
{
	return uiomove64((addr64_t)((unsigned int)cp), n, uio);
}
// LP64todo - fix this! 'n' should be int64_t?
int
uiomove64(addr64_t cp, int n, register struct uio *uio)
{
	register uint64_t acnt;
	int error = 0;

	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */
	while (n > 0 && uio_resid(uio)) {
		acnt = uio_iov_len(uio);
		if (acnt == 0) {
			uio_next_iov(uio);
			uio->uio_iovcnt--;
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;

		switch (uio->uio_segflg) {
		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0, 0);

				error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.iov64p->iov_base, acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0, 0);

				error = copyin(uio->uio_iovs.iov64p->iov_base, CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0, 0);
			}
			if (error)
				return (error);
			break;
		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0, 0);

				error = copyout( CAST_DOWN(caddr_t, cp), CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0, 0);

				error = copyin(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0, 0);
			}
			if (error)
				return (error);
			break;
		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), (caddr_t)uio->uio_iovs.iov32p->iov_base,
						acnt);
			else
				error = copywithin((caddr_t)uio->uio_iovs.iov32p->iov_base, CAST_DOWN(caddr_t, cp),
						acnt);
			break;
		case UIO_PHYS_USERSPACE64:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1, 0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov64p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1, 0);

				error = copypv(uio->uio_iovs.iov64p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1, 0);
			}
			if (error)
				return (error);
			break;
		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1, 0);

				error = copypv((addr64_t)cp, (addr64_t)uio->uio_iovs.iov32p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1, 0);

				error = copypv((addr64_t)uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1, 0);
			}
			if (error)
				return (error);
			break;
		case UIO_PHYS_SYSSPACE32:
		case UIO_PHYS_SYSSPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2, 0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov32p->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2, 0);

				error = copypv(uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2, 0);
			}
			if (error)
				return (error);
			break;
		}
		uio_iov_base_add(uio, acnt);
#if LP64KERN
		uio_iov_len_add(uio, -((int64_t)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int64_t)acnt)));
#else
		uio_iov_len_add(uio, -((int)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int)acnt)));
#endif
		uio->uio_offset += acnt;
		cp += acnt;
		n -= acnt;
	}
	return (error);
}
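
/*
 * Example (illustrative sketch, not part of the original code): a typical
 * character-device read handler copies a kernel buffer out through
 * uiomove(), which dispatches on uio_segflg to reach the right address
 * space. The names mydev_read, mydev_buf, and mydev_buflen are hypothetical.
 */
#if 0
static int
mydev_read(dev_t dev, struct uio *uio, int ioflag)
{
	int error = 0;

	/* copy as much of the kernel buffer as the caller's residual allows */
	while (uio_resid(uio) > 0 && uio_offset(uio) < mydev_buflen && !error) {
		int n = mydev_buflen - uio_offset(uio);

		/* uiomove advances the iovecs, offset, and residual for us */
		error = uiomove(&mydev_buf[uio_offset(uio)], n, uio);
	}
	return (error);
}
#endif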
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, register struct uio *uio)
{
	if (uio_resid(uio) <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt == 0)
		panic("ureadc: non-positive iovcnt");
	if (uio_iov_len(uio) <= 0) {
		uio->uio_iovcnt--;
		uio_next_iov(uio);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
		if (subyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
			return (EFAULT);
		break;

	case UIO_USERSPACE64:
		if (subyte((user_addr_t)uio->uio_iovs.iov64p->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		*((caddr_t)uio->uio_iovs.iov32p->iov_base) = c;
		break;

	case UIO_USERISPACE32:
	case UIO_USERISPACE:
		if (suibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
			return (EFAULT);
		break;

	default:
		break;
	}
	uio_iov_base_add(uio, 1);
	uio_iov_len_add(uio, -1);
	uio_setresid(uio, (uio_resid(uio) - 1));
	return (0);
}
#if defined(vax) || defined(ppc)
/* unused except by ct.c, other oddities XXX */
/*
 * Get next character written in by user from uio.
 */
int
uwritec(uio_t uio)
{
	int c = 0;

	if (uio_resid(uio) <= 0)
		return (-1);
again:
	if (uio->uio_iovcnt <= 0)
		panic("uwritec: non-positive iovcnt");

	if (uio_iov_len(uio) == 0) {
		uio_next_iov(uio);
		if (--uio->uio_iovcnt == 0)
			return (-1);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
		c = fubyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	case UIO_USERSPACE64:
		c = fubyte((user_addr_t)uio->uio_iovs.iov64p->iov_base);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		c = *((caddr_t)uio->uio_iovs.iov32p->iov_base) & 0377;
		break;

	case UIO_USERISPACE32:
	case UIO_USERISPACE:
		c = fuibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	default:
		c = 0;	/* avoid uninitialized variable warning */
		panic("uwritec: bogus uio_segflg");
		break;
	}
	if (c < 0)
		return (-1);
	uio_iov_base_add(uio, 1);
	uio_iov_len_add(uio, -1);
	uio_setresid(uio, (uio_resid(uio) - 1));
	return (c);
}
#endif /* vax || ppc */
/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, hashmask)
	int elements, type;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad cnt");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	MALLOC(hashtbl, struct generic *,
		(u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK|M_ZERO);
	if (hashtbl != NULL) {
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&hashtbl[i]);
		*hashmask = hashsize - 1;
	}
	return (hashtbl);
}
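
/*
 * Example (illustrative sketch, not part of the original code): using
 * hashinit() to build a table of hypothetical "myrec" records keyed by an
 * id. M_TEMP stands in for a dedicated malloc type; the table size is the
 * largest power of two at or below the element count, so the returned mask
 * selects a bucket from any hash of the key.
 */
#if 0
struct myrec {
	LIST_ENTRY(myrec)	mr_link;
	u_long			mr_id;
};
static LIST_HEAD(myrec_head, myrec) *myrec_tbl;
static u_long myrec_mask;

static void
myrec_setup(void)
{
	/* sized for roughly 128 expected entries */
	myrec_tbl = hashinit(128, M_TEMP, &myrec_mask);
}

static void
myrec_insert(struct myrec *mrp)
{
	/* mask the key to pick a bucket, then chain the record in */
	LIST_INSERT_HEAD(&myrec_tbl[mrp->mr_id & myrec_mask], mrp, mr_link);
}
#endif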
/*
 * uio_resid - return the residual IO value for the given uio_t
 */
user_ssize_t uio_resid( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/* 	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/* 		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/* 	} */
#endif /* LP64_DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return( 0 );
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		return( (user_ssize_t)a_uio->uio_resid );
#else
		return( a_uio->uio_resid_64 );
#endif
	}
	return( (user_ssize_t)a_uio->uio_resid );
}
/*
 * uio_setresid - set the residual IO value for the given uio_t
 */
void uio_setresid( uio_t a_uio, user_ssize_t a_value )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/* 	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/* 		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/* 	} */
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		a_uio->uio_resid = (int)a_value;
#else
		a_uio->uio_resid_64 = a_value;
#endif
	}
	else {
		a_uio->uio_resid = (int)a_value;
	}
	return;
}
/*
 * uio_proc_t - return the proc_t for the given uio_t
 * WARNING - This call is going away.  Find another way to get the proc_t!!
 */
__private_extern__ proc_t uio_proc_t( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return( NULL );
	}
	return( a_uio->uio_procp );
}
/*
 * uio_setproc_t - set the proc_t for the given uio_t
 * WARNING - This call is going away.
 */
__private_extern__ void uio_setproc_t( uio_t a_uio, proc_t a_proc_t )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}
	a_uio->uio_procp = a_proc_t;
	return;
}
/*
 * uio_curriovbase - return the base address of the current iovec associated
 *	with the given uio_t.  May return 0.
 */
user_addr_t uio_curriovbase( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_base);
	}
	return((user_addr_t)((uintptr_t)a_uio->uio_iovs.kiovp->iov_base));
}
/*
 * uio_curriovlen - return the length value of the current iovec associated
 *	with the given uio_t.
 */
user_size_t uio_curriovlen( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_len);
	}
	return((user_size_t)a_uio->uio_iovs.kiovp->iov_len);
}
/*
 * uio_setcurriovlen - set the length value of the current iovec associated
 *	with the given uio_t.
 */
__private_extern__ void uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		a_uio->uio_iovs.uiovp->iov_len = a_value;
	}
	else {
#if LP64_DEBUG
		if (a_value > 0xFFFFFFFFull) {
			panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
		}
#endif /* LP64_DEBUG */
		a_uio->uio_iovs.kiovp->iov_len = (size_t)a_value;
	}
	return;
}
/*
 * uio_iovcnt - return count of active iovecs for the given uio_t
 */
int uio_iovcnt( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}

	return( a_uio->uio_iovcnt );
}
/*
 * uio_offset - return the current offset value for the given uio_t
 */
off_t uio_offset( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}
	return( a_uio->uio_offset );
}
/*
 * uio_setoffset - set the current offset value for the given uio_t
 */
void uio_setoffset( uio_t a_uio, off_t a_offset )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}
	a_uio->uio_offset = a_offset;
	return;
}
/*
 * uio_rw - return the read / write flag for the given uio_t
 */
int uio_rw( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(-1);
	}
	return( a_uio->uio_rw );
}
/*
 * uio_setrw - set the read / write flag for the given uio_t
 */
void uio_setrw( uio_t a_uio, int a_value )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

#if LP64_DEBUG
	if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
		panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_value == UIO_READ || a_value == UIO_WRITE) {
		a_uio->uio_rw = a_value;
	}
	return;
}
/*
 * uio_isuserspace - return non zero value if the address space
 * flag is for a user address space (could be 32 or 64 bit).
 */
int uio_isuserspace( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(0);
	}

	if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
		return( 1 );
	}
	return( 0 );
}
/*
 * uio_create - create an uio_t.
 * 	Space is allocated to hold up to a_iovcount number of iovecs.  The uio_t
 * 	is not fully initialized until all iovecs are added using uio_addiov calls.
 * 	a_iovcount is the maximum number of iovecs you may add.
 */
uio_t uio_create( int a_iovcount,		/* number of iovecs */
		  off_t a_offset,		/* current offset */
		  int a_spacetype,		/* type of address space */
		  int a_iodirection )		/* read or write flag */
{
	void *	my_buf_p;
	int	my_size;
	uio_t	my_uio;

	my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
	my_buf_p = kalloc(my_size);
	my_uio = uio_createwithbuffer( a_iovcount,
					a_offset,
					a_spacetype,
					a_iodirection,
					my_buf_p,
					my_size );
	if (my_uio != 0) {
		/* leave a note that we allocated this uio_t */
		my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
		hw_atomic_add(&uio_t_count, 1);
	}

	return( my_uio );
}
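
/*
 * Example (illustrative sketch, not part of the original code): copying a
 * kernel buffer to a single 64-bit user-space buffer through the uio KPI.
 * The function and parameter names, and treating the target as
 * UIO_USERSPACE64, are assumptions for the sake of the example.
 */
#if 0
static int
copyout_via_uio(char *kbuf, user_addr_t ubuf, user_size_t ulen)
{
	int error;
	uio_t my_uio;

	/* room for one iovec; UIO_READ means data flows kernel -> user */
	my_uio = uio_create(1, 0, UIO_USERSPACE64, UIO_READ);
	if (my_uio == NULL)
		return (ENOMEM);
	error = uio_addiov(my_uio, ubuf, ulen);
	if (error == 0)
		error = uiomove(kbuf, (int)ulen, my_uio);
	uio_free(my_uio);
	return (error);
}
#endif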
/*
 * uio_createwithbuffer - create an uio_t.
 * 	Create a uio_t using the given buffer.  The uio_t
 * 	is not fully initialized until all iovecs are added using uio_addiov calls.
 * 	a_iovcount is the maximum number of iovecs you may add.
 * 	This call may fail if the given buffer is not large enough.
 */
__private_extern__ uio_t
uio_createwithbuffer( int a_iovcount,		/* number of iovecs */
			off_t a_offset,		/* current offset */
			int a_spacetype,	/* type of address space */
			int a_iodirection,	/* read or write flag */
			void *a_buf_p,		/* pointer to a uio_t buffer */
			int a_buffer_size )	/* size of uio_t buffer */
{
	uio_t	my_uio = (uio_t) a_buf_p;
	int	my_size;

	my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
	if (a_buffer_size < my_size) {
#if DEBUG
		panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return( NULL );
	}
	my_size = a_buffer_size;

#if DEBUG
	if (my_uio == 0) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
	if (a_iovcount > UIO_MAXIOV) {
		panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	bzero(my_uio, my_size);
	my_uio->uio_size = my_size;

	/* we use uio_segflg to indicate if the uio_t is the new format or */
	/* old (pre LP64 support) legacy format */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		my_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE:
		my_uio->uio_segflg = UIO_SYSSPACE32;
		break;
	case UIO_PHYS_USERSPACE:
		my_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	case UIO_PHYS_SYSSPACE:
		my_uio->uio_segflg = UIO_PHYS_SYSSPACE32;
		break;
	default:
		my_uio->uio_segflg = a_spacetype;
		break;
	}

	if (a_iovcount > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));
	}
	else {
		my_uio->uio_iovs.uiovp = NULL;
	}

	my_uio->uio_max_iovs = a_iovcount;
	my_uio->uio_offset = a_offset;
	my_uio->uio_rw = a_iodirection;
	my_uio->uio_flags = UIO_FLAGS_INITED;

	return( my_uio );
}
/*
 * uio_spacetype - return the address space type for the given uio_t
 */
int uio_spacetype( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(-1);
	}

	return( a_uio->uio_segflg );
}
/*
 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
 * This returns the location of the iovecs within the uio.
 * NOTE - for compatibility mode we just return the current value in uio_iovs
 * which will increase as the IO is completed and is NOT embedded within the
 * uio, it is a separate array of one or more iovecs.
 */
struct user_iovec * uio_iovsaddr( uio_t a_uio )
{
	struct user_iovec *	my_addr;

	if (a_uio == NULL) {
		return(NULL);
	}

	if (a_uio->uio_segflg == UIO_USERSPACE || a_uio->uio_segflg == UIO_SYSSPACE) {
		/* we need this for compatibility mode. */
		my_addr = (struct user_iovec *) a_uio->uio_iovs.iovp;
	}
	else {
		my_addr = (struct user_iovec *) (((uint8_t *)a_uio) + sizeof(struct uio));
	}
	return(my_addr);
}
/*
 * uio_reset - reset an uio_t.
 * 	Reset the given uio_t to initial values.  The uio_t is not fully initialized
 * 	until all iovecs are added using uio_addiov calls.
 * 	The a_iovcount value passed in the uio_create is the maximum number of
 * 	iovecs you may add.
 */
void uio_reset( uio_t a_uio,
		off_t a_offset,		/* current offset */
		int a_spacetype,	/* type of address space */
		int a_iodirection )	/* read or write flag */
{
	int		my_size;
	int		my_max_iovs;
	u_int32_t	my_old_flags;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	my_size = a_uio->uio_size;
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	bzero(a_uio, my_size);
	a_uio->uio_size = my_size;
	a_uio->uio_segflg = a_spacetype;
	if (my_max_iovs > 0) {
		a_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)a_uio) + sizeof(struct uio));
	}
	else {
		a_uio->uio_iovs.uiovp = NULL;
	}
	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}
/*
 * uio_free - free a uio_t allocated via uio_create().  this also frees all
 * 	associated iovecs.
 */
void uio_free( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
		if ((int)(hw_atomic_sub(&uio_t_count, 1)) < 0) {
			panic("%s :%d - uio_t_count has gone negative\n", __FILE__, __LINE__);
		}
		kfree(a_uio, a_uio->uio_size);
	}
}
/*
 * uio_addiov - add an iovec to the given uio_t.  You may call this up to
 * 	the a_iovcount number that was passed to uio_create.  This call will
 * 	increment the residual IO count as iovecs are added to the uio_t.
 *	returns 0 if add was successful else non zero.
 */
int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
	int	i;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(-1);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) {
				a_uio->uio_iovs.uiovp[i].iov_len = a_length;
				a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr;
				a_uio->uio_iovcnt++;
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
				a_uio->uio_resid += a_length;
#else
				a_uio->uio_resid_64 += a_length;
#endif
				return( 0 );
			}
		}
	}
	else {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) {
				a_uio->uio_iovs.kiovp[i].iov_len = (u_int32_t)a_length;
				a_uio->uio_iovs.kiovp[i].iov_base = (u_int32_t)((uintptr_t)a_baseaddr);
				a_uio->uio_iovcnt++;
				a_uio->uio_resid += a_length;
				return( 0 );
			}
		}
	}

	return( -1 );
}
/*
 * uio_getiov - get iovec data associated with the given uio_t.  Use
 *  a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
 *  a_baseaddr_p and a_length_p may be NULL.
 * 	returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
 *	returns 0 when data is returned.
 */
int uio_getiov( uio_t a_uio,
		int a_index,
		user_addr_t * a_baseaddr_p,
		user_size_t * a_length_p )
{
	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return(-1);
	}
	if ( a_index < 0 || a_index >= a_uio->uio_iovcnt) {
		return(-1);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len;
		}
	}
	else {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len;
		}
	}

	return( 0 );
}
/*
 * uio_calculateresid - runs through all iovecs associated with this
 *	uio_t and calculates (and sets) the residual IO count.
 */
__private_extern__ void uio_calculateresid( uio_t a_uio )
{
	int	i;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

	a_uio->uio_iovcnt = a_uio->uio_max_iovs;
	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		a_uio->uio_resid = 0;
#else
		a_uio->uio_resid_64 = 0;
#endif
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
				a_uio->uio_resid += a_uio->uio_iovs.uiovp[i].iov_len;
#else
				a_uio->uio_resid_64 += a_uio->uio_iovs.uiovp[i].iov_len;
#endif
			}
		}

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		a_uio->uio_resid = 0;
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) {
				a_uio->uio_resid += a_uio->uio_iovs.kiovp[i].iov_len;
			}
		}

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}

	return;
}
/*
 * uio_update - update the given uio_t for a_count of completed IO.
 *	This call decrements the current iovec length and residual IO value
 *	and increments the current iovec base address and offset value.
 *	If the current iovec length is 0 then advance to the next
 *	iovec (if any).
 * 	If the a_count passed in is 0, then only do the advancement
 *	over any 0 length iovecs.
 */
void uio_update( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count != 0) {
			if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
				a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
				a_uio->uio_iovs.uiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.uiovp->iov_base += a_count;
				a_uio->uio_iovs.uiovp->iov_len -= a_count;
			}
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
			if (a_uio->uio_resid < 0) {
				a_uio->uio_resid = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid) {
				a_uio->uio_offset += a_uio->uio_resid;
				a_uio->uio_resid = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid -= a_count;
			}
#else
			if (a_uio->uio_resid_64 < 0) {
				a_uio->uio_resid_64 = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
#endif
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count != 0) {
			if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
				a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
				a_uio->uio_iovs.kiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.kiovp->iov_base += a_count;
				a_uio->uio_iovs.kiovp->iov_len -= a_count;
			}
			if (a_uio->uio_resid < 0) {
				a_uio->uio_resid = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid) {
				a_uio->uio_offset += a_uio->uio_resid;
				a_uio->uio_resid = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}
	return;
}
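
/*
 * Example (illustrative sketch, not part of the original code): a driver
 * that moves data by DMA rather than through uiomove() accounts for the
 * completed transfer by hand; "xfer_len" is hypothetical.
 */
#if 0
static void
finish_dma_transfer(uio_t my_uio, user_size_t xfer_len)
{
	/*
	 * advances the current iovec base and uio_offset by xfer_len,
	 * shrinks iov_len and the residual, and skips consumed iovecs
	 */
	uio_update(my_uio, xfer_len);
}
#endif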
/*
 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
 *	may return NULL.
 */
uio_t uio_duplicate( uio_t a_uio )
{
	uio_t	my_uio;
	int	i;

	if (a_uio == NULL) {
		return(NULL);
	}

	my_uio = (uio_t) kalloc(a_uio->uio_size);
	if (my_uio == 0) {
		panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
	}

	bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
	/* need to set our iovec pointer to point to first active iovec */
	if (my_uio->uio_max_iovs > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));

		/* advance to first nonzero iovec */
		if (my_uio->uio_iovcnt > 0) {
			for ( i = 0; i < my_uio->uio_max_iovs; i++ ) {
				if (UIO_IS_64_BIT_SPACE(a_uio)) {
					if (my_uio->uio_iovs.uiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.uiovp++;
				}
				else {
					if (my_uio->uio_iovs.kiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.kiovp++;
				}
			}
		}
	}

	/* the copy was allocated here, so mark it as ours to free */
	my_uio->uio_flags = UIO_FLAGS_WE_ALLOCED | UIO_FLAGS_INITED;

	return(my_uio);
}