]>
git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_subr.c
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
69 #include <machine/atomic.h>
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc_internal.h>
74 #include <sys/malloc.h>
75 #include <sys/queue.h>
77 #include <sys/uio_internal.h>
78 #include <kern/kalloc.h>
82 #include <sys/kdebug.h>
83 #define DBG_UIO_COPYOUT 16
84 #define DBG_UIO_COPYIN 17
87 #include <kern/simple_lock.h>
/*
 * Debug accounting: number of outstanding uio_t structures allocated by
 * uio_create()/uio_duplicate(); decremented in uio_free().
 */
static uint32_t uio_t_count = 0;
/*
 * IS_VALID_UIO_SEGFLG - non-zero when segflg is one of the recognized uio
 * address-space types (user/system/physical; plain, 32-bit, 64-bit, and
 * instruction-space variants).
 */
#define IS_VALID_UIO_SEGFLG(segflg)  \
	( (segflg) == UIO_USERSPACE || \
	(segflg) == UIO_SYSSPACE || \
	(segflg) == UIO_USERSPACE32 || \
	(segflg) == UIO_USERSPACE64 || \
	(segflg) == UIO_SYSSPACE32 || \
	(segflg) == UIO_USERISPACE || \
	(segflg) == UIO_PHYS_USERSPACE || \
	(segflg) == UIO_PHYS_SYSSPACE || \
	(segflg) == UIO_USERISPACE32 || \
	(segflg) == UIO_PHYS_USERSPACE32 || \
	(segflg) == UIO_USERISPACE64 || \
	(segflg) == UIO_PHYS_USERSPACE64 )
110 * Notes: The first argument should be a caddr_t, but const poisoning
111 * for typedef'ed types doesn't work in gcc.
114 uiomove(const char * cp
, int n
, uio_t uio
)
116 return uiomove64((const addr64_t
)(uintptr_t)cp
, n
, uio
);
128 uiomove64(const addr64_t c_cp
, int n
, struct uio
*uio
)
135 if (uio
->uio_rw
!= UIO_READ
&& uio
->uio_rw
!= UIO_WRITE
) {
136 panic("uiomove: mode");
141 if (IS_VALID_UIO_SEGFLG(uio
->uio_segflg
) == 0) {
142 panic("%s :%d - invalid uio_segflg\n", __FILE__
, __LINE__
);
144 #endif /* LP64_DEBUG */
146 while (n
> 0 && uio_resid(uio
)) {
148 acnt
= uio_curriovlen(uio
);
152 if (n
> 0 && acnt
> (uint64_t)n
) {
156 switch ((int) uio
->uio_segflg
) {
157 case UIO_USERSPACE64
:
158 case UIO_USERISPACE64
:
159 case UIO_USERSPACE32
:
160 case UIO_USERISPACE32
:
163 // LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
164 if (uio
->uio_rw
== UIO_READ
) {
165 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
,
166 (int)cp
, (uintptr_t)uio
->uio_iovs
.uiovp
->iov_base
, acnt
, 0, 0);
168 error
= copyout( CAST_DOWN(caddr_t
, cp
), uio
->uio_iovs
.uiovp
->iov_base
, acnt
);
170 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
,
171 (int)cp
, (uintptr_t)uio
->uio_iovs
.uiovp
->iov_base
, acnt
, 0, 0);
173 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
,
174 (uintptr_t)uio
->uio_iovs
.uiovp
->iov_base
, (int)cp
, acnt
, 0, 0);
176 error
= copyin(uio
->uio_iovs
.uiovp
->iov_base
, CAST_DOWN(caddr_t
, cp
), acnt
);
178 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
,
179 (uintptr_t)uio
->uio_iovs
.uiovp
->iov_base
, (int)cp
, acnt
, 0, 0);
188 if (uio
->uio_rw
== UIO_READ
) {
189 error
= copywithin(CAST_DOWN(caddr_t
, cp
), CAST_DOWN(caddr_t
, uio
->uio_iovs
.kiovp
->iov_base
),
192 error
= copywithin(CAST_DOWN(caddr_t
, uio
->uio_iovs
.kiovp
->iov_base
), CAST_DOWN(caddr_t
, cp
),
197 case UIO_PHYS_USERSPACE64
:
198 case UIO_PHYS_USERSPACE32
:
199 case UIO_PHYS_USERSPACE
:
200 if (uio
->uio_rw
== UIO_READ
) {
201 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
,
202 (int)cp
, (uintptr_t)uio
->uio_iovs
.uiovp
->iov_base
, acnt
, 1, 0);
204 error
= copypv((addr64_t
)cp
, uio
->uio_iovs
.uiovp
->iov_base
, acnt
, cppvPsrc
| cppvNoRefSrc
);
205 if (error
) { /* Copy physical to virtual */
209 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
,
210 (int)cp
, (uintptr_t)uio
->uio_iovs
.uiovp
->iov_base
, acnt
, 1, 0);
212 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
,
213 (uintptr_t)uio
->uio_iovs
.uiovp
->iov_base
, (int)cp
, acnt
, 1, 0);
215 error
= copypv(uio
->uio_iovs
.uiovp
->iov_base
, (addr64_t
)cp
, acnt
, cppvPsnk
| cppvNoRefSrc
| cppvNoModSnk
);
216 if (error
) { /* Copy virtual to physical */
220 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
,
221 (uintptr_t)uio
->uio_iovs
.uiovp
->iov_base
, (int)cp
, acnt
, 1, 0);
228 case UIO_PHYS_SYSSPACE
:
229 if (uio
->uio_rw
== UIO_READ
) {
230 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
,
231 (int)cp
, (uintptr_t)uio
->uio_iovs
.kiovp
->iov_base
, acnt
, 2, 0);
233 error
= copypv((addr64_t
)cp
, uio
->uio_iovs
.kiovp
->iov_base
, acnt
, cppvKmap
| cppvPsrc
| cppvNoRefSrc
);
234 if (error
) { /* Copy physical to virtual */
238 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
,
239 (int)cp
, (uintptr_t)uio
->uio_iovs
.kiovp
->iov_base
, acnt
, 2, 0);
241 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
,
242 (uintptr_t)uio
->uio_iovs
.kiovp
->iov_base
, (int)cp
, acnt
, 2, 0);
244 error
= copypv(uio
->uio_iovs
.kiovp
->iov_base
, (addr64_t
)cp
, acnt
, cppvKmap
| cppvPsnk
| cppvNoRefSrc
| cppvNoModSnk
);
245 if (error
) { /* Copy virtual to physical */
249 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
,
250 (uintptr_t)uio
->uio_iovs
.kiovp
->iov_base
, (int)cp
, acnt
, 2, 0);
260 uio_update(uio
, acnt
);
268 * Give next character to user as result of read.
271 ureadc(int c
, struct uio
*uio
)
273 if (uio_resid(uio
) <= 0) {
274 panic("ureadc: non-positive resid");
277 if (uio
->uio_iovcnt
== 0) {
278 panic("ureadc: non-positive iovcnt");
280 if (uio_curriovlen(uio
) <= 0) {
281 panic("ureadc: non-positive iovlen");
284 switch ((int) uio
->uio_segflg
) {
285 case UIO_USERSPACE32
:
287 case UIO_USERISPACE32
:
289 case UIO_USERSPACE64
:
290 case UIO_USERISPACE64
:
291 if (subyte((user_addr_t
)uio
->uio_iovs
.uiovp
->iov_base
, c
) < 0) {
298 *(CAST_DOWN(caddr_t
, uio
->uio_iovs
.kiovp
->iov_base
)) = c
;
309 * General routine to allocate a hash table.
312 hashinit(int elements
, int type
, u_long
*hashmask
)
315 LIST_HEAD(generic
, generic
) * hashtbl
;
319 panic("hashinit: bad cnt");
321 for (hashsize
= 1; hashsize
<= elements
; hashsize
<<= 1) {
325 MALLOC(hashtbl
, struct generic
*,
326 hashsize
* sizeof(*hashtbl
), type
, M_WAITOK
| M_ZERO
);
327 if (hashtbl
!= NULL
) {
328 for (i
= 0; i
< hashsize
; i
++) {
329 LIST_INIT(&hashtbl
[i
]);
331 *hashmask
= hashsize
- 1;
337 * uio_resid - return the residual IO value for the given uio_t
340 uio_resid( uio_t a_uio
)
344 printf("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
346 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
347 /* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
351 /* return 0 if there are no active iovecs */
356 return a_uio
->uio_resid_64
;
360 * uio_setresid - set the residual IO value for the given uio_t
363 uio_setresid( uio_t a_uio
, user_ssize_t a_value
)
367 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
369 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
370 /* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
378 a_uio
->uio_resid_64
= a_value
;
383 * uio_curriovbase - return the base address of the current iovec associated
384 * with the given uio_t. May return 0.
387 uio_curriovbase( uio_t a_uio
)
391 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
393 #endif /* LP64_DEBUG */
395 if (a_uio
== NULL
|| a_uio
->uio_iovcnt
< 1) {
399 if (UIO_IS_USER_SPACE(a_uio
)) {
400 return a_uio
->uio_iovs
.uiovp
->iov_base
;
402 return (user_addr_t
)a_uio
->uio_iovs
.kiovp
->iov_base
;
406 * uio_curriovlen - return the length value of the current iovec associated
407 * with the given uio_t.
410 uio_curriovlen( uio_t a_uio
)
414 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
416 #endif /* LP64_DEBUG */
418 if (a_uio
== NULL
|| a_uio
->uio_iovcnt
< 1) {
422 if (UIO_IS_USER_SPACE(a_uio
)) {
423 return a_uio
->uio_iovs
.uiovp
->iov_len
;
425 return (user_size_t
)a_uio
->uio_iovs
.kiovp
->iov_len
;
429 * uio_setcurriovlen - set the length value of the current iovec associated
430 * with the given uio_t.
432 __private_extern__
void
433 uio_setcurriovlen( uio_t a_uio
, user_size_t a_value
)
437 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
439 #endif /* LP64_DEBUG */
445 if (UIO_IS_USER_SPACE(a_uio
)) {
446 a_uio
->uio_iovs
.uiovp
->iov_len
= a_value
;
449 if (a_value
> 0xFFFFFFFFull
) {
450 panic("%s :%d - invalid a_value\n", __FILE__
, __LINE__
);
452 #endif /* LP64_DEBUG */
453 a_uio
->uio_iovs
.kiovp
->iov_len
= (size_t)a_value
;
459 * uio_iovcnt - return count of active iovecs for the given uio_t
462 uio_iovcnt( uio_t a_uio
)
466 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
468 #endif /* LP64_DEBUG */
474 return a_uio
->uio_iovcnt
;
478 * uio_offset - return the current offset value for the given uio_t
481 uio_offset( uio_t a_uio
)
485 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
487 #endif /* LP64_DEBUG */
492 return a_uio
->uio_offset
;
496 * uio_setoffset - set the current offset value for the given uio_t
499 uio_setoffset( uio_t a_uio
, off_t a_offset
)
503 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
505 #endif /* LP64_DEBUG */
510 a_uio
->uio_offset
= a_offset
;
515 * uio_rw - return the read / write flag for the given uio_t
518 uio_rw( uio_t a_uio
)
522 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
524 #endif /* LP64_DEBUG */
529 return a_uio
->uio_rw
;
533 * uio_setrw - set the read / write flag for the given uio_t
536 uio_setrw( uio_t a_uio
, int a_value
)
540 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
541 #endif /* LP64_DEBUG */
546 if (!(a_value
== UIO_READ
|| a_value
== UIO_WRITE
)) {
547 panic("%s :%d - invalid a_value\n", __FILE__
, __LINE__
);
549 #endif /* LP64_DEBUG */
551 if (a_value
== UIO_READ
|| a_value
== UIO_WRITE
) {
552 a_uio
->uio_rw
= a_value
;
558 * uio_isuserspace - return non zero value if the address space
559 * flag is for a user address space (could be 32 or 64 bit).
562 uio_isuserspace( uio_t a_uio
)
566 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
567 #endif /* LP64_DEBUG */
571 if (UIO_SEG_IS_USER_SPACE(a_uio
->uio_segflg
)) {
579 * uio_create - create an uio_t.
580 * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t
581 * is not fully initialized until all iovecs are added using uio_addiov calls.
582 * a_iovcount is the maximum number of iovecs you may add.
585 uio_create( int a_iovcount
, /* number of iovecs */
586 off_t a_offset
, /* current offset */
587 int a_spacetype
, /* type of address space */
588 int a_iodirection
) /* read or write flag */
594 my_size
= UIO_SIZEOF(a_iovcount
);
595 my_buf_p
= kalloc(my_size
);
596 my_uio
= uio_createwithbuffer( a_iovcount
,
603 /* leave a note that we allocated this uio_t */
604 my_uio
->uio_flags
|= UIO_FLAGS_WE_ALLOCED
;
606 os_atomic_inc(&uio_t_count
, relaxed
);
615 * uio_createwithbuffer - create an uio_t.
616 * Create a uio_t using the given buffer. The uio_t
617 * is not fully initialized until all iovecs are added using uio_addiov calls.
618 * a_iovcount is the maximum number of iovecs you may add.
619 * This call may fail if the given buffer is not large enough.
621 __private_extern__ uio_t
622 uio_createwithbuffer( int a_iovcount
, /* number of iovecs */
623 off_t a_offset
, /* current offset */
624 int a_spacetype
, /* type of address space */
625 int a_iodirection
, /* read or write flag */
626 void *a_buf_p
, /* pointer to a uio_t buffer */
627 size_t a_buffer_size
) /* size of uio_t buffer */
629 uio_t my_uio
= (uio_t
) a_buf_p
;
632 my_size
= UIO_SIZEOF(a_iovcount
);
633 if (a_buffer_size
< my_size
) {
635 panic("%s :%d - a_buffer_size is too small\n", __FILE__
, __LINE__
);
639 my_size
= a_buffer_size
;
643 panic("%s :%d - could not allocate uio_t\n", __FILE__
, __LINE__
);
645 if (!IS_VALID_UIO_SEGFLG(a_spacetype
)) {
646 panic("%s :%d - invalid address space type\n", __FILE__
, __LINE__
);
648 if (!(a_iodirection
== UIO_READ
|| a_iodirection
== UIO_WRITE
)) {
649 panic("%s :%d - invalid IO direction flag\n", __FILE__
, __LINE__
);
651 if (a_iovcount
> UIO_MAXIOV
) {
652 panic("%s :%d - invalid a_iovcount\n", __FILE__
, __LINE__
);
656 bzero(my_uio
, my_size
);
657 my_uio
->uio_size
= my_size
;
660 * we use uio_segflg to indicate if the uio_t is the new format or
661 * old (pre LP64 support) legacy format
662 * This switch statement should canonicalize incoming space type
663 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
664 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
666 switch (a_spacetype
) {
668 my_uio
->uio_segflg
= UIO_USERSPACE32
;
671 my_uio
->uio_segflg
= UIO_SYSSPACE
;
673 case UIO_PHYS_USERSPACE
:
674 my_uio
->uio_segflg
= UIO_PHYS_USERSPACE32
;
677 my_uio
->uio_segflg
= a_spacetype
;
681 if (a_iovcount
> 0) {
682 my_uio
->uio_iovs
.uiovp
= (struct user_iovec
*)
683 (((uint8_t *)my_uio
) + sizeof(struct uio
));
685 my_uio
->uio_iovs
.uiovp
= NULL
;
688 my_uio
->uio_max_iovs
= a_iovcount
;
689 my_uio
->uio_offset
= a_offset
;
690 my_uio
->uio_rw
= a_iodirection
;
691 my_uio
->uio_flags
= UIO_FLAGS_INITED
;
697 * uio_spacetype - return the address space type for the given uio_t
699 __private_extern__
int
700 uio_spacetype( uio_t a_uio
)
704 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
705 #endif /* LP64_DEBUG */
709 return a_uio
->uio_segflg
;
713 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
714 * This returns the location of the iovecs within the uio.
715 * NOTE - for compatibility mode we just return the current value in uio_iovs
716 * which will increase as the IO is completed and is NOT embedded within the
717 * uio, it is a seperate array of one or more iovecs.
719 __private_extern__
struct user_iovec
*
720 uio_iovsaddr( uio_t a_uio
)
722 struct user_iovec
* my_addr
;
728 if (UIO_SEG_IS_USER_SPACE(a_uio
->uio_segflg
)) {
729 /* we need this for compatibility mode. */
730 my_addr
= (struct user_iovec
*) a_uio
->uio_iovs
.uiovp
;
733 panic("uio_iovsaddr called for UIO_SYSSPACE request");
741 * uio_reset - reset an uio_t.
742 * Reset the given uio_t to initial values. The uio_t is not fully initialized
743 * until all iovecs are added using uio_addiov calls.
744 * The a_iovcount value passed in the uio_create is the maximum number of
745 * iovecs you may add.
748 uio_reset( uio_t a_uio
,
749 off_t a_offset
, /* current offset */
750 int a_spacetype
, /* type of address space */
751 int a_iodirection
) /* read or write flag */
755 u_int32_t my_old_flags
;
759 panic("%s :%d - could not allocate uio_t\n", __FILE__
, __LINE__
);
761 if (!IS_VALID_UIO_SEGFLG(a_spacetype
)) {
762 panic("%s :%d - invalid address space type\n", __FILE__
, __LINE__
);
764 if (!(a_iodirection
== UIO_READ
|| a_iodirection
== UIO_WRITE
)) {
765 panic("%s :%d - invalid IO direction flag\n", __FILE__
, __LINE__
);
767 #endif /* LP64_DEBUG */
773 my_size
= a_uio
->uio_size
;
774 my_old_flags
= a_uio
->uio_flags
;
775 my_max_iovs
= a_uio
->uio_max_iovs
;
776 bzero(a_uio
, my_size
);
777 a_uio
->uio_size
= my_size
;
780 * we use uio_segflg to indicate if the uio_t is the new format or
781 * old (pre LP64 support) legacy format
782 * This switch statement should canonicalize incoming space type
783 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
784 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
786 switch (a_spacetype
) {
788 a_uio
->uio_segflg
= UIO_USERSPACE32
;
791 a_uio
->uio_segflg
= UIO_SYSSPACE
;
793 case UIO_PHYS_USERSPACE
:
794 a_uio
->uio_segflg
= UIO_PHYS_USERSPACE32
;
797 a_uio
->uio_segflg
= a_spacetype
;
801 if (my_max_iovs
> 0) {
802 a_uio
->uio_iovs
.uiovp
= (struct user_iovec
*)
803 (((uint8_t *)a_uio
) + sizeof(struct uio
));
805 a_uio
->uio_iovs
.uiovp
= NULL
;
808 a_uio
->uio_max_iovs
= my_max_iovs
;
809 a_uio
->uio_offset
= a_offset
;
810 a_uio
->uio_rw
= a_iodirection
;
811 a_uio
->uio_flags
= my_old_flags
;
817 * uio_free - free a uio_t allocated via uio_init. this also frees all
821 uio_free( uio_t a_uio
)
825 panic("%s :%d - passing NULL uio_t\n", __FILE__
, __LINE__
);
827 #endif /* LP64_DEBUG */
829 if (a_uio
!= NULL
&& (a_uio
->uio_flags
& UIO_FLAGS_WE_ALLOCED
) != 0) {
831 if (os_atomic_dec_orig(&uio_t_count
, relaxed
) == 0) {
832 panic("%s :%d - uio_t_count underflow\n", __FILE__
, __LINE__
);
835 kfree(a_uio
, a_uio
->uio_size
);
840 * uio_addiov - add an iovec to the given uio_t. You may call this up to
841 * the a_iovcount number that was passed to uio_create. This call will
842 * increment the residual IO count as iovecs are added to the uio_t.
843 * returns 0 if add was successful else non zero.
846 uio_addiov( uio_t a_uio
, user_addr_t a_baseaddr
, user_size_t a_length
)
853 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
858 if (os_add_overflow(a_length
, a_uio
->uio_resid_64
, &resid
)) {
860 panic("%s :%d - invalid length %lu\n", __FILE__
, __LINE__
, (unsigned long)a_length
);
865 if (UIO_IS_USER_SPACE(a_uio
)) {
866 for (i
= 0; i
< a_uio
->uio_max_iovs
; i
++) {
867 if (a_uio
->uio_iovs
.uiovp
[i
].iov_len
== 0 && a_uio
->uio_iovs
.uiovp
[i
].iov_base
== 0) {
868 a_uio
->uio_iovs
.uiovp
[i
].iov_len
= a_length
;
869 a_uio
->uio_iovs
.uiovp
[i
].iov_base
= a_baseaddr
;
871 a_uio
->uio_resid_64
= resid
;
876 for (i
= 0; i
< a_uio
->uio_max_iovs
; i
++) {
877 if (a_uio
->uio_iovs
.kiovp
[i
].iov_len
== 0 && a_uio
->uio_iovs
.kiovp
[i
].iov_base
== 0) {
878 a_uio
->uio_iovs
.kiovp
[i
].iov_len
= (u_int64_t
)a_length
;
879 a_uio
->uio_iovs
.kiovp
[i
].iov_base
= (u_int64_t
)a_baseaddr
;
881 a_uio
->uio_resid_64
= resid
;
891 * uio_getiov - get iovec data associated with the given uio_t. Use
892 * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
893 * a_baseaddr_p and a_length_p may be NULL.
894 * returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
895 * returns 0 when data is returned.
898 uio_getiov( uio_t a_uio
,
900 user_addr_t
* a_baseaddr_p
,
901 user_size_t
* a_length_p
)
905 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
909 if (a_index
< 0 || a_index
>= a_uio
->uio_iovcnt
) {
913 if (UIO_IS_USER_SPACE(a_uio
)) {
914 if (a_baseaddr_p
!= NULL
) {
915 *a_baseaddr_p
= a_uio
->uio_iovs
.uiovp
[a_index
].iov_base
;
917 if (a_length_p
!= NULL
) {
918 *a_length_p
= a_uio
->uio_iovs
.uiovp
[a_index
].iov_len
;
921 if (a_baseaddr_p
!= NULL
) {
922 *a_baseaddr_p
= a_uio
->uio_iovs
.kiovp
[a_index
].iov_base
;
924 if (a_length_p
!= NULL
) {
925 *a_length_p
= a_uio
->uio_iovs
.kiovp
[a_index
].iov_len
;
933 * uio_calculateresid - runs through all iovecs associated with this
934 * uio_t and calculates (and sets) the residual IO count.
936 __private_extern__
int
937 uio_calculateresid( uio_t a_uio
)
944 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
945 #endif /* LP64_DEBUG */
949 a_uio
->uio_iovcnt
= a_uio
->uio_max_iovs
;
950 if (UIO_IS_USER_SPACE(a_uio
)) {
951 a_uio
->uio_resid_64
= 0;
952 for (i
= 0; i
< a_uio
->uio_max_iovs
; i
++) {
953 if (a_uio
->uio_iovs
.uiovp
[i
].iov_len
!= 0 && a_uio
->uio_iovs
.uiovp
[i
].iov_base
!= 0) {
954 if (a_uio
->uio_iovs
.uiovp
[i
].iov_len
> LONG_MAX
) {
957 resid
+= a_uio
->uio_iovs
.uiovp
[i
].iov_len
;
958 if (resid
> LONG_MAX
) {
963 a_uio
->uio_resid_64
= resid
;
965 /* position to first non zero length iovec (4235922) */
966 while (a_uio
->uio_iovcnt
> 0 && a_uio
->uio_iovs
.uiovp
->iov_len
== 0) {
968 if (a_uio
->uio_iovcnt
> 0) {
969 a_uio
->uio_iovs
.uiovp
++;
973 a_uio
->uio_resid_64
= 0;
974 for (i
= 0; i
< a_uio
->uio_max_iovs
; i
++) {
975 if (a_uio
->uio_iovs
.kiovp
[i
].iov_len
!= 0 && a_uio
->uio_iovs
.kiovp
[i
].iov_base
!= 0) {
976 if (a_uio
->uio_iovs
.kiovp
[i
].iov_len
> LONG_MAX
) {
979 resid
+= a_uio
->uio_iovs
.kiovp
[i
].iov_len
;
980 if (resid
> LONG_MAX
) {
985 a_uio
->uio_resid_64
= resid
;
987 /* position to first non zero length iovec (4235922) */
988 while (a_uio
->uio_iovcnt
> 0 && a_uio
->uio_iovs
.kiovp
->iov_len
== 0) {
990 if (a_uio
->uio_iovcnt
> 0) {
991 a_uio
->uio_iovs
.kiovp
++;
1000 * uio_update - update the given uio_t for a_count of completed IO.
1001 * This call decrements the current iovec length and residual IO value
1002 * and increments the current iovec base address and offset value.
1003 * If the current iovec length is 0 then advance to the next
1005 * If the a_count passed in is 0, than only do the advancement
1006 * over any 0 length iovec's.
1009 uio_update( uio_t a_uio
, user_size_t a_count
)
1012 if (a_uio
== NULL
) {
1013 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
1015 if (UIO_IS_32_BIT_SPACE(a_uio
) && a_count
> 0xFFFFFFFFull
) {
1016 panic("%s :%d - invalid count value \n", __FILE__
, __LINE__
);
1018 #endif /* LP64_DEBUG */
1020 if (a_uio
== NULL
|| a_uio
->uio_iovcnt
< 1) {
1024 if (UIO_IS_USER_SPACE(a_uio
)) {
1026 * if a_count == 0, then we are asking to skip over
1030 if (a_count
> a_uio
->uio_iovs
.uiovp
->iov_len
) {
1031 a_uio
->uio_iovs
.uiovp
->iov_base
+= a_uio
->uio_iovs
.uiovp
->iov_len
;
1032 a_uio
->uio_iovs
.uiovp
->iov_len
= 0;
1034 a_uio
->uio_iovs
.uiovp
->iov_base
+= a_count
;
1035 a_uio
->uio_iovs
.uiovp
->iov_len
-= a_count
;
1037 if (a_count
> (user_size_t
)a_uio
->uio_resid_64
) {
1038 a_uio
->uio_offset
+= a_uio
->uio_resid_64
;
1039 a_uio
->uio_resid_64
= 0;
1041 a_uio
->uio_offset
+= a_count
;
1042 a_uio
->uio_resid_64
-= a_count
;
1046 * advance to next iovec if current one is totally consumed
1048 while (a_uio
->uio_iovcnt
> 0 && a_uio
->uio_iovs
.uiovp
->iov_len
== 0) {
1049 a_uio
->uio_iovcnt
--;
1050 if (a_uio
->uio_iovcnt
> 0) {
1051 a_uio
->uio_iovs
.uiovp
++;
1056 * if a_count == 0, then we are asking to skip over
1060 if (a_count
> a_uio
->uio_iovs
.kiovp
->iov_len
) {
1061 a_uio
->uio_iovs
.kiovp
->iov_base
+= a_uio
->uio_iovs
.kiovp
->iov_len
;
1062 a_uio
->uio_iovs
.kiovp
->iov_len
= 0;
1064 a_uio
->uio_iovs
.kiovp
->iov_base
+= a_count
;
1065 a_uio
->uio_iovs
.kiovp
->iov_len
-= a_count
;
1067 if (a_count
> (user_size_t
)a_uio
->uio_resid_64
) {
1068 a_uio
->uio_offset
+= a_uio
->uio_resid_64
;
1069 a_uio
->uio_resid_64
= 0;
1071 a_uio
->uio_offset
+= a_count
;
1072 a_uio
->uio_resid_64
-= a_count
;
1076 * advance to next iovec if current one is totally consumed
1078 while (a_uio
->uio_iovcnt
> 0 && a_uio
->uio_iovs
.kiovp
->iov_len
== 0) {
1079 a_uio
->uio_iovcnt
--;
1080 if (a_uio
->uio_iovcnt
> 0) {
1081 a_uio
->uio_iovs
.kiovp
++;
1089 * uio_pushback - undo uncommitted I/O by subtracting from the
1090 * current base address and offset, and incrementing the residiual
1091 * IO. If the UIO was previously exhausted, this call will panic.
1092 * New code should not use this functionality.
1094 __private_extern__
void
1095 uio_pushback( uio_t a_uio
, user_size_t a_count
)
1098 if (a_uio
== NULL
) {
1099 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
1101 if (UIO_IS_32_BIT_SPACE(a_uio
) && a_count
> 0xFFFFFFFFull
) {
1102 panic("%s :%d - invalid count value \n", __FILE__
, __LINE__
);
1104 #endif /* LP64_DEBUG */
1106 if (a_uio
== NULL
|| a_count
== 0) {
1110 if (a_uio
->uio_iovcnt
< 1) {
1111 panic("Invalid uio for pushback");
1114 if (UIO_IS_USER_SPACE(a_uio
)) {
1115 a_uio
->uio_iovs
.uiovp
->iov_base
-= a_count
;
1116 a_uio
->uio_iovs
.uiovp
->iov_len
+= a_count
;
1118 a_uio
->uio_iovs
.kiovp
->iov_base
-= a_count
;
1119 a_uio
->uio_iovs
.kiovp
->iov_len
+= a_count
;
1122 a_uio
->uio_offset
-= a_count
;
1123 a_uio
->uio_resid_64
+= a_count
;
1130 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
1134 uio_duplicate( uio_t a_uio
)
1139 if (a_uio
== NULL
) {
1143 my_uio
= (uio_t
) kalloc(a_uio
->uio_size
);
1145 panic("%s :%d - allocation failed\n", __FILE__
, __LINE__
);
1148 bcopy((void *)a_uio
, (void *)my_uio
, a_uio
->uio_size
);
1149 /* need to set our iovec pointer to point to first active iovec */
1150 if (my_uio
->uio_max_iovs
> 0) {
1151 my_uio
->uio_iovs
.uiovp
= (struct user_iovec
*)
1152 (((uint8_t *)my_uio
) + sizeof(struct uio
));
1154 /* advance to first nonzero iovec */
1155 if (my_uio
->uio_iovcnt
> 0) {
1156 for (i
= 0; i
< my_uio
->uio_max_iovs
; i
++) {
1157 if (UIO_IS_USER_SPACE(a_uio
)) {
1158 if (my_uio
->uio_iovs
.uiovp
->iov_len
!= 0) {
1161 my_uio
->uio_iovs
.uiovp
++;
1163 if (my_uio
->uio_iovs
.kiovp
->iov_len
!= 0) {
1166 my_uio
->uio_iovs
.kiovp
++;
1172 my_uio
->uio_flags
= UIO_FLAGS_WE_ALLOCED
| UIO_FLAGS_INITED
;
1174 os_atomic_inc(&uio_t_count
, relaxed
);
1182 copyin_user_iovec_array(user_addr_t uaddr
, int spacetype
, int count
, struct user_iovec
*dst
)
1184 size_t size_of_iovec
= (spacetype
== UIO_USERSPACE64
? sizeof(struct user64_iovec
) : sizeof(struct user32_iovec
));
1188 // copyin to the front of "dst", without regard for putting records in the right places
1189 error
= copyin(uaddr
, dst
, count
* size_of_iovec
);
1194 // now, unpack the entries in reverse order, so we don't overwrite anything
1195 for (i
= count
- 1; i
>= 0; i
--) {
1196 if (spacetype
== UIO_USERSPACE64
) {
1197 struct user64_iovec iovec
= ((struct user64_iovec
*)dst
)[i
];
1198 dst
[i
].iov_base
= iovec
.iov_base
;
1199 dst
[i
].iov_len
= iovec
.iov_len
;
1201 struct user32_iovec iovec
= ((struct user32_iovec
*)dst
)[i
];
1202 dst
[i
].iov_base
= iovec
.iov_base
;
1203 dst
[i
].iov_len
= iovec
.iov_len
;