/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */
#include <machine/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <sys/uio_internal.h>
#include <kern/kalloc.h>

#include <sys/kdebug.h>
#define DBG_UIO_COPYOUT 16
#define DBG_UIO_COPYIN  17

#if DEBUG
#include <kern/simple_lock.h>

static uint32_t uio_t_count = 0;
#endif /* DEBUG */
#define IS_VALID_UIO_SEGFLG(segflg)	\
	( (segflg) == UIO_USERSPACE || \
	(segflg) == UIO_SYSSPACE || \
	(segflg) == UIO_USERSPACE32 || \
	(segflg) == UIO_USERSPACE64 || \
	(segflg) == UIO_SYSSPACE32 || \
	(segflg) == UIO_USERISPACE || \
	(segflg) == UIO_PHYS_USERSPACE || \
	(segflg) == UIO_PHYS_SYSSPACE || \
	(segflg) == UIO_USERISPACE32 || \
	(segflg) == UIO_PHYS_USERSPACE32 || \
	(segflg) == UIO_USERISPACE64 || \
	(segflg) == UIO_PHYS_USERSPACE64 )
/*
 * Notes:	The first argument should be a caddr_t, but const poisoning
 *		for typedef'ed types doesn't work in gcc.
 */
int
uiomove(const char * cp, int n, uio_t uio)
{
	return uiomove64((const addr64_t)(uintptr_t)cp, n, uio);
}
int
uiomove64(const addr64_t c_cp, int n, struct uio *uio)
{
	addr64_t cp = c_cp;
	uint64_t acnt;
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE) {
		panic("uiomove: mode");
	}
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		uio_update(uio, 0);
		acnt = uio_curriovlen(uio);
		if (acnt == 0) {
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n) {
			acnt = n;
		}

		switch ((int) uio->uio_segflg) {
		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0, 0);

				error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.uiovp->iov_base, (size_t)acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0, 0);

				error = copyin(uio->uio_iovs.uiovp->iov_base, CAST_DOWN(caddr_t, cp), (size_t)acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0, 0);
			}
			if (error) {
				return error;
			}
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ) {
				error = copywithin(CAST_DOWN(caddr_t, cp), CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base),
				    (size_t)acnt);
			} else {
				error = copywithin(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base), CAST_DOWN(caddr_t, cp),
				    (size_t)acnt);
			}
			break;

		case UIO_PHYS_USERSPACE64:
		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			acnt = MIN(acnt, UINT_MAX);

			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1, 0);

				error = copypv((addr64_t)cp, uio->uio_iovs.uiovp->iov_base, (unsigned int)acnt, cppvPsrc | cppvNoRefSrc);
				if (error) {     /* Copy physical to virtual */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1, 0);

				error = copypv(uio->uio_iovs.uiovp->iov_base, (addr64_t)cp, (unsigned int)acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error) {     /* Copy virtual to physical */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1, 0);
			}
			if (error) {
				return error;
			}
			break;

		case UIO_PHYS_SYSSPACE:
			acnt = MIN(acnt, UINT_MAX);

			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2, 0);

				error = copypv((addr64_t)cp, uio->uio_iovs.kiovp->iov_base, (unsigned int)acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error) {     /* Copy physical to virtual */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2, 0);

				error = copypv(uio->uio_iovs.kiovp->iov_base, (addr64_t)cp, (unsigned int)acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error) {     /* Copy virtual to physical */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2, 0);
			}
			if (error) {
				return error;
			}
			break;

		default:
			break;
		}
		uio_update(uio, (user_size_t)acnt);
		cp += acnt;
		n -= acnt;
	}
	return error;
}
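#if 0
/*
 * Illustrative sketch (not part of the original file): the usual calling
 * pattern around uiomove().  A caller describes the destination user buffer
 * with a uio, then uiomove() copies the kernel data and advances the uio.
 * The function name and the ENOMEM fallback are assumptions for the example.
 */
static int
example_copy_to_user(user_addr_t user_buf, user_size_t len, const char *kernel_data)
{
	uio_t	auio;
	int	error;

	/* UIO_READ: data flows from the kernel out to the user buffer */
	auio = uio_create(1, 0, UIO_USERSPACE, UIO_READ);
	if (auio == NULL) {
		return ENOMEM;
	}
	error = uio_addiov(auio, user_buf, len);
	if (error == 0) {
		error = uiomove(kernel_data, (int)len, auio);
	}
	uio_free(auio);
	return error;
}
#endif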
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	if (uio_resid(uio) <= 0) {
		panic("ureadc: non-positive resid");
	}
	uio_update(uio, 0);
	if (uio->uio_iovcnt == 0) {
		panic("ureadc: non-positive iovcnt");
	}
	if (uio_curriovlen(uio) <= 0) {
		panic("ureadc: non-positive iovlen");
	}

	switch ((int) uio->uio_segflg) {
	case UIO_USERSPACE32:
	case UIO_USERSPACE:
	case UIO_USERISPACE32:
	case UIO_USERISPACE:
	case UIO_USERSPACE64:
	case UIO_USERISPACE64:
		if (subyte((user_addr_t)uio->uio_iovs.uiovp->iov_base, c) < 0) {
			return EFAULT;
		}
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		*(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base)) = (char)c;
		break;

	default:
		break;
	}
	uio_update(uio, 1);
	return 0;
}
LIST_HEAD(generic_hash_head, generic);

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(int elements, int type __unused, u_long *hashmask)
{
	struct generic_hash_head *hashtbl;
	vm_size_t hashsize;

	if (elements <= 0) {
		panic("hashinit: bad cnt");
	}

	hashsize = 1UL << (fls(elements) - 1);
	hashtbl = kheap_alloc(KHEAP_DEFAULT, hashsize * sizeof(*hashtbl),
	    Z_WAITOK | Z_ZERO);
	if (hashtbl != NULL) {
		*hashmask = hashsize - 1;
	}
	return hashtbl;
}

void
hashdestroy(void *hash, int type __unused, u_long hashmask)
{
	struct generic_hash_head *hashtbl = hash;
	assert(powerof2(hashmask + 1));
	kheap_free(KHEAP_DEFAULT, hashtbl, (hashmask + 1) * sizeof(*hashtbl));
}
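#if 0
/*
 * Illustrative sketch (not part of the original file): typical hashinit()
 * usage.  hashinit() rounds the requested size down to a power of two,
 * allocates the bucket array, and fills in the mask used to index it.
 * The entry type, table size, and M_TEMP type argument are assumptions
 * for the example.
 */
struct example_entry {
	LIST_ENTRY(example_entry) link;
	u_long key;
};
LIST_HEAD(example_head, example_entry);

static struct example_head *example_tbl;
static u_long example_mask;

static void
example_hash_setup(void)
{
	example_tbl = hashinit(64, M_TEMP, &example_mask);
}

static void
example_hash_insert(struct example_entry *ep)
{
	/* bucket index is the key masked down to the table size */
	struct example_head *bucket = &example_tbl[ep->key & example_mask];
	LIST_INSERT_HEAD(bucket, ep, link);
}
#endif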
/*
 * uio_resid - return the residual IO value for the given uio_t
 */
user_ssize_t
uio_resid( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		printf("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/*	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/*		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/*	} */
#endif /* DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return 0;
	}

	return a_uio->uio_resid_64;
}
/*
 * uio_setresid - set the residual IO value for the given uio_t
 */
void
uio_setresid( uio_t a_uio, user_ssize_t a_value )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/*	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/*		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/*	} */
#endif /* DEBUG */

	if (a_uio == NULL) {
		return;
	}

	a_uio->uio_resid_64 = a_value;
	return;
}
/*
 * uio_curriovbase - return the base address of the current iovec associated
 *	with the given uio_t.  May return 0.
 */
user_addr_t
uio_curriovbase( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return 0;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		return a_uio->uio_iovs.uiovp->iov_base;
	}
	return (user_addr_t)a_uio->uio_iovs.kiovp->iov_base;
}
/*
 * uio_curriovlen - return the length value of the current iovec associated
 *	with the given uio_t.
 */
user_size_t
uio_curriovlen( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return 0;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		return a_uio->uio_iovs.uiovp->iov_len;
	}
	return (user_size_t)a_uio->uio_iovs.kiovp->iov_len;
}
/*
 * uio_setcurriovlen - set the length value of the current iovec associated
 *	with the given uio_t.
 */
__private_extern__ void
uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		a_uio->uio_iovs.uiovp->iov_len = a_value;
	} else {
#if LP64_DEBUG
		if (a_value > 0xFFFFFFFFull) {
			panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
		}
#endif /* LP64_DEBUG */
		a_uio->uio_iovs.kiovp->iov_len = (size_t)a_value;
	}
	return;
}
/*
 * uio_iovcnt - return count of active iovecs for the given uio_t
 */
int
uio_iovcnt( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return 0;
	}

	return a_uio->uio_iovcnt;
}
/*
 * uio_offset - return the current offset value for the given uio_t
 */
off_t
uio_offset( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return 0;
	}
	return a_uio->uio_offset;
}
/*
 * uio_setoffset - set the current offset value for the given uio_t
 */
void
uio_setoffset( uio_t a_uio, off_t a_offset )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}
	a_uio->uio_offset = a_offset;
	return;
}
/*
 * uio_rw - return the read / write flag for the given uio_t
 */
int
uio_rw( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return -1;
	}
	return a_uio->uio_rw;
}
/*
 * uio_setrw - set the read / write flag for the given uio_t
 */
void
uio_setrw( uio_t a_uio, int a_value )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

#if LP64_DEBUG
	if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
		panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_value == UIO_READ || a_value == UIO_WRITE) {
		a_uio->uio_rw = a_value;
	}
	return;
}
/*
 * uio_isuserspace - return non zero value if the address space
 * flag is for a user address space (could be 32 or 64 bit).
 */
int
uio_isuserspace( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return 0;
	}

	if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
		return 1;
	}
	return 0;
}
/*
 * uio_create - create a uio_t.
 *	Space is allocated to hold up to a_iovcount number of iovecs.  The uio_t
 *	is not fully initialized until all iovecs are added using uio_addiov calls.
 *	a_iovcount is the maximum number of iovecs you may add.
 */
uio_t
uio_create( int a_iovcount,             /* number of iovecs */
    off_t a_offset,                     /* current offset */
    int a_spacetype,                    /* type of address space */
    int a_iodirection )                 /* read or write flag */
{
	void *          my_buf_p;
	size_t          my_size;
	uio_t           my_uio;

	my_size = UIO_SIZEOF(a_iovcount);
	my_buf_p = kalloc(my_size);
	my_uio = uio_createwithbuffer( a_iovcount,
	    a_offset,
	    a_spacetype,
	    a_iodirection,
	    my_buf_p,
	    my_size );
	if (my_uio != 0) {
		/* leave a note that we allocated this uio_t */
		my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
#if DEBUG
		os_atomic_inc(&uio_t_count, relaxed);
#endif
	}

	return my_uio;
}
/*
 * uio_createwithbuffer - create a uio_t.
 *	Create a uio_t using the given buffer.  The uio_t
 *	is not fully initialized until all iovecs are added using uio_addiov calls.
 *	a_iovcount is the maximum number of iovecs you may add.
 *	This call may fail if the given buffer is not large enough.
 */
__private_extern__ uio_t
uio_createwithbuffer( int a_iovcount,           /* number of iovecs */
    off_t a_offset,                             /* current offset */
    int a_spacetype,                            /* type of address space */
    int a_iodirection,                          /* read or write flag */
    void *a_buf_p,                              /* pointer to a uio_t buffer */
    size_t a_buffer_size )                      /* size of uio_t buffer */
{
	uio_t           my_uio = (uio_t) a_buf_p;
	size_t          my_size;

	assert(a_iovcount >= 0 && a_iovcount <= UIO_MAXIOV);
	if (a_iovcount < 0 || a_iovcount > UIO_MAXIOV) {
		return NULL;
	}

	my_size = UIO_SIZEOF(a_iovcount);
	assert(a_buffer_size >= my_size);
	if (a_buffer_size < my_size) {
		return NULL;
	}
	my_size = a_buffer_size;

	assert(my_size <= INT_MAX);
	if (my_size > INT_MAX) {
		return NULL;
	}

	assert(my_uio != NULL);
	assert(IS_VALID_UIO_SEGFLG(a_spacetype));
	assert(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE);

	bzero(my_uio, my_size);
	my_uio->uio_size = (int)my_size;

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		my_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		my_uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		my_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		my_uio->uio_segflg = a_spacetype;
		break;
	}

	if (a_iovcount > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
		    (((uint8_t *)my_uio) + sizeof(struct uio));
	} else {
		my_uio->uio_iovs.uiovp = NULL;
	}

	my_uio->uio_max_iovs = a_iovcount;
	my_uio->uio_offset = a_offset;
	my_uio->uio_rw = a_iodirection;
	my_uio->uio_flags = UIO_FLAGS_INITED;

	return my_uio;
}
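#if 0
/*
 * Illustrative sketch (not part of the original file): uio_createwithbuffer()
 * lets the caller supply the backing storage, typically a stack buffer sized
 * with UIO_SIZEOF(), so no heap allocation or uio_free() is needed.  The
 * function name and error codes are assumptions for the example.
 */
static int
example_stack_uio(user_addr_t user_buf, user_size_t len, char *kernel_data)
{
	char            uio_buf[UIO_SIZEOF(1)];
	uio_t           auio;

	auio = uio_createwithbuffer(1, 0, UIO_USERSPACE, UIO_READ,
	    &uio_buf[0], sizeof(uio_buf));
	if (auio == NULL) {
		return EINVAL;
	}
	uio_addiov(auio, user_buf, len);

	/* the uio lives in uio_buf on the stack, so nothing to free */
	return uiomove(kernel_data, (int)len, auio);
}
#endif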
/*
 * uio_spacetype - return the address space type for the given uio_t
 */
__private_extern__ int
uio_spacetype( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return -1;
	}

	return a_uio->uio_segflg;
}
/*
 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
 * This returns the location of the iovecs within the uio.
 * NOTE - for compatibility mode we just return the current value in uio_iovs
 * which will increase as the IO is completed and is NOT embedded within the
 * uio, it is a separate array of one or more iovecs.
 */
__private_extern__ struct user_iovec *
uio_iovsaddr( uio_t a_uio )
{
	struct user_iovec *     my_addr;

	if (a_uio == NULL) {
		return NULL;
	}

	if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
		/* we need this for compatibility mode. */
		my_addr = (struct user_iovec *) a_uio->uio_iovs.uiovp;
	} else {
#if DEBUG
		panic("uio_iovsaddr called for UIO_SYSSPACE request");
#endif
		my_addr = NULL;
	}
	return my_addr;
}
/*
 * uio_reset - reset a uio_t.
 *	Reset the given uio_t to initial values.  The uio_t is not fully initialized
 *	until all iovecs are added using uio_addiov calls.
 *	The a_iovcount value passed in the uio_create is the maximum number of
 *	iovecs you may add.
 */
void
uio_reset( uio_t a_uio,
    off_t a_offset,                     /* current offset */
    int a_spacetype,                    /* type of address space */
    int a_iodirection )                 /* read or write flag */
{
	vm_size_t       my_size;
	int             my_max_iovs;
	u_int32_t       my_old_flags;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	my_size = a_uio->uio_size;
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	bzero(a_uio, my_size);
	a_uio->uio_size = (int)my_size;

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		a_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		a_uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		a_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		a_uio->uio_segflg = a_spacetype;
		break;
	}

	if (my_max_iovs > 0) {
		a_uio->uio_iovs.uiovp = (struct user_iovec *)
		    (((uint8_t *)a_uio) + sizeof(struct uio));
	} else {
		a_uio->uio_iovs.uiovp = NULL;
	}

	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}
/*
 * uio_free - free a uio_t allocated via uio_init.  this also frees all
 *	associated iovecs.
 */
void
uio_free( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
#if DEBUG
		if (os_atomic_dec_orig(&uio_t_count, relaxed) == 0) {
			panic("%s :%d - uio_t_count underflow\n", __FILE__, __LINE__);
		}
#endif
		kfree(a_uio, a_uio->uio_size);
	}
}
/*
 * uio_addiov - add an iovec to the given uio_t.  You may call this up to
 *	the a_iovcount number that was passed to uio_create.  This call will
 *	increment the residual IO count as iovecs are added to the uio_t.
 *	returns 0 if add was successful else non zero.
 */
int
uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
	int i;
	user_size_t resid;

	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif
		return -1;
	}

	if (os_add_overflow(a_length, a_uio->uio_resid_64, &resid)) {
#if DEBUG
		panic("%s :%d - invalid length %lu\n", __FILE__, __LINE__, (unsigned long)a_length);
#endif
		return -1;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) {
				a_uio->uio_iovs.uiovp[i].iov_len = a_length;
				a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr;
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 = resid;
				return 0;
			}
		}
	} else {
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) {
				a_uio->uio_iovs.kiovp[i].iov_len = (u_int64_t)a_length;
				a_uio->uio_iovs.kiovp[i].iov_base = (u_int64_t)a_baseaddr;
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 = resid;
				return 0;
			}
		}
	}

	return -1;
}
/*
 * uio_getiov - get iovec data associated with the given uio_t.  Use
 *	a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
 *	a_baseaddr_p and a_length_p may be NULL.
 *	returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
 *	returns 0 when data is returned.
 */
int
uio_getiov( uio_t a_uio,
    int a_index,
    user_addr_t * a_baseaddr_p,
    user_size_t * a_length_p )
{
	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return -1;
	}
	if (a_index < 0 || a_index >= a_uio->uio_iovcnt) {
		return -1;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len;
		}
	} else {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = (user_addr_t)a_uio->uio_iovs.kiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = (user_size_t)a_uio->uio_iovs.kiovp[a_index].iov_len;
		}
	}

	return 0;
}
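#if 0
/*
 * Illustrative sketch (not part of the original file): walking the active
 * iovecs of a uio with uio_iovcnt()/uio_getiov(), here just to total up the
 * described length.  The function name is an assumption for the example.
 */
static user_size_t
example_uio_total_len(uio_t auio)
{
	user_size_t     total = 0;
	int             i;

	for (i = 0; i < uio_iovcnt(auio); i++) {
		user_addr_t     base;
		user_size_t     len;

		if (uio_getiov(auio, i, &base, &len) != 0) {
			break;
		}
		total += len;
	}
	return total;
}
#endif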
/*
 * uio_calculateresid - runs through all iovecs associated with this
 *	uio_t and calculates (and sets) the residual IO count.
 */
__private_extern__ int
uio_calculateresid( uio_t a_uio )
{
	int             i;
	u_int64_t       resid = 0;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return EINVAL;
	}

	a_uio->uio_iovcnt = a_uio->uio_max_iovs;
	if (UIO_IS_USER_SPACE(a_uio)) {
		a_uio->uio_resid_64 = 0;
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) {
				if (a_uio->uio_iovs.uiovp[i].iov_len > LONG_MAX) {
					return EINVAL;
				}
				resid += a_uio->uio_iovs.uiovp[i].iov_len;
				if (resid > LONG_MAX) {
					return EINVAL;
				}
			}
		}
		a_uio->uio_resid_64 = (user_size_t)resid;

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	} else {
		a_uio->uio_resid_64 = 0;
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) {
				if (a_uio->uio_iovs.kiovp[i].iov_len > LONG_MAX) {
					return EINVAL;
				}
				resid += a_uio->uio_iovs.kiovp[i].iov_len;
				if (resid > LONG_MAX) {
					return EINVAL;
				}
			}
		}
		a_uio->uio_resid_64 = (user_size_t)resid;

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}

	return 0;
}
/*
 * uio_update - update the given uio_t for a_count of completed IO.
 *	This call decrements the current iovec length and residual IO value
 *	and increments the current iovec base address and offset value.
 *	If the current iovec length is 0 then advance to the next
 *	iovec (if any).
 *	If the a_count passed in is 0, then only do the advancement
 *	over any 0 length iovec's.
 */
void
uio_update( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count != 0) {
			if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
				a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
				a_uio->uio_iovs.uiovp->iov_len = 0;
			} else {
				a_uio->uio_iovs.uiovp->iov_base += a_count;
				a_uio->uio_iovs.uiovp->iov_len -= a_count;
			}
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			} else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	} else {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count != 0) {
			if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
				a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
				a_uio->uio_iovs.kiovp->iov_len = 0;
			} else {
				a_uio->uio_iovs.kiovp->iov_base += a_count;
				a_uio->uio_iovs.kiovp->iov_len -= a_count;
			}
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			} else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}
	return;
}
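#if 0
/*
 * Illustrative sketch (not part of the original file): a driver-style loop
 * that consumes a uio one iovec segment at a time, e.g. when handing segments
 * to hardware instead of using uiomove().  The function name and the
 * process_segment() callback are assumptions for the example.
 */
static int
example_consume_by_segment(uio_t auio,
    int (*process_segment)(user_addr_t base, user_size_t len))
{
	int error = 0;

	while (uio_resid(auio) > 0 && uio_iovcnt(auio) > 0) {
		user_addr_t     base = uio_curriovbase(auio);
		user_size_t     len = uio_curriovlen(auio);

		error = process_segment(base, len);
		if (error) {
			break;
		}
		/* advance: shrinks the current iovec, the resid and the offset */
		uio_update(auio, len);
	}
	return error;
}
#endif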
/*
 * uio_pushback - undo uncommitted I/O by subtracting from the
 *	current base address and offset, and incrementing the residual
 *	IO. If the UIO was previously exhausted, this call will panic.
 *	New code should not use this functionality.
 */
__private_extern__ void
uio_pushback( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_count == 0) {
		return;
	}

	if (a_uio->uio_iovcnt < 1) {
		panic("Invalid uio for pushback");
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		a_uio->uio_iovs.uiovp->iov_base -= a_count;
		a_uio->uio_iovs.uiovp->iov_len += a_count;
	} else {
		a_uio->uio_iovs.kiovp->iov_base -= a_count;
		a_uio->uio_iovs.kiovp->iov_len += a_count;
	}

	a_uio->uio_offset -= a_count;
	a_uio->uio_resid_64 += a_count;

	return;
}
/*
 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
 *	may return NULL.
 */
uio_t
uio_duplicate( uio_t a_uio )
{
	uio_t           my_uio;
	int             i;

	if (a_uio == NULL) {
		return NULL;
	}

	my_uio = (uio_t) kalloc(a_uio->uio_size);
	if (my_uio == 0) {
		panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
	}

	bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
	/* need to set our iovec pointer to point to first active iovec */
	if (my_uio->uio_max_iovs > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
		    (((uint8_t *)my_uio) + sizeof(struct uio));

		/* advance to first nonzero iovec */
		if (my_uio->uio_iovcnt > 0) {
			for (i = 0; i < my_uio->uio_max_iovs; i++) {
				if (UIO_IS_USER_SPACE(a_uio)) {
					if (my_uio->uio_iovs.uiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.uiovp++;
				} else {
					if (my_uio->uio_iovs.kiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.kiovp++;
				}
			}
		}
	}

	my_uio->uio_flags = UIO_FLAGS_WE_ALLOCED | UIO_FLAGS_INITED;
#if DEBUG
	os_atomic_inc(&uio_t_count, relaxed);
#endif

	return my_uio;
}
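#if 0
/*
 * Illustrative sketch (not part of the original file): uio_duplicate() copies
 * the whole uio, including its embedded iovecs, so the caller can consume one
 * copy while keeping the original position, e.g. for a possible retry.  The
 * duplicate is always heap-allocated and must be released with uio_free().
 * Names are assumptions for the example.
 */
static int
example_consume_copy(uio_t uio, char *kernel_buf, int len)
{
	uio_t   copy;
	int     error;

	copy = uio_duplicate(uio);
	if (copy == NULL) {
		return ENOMEM;
	}

	/* move data using the copy; 'uio' itself is left untouched */
	error = uiomove(kernel_buf, len, copy);

	uio_free(copy);
	return error;
}
#endif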
int
copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct user_iovec *dst)
{
	size_t size_of_iovec = (spacetype == UIO_USERSPACE64 ? sizeof(struct user64_iovec) : sizeof(struct user32_iovec));
	int error;
	int i;

	// copyin to the front of "dst", without regard for putting records in the right places
	error = copyin(uaddr, dst, count * size_of_iovec);
	if (error) {
		return error;
	}

	// now, unpack the entries in reverse order, so we don't overwrite anything
	for (i = count - 1; i >= 0; i--) {
		if (spacetype == UIO_USERSPACE64) {
			struct user64_iovec iovec = ((struct user64_iovec *)dst)[i];
			dst[i].iov_base = (user_addr_t)iovec.iov_base;
			dst[i].iov_len = (user_size_t)iovec.iov_len;
		} else {
			struct user32_iovec iovec = ((struct user32_iovec *)dst)[i];
			dst[i].iov_base = iovec.iov_base;
			dst[i].iov_len = iovec.iov_len;
		}
	}

	return 0;
}