/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/uio_internal.h>
#include <kern/kalloc.h>

#include <sys/kdebug.h>
#define DBG_UIO_COPYOUT 16
#define DBG_UIO_COPYIN  17

#if DEBUG
#include <kern/simple_lock.h>

static int	uio_t_count = 0;
#endif /* DEBUG */
int
uiomove(const char * cp, int n, struct uio *uio)
{
	return uiomove64((addr64_t)((unsigned int)cp), n, uio);
}
	// LP64todo - fix this! 'n' should be int64_t?
int
uiomove64(addr64_t cp, int n, register struct uio *uio)
{
	register uint64_t acnt;
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */
	while (n > 0 && uio_resid(uio)) {
		acnt = uio_iov_len(uio);
		if (acnt == 0) {
			uio_next_iov(uio);
			uio->uio_iovcnt--;
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;
		switch (uio->uio_segflg) {

		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0, 0);

				error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.iov64p->iov_base, acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0, 0);

				error = copyin(uio->uio_iovs.iov64p->iov_base, CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0, 0);
			}
			if (error)
				return (error);
			break;
		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0, 0);

				error = copyout( CAST_DOWN(caddr_t, cp), CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0, 0);

				error = copyin(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0, 0);
			}
			if (error)
				return (error);
			break;
		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), (caddr_t)uio->uio_iovs.iov32p->iov_base,
					acnt);
			else
				error = copywithin((caddr_t)uio->uio_iovs.iov32p->iov_base, CAST_DOWN(caddr_t, cp),
					acnt);
			break;
		case UIO_PHYS_USERSPACE64:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1, 0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov64p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1, 0);

				error = copypv(uio->uio_iovs.iov64p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1, 0);
			}
			if (error)
				return (error);
			break;
		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1, 0);

				error = copypv((addr64_t)cp, (addr64_t)uio->uio_iovs.iov32p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1, 0);

				error = copypv((addr64_t)uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1, 0);
			}
			if (error)
				return (error);
			break;
		case UIO_PHYS_SYSSPACE32:
		case UIO_PHYS_SYSSPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2, 0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov32p->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2, 0);
			}
			else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2, 0);

				error = copypv(uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2, 0);
			}
			if (error)
				return (error);
			break;

		default:
			break;
		}
		uio_iov_base_add(uio, acnt);
#if LP64KERN
		uio_iov_len_add(uio, -((int64_t)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int64_t)acnt)));
#else
		uio_iov_len_add(uio, -((int)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int)acnt)));
#endif
		uio->uio_offset += acnt;
		cp += acnt;
		n -= acnt;
	}
	return (error);
}
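
/*
 * Illustrative sketch (compiled out): how a read-side driver routine
 * typically drives uiomove().  The device buffer and all names below
 * ("mydev_buf", "mydev_read") are hypothetical, not part of this file.
 */
#if 0
static char	mydev_buf[512];
static int	mydev_buflen = sizeof(mydev_buf);

static int
mydev_read(struct uio *uio)
{
	user_ssize_t	xfer;

	if (uio->uio_offset >= mydev_buflen)
		return (0);		/* EOF */
	xfer = mydev_buflen - uio->uio_offset;
	if (xfer > uio_resid(uio))
		xfer = uio_resid(uio);
	/*
	 * uiomove() chooses copyout/copyin/bcopy from uio_segflg and
	 * advances the iovecs, resid, and offset on our behalf.
	 */
	return (uiomove(&mydev_buf[uio->uio_offset], (int)xfer, uio));
}
#endif /* 0 - example only */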
/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
	register int c;
	register struct uio *uio;
{
	if (uio_resid(uio) <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt == 0)
		panic("ureadc: non-positive iovcnt");
	if (uio_iov_len(uio) <= 0) {
		uio->uio_iovcnt--;
		uio_next_iov(uio);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
		if (subyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
			return (EFAULT);
		break;

	case UIO_USERSPACE64:
		if (subyte((user_addr_t)uio->uio_iovs.iov64p->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		*((caddr_t)uio->uio_iovs.iov32p->iov_base) = c;
		break;

	case UIO_USERISPACE32:
	case UIO_USERISPACE:
		if (suibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
			return (EFAULT);
		break;

	default:
		break;
	}
	uio_iov_base_add(uio, 1);
	uio_iov_len_add(uio, -1);
	uio_setresid(uio, (uio_resid(uio) - 1));
	return (0);
}
#if defined(vax) || defined(ppc)
/* unused except by ct.c, other oddities XXX */
/*
 * Get next character written in by user from uio.
 */
int
uwritec(uio)
	uio_t uio;
{
	register int c = 0;

	if (uio_resid(uio) <= 0)
		return (-1);
again:
	if (uio->uio_iovcnt <= 0)
		panic("uwritec: non-positive iovcnt");

	if (uio_iov_len(uio) == 0) {
		uio_next_iov(uio);
		if (--uio->uio_iovcnt == 0)
			return (-1);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
		c = fubyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	case UIO_USERSPACE64:
		c = fubyte((user_addr_t)uio->uio_iovs.iov64p->iov_base);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		c = *((caddr_t)uio->uio_iovs.iov32p->iov_base) & 0377;
		break;

	case UIO_USERISPACE32:
	case UIO_USERISPACE:
		c = fuibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	default:
		c = 0;	/* avoid uninitialized variable warning */
		panic("uwritec: bogus uio_segflg");
		break;
	}
	if (c < 0)
		return (-1);
	uio_iov_base_add(uio, 1);
	uio_iov_len_add(uio, -1);
	uio_setresid(uio, (uio_resid(uio) - 1));
	return (c);
}
#endif /* vax || ppc */
/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, hashmask)
	int elements, type;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad cnt");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	MALLOC(hashtbl, struct generic *,
		(u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK|M_ZERO);
	if (hashtbl != NULL) {
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&hashtbl[i]);
		*hashmask = hashsize - 1;
	}
	return (hashtbl);
}
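
/*
 * Illustrative sketch (compiled out): typical hashinit() usage.  The
 * element count is rounded to the largest power of two <= the request,
 * so a bucket is picked by masking, never by modulo.  All names below
 * are hypothetical.
 */
#if 0
LIST_HEAD(mychainhead, myentry) *my_hashtbl;
u_long	my_hashmask;

#define	MY_HASH(key)	(&my_hashtbl[(key) & my_hashmask])

void
my_subsystem_init(void)
{
	/* asking for 128 elements yields 128 buckets and a mask of 0x7f */
	my_hashtbl = hashinit(128, M_TEMP, &my_hashmask);
}
#endif /* 0 - example only */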
/*
 * uio_resid - return the residual IO value for the given uio_t
 */
user_ssize_t uio_resid( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/* 	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/* 		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/* 	} */
#endif /* DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return( 0 );
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		return( (user_ssize_t)a_uio->uio_resid );
#else
		return( a_uio->uio_resid_64 );
#endif
	}
	return( (user_ssize_t)a_uio->uio_resid );
}
/*
 * uio_setresid - set the residual IO value for the given uio_t
 */
void uio_setresid( uio_t a_uio, user_ssize_t a_value )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/* 	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/* 		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/* 	} */
#endif /* DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		a_uio->uio_resid = (int)a_value;
#else
		a_uio->uio_resid_64 = a_value;
#endif
	}
	else {
		a_uio->uio_resid = (int)a_value;
	}
	return;
}
/*
 * uio_proc_t - return the proc_t for the given uio_t
 * WARNING - This call is going away.  Find another way to get the proc_t!!
 */
__private_extern__ proc_t uio_proc_t( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return( NULL );
	}
	return( a_uio->uio_procp );
}
/*
 * uio_setproc_t - set the proc_t for the given uio_t
 * WARNING - This call is going away.
 */
__private_extern__ void uio_setproc_t( uio_t a_uio, proc_t a_proc_t )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
	}
	else {
		a_uio->uio_procp = a_proc_t;
	}
	return;
}
/*
 * uio_curriovbase - return the base address of the current iovec associated
 *	with the given uio_t.  May return 0.
 */
user_addr_t uio_curriovbase( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_base);
	}
	return((user_addr_t)((uintptr_t)a_uio->uio_iovs.kiovp->iov_base));
}
/*
 * uio_curriovlen - return the length value of the current iovec associated
 *	with the given uio_t.
 */
user_size_t uio_curriovlen( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_len);
	}
	return((user_size_t)a_uio->uio_iovs.kiovp->iov_len);
}
/*
 * uio_setcurriovlen - set the length value of the current iovec associated
 *	with the given uio_t.
 */
__private_extern__ void uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		a_uio->uio_iovs.uiovp->iov_len = a_value;
	}
	else {
#if LP64_DEBUG
		if (a_value > 0xFFFFFFFFull) {
			panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
		}
#endif /* LP64_DEBUG */
		a_uio->uio_iovs.kiovp->iov_len = (size_t)a_value;
	}
	return;
}
/*
 * uio_iovcnt - return count of active iovecs for the given uio_t
 */
int uio_iovcnt( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}

	return( a_uio->uio_iovcnt );
}
/*
 * uio_offset - return the current offset value for the given uio_t
 */
off_t uio_offset( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}
	return( a_uio->uio_offset );
}
/*
 * uio_setoffset - set the current offset value for the given uio_t
 */
void uio_setoffset( uio_t a_uio, off_t a_offset )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}
	a_uio->uio_offset = a_offset;
	return;
}
/*
 * uio_rw - return the read / write flag for the given uio_t
 */
int uio_rw( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(-1);
	}
	return( a_uio->uio_rw );
}
/*
 * uio_setrw - set the read / write flag for the given uio_t
 */
void uio_setrw( uio_t a_uio, int a_value )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

#if LP64_DEBUG
	if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
		panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_value == UIO_READ || a_value == UIO_WRITE) {
		a_uio->uio_rw = a_value;
	}
	return;
}
/*
 * uio_isuserspace - return non zero value if the address space
 * flag is for a user address space (could be 32 or 64 bit).
 */
int uio_isuserspace( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(0);
	}

	if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
		return( 1 );
	}
	return( 0 );
}
/*
 * uio_create - create an uio_t.
 * 	Space is allocated to hold up to a_iovcount number of iovecs.  The uio_t
 *	is not fully initialized until all iovecs are added using uio_addiov calls.
 *	a_iovcount is the maximum number of iovecs you may add.
 */
uio_t uio_create( int a_iovcount,	/* number of iovecs */
		  off_t a_offset,	/* current offset */
		  int a_spacetype,	/* type of address space */
		  int a_iodirection )	/* read or write flag */
{
	void *		my_buf_p;
	int		my_size;
	uio_t		my_uio;

	my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
	my_buf_p = kalloc(my_size);
	my_uio = uio_createwithbuffer( a_iovcount,
				       a_offset,
				       a_spacetype,
				       a_iodirection,
				       my_buf_p,
				       my_size );
	if (my_uio != 0) {
		/* leave a note that we allocated this uio_t */
		my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;

#if DEBUG
		hw_atomic_add(&uio_t_count, 1);
#endif
	}

	return( my_uio );
}
/*
 * uio_createwithbuffer - create an uio_t.
 * 	Create a uio_t using the given buffer.  The uio_t
 *	is not fully initialized until all iovecs are added using uio_addiov calls.
 *	a_iovcount is the maximum number of iovecs you may add.
 *	This call may fail if the given buffer is not large enough.
 */
__private_extern__ uio_t
	uio_createwithbuffer( int a_iovcount,	/* number of iovecs */
			      off_t a_offset,	/* current offset */
			      int a_spacetype,	/* type of address space */
			      int a_iodirection,	/* read or write flag */
			      void *a_buf_p,	/* pointer to a uio_t buffer */
			      int a_buffer_size )	/* size of uio_t buffer */
{
	uio_t		my_uio = (uio_t) a_buf_p;
	int		my_size;

	my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
	if (a_buffer_size < my_size) {
#if DEBUG
		panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return( NULL );
	}
	my_size = a_buffer_size;

#if DEBUG
	if (my_uio == 0) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
	if (a_iovcount > UIO_MAXIOV) {
		panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	bzero(my_uio, my_size);
	my_uio->uio_size = my_size;

	/* we use uio_segflg to indicate if the uio_t is the new format or */
	/* old (pre LP64 support) legacy format */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		my_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE:
		my_uio->uio_segflg = UIO_SYSSPACE32;
		break;
	case UIO_PHYS_USERSPACE:
		my_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	case UIO_PHYS_SYSSPACE:
		my_uio->uio_segflg = UIO_PHYS_SYSSPACE32;
		break;
	default:
		my_uio->uio_segflg = a_spacetype;
		break;
	}

	if (a_iovcount > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));
	}
	else {
		my_uio->uio_iovs.uiovp = NULL;
	}

	my_uio->uio_max_iovs = a_iovcount;
	my_uio->uio_offset = a_offset;
	my_uio->uio_rw = a_iodirection;
	my_uio->uio_flags = UIO_FLAGS_INITED;

	return( my_uio );
}
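
/*
 * Illustrative sketch (compiled out): building a uio_t in caller-owned
 * storage, which is what uio_createwithbuffer() exists for.  This assumes
 * a UIO_SIZEOF()-style sizing macro from the uio headers; if that is not
 * available, sizeof(struct uio) + sizeof(struct user_iovec) per iovec
 * gives the same number, as the size check above shows.
 */
#if 0
static void
example_stack_uio(user_addr_t base, user_size_t len, off_t offset)
{
	char	my_uio_buf[UIO_SIZEOF(1)];	/* room for one iovec */
	uio_t	auio;

	auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_READ,
		&my_uio_buf[0], sizeof(my_uio_buf));
	if (auio != NULL) {
		uio_addiov(auio, base, len);
		/* ... hand auio to the IO path ... */
	}
	/* no uio_free() needed: UIO_FLAGS_WE_ALLOCED was never set */
}
#endif /* 0 - example only */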
/*
 * uio_spacetype - return the address space type for the given uio_t
 */
int uio_spacetype( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(-1);
	}

	return( a_uio->uio_segflg );
}
/*
 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
 * This returns the location of the iovecs within the uio.
 * NOTE - for compatibility mode we just return the current value in uio_iovs
 * which will increase as the IO is completed and is NOT embedded within the
 * uio, it is a separate array of one or more iovecs.
 */
struct user_iovec * uio_iovsaddr( uio_t a_uio )
{
	struct user_iovec *	my_addr;

	if (a_uio == NULL) {
		return(NULL);
	}

	if (a_uio->uio_segflg == UIO_USERSPACE || a_uio->uio_segflg == UIO_SYSSPACE) {
		/* we need this for compatibility mode. */
		my_addr = (struct user_iovec *) a_uio->uio_iovs.iovp;
	}
	else {
		my_addr = (struct user_iovec *) (((uint8_t *)a_uio) + sizeof(struct uio));
	}
	return(my_addr);
}
/*
 * uio_reset - reset an uio_t.
 * 	Reset the given uio_t to initial values.  The uio_t is not fully initialized
 *	until all iovecs are added using uio_addiov calls.
 *	The a_iovcount value passed in the uio_create is the maximum number of
 *	iovecs you may add.
 */
void uio_reset( uio_t a_uio,
		off_t a_offset,		/* current offset */
		int a_spacetype,	/* type of address space */
		int a_iodirection )	/* read or write flag */
{
	int		my_size;
	int		my_max_iovs;
	u_int32_t	my_old_flags;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	my_size = a_uio->uio_size;
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	bzero(a_uio, my_size);
	a_uio->uio_size = my_size;
	a_uio->uio_segflg = a_spacetype;
	if (my_max_iovs > 0) {
		a_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)a_uio) + sizeof(struct uio));
	}
	else {
		a_uio->uio_iovs.uiovp = NULL;
	}
	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}
/*
 * uio_free - free a uio_t allocated via uio_create.  This also frees all
 * 	associated iovecs.
 */
void uio_free( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
#if DEBUG
		if ((int)(hw_atomic_sub(&uio_t_count, 1)) < 0) {
			panic("%s :%d - uio_t_count has gone negative\n", __FILE__, __LINE__);
		}
#endif
		kfree(a_uio, a_uio->uio_size);
	}
}
/*
 * uio_addiov - add an iovec to the given uio_t.  You may call this up to
 * 	the a_iovcount number that was passed to uio_create.  This call will
 * 	increment the residual IO count as iovecs are added to the uio_t.
 *	Returns 0 if the add was successful, else non zero.
 */
int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
	int		i;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(-1);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) {
				a_uio->uio_iovs.uiovp[i].iov_len = a_length;
				a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr;
				a_uio->uio_iovcnt++;
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
				a_uio->uio_resid += a_length;
#else
				a_uio->uio_resid_64 += a_length;
#endif
				return( 0 );
			}
		}
	}
	else {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) {
				a_uio->uio_iovs.kiovp[i].iov_len = (u_int32_t)a_length;
				a_uio->uio_iovs.kiovp[i].iov_base = (u_int32_t)((uintptr_t)a_baseaddr);
				a_uio->uio_iovcnt++;
				a_uio->uio_resid += a_length;
				return( 0 );
			}
		}
	}

	return( -1 );
}
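
/*
 * Illustrative sketch (compiled out): the allocate / add / free life
 * cycle.  uio_create() only reserves space; the uio becomes usable once
 * uio_addiov() fills in at least one iovec (which also bumps the resid).
 * UIO_USERSPACE64 is assumed here for a 64-bit user process.
 */
#if 0
static int
example_describe_user_range(user_addr_t base, user_size_t len, off_t offset)
{
	uio_t	auio;

	auio = uio_create(1, offset, UIO_USERSPACE64, UIO_WRITE);
	if (auio == NULL)
		return (ENOMEM);
	if (uio_addiov(auio, base, len) != 0) {
		uio_free(auio);
		return (EINVAL);
	}
	/* ... pass auio to the IO path ... */
	uio_free(auio);	/* safe: uio_create() set UIO_FLAGS_WE_ALLOCED */
	return (0);
}
#endif /* 0 - example only */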
/*
 * uio_getiov - get iovec data associated with the given uio_t.  Use
 *  a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
 *  a_baseaddr_p and a_length_p may be NULL.
 *  Returns -1 when a_index is out of range (>= uio_iovcnt) or the uio_t is invalid.
 *  Returns 0 when data is returned.
 */
int uio_getiov( uio_t a_uio,
		int a_index,
		user_addr_t * a_baseaddr_p,
		user_size_t * a_length_p )
{
	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return(-1);
	}
	if ( a_index < 0 || a_index >= a_uio->uio_iovcnt) {
		return(-1);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len;
		}
	}
	else {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len;
		}
	}

	return( 0 );
}
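
/*
 * Illustrative sketch (compiled out): walking the active iovecs of a uio
 * with uio_getiov(), indexing 0 through uio_iovcnt() - 1.
 */
#if 0
static user_ssize_t
example_total_length(uio_t a_uio)
{
	user_addr_t	base;
	user_size_t	len;
	user_ssize_t	total = 0;
	int		i;

	for (i = 0; i < uio_iovcnt(a_uio); i++) {
		if (uio_getiov(a_uio, i, &base, &len) != 0)
			break;
		total += len;
	}
	return (total);	/* matches uio_resid() before any IO completes */
}
#endif /* 0 - example only */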
/*
 * uio_calculateresid - runs through all iovecs associated with this
 *	uio_t and calculates (and sets) the residual IO count.
 */
__private_extern__ void uio_calculateresid( uio_t a_uio )
{
	int		i;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

	a_uio->uio_iovcnt = a_uio->uio_max_iovs;
	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		a_uio->uio_resid = 0;
#else
		a_uio->uio_resid_64 = 0;
#endif
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
				a_uio->uio_resid += a_uio->uio_iovs.uiovp[i].iov_len;
#else
				a_uio->uio_resid_64 += a_uio->uio_iovs.uiovp[i].iov_len;
#endif
			}
		}

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		a_uio->uio_resid = 0;
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) {
				a_uio->uio_resid += a_uio->uio_iovs.kiovp[i].iov_len;
			}
		}

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}

	return;
}
/*
 * uio_update - update the given uio_t for a_count of completed IO.
 *	This call decrements the current iovec length and residual IO value
 *	and increments the current iovec base address and offset value.
 *	If the current iovec length is 0 then advance to the next
 *	iovec (if any).
 * 	If the a_count passed in is 0, then only do the advancement
 *	over any 0 length iovecs.
 */
void uio_update( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
				a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
				a_uio->uio_iovs.uiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.uiovp->iov_base += a_count;
				a_uio->uio_iovs.uiovp->iov_len -= a_count;
			}
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
			if (a_uio->uio_resid < 0) {
				a_uio->uio_resid = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid) {
				a_uio->uio_offset += a_uio->uio_resid;
				a_uio->uio_resid = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid -= a_count;
			}
#else
			if (a_uio->uio_resid_64 < 0) {
				a_uio->uio_resid_64 = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
#endif
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
				a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
				a_uio->uio_iovs.kiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.kiovp->iov_base += a_count;
				a_uio->uio_iovs.kiovp->iov_len -= a_count;
			}
			if (a_uio->uio_resid < 0) {
				a_uio->uio_resid = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid) {
				a_uio->uio_offset += a_uio->uio_resid;
				a_uio->uio_resid = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}
	return;
}
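
/*
 * Illustrative sketch (compiled out): a driver completing an IO in fixed
 * size chunks.  Each uio_update() call shrinks the current iovec and the
 * resid, advances the offset, and skips any iovecs it fully drains.
 */
#if 0
static void
example_consume(uio_t a_uio)
{
	while (uio_resid(a_uio) > 0 && uio_iovcnt(a_uio) > 0) {
		user_size_t	chunk = uio_curriovlen(a_uio);

		if (chunk > 512)
			chunk = 512;
		/* ... transfer "chunk" bytes at uio_curriovbase(a_uio) ... */
		uio_update(a_uio, chunk);
	}
}
#endif /* 0 - example only */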
/*
 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
 *	may return NULL.
 */
uio_t uio_duplicate( uio_t a_uio )
{
	uio_t		my_uio;
	int		i;

	if (a_uio == NULL) {
		return(NULL);
	}

	my_uio = (uio_t) kalloc(a_uio->uio_size);
	if (my_uio == 0) {
		panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
	}

	bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
	/* need to set our iovec pointer to point to first active iovec */
	if (my_uio->uio_max_iovs > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));

		/* advance to first nonzero iovec */
		if (my_uio->uio_iovcnt > 0) {
			for ( i = 0; i < my_uio->uio_max_iovs; i++ ) {
				if (UIO_IS_64_BIT_SPACE(a_uio)) {
					if (my_uio->uio_iovs.uiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.uiovp++;
				}
				else {
					if (my_uio->uio_iovs.kiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.kiovp++;
				}
			}
		}
	}

	return(my_uio);
}