2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
24 * Copyright (c) 1982, 1986, 1991, 1993
25 * The Regents of the University of California. All rights reserved.
26 * (c) UNIX System Laboratories, Inc.
27 * All or some portions of this file are derived from material licensed
28 * to the University of California by American Telephone and Telegraph
29 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
30 * the permission of UNIX System Laboratories, Inc.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/proc_internal.h>
66 #include <sys/malloc.h>
67 #include <sys/queue.h>
69 #include <sys/uio_internal.h>
70 #include <kern/kalloc.h>
74 #include <sys/kdebug.h>
75 #define DBG_UIO_COPYOUT 16
76 #define DBG_UIO_COPYIN 17
79 #include <kern/simple_lock.h>
/*
 * Count of uio_t structures allocated by uio_create() (see the
 * UIO_FLAGS_WE_ALLOCED bookkeeping in uio_free()); used in debug builds
 * to detect unbalanced free calls.
 */
static int uio_t_count = 0;
91 return uiomove64((addr64_t
)((unsigned int)cp
), n
, uio
);
94 // LP64todo - fix this! 'n' should be int64_t?
96 uiomove64(addr64_t cp
, int n
, register struct uio
*uio
)
99 register uint64_t acnt
;
106 if (uio
->uio_rw
!= UIO_READ
&& uio
->uio_rw
!= UIO_WRITE
)
107 panic("uiomove: mode");
111 if (IS_VALID_UIO_SEGFLG(uio
->uio_segflg
) == 0) {
112 panic("%s :%d - invalid uio_segflg\n", __FILE__
, __LINE__
);
114 #endif /* LP64_DEBUG */
116 while (n
> 0 && uio_resid(uio
)) {
117 acnt
= uio_iov_len(uio
);
123 if (n
> 0 && acnt
> (uint64_t)n
)
126 switch (uio
->uio_segflg
) {
128 case UIO_USERSPACE64
:
129 case UIO_USERISPACE64
:
130 // LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
131 if (uio
->uio_rw
== UIO_READ
)
133 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
,
134 (int)cp
, (int)uio
->uio_iovs
.iov64p
->iov_base
, acnt
, 0,0);
136 error
= copyout( CAST_DOWN(caddr_t
, cp
), uio
->uio_iovs
.iov64p
->iov_base
, acnt
);
138 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
,
139 (int)cp
, (int)uio
->uio_iovs
.iov64p
->iov_base
, acnt
, 0,0);
143 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
,
144 (int)uio
->uio_iovs
.iov64p
->iov_base
, (int)cp
, acnt
, 0,0);
146 error
= copyin(uio
->uio_iovs
.iov64p
->iov_base
, CAST_DOWN(caddr_t
, cp
), acnt
);
148 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
,
149 (int)uio
->uio_iovs
.iov64p
->iov_base
, (int)cp
, acnt
, 0,0);
155 case UIO_USERSPACE32
:
156 case UIO_USERISPACE32
:
159 if (uio
->uio_rw
== UIO_READ
)
161 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
,
162 (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 0,0);
164 error
= copyout( CAST_DOWN(caddr_t
, cp
), CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
), acnt
);
166 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
,
167 (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 0,0);
171 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
,
172 (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 0,0);
174 error
= copyin(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
), CAST_DOWN(caddr_t
, cp
), acnt
);
176 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
,
177 (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 0,0);
185 if (uio
->uio_rw
== UIO_READ
)
186 error
= copywithin(CAST_DOWN(caddr_t
, cp
), (caddr_t
)uio
->uio_iovs
.iov32p
->iov_base
,
189 error
= copywithin((caddr_t
)uio
->uio_iovs
.iov32p
->iov_base
, CAST_DOWN(caddr_t
, cp
),
193 case UIO_PHYS_USERSPACE64
:
194 if (uio
->uio_rw
== UIO_READ
)
196 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
,
197 (int)cp
, (int)uio
->uio_iovs
.iov64p
->iov_base
, acnt
, 1,0);
199 error
= copypv((addr64_t
)cp
, uio
->uio_iovs
.iov64p
->iov_base
, acnt
, cppvPsrc
| cppvNoRefSrc
);
200 if (error
) /* Copy physical to virtual */
203 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
,
204 (int)cp
, (int)uio
->uio_iovs
.iov64p
->iov_base
, acnt
, 1,0);
208 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
,
209 (int)uio
->uio_iovs
.iov64p
->iov_base
, (int)cp
, acnt
, 1,0);
211 error
= copypv(uio
->uio_iovs
.iov64p
->iov_base
, (addr64_t
)cp
, acnt
, cppvPsnk
| cppvNoRefSrc
| cppvNoModSnk
);
212 if (error
) /* Copy virtual to physical */
215 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
,
216 (int)uio
->uio_iovs
.iov64p
->iov_base
, (int)cp
, acnt
, 1,0);
222 case UIO_PHYS_USERSPACE32
:
223 case UIO_PHYS_USERSPACE
:
224 if (uio
->uio_rw
== UIO_READ
)
226 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
,
227 (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 1,0);
229 error
= copypv((addr64_t
)cp
, (addr64_t
)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, cppvPsrc
| cppvNoRefSrc
);
230 if (error
) /* Copy physical to virtual */
233 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
,
234 (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 1,0);
238 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
,
239 (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 1,0);
241 error
= copypv((addr64_t
)uio
->uio_iovs
.iov32p
->iov_base
, (addr64_t
)cp
, acnt
, cppvPsnk
| cppvNoRefSrc
| cppvNoModSnk
);
242 if (error
) /* Copy virtual to physical */
245 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
,
246 (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 1,0);
252 case UIO_PHYS_SYSSPACE32
:
253 case UIO_PHYS_SYSSPACE
:
254 if (uio
->uio_rw
== UIO_READ
)
256 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
,
257 (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 2,0);
259 error
= copypv((addr64_t
)cp
, uio
->uio_iovs
.iov32p
->iov_base
, acnt
, cppvKmap
| cppvPsrc
| cppvNoRefSrc
);
260 if (error
) /* Copy physical to virtual */
263 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
,
264 (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 2,0);
268 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
,
269 (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 2,0);
271 error
= copypv(uio
->uio_iovs
.iov32p
->iov_base
, (addr64_t
)cp
, acnt
, cppvKmap
| cppvPsnk
| cppvNoRefSrc
| cppvNoModSnk
);
272 if (error
) /* Copy virtual to physical */
275 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
,
276 (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 2,0);
285 uio_iov_base_add(uio
, acnt
);
287 uio_iov_len_add(uio
, -((int64_t)acnt
));
288 uio_setresid(uio
, (uio_resid(uio
) - ((int64_t)acnt
)));
290 uio_iov_len_add(uio
, -((int)acnt
));
291 uio_setresid(uio
, (uio_resid(uio
) - ((int)acnt
)));
293 uio
->uio_offset
+= acnt
;
301 * Give next character to user as result of read.
306 register struct uio
*uio
;
308 if (uio_resid(uio
) <= 0)
309 panic("ureadc: non-positive resid");
311 if (uio
->uio_iovcnt
== 0)
312 panic("ureadc: non-positive iovcnt");
313 if (uio_iov_len(uio
) <= 0) {
318 switch (uio
->uio_segflg
) {
320 case UIO_USERSPACE32
:
322 if (subyte(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
), c
) < 0)
326 case UIO_USERSPACE64
:
327 if (subyte((user_addr_t
)uio
->uio_iovs
.iov64p
->iov_base
, c
) < 0)
333 *((caddr_t
)uio
->uio_iovs
.iov32p
->iov_base
) = c
;
336 case UIO_USERISPACE32
:
338 if (suibyte(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
), c
) < 0)
345 uio_iov_base_add(uio
, 1);
346 uio_iov_len_add(uio
, -1);
347 uio_setresid(uio
, (uio_resid(uio
) - 1));
352 #if defined(vax) || defined(ppc)
353 /* unused except by ct.c, other oddities XXX */
355 * Get next character written in by user from uio.
363 if (uio_resid(uio
) <= 0)
366 if (uio
->uio_iovcnt
<= 0)
367 panic("uwritec: non-positive iovcnt");
369 if (uio_iov_len(uio
) == 0) {
371 if (--uio
->uio_iovcnt
== 0)
375 switch (uio
->uio_segflg
) {
377 case UIO_USERSPACE32
:
379 c
= fubyte(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
));
382 case UIO_USERSPACE64
:
383 c
= fubyte((user_addr_t
)uio
->uio_iovs
.iov64p
->iov_base
);
388 c
= *((caddr_t
)uio
->uio_iovs
.iov32p
->iov_base
) & 0377;
391 case UIO_USERISPACE32
:
393 c
= fuibyte(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
));
397 c
= 0; /* avoid uninitialized variable warning */
398 panic("uwritec: bogus uio_segflg");
403 uio_iov_base_add(uio
, 1);
404 uio_iov_len_add(uio
, -1);
405 uio_setresid(uio
, (uio_resid(uio
) - 1));
409 #endif /* vax || ppc */
412 * General routine to allocate a hash table.
415 hashinit(elements
, type
, hashmask
)
420 LIST_HEAD(generic
, generic
) *hashtbl
;
424 panic("hashinit: bad cnt");
425 for (hashsize
= 1; hashsize
<= elements
; hashsize
<<= 1)
428 MALLOC(hashtbl
, struct generic
*,
429 (u_long
)hashsize
* sizeof(*hashtbl
), type
, M_WAITOK
|M_ZERO
);
430 if (hashtbl
!= NULL
) {
431 for (i
= 0; i
< hashsize
; i
++)
432 LIST_INIT(&hashtbl
[i
]);
433 *hashmask
= hashsize
- 1;
439 * uio_resid - return the residual IO value for the given uio_t
441 user_ssize_t
uio_resid( uio_t a_uio
)
445 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
447 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
448 /* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
452 /* return 0 if there are no active iovecs */
457 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
458 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
459 return( (user_ssize_t
)a_uio
->uio_resid
);
461 return( a_uio
->uio_resid_64
);
464 return( (user_ssize_t
)a_uio
->uio_resid
);
468 * uio_setresid - set the residual IO value for the given uio_t
470 void uio_setresid( uio_t a_uio
, user_ssize_t a_value
)
474 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
476 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
477 /* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
485 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
486 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
487 a_uio
->uio_resid
= (int)a_value
;
489 a_uio
->uio_resid_64
= a_value
;
493 a_uio
->uio_resid
= (int)a_value
;
500 * uio_proc_t - return the proc_t for the given uio_t
501 * WARNING - This call is going away. Find another way to get the proc_t!!
503 __private_extern__ proc_t
uio_proc_t( uio_t a_uio
)
507 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
509 #endif /* LP64_DEBUG */
511 /* return 0 if there are no active iovecs */
515 return( a_uio
->uio_procp
);
519 * uio_setproc_t - set the residual IO value for the given uio_t
520 * WARNING - This call is going away.
522 __private_extern__
void uio_setproc_t( uio_t a_uio
, proc_t a_proc_t
)
526 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
527 #endif /* LP64_DEBUG */
531 a_uio
->uio_procp
= a_proc_t
;
537 * uio_curriovbase - return the base address of the current iovec associated
538 * with the given uio_t. May return 0.
540 user_addr_t
uio_curriovbase( uio_t a_uio
)
544 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
546 #endif /* LP64_DEBUG */
548 if (a_uio
== NULL
|| a_uio
->uio_iovcnt
< 1) {
552 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
553 return(a_uio
->uio_iovs
.uiovp
->iov_base
);
555 return((user_addr_t
)((uintptr_t)a_uio
->uio_iovs
.kiovp
->iov_base
));
560 * uio_curriovlen - return the length value of the current iovec associated
561 * with the given uio_t.
563 user_size_t
uio_curriovlen( uio_t a_uio
)
567 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
569 #endif /* LP64_DEBUG */
571 if (a_uio
== NULL
|| a_uio
->uio_iovcnt
< 1) {
575 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
576 return(a_uio
->uio_iovs
.uiovp
->iov_len
);
578 return((user_size_t
)a_uio
->uio_iovs
.kiovp
->iov_len
);
582 * uio_setcurriovlen - set the length value of the current iovec associated
583 * with the given uio_t.
585 __private_extern__
void uio_setcurriovlen( uio_t a_uio
, user_size_t a_value
)
589 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
591 #endif /* LP64_DEBUG */
597 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
598 a_uio
->uio_iovs
.uiovp
->iov_len
= a_value
;
602 if (a_value
> 0xFFFFFFFFull
) {
603 panic("%s :%d - invalid a_value\n", __FILE__
, __LINE__
);
605 #endif /* LP64_DEBUG */
606 a_uio
->uio_iovs
.kiovp
->iov_len
= (size_t)a_value
;
612 * uio_iovcnt - return count of active iovecs for the given uio_t
614 int uio_iovcnt( uio_t a_uio
)
618 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
620 #endif /* LP64_DEBUG */
626 return( a_uio
->uio_iovcnt
);
630 * uio_offset - return the current offset value for the given uio_t
632 off_t
uio_offset( uio_t a_uio
)
636 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
638 #endif /* LP64_DEBUG */
643 return( a_uio
->uio_offset
);
647 * uio_setoffset - set the current offset value for the given uio_t
649 void uio_setoffset( uio_t a_uio
, off_t a_offset
)
653 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
655 #endif /* LP64_DEBUG */
660 a_uio
->uio_offset
= a_offset
;
665 * uio_rw - return the read / write flag for the given uio_t
667 int uio_rw( uio_t a_uio
)
671 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
673 #endif /* LP64_DEBUG */
678 return( a_uio
->uio_rw
);
682 * uio_setrw - set the read / write flag for the given uio_t
684 void uio_setrw( uio_t a_uio
, int a_value
)
688 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
689 #endif /* LP64_DEBUG */
694 if (!(a_value
== UIO_READ
|| a_value
== UIO_WRITE
)) {
695 panic("%s :%d - invalid a_value\n", __FILE__
, __LINE__
);
697 #endif /* LP64_DEBUG */
699 if (a_value
== UIO_READ
|| a_value
== UIO_WRITE
) {
700 a_uio
->uio_rw
= a_value
;
706 * uio_isuserspace - return non zero value if the address space
707 * flag is for a user address space (could be 32 or 64 bit).
709 int uio_isuserspace( uio_t a_uio
)
713 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
714 #endif /* LP64_DEBUG */
718 if (UIO_SEG_IS_USER_SPACE(a_uio
->uio_segflg
)) {
726 * uio_create - create an uio_t.
727 * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t
728 * is not fully initialized until all iovecs are added using uio_addiov calls.
729 * a_iovcount is the maximum number of iovecs you may add.
731 uio_t
uio_create( int a_iovcount
, /* number of iovecs */
732 off_t a_offset
, /* current offset */
733 int a_spacetype
, /* type of address space */
734 int a_iodirection
) /* read or write flag */
740 my_size
= sizeof(struct uio
) + (sizeof(struct user_iovec
) * a_iovcount
);
741 my_buf_p
= kalloc(my_size
);
742 my_uio
= uio_createwithbuffer( a_iovcount
,
749 /* leave a note that we allocated this uio_t */
750 my_uio
->uio_flags
|= UIO_FLAGS_WE_ALLOCED
;
752 hw_atomic_add(&uio_t_count
, 1);
761 * uio_createwithbuffer - create an uio_t.
762 * Create a uio_t using the given buffer. The uio_t
763 * is not fully initialized until all iovecs are added using uio_addiov calls.
764 * a_iovcount is the maximum number of iovecs you may add.
765 * This call may fail if the given buffer is not large enough.
767 __private_extern__ uio_t
768 uio_createwithbuffer( int a_iovcount
, /* number of iovecs */
769 off_t a_offset
, /* current offset */
770 int a_spacetype
, /* type of address space */
771 int a_iodirection
, /* read or write flag */
772 void *a_buf_p
, /* pointer to a uio_t buffer */
773 int a_buffer_size
) /* size of uio_t buffer */
775 uio_t my_uio
= (uio_t
) a_buf_p
;
778 my_size
= sizeof(struct uio
) + (sizeof(struct user_iovec
) * a_iovcount
);
779 if (a_buffer_size
< my_size
) {
781 panic("%s :%d - a_buffer_size is too small\n", __FILE__
, __LINE__
);
785 my_size
= a_buffer_size
;
789 panic("%s :%d - could not allocate uio_t\n", __FILE__
, __LINE__
);
791 if (!IS_VALID_UIO_SEGFLG(a_spacetype
)) {
792 panic("%s :%d - invalid address space type\n", __FILE__
, __LINE__
);
794 if (!(a_iodirection
== UIO_READ
|| a_iodirection
== UIO_WRITE
)) {
795 panic("%s :%d - invalid IO direction flag\n", __FILE__
, __LINE__
);
797 if (a_iovcount
> UIO_MAXIOV
) {
798 panic("%s :%d - invalid a_iovcount\n", __FILE__
, __LINE__
);
802 bzero(my_uio
, my_size
);
803 my_uio
->uio_size
= my_size
;
805 /* we use uio_segflg to indicate if the uio_t is the new format or */
806 /* old (pre LP64 support) legacy format */
807 switch (a_spacetype
) {
809 my_uio
->uio_segflg
= UIO_USERSPACE32
;
811 my_uio
->uio_segflg
= UIO_SYSSPACE32
;
812 case UIO_PHYS_USERSPACE
:
813 my_uio
->uio_segflg
= UIO_PHYS_USERSPACE32
;
814 case UIO_PHYS_SYSSPACE
:
815 my_uio
->uio_segflg
= UIO_PHYS_SYSSPACE32
;
817 my_uio
->uio_segflg
= a_spacetype
;
821 if (a_iovcount
> 0) {
822 my_uio
->uio_iovs
.uiovp
= (struct user_iovec
*)
823 (((uint8_t *)my_uio
) + sizeof(struct uio
));
826 my_uio
->uio_iovs
.uiovp
= NULL
;
829 my_uio
->uio_max_iovs
= a_iovcount
;
830 my_uio
->uio_offset
= a_offset
;
831 my_uio
->uio_rw
= a_iodirection
;
832 my_uio
->uio_flags
= UIO_FLAGS_INITED
;
838 * uio_spacetype - return the address space type for the given uio_t
840 int uio_spacetype( uio_t a_uio
)
844 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
845 #endif /* LP64_DEBUG */
849 return( a_uio
->uio_segflg
);
853 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
854 * This returns the location of the iovecs within the uio.
855 * NOTE - for compatibility mode we just return the current value in uio_iovs
856 * which will increase as the IO is completed and is NOT embedded within the
857 * uio, it is a seperate array of one or more iovecs.
859 struct user_iovec
* uio_iovsaddr( uio_t a_uio
)
861 struct user_iovec
* my_addr
;
867 if (a_uio
->uio_segflg
== UIO_USERSPACE
|| a_uio
->uio_segflg
== UIO_SYSSPACE
) {
868 /* we need this for compatibility mode. */
869 my_addr
= (struct user_iovec
*) a_uio
->uio_iovs
.iovp
;
872 my_addr
= (struct user_iovec
*) (((uint8_t *)a_uio
) + sizeof(struct uio
));
878 * uio_reset - reset an uio_t.
879 * Reset the given uio_t to initial values. The uio_t is not fully initialized
880 * until all iovecs are added using uio_addiov calls.
881 * The a_iovcount value passed in the uio_create is the maximum number of
882 * iovecs you may add.
884 void uio_reset( uio_t a_uio
,
885 off_t a_offset
, /* current offset */
886 int a_spacetype
, /* type of address space */
887 int a_iodirection
) /* read or write flag */
891 u_int32_t my_old_flags
;
895 panic("%s :%d - could not allocate uio_t\n", __FILE__
, __LINE__
);
897 if (!IS_VALID_UIO_SEGFLG(a_spacetype
)) {
898 panic("%s :%d - invalid address space type\n", __FILE__
, __LINE__
);
900 if (!(a_iodirection
== UIO_READ
|| a_iodirection
== UIO_WRITE
)) {
901 panic("%s :%d - invalid IO direction flag\n", __FILE__
, __LINE__
);
903 #endif /* LP64_DEBUG */
909 my_size
= a_uio
->uio_size
;
910 my_old_flags
= a_uio
->uio_flags
;
911 my_max_iovs
= a_uio
->uio_max_iovs
;
912 bzero(a_uio
, my_size
);
913 a_uio
->uio_size
= my_size
;
914 a_uio
->uio_segflg
= a_spacetype
;
915 if (my_max_iovs
> 0) {
916 a_uio
->uio_iovs
.uiovp
= (struct user_iovec
*)
917 (((uint8_t *)a_uio
) + sizeof(struct uio
));
920 a_uio
->uio_iovs
.uiovp
= NULL
;
922 a_uio
->uio_max_iovs
= my_max_iovs
;
923 a_uio
->uio_offset
= a_offset
;
924 a_uio
->uio_rw
= a_iodirection
;
925 a_uio
->uio_flags
= my_old_flags
;
931 * uio_free - free a uio_t allocated via uio_init. this also frees all
934 void uio_free( uio_t a_uio
)
938 panic("%s :%d - passing NULL uio_t\n", __FILE__
, __LINE__
);
940 #endif /* LP64_DEBUG */
942 if (a_uio
!= NULL
&& (a_uio
->uio_flags
& UIO_FLAGS_WE_ALLOCED
) != 0) {
944 if ((int)(hw_atomic_sub(&uio_t_count
, 1)) < 0) {
945 panic("%s :%d - uio_t_count has gone negative\n", __FILE__
, __LINE__
);
948 kfree(a_uio
, a_uio
->uio_size
);
955 * uio_addiov - add an iovec to the given uio_t. You may call this up to
956 * the a_iovcount number that was passed to uio_create. This call will
957 * increment the residual IO count as iovecs are added to the uio_t.
958 * returns 0 if add was successful else non zero.
960 int uio_addiov( uio_t a_uio
, user_addr_t a_baseaddr
, user_size_t a_length
)
966 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
967 #endif /* LP64_DEBUG */
971 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
972 for ( i
= 0; i
< a_uio
->uio_max_iovs
; i
++ ) {
973 if (a_uio
->uio_iovs
.uiovp
[i
].iov_len
== 0 && a_uio
->uio_iovs
.uiovp
[i
].iov_base
== 0) {
974 a_uio
->uio_iovs
.uiovp
[i
].iov_len
= a_length
;
975 a_uio
->uio_iovs
.uiovp
[i
].iov_base
= a_baseaddr
;
977 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
978 a_uio
->uio_resid
+= a_length
;
980 a_uio
->uio_resid_64
+= a_length
;
987 for ( i
= 0; i
< a_uio
->uio_max_iovs
; i
++ ) {
988 if (a_uio
->uio_iovs
.kiovp
[i
].iov_len
== 0 && a_uio
->uio_iovs
.kiovp
[i
].iov_base
== 0) {
989 a_uio
->uio_iovs
.kiovp
[i
].iov_len
= (u_int32_t
)a_length
;
990 a_uio
->uio_iovs
.kiovp
[i
].iov_base
= (u_int32_t
)((uintptr_t)a_baseaddr
);
992 a_uio
->uio_resid
+= a_length
;
1002 * uio_getiov - get iovec data associated with the given uio_t. Use
1003 * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
1004 * a_baseaddr_p and a_length_p may be NULL.
1005 * returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
1006 * returns 0 when data is returned.
1008 int uio_getiov( uio_t a_uio
,
1010 user_addr_t
* a_baseaddr_p
,
1011 user_size_t
* a_length_p
)
1013 if (a_uio
== NULL
) {
1015 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
1019 if ( a_index
< 0 || a_index
>= a_uio
->uio_iovcnt
) {
1023 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
1024 if (a_baseaddr_p
!= NULL
) {
1025 *a_baseaddr_p
= a_uio
->uio_iovs
.uiovp
[a_index
].iov_base
;
1027 if (a_length_p
!= NULL
) {
1028 *a_length_p
= a_uio
->uio_iovs
.uiovp
[a_index
].iov_len
;
1032 if (a_baseaddr_p
!= NULL
) {
1033 *a_baseaddr_p
= a_uio
->uio_iovs
.kiovp
[a_index
].iov_base
;
1035 if (a_length_p
!= NULL
) {
1036 *a_length_p
= a_uio
->uio_iovs
.kiovp
[a_index
].iov_len
;
1044 * uio_calculateresid - runs through all iovecs associated with this
1045 * uio_t and calculates (and sets) the residual IO count.
1047 __private_extern__
void uio_calculateresid( uio_t a_uio
)
1051 if (a_uio
== NULL
) {
1053 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
1054 #endif /* LP64_DEBUG */
1058 a_uio
->uio_iovcnt
= 0;
1059 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
1060 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
1061 a_uio
->uio_resid
= 0;
1063 a_uio
->uio_resid_64
= 0;
1065 for ( i
= 0; i
< a_uio
->uio_max_iovs
; i
++ ) {
1066 if (a_uio
->uio_iovs
.uiovp
[i
].iov_len
!= 0 && a_uio
->uio_iovs
.uiovp
[i
].iov_base
!= 0) {
1067 a_uio
->uio_iovcnt
++;
1068 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
1069 a_uio
->uio_resid
+= a_uio
->uio_iovs
.uiovp
[i
].iov_len
;
1071 a_uio
->uio_resid_64
+= a_uio
->uio_iovs
.uiovp
[i
].iov_len
;
1077 a_uio
->uio_resid
= 0;
1078 for ( i
= 0; i
< a_uio
->uio_max_iovs
; i
++ ) {
1079 if (a_uio
->uio_iovs
.kiovp
[i
].iov_len
!= 0 && a_uio
->uio_iovs
.kiovp
[i
].iov_base
!= 0) {
1080 a_uio
->uio_iovcnt
++;
1081 a_uio
->uio_resid
+= a_uio
->uio_iovs
.kiovp
[i
].iov_len
;
1089 * uio_update - update the given uio_t for a_count of completed IO.
1090 * This call decrements the current iovec length and residual IO value
1091 * and increments the current iovec base address and offset value.
1092 * If the current iovec length is 0 then advance to the next
1095 void uio_update( uio_t a_uio
, user_size_t a_count
)
1098 if (a_uio
== NULL
) {
1099 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);
1101 if (UIO_IS_32_BIT_SPACE(a_uio
) && a_count
> 0xFFFFFFFFull
) {
1102 panic("%s :%d - invalid count value \n", __FILE__
, __LINE__
);
1104 #endif /* LP64_DEBUG */
1106 if (a_uio
== NULL
|| a_uio
->uio_iovcnt
< 1) {
1110 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
1111 if (a_count
> a_uio
->uio_iovs
.uiovp
->iov_len
) {
1112 a_uio
->uio_iovs
.uiovp
->iov_base
+= a_uio
->uio_iovs
.uiovp
->iov_len
;
1113 a_uio
->uio_iovs
.uiovp
->iov_len
= 0;
1116 a_uio
->uio_iovs
.uiovp
->iov_base
+= a_count
;
1117 a_uio
->uio_iovs
.uiovp
->iov_len
-= a_count
;
1119 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
1120 if (a_uio
->uio_resid
< 0) {
1121 a_uio
->uio_resid
= 0;
1123 if (a_count
> (user_size_t
)a_uio
->uio_resid
) {
1124 a_uio
->uio_offset
+= a_uio
->uio_resid
;
1125 a_uio
->uio_resid
= 0;
1128 a_uio
->uio_offset
+= a_count
;
1129 a_uio
->uio_resid
-= a_count
;
1132 if (a_uio
->uio_resid_64
< 0) {
1133 a_uio
->uio_resid_64
= 0;
1135 if (a_count
> (user_size_t
)a_uio
->uio_resid_64
) {
1136 a_uio
->uio_offset
+= a_uio
->uio_resid_64
;
1137 a_uio
->uio_resid_64
= 0;
1140 a_uio
->uio_offset
+= a_count
;
1141 a_uio
->uio_resid_64
-= a_count
;
1145 /* advance to next iovec if current one is totally consumed */
1146 while (a_uio
->uio_iovcnt
> 0 && a_uio
->uio_iovs
.uiovp
->iov_len
== 0) {
1147 a_uio
->uio_iovcnt
--;
1148 if (a_uio
->uio_iovcnt
> 0) {
1149 a_uio
->uio_iovs
.uiovp
++;
1154 if (a_count
> a_uio
->uio_iovs
.kiovp
->iov_len
) {
1155 a_uio
->uio_iovs
.kiovp
->iov_base
+= a_uio
->uio_iovs
.kiovp
->iov_len
;
1156 a_uio
->uio_iovs
.kiovp
->iov_len
= 0;
1159 a_uio
->uio_iovs
.kiovp
->iov_base
+= a_count
;
1160 a_uio
->uio_iovs
.kiovp
->iov_len
-= a_count
;
1162 if (a_uio
->uio_resid
< 0) {
1163 a_uio
->uio_resid
= 0;
1165 if (a_count
> (user_size_t
)a_uio
->uio_resid
) {
1166 a_uio
->uio_offset
+= a_uio
->uio_resid
;
1167 a_uio
->uio_resid
= 0;
1170 a_uio
->uio_offset
+= a_count
;
1171 a_uio
->uio_resid
-= a_count
;
1174 /* advance to next iovec if current one is totally consumed */
1175 while (a_uio
->uio_iovcnt
> 0 && a_uio
->uio_iovs
.kiovp
->iov_len
== 0) {
1176 a_uio
->uio_iovcnt
--;
1177 if (a_uio
->uio_iovcnt
> 0) {
1178 a_uio
->uio_iovs
.kiovp
++;
1187 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
1190 uio_t
uio_duplicate( uio_t a_uio
)
1195 if (a_uio
== NULL
) {
1199 my_uio
= (uio_t
) kalloc(a_uio
->uio_size
);
1201 panic("%s :%d - allocation failed\n", __FILE__
, __LINE__
);
1204 bcopy((void *)a_uio
, (void *)my_uio
, a_uio
->uio_size
);
1205 /* need to set our iovec pointer to point to first active iovec */
1206 if (my_uio
->uio_max_iovs
> 0) {
1207 my_uio
->uio_iovs
.uiovp
= (struct user_iovec
*)
1208 (((uint8_t *)my_uio
) + sizeof(struct uio
));
1210 /* advance to first nonzero iovec */
1211 if (my_uio
->uio_iovcnt
> 0) {
1212 for ( i
= 0; i
< my_uio
->uio_max_iovs
; i
++ ) {
1213 if (UIO_IS_64_BIT_SPACE(a_uio
)) {
1214 if (my_uio
->uio_iovs
.uiovp
->iov_len
!= 0) {
1217 my_uio
->uio_iovs
.uiovp
++;
1220 if (my_uio
->uio_iovs
.kiovp
->iov_len
!= 0) {
1223 my_uio
->uio_iovs
.kiovp
++;