2  * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. 
   4  * @APPLE_LICENSE_HEADER_START@ 
   6  * This file contains Original Code and/or Modifications of Original Code 
   7  * as defined in and that are subject to the Apple Public Source License 
   8  * Version 2.0 (the 'License'). You may not use this file except in 
   9  * compliance with the License. Please obtain a copy of the License at 
  10  * http://www.opensource.apple.com/apsl/ and read it before using this 
  13  * The Original Code and all software distributed under the License are 
  14  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
  15  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
  16  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
  17  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
  18  * Please see the License for the specific language governing rights and 
  19  * limitations under the License. 
  21  * @APPLE_LICENSE_HEADER_END@ 
  23 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ 
  25  * Copyright (c) 1982, 1986, 1991, 1993 
  26  *      The Regents of the University of California.  All rights reserved. 
  27  * (c) UNIX System Laboratories, Inc. 
  28  * All or some portions of this file are derived from material licensed 
  29  * to the University of California by American Telephone and Telegraph 
  30  * Co. or Unix System Laboratories, Inc. and are reproduced herein with 
  31  * the permission of UNIX System Laboratories, Inc. 
  33  * Redistribution and use in source and binary forms, with or without 
  34  * modification, are permitted provided that the following conditions 
  36  * 1. Redistributions of source code must retain the above copyright 
  37  *    notice, this list of conditions and the following disclaimer. 
  38  * 2. Redistributions in binary form must reproduce the above copyright 
  39  *    notice, this list of conditions and the following disclaimer in the 
  40  *    documentation and/or other materials provided with the distribution. 
  41  * 3. All advertising materials mentioning features or use of this software 
  42  *    must display the following acknowledgement: 
  43  *      This product includes software developed by the University of 
  44  *      California, Berkeley and its contributors. 
  45  * 4. Neither the name of the University nor the names of its contributors 
  46  *    may be used to endorse or promote products derived from this software 
  47  *    without specific prior written permission. 
  49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 
  50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
  51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
  52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 
  53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 
  54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 
  55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 
  56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
  57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 
  58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 
  61  *      @(#)kern_subr.c 8.3 (Berkeley) 1/21/94 
  64 #include <sys/param.h> 
  65 #include <sys/systm.h> 
  66 #include <sys/proc_internal.h> 
  67 #include <sys/malloc.h> 
  68 #include <sys/queue.h> 
  70 #include <sys/uio_internal.h> 
  71 #include <kern/kalloc.h> 
  75 #include <sys/kdebug.h> 
  76 #define DBG_UIO_COPYOUT 16 
  77 #define DBG_UIO_COPYIN  17 
  80 #include <kern/simple_lock.h> 
/* Debug accounting: number of uio_t structures this file has allocated
 * via uio_create() and not yet released in uio_free(). */
static int				uio_t_count = 0;
  92         return uiomove64((addr64_t
)((unsigned int)cp
), n
, uio
); 
  95         // LP64todo - fix this! 'n' should be int64_t? 
  97 uiomove64(addr64_t cp
, int n
, register struct uio 
*uio
) 
 100         register uint64_t acnt
; 
 107         if (uio
->uio_rw 
!= UIO_READ 
&& uio
->uio_rw 
!= UIO_WRITE
) 
 108                 panic("uiomove: mode"); 
 112         if (IS_VALID_UIO_SEGFLG(uio
->uio_segflg
) == 0) { 
 113                 panic("%s :%d - invalid uio_segflg\n", __FILE__
, __LINE__
);  
 115 #endif /* LP64_DEBUG */ 
 117         while (n 
> 0 && uio_resid(uio
)) { 
 118                 acnt 
= uio_iov_len(uio
); 
 124                 if (n 
> 0 && acnt 
> (uint64_t)n
) 
 127                 switch (uio
->uio_segflg
) { 
 129                 case UIO_USERSPACE64
: 
 130                 case UIO_USERISPACE64
: 
 131                         // LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit 
 132                         if (uio
->uio_rw 
== UIO_READ
) 
 134                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
, 
 135                                          (int)cp
, (int)uio
->uio_iovs
.iov64p
->iov_base
, acnt
, 0,0); 
 137                                         error 
= copyout( CAST_DOWN(caddr_t
, cp
), uio
->uio_iovs
.iov64p
->iov_base
, acnt 
); 
 139                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
, 
 140                                          (int)cp
, (int)uio
->uio_iovs
.iov64p
->iov_base
, acnt
, 0,0); 
 144                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
, 
 145                                          (int)uio
->uio_iovs
.iov64p
->iov_base
, (int)cp
, acnt
, 0,0); 
 147                                 error 
= copyin(uio
->uio_iovs
.iov64p
->iov_base
, CAST_DOWN(caddr_t
, cp
), acnt
); 
 149                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
, 
 150                                          (int)uio
->uio_iovs
.iov64p
->iov_base
, (int)cp
, acnt
, 0,0); 
 156                 case UIO_USERSPACE32
: 
 157                 case UIO_USERISPACE32
: 
 160                         if (uio
->uio_rw 
== UIO_READ
) 
 162                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
, 
 163                                          (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 0,0); 
 165                                         error 
= copyout( CAST_DOWN(caddr_t
, cp
), CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
), acnt 
); 
 167                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
, 
 168                                          (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 0,0); 
 172                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
, 
 173                                          (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 0,0); 
 175                                 error 
= copyin(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
), CAST_DOWN(caddr_t
, cp
), acnt
); 
 177                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
, 
 178                                          (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 0,0); 
 186                         if (uio
->uio_rw 
== UIO_READ
) 
 187                                 error 
= copywithin(CAST_DOWN(caddr_t
, cp
), (caddr_t
)uio
->uio_iovs
.iov32p
->iov_base
, 
 190                                 error 
= copywithin((caddr_t
)uio
->uio_iovs
.iov32p
->iov_base
, CAST_DOWN(caddr_t
, cp
), 
 194                 case UIO_PHYS_USERSPACE64
: 
 195                         if (uio
->uio_rw 
== UIO_READ
) 
 197                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
, 
 198                                          (int)cp
, (int)uio
->uio_iovs
.iov64p
->iov_base
, acnt
, 1,0); 
 200                                 error 
= copypv((addr64_t
)cp
, uio
->uio_iovs
.iov64p
->iov_base
, acnt
, cppvPsrc 
| cppvNoRefSrc
); 
 201                                 if (error
)      /* Copy physical to virtual */ 
 204                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
, 
 205                                          (int)cp
, (int)uio
->uio_iovs
.iov64p
->iov_base
, acnt
, 1,0); 
 209                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
, 
 210                                          (int)uio
->uio_iovs
.iov64p
->iov_base
, (int)cp
, acnt
, 1,0); 
 212                                 error 
= copypv(uio
->uio_iovs
.iov64p
->iov_base
, (addr64_t
)cp
, acnt
, cppvPsnk 
| cppvNoRefSrc 
| cppvNoModSnk
); 
 213                                 if (error
)      /* Copy virtual to physical */ 
 216                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
, 
 217                                          (int)uio
->uio_iovs
.iov64p
->iov_base
, (int)cp
, acnt
, 1,0); 
 223                 case UIO_PHYS_USERSPACE32
: 
 224                 case UIO_PHYS_USERSPACE
: 
 225                         if (uio
->uio_rw 
== UIO_READ
) 
 227                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
, 
 228                                          (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 1,0); 
 230                                 error 
= copypv((addr64_t
)cp
, (addr64_t
)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, cppvPsrc 
| cppvNoRefSrc
); 
 231                                 if (error
)      /* Copy physical to virtual */ 
 234                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
, 
 235                                          (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 1,0); 
 239                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
, 
 240                                          (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 1,0); 
 242                                 error 
= copypv((addr64_t
)uio
->uio_iovs
.iov32p
->iov_base
, (addr64_t
)cp
, acnt
, cppvPsnk 
| cppvNoRefSrc 
| cppvNoModSnk
); 
 243                                 if (error
)      /* Copy virtual to physical */ 
 246                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
, 
 247                                          (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 1,0); 
 253                 case UIO_PHYS_SYSSPACE32
: 
 254                 case UIO_PHYS_SYSSPACE
: 
 255                         if (uio
->uio_rw 
== UIO_READ
) 
 257                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_START
, 
 258                                          (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 2,0); 
 260                                 error 
= copypv((addr64_t
)cp
, uio
->uio_iovs
.iov32p
->iov_base
, acnt
, cppvKmap 
| cppvPsrc 
| cppvNoRefSrc
); 
 261                                 if (error
)      /* Copy physical to virtual */ 
 264                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYOUT
)) | DBG_FUNC_END
, 
 265                                          (int)cp
, (int)uio
->uio_iovs
.iov32p
->iov_base
, acnt
, 2,0); 
 269                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_START
, 
 270                                          (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 2,0); 
 272                                 error 
= copypv(uio
->uio_iovs
.iov32p
->iov_base
, (addr64_t
)cp
, acnt
, cppvKmap 
| cppvPsnk 
| cppvNoRefSrc 
| cppvNoModSnk
); 
 273                                 if (error
)      /* Copy virtual to physical */ 
 276                                 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, DBG_UIO_COPYIN
)) | DBG_FUNC_END
, 
 277                                          (int)uio
->uio_iovs
.iov32p
->iov_base
, (int)cp
, acnt
, 2,0); 
 286                 uio_iov_base_add(uio
, acnt
); 
 288                 uio_iov_len_add(uio
, -((int64_t)acnt
)); 
 289                 uio_setresid(uio
, (uio_resid(uio
) - ((int64_t)acnt
))); 
 291                 uio_iov_len_add(uio
, -((int)acnt
)); 
 292                 uio_setresid(uio
, (uio_resid(uio
) - ((int)acnt
))); 
 294                 uio
->uio_offset 
+= acnt
; 
 302  * Give next character to user as result of read. 
 307         register struct uio 
*uio
; 
 309         if (uio_resid(uio
) <= 0) 
 310                 panic("ureadc: non-positive resid"); 
 312         if (uio
->uio_iovcnt 
== 0) 
 313                 panic("ureadc: non-positive iovcnt"); 
 314         if (uio_iov_len(uio
) <= 0) { 
 319         switch (uio
->uio_segflg
) { 
 321         case UIO_USERSPACE32
: 
 323                 if (subyte(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
), c
) < 0) 
 327         case UIO_USERSPACE64
: 
 328                 if (subyte((user_addr_t
)uio
->uio_iovs
.iov64p
->iov_base
, c
) < 0) 
 334                 *((caddr_t
)uio
->uio_iovs
.iov32p
->iov_base
) = c
; 
 337         case UIO_USERISPACE32
: 
 339                 if (suibyte(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
), c
) < 0) 
 346         uio_iov_base_add(uio
, 1); 
 347         uio_iov_len_add(uio
, -1); 
 348         uio_setresid(uio
, (uio_resid(uio
) - 1)); 
 353 #if defined(vax) || defined(ppc) 
 354 /* unused except by ct.c, other oddities XXX */ 
 356  * Get next character written in by user from uio. 
 364         if (uio_resid(uio
) <= 0) 
 367         if (uio
->uio_iovcnt 
<= 0) 
 368                 panic("uwritec: non-positive iovcnt"); 
 370         if (uio_iov_len(uio
) == 0) { 
 372                 if (--uio
->uio_iovcnt 
== 0) 
 376         switch (uio
->uio_segflg
) { 
 378         case UIO_USERSPACE32
: 
 380                 c 
= fubyte(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
)); 
 383         case UIO_USERSPACE64
: 
 384                 c 
= fubyte((user_addr_t
)uio
->uio_iovs
.iov64p
->iov_base
);  
 389                 c 
= *((caddr_t
)uio
->uio_iovs
.iov32p
->iov_base
) & 0377; 
 392         case UIO_USERISPACE32
: 
 394                 c 
= fuibyte(CAST_USER_ADDR_T(uio
->uio_iovs
.iov32p
->iov_base
)); 
 398                 c 
= 0;  /* avoid uninitialized variable warning */ 
 399                 panic("uwritec: bogus uio_segflg"); 
 404         uio_iov_base_add(uio
, 1); 
 405         uio_iov_len_add(uio
, -1); 
 406         uio_setresid(uio
, (uio_resid(uio
) - 1)); 
 410 #endif /* vax || ppc */ 
 413  * General routine to allocate a hash table. 
 416 hashinit(elements
, type
, hashmask
) 
 421         LIST_HEAD(generic
, generic
) *hashtbl
; 
 425                 panic("hashinit: bad cnt"); 
 426         for (hashsize 
= 1; hashsize 
<= elements
; hashsize 
<<= 1) 
 429         MALLOC(hashtbl
, struct generic 
*,  
 430                 (u_long
)hashsize 
* sizeof(*hashtbl
), type
, M_WAITOK
|M_ZERO
); 
 431         if (hashtbl 
!= NULL
) { 
 432                 for (i 
= 0; i 
< hashsize
; i
++) 
 433                         LIST_INIT(&hashtbl
[i
]); 
 434                 *hashmask 
= hashsize 
- 1; 
 440  * uio_resid - return the residual IO value for the given uio_t 
 442 user_ssize_t 
uio_resid( uio_t a_uio 
) 
 446                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 448 /*      if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */ 
 449 /*              panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);  */ 
 453         /* return 0 if there are no active iovecs */ 
 458         if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
 459 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI 
 460                 return( (user_ssize_t
)a_uio
->uio_resid 
); 
 462                 return( a_uio
->uio_resid_64 
); 
 465         return( (user_ssize_t
)a_uio
->uio_resid 
); 
 469  * uio_setresid - set the residual IO value for the given uio_t 
 471 void uio_setresid( uio_t a_uio
, user_ssize_t a_value 
) 
 475                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 477 /*      if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */ 
 478 /*              panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);  */ 
 486         if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
 487 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI 
 488                 a_uio
->uio_resid 
= (int)a_value
; 
 490                 a_uio
->uio_resid_64 
= a_value
; 
 494                 a_uio
->uio_resid 
= (int)a_value
; 
 501  * uio_proc_t - return the proc_t for the given uio_t 
 502  * WARNING - This call is going away.  Find another way to get the proc_t!! 
 504 __private_extern__ proc_t 
uio_proc_t( uio_t a_uio 
) 
 508                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 510 #endif /* LP64_DEBUG */ 
 512         /* return 0 if there are no active iovecs */ 
 516         return( a_uio
->uio_procp 
); 
 520  * uio_setproc_t - set the residual IO value for the given uio_t 
 521  * WARNING - This call is going away.  
 523 __private_extern__ 
void uio_setproc_t( uio_t a_uio
, proc_t a_proc_t 
) 
 527                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 528 #endif /* LP64_DEBUG */ 
 532         a_uio
->uio_procp 
= a_proc_t
; 
 538  * uio_curriovbase - return the base address of the current iovec associated  
 539  *      with the given uio_t.  May return 0. 
 541 user_addr_t 
uio_curriovbase( uio_t a_uio 
) 
 545                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 547 #endif /* LP64_DEBUG */ 
 549         if (a_uio 
== NULL 
|| a_uio
->uio_iovcnt 
< 1) { 
 553         if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
 554                 return(a_uio
->uio_iovs
.uiovp
->iov_base
); 
 556         return((user_addr_t
)((uintptr_t)a_uio
->uio_iovs
.kiovp
->iov_base
)); 
 561  * uio_curriovlen - return the length value of the current iovec associated  
 562  *      with the given uio_t. 
 564 user_size_t 
uio_curriovlen( uio_t a_uio 
) 
 568                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 570 #endif /* LP64_DEBUG */ 
 572         if (a_uio 
== NULL 
|| a_uio
->uio_iovcnt 
< 1) { 
 576         if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
 577                 return(a_uio
->uio_iovs
.uiovp
->iov_len
); 
 579         return((user_size_t
)a_uio
->uio_iovs
.kiovp
->iov_len
); 
 583  * uio_setcurriovlen - set the length value of the current iovec associated  
 584  *      with the given uio_t. 
 586 __private_extern__ 
void uio_setcurriovlen( uio_t a_uio
, user_size_t a_value 
) 
 590                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 592 #endif /* LP64_DEBUG */ 
 598         if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
 599                 a_uio
->uio_iovs
.uiovp
->iov_len 
= a_value
; 
 603                 if (a_value 
> 0xFFFFFFFFull
) { 
 604                         panic("%s :%d - invalid a_value\n", __FILE__
, __LINE__
);  
 606 #endif /* LP64_DEBUG */ 
 607                 a_uio
->uio_iovs
.kiovp
->iov_len 
= (size_t)a_value
; 
 613  * uio_iovcnt - return count of active iovecs for the given uio_t 
 615 int uio_iovcnt( uio_t a_uio 
) 
 619                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 621 #endif /* LP64_DEBUG */ 
 627         return( a_uio
->uio_iovcnt 
); 
 631  * uio_offset - return the current offset value for the given uio_t 
 633 off_t 
uio_offset( uio_t a_uio 
) 
 637                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 639 #endif /* LP64_DEBUG */ 
 644         return( a_uio
->uio_offset 
); 
 648  * uio_setoffset - set the current offset value for the given uio_t 
 650 void uio_setoffset( uio_t a_uio
, off_t a_offset 
) 
 654                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 656 #endif /* LP64_DEBUG */ 
 661         a_uio
->uio_offset 
= a_offset
; 
 666  * uio_rw - return the read / write flag for the given uio_t 
 668 int uio_rw( uio_t a_uio 
) 
 672                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 674 #endif /* LP64_DEBUG */ 
 679         return( a_uio
->uio_rw 
); 
 683  * uio_setrw - set the read / write flag for the given uio_t 
 685 void uio_setrw( uio_t a_uio
, int a_value 
) 
 689         panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 690 #endif /* LP64_DEBUG */ 
 695         if (!(a_value 
== UIO_READ 
|| a_value 
== UIO_WRITE
)) { 
 696                 panic("%s :%d - invalid a_value\n", __FILE__
, __LINE__
);  
 698 #endif /* LP64_DEBUG */ 
 700         if (a_value 
== UIO_READ 
|| a_value 
== UIO_WRITE
) { 
 701                 a_uio
->uio_rw 
= a_value
; 
 707  * uio_isuserspace - return non zero value if the address space  
 708  * flag is for a user address space (could be 32 or 64 bit). 
 710 int uio_isuserspace( uio_t a_uio 
) 
 714                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 715 #endif /* LP64_DEBUG */ 
 719         if (UIO_SEG_IS_USER_SPACE(a_uio
->uio_segflg
)) { 
 727  * uio_create - create an uio_t. 
 728  *      Space is allocated to hold up to a_iovcount number of iovecs.  The uio_t 
 729  *      is not fully initialized until all iovecs are added using uio_addiov calls. 
 730  *      a_iovcount is the maximum number of iovecs you may add. 
 732 uio_t 
uio_create( int a_iovcount
,               /* number of iovecs */ 
 733                                   off_t a_offset
,               /* current offset */ 
 734                                   int a_spacetype
,              /* type of address space */ 
 735                                   int a_iodirection 
)   /* read or write flag */ 
 741         my_size 
= sizeof(struct uio
) + (sizeof(struct user_iovec
) * a_iovcount
); 
 742         my_buf_p 
= kalloc(my_size
); 
 743         my_uio 
= uio_createwithbuffer( a_iovcount
,  
 750                 /* leave a note that we allocated this uio_t */ 
 751                 my_uio
->uio_flags 
|= UIO_FLAGS_WE_ALLOCED
; 
 753                 hw_atomic_add(&uio_t_count
, 1); 
 762  * uio_createwithbuffer - create an uio_t. 
 763  *      Create a uio_t using the given buffer.  The uio_t 
 764  *      is not fully initialized until all iovecs are added using uio_addiov calls. 
 765  *      a_iovcount is the maximum number of iovecs you may add. 
 766  *      This call may fail if the given buffer is not large enough. 
 768 __private_extern__ uio_t 
 
 769         uio_createwithbuffer( int a_iovcount
,           /* number of iovecs */ 
 770                                                         off_t a_offset
,         /* current offset */ 
 771                                                         int a_spacetype
,        /* type of address space */ 
 772                                                         int a_iodirection
,      /* read or write flag */ 
 773                                                         void *a_buf_p
,          /* pointer to a uio_t buffer */ 
 774                                                         int a_buffer_size 
)     /* size of uio_t buffer */ 
 776         uio_t                           my_uio 
= (uio_t
) a_buf_p
; 
 779         my_size 
= sizeof(struct uio
) + (sizeof(struct user_iovec
) * a_iovcount
); 
 780         if (a_buffer_size 
< my_size
) { 
 782                 panic("%s :%d - a_buffer_size is too small\n", __FILE__
, __LINE__
);  
 786         my_size 
= a_buffer_size
; 
 790                 panic("%s :%d - could not allocate uio_t\n", __FILE__
, __LINE__
);  
 792         if (!IS_VALID_UIO_SEGFLG(a_spacetype
)) { 
 793                 panic("%s :%d - invalid address space type\n", __FILE__
, __LINE__
);  
 795         if (!(a_iodirection 
== UIO_READ 
|| a_iodirection 
== UIO_WRITE
)) { 
 796                 panic("%s :%d - invalid IO direction flag\n", __FILE__
, __LINE__
);  
 798         if (a_iovcount 
> UIO_MAXIOV
) { 
 799                 panic("%s :%d - invalid a_iovcount\n", __FILE__
, __LINE__
);  
 803         bzero(my_uio
, my_size
); 
 804         my_uio
->uio_size 
= my_size
; 
 806         /* we use uio_segflg to indicate if the uio_t is the new format or */ 
 807         /* old (pre LP64 support) legacy format */ 
 808         switch (a_spacetype
) { 
 810                 my_uio
->uio_segflg 
= UIO_USERSPACE32
; 
 812                 my_uio
->uio_segflg 
= UIO_SYSSPACE32
; 
 813         case UIO_PHYS_USERSPACE
: 
 814                 my_uio
->uio_segflg 
= UIO_PHYS_USERSPACE32
; 
 815         case UIO_PHYS_SYSSPACE
: 
 816                 my_uio
->uio_segflg 
= UIO_PHYS_SYSSPACE32
; 
 818                 my_uio
->uio_segflg 
= a_spacetype
; 
 822         if (a_iovcount 
> 0) { 
 823                 my_uio
->uio_iovs
.uiovp 
= (struct user_iovec 
*) 
 824                         (((uint8_t *)my_uio
) + sizeof(struct uio
)); 
 827                 my_uio
->uio_iovs
.uiovp 
= NULL
; 
 830         my_uio
->uio_max_iovs 
= a_iovcount
; 
 831         my_uio
->uio_offset 
= a_offset
; 
 832         my_uio
->uio_rw 
= a_iodirection
; 
 833         my_uio
->uio_flags 
= UIO_FLAGS_INITED
; 
 839  * uio_spacetype - return the address space type for the given uio_t 
 841 int uio_spacetype( uio_t a_uio 
) 
 845                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 846 #endif /* LP64_DEBUG */ 
 850         return( a_uio
->uio_segflg 
); 
 854  * uio_iovsaddr - get the address of the iovec array for the given uio_t. 
 855  * This returns the location of the iovecs within the uio. 
 856  * NOTE - for compatibility mode we just return the current value in uio_iovs 
 857  * which will increase as the IO is completed and is NOT embedded within the 
 858  * uio, it is a seperate array of one or more iovecs. 
 860 struct user_iovec 
* uio_iovsaddr( uio_t a_uio 
) 
 862         struct user_iovec 
*             my_addr
; 
 868         if (a_uio
->uio_segflg 
== UIO_USERSPACE 
|| a_uio
->uio_segflg 
== UIO_SYSSPACE
) { 
 869                 /* we need this for compatibility mode. */ 
 870                 my_addr 
= (struct user_iovec 
*) a_uio
->uio_iovs
.iovp
; 
 873                 my_addr 
= (struct user_iovec 
*) (((uint8_t *)a_uio
) + sizeof(struct uio
)); 
 879  * uio_reset - reset an uio_t. 
 880  *      Reset the given uio_t to initial values.  The uio_t is not fully initialized 
 881  *      until all iovecs are added using uio_addiov calls. 
 882  *      The a_iovcount value passed in the uio_create is the maximum number of  
 883  *      iovecs you may add. 
 885 void uio_reset( uio_t a_uio
, 
 886                                 off_t a_offset
,                 /* current offset */ 
 887                                 int a_spacetype
,                /* type of address space */ 
 888                                 int a_iodirection 
)             /* read or write flag */ 
 892         u_int32_t       my_old_flags
; 
 896                 panic("%s :%d - could not allocate uio_t\n", __FILE__
, __LINE__
);  
 898         if (!IS_VALID_UIO_SEGFLG(a_spacetype
)) { 
 899                 panic("%s :%d - invalid address space type\n", __FILE__
, __LINE__
);  
 901         if (!(a_iodirection 
== UIO_READ 
|| a_iodirection 
== UIO_WRITE
)) { 
 902                 panic("%s :%d - invalid IO direction flag\n", __FILE__
, __LINE__
);  
 904 #endif /* LP64_DEBUG */ 
 910         my_size 
= a_uio
->uio_size
; 
 911         my_old_flags 
= a_uio
->uio_flags
; 
 912         my_max_iovs 
= a_uio
->uio_max_iovs
; 
 913         bzero(a_uio
, my_size
); 
 914         a_uio
->uio_size 
= my_size
; 
 915         a_uio
->uio_segflg 
= a_spacetype
; 
 916         if (my_max_iovs 
> 0) { 
 917                 a_uio
->uio_iovs
.uiovp 
= (struct user_iovec 
*) 
 918                         (((uint8_t *)a_uio
) + sizeof(struct uio
)); 
 921                 a_uio
->uio_iovs
.uiovp 
= NULL
; 
 923         a_uio
->uio_max_iovs 
= my_max_iovs
; 
 924         a_uio
->uio_offset 
= a_offset
; 
 925         a_uio
->uio_rw 
= a_iodirection
; 
 926         a_uio
->uio_flags 
= my_old_flags
; 
 932  * uio_free - free a uio_t allocated via uio_init.  this also frees all 
 935 void uio_free( uio_t a_uio 
)  
 939                 panic("%s :%d - passing NULL uio_t\n", __FILE__
, __LINE__
);  
 941 #endif /* LP64_DEBUG */ 
 943         if (a_uio 
!= NULL 
&& (a_uio
->uio_flags 
& UIO_FLAGS_WE_ALLOCED
) != 0) { 
 945                 if ((int)(hw_atomic_sub(&uio_t_count
, 1)) < 0) { 
 946                         panic("%s :%d - uio_t_count has gone negative\n", __FILE__
, __LINE__
);  
 949                 kfree(a_uio
, a_uio
->uio_size
); 
 956  * uio_addiov - add an iovec to the given uio_t.  You may call this up to 
 957  *      the a_iovcount number that was passed to uio_create.  This call will  
 958  *      increment the residual IO count as iovecs are added to the uio_t. 
 959  *      returns 0 if add was successful else non zero. 
 961 int uio_addiov( uio_t a_uio
, user_addr_t a_baseaddr
, user_size_t a_length 
) 
 967                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
 968 #endif /* LP64_DEBUG */ 
 972         if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
 973                 for ( i 
= 0; i 
< a_uio
->uio_max_iovs
; i
++ ) { 
 974                         if (a_uio
->uio_iovs
.uiovp
[i
].iov_len 
== 0 && a_uio
->uio_iovs
.uiovp
[i
].iov_base 
== 0) { 
 975                                 a_uio
->uio_iovs
.uiovp
[i
].iov_len 
= a_length
; 
 976                                 a_uio
->uio_iovs
.uiovp
[i
].iov_base 
= a_baseaddr
; 
 978 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI 
 979                                 a_uio
->uio_resid 
+= a_length
; 
 981                                 a_uio
->uio_resid_64 
+= a_length
; 
 988                 for ( i 
= 0; i 
< a_uio
->uio_max_iovs
; i
++ ) { 
 989                         if (a_uio
->uio_iovs
.kiovp
[i
].iov_len 
== 0 && a_uio
->uio_iovs
.kiovp
[i
].iov_base 
== 0) { 
 990                                 a_uio
->uio_iovs
.kiovp
[i
].iov_len 
= (u_int32_t
)a_length
; 
 991                                 a_uio
->uio_iovs
.kiovp
[i
].iov_base 
= (u_int32_t
)((uintptr_t)a_baseaddr
); 
 993                                 a_uio
->uio_resid 
+= a_length
; 
1003  * uio_getiov - get iovec data associated with the given uio_t.  Use 
1004  *  a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)). 
1005  *  a_baseaddr_p and a_length_p may be NULL. 
1006  *      returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.  
1007  *      returns 0 when data is returned. 
1009 int uio_getiov( uio_t a_uio
,  
1011                  user_addr_t 
* a_baseaddr_p
,  
1012                  user_size_t 
* a_length_p 
) 
1014         if (a_uio 
== NULL
) { 
1016                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
1020     if ( a_index 
< 0 || a_index 
>= a_uio
->uio_iovcnt
) { 
1024         if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
1025         if (a_baseaddr_p 
!= NULL
) { 
1026             *a_baseaddr_p 
= a_uio
->uio_iovs
.uiovp
[a_index
].iov_base
; 
1028         if (a_length_p 
!= NULL
) { 
1029             *a_length_p 
= a_uio
->uio_iovs
.uiovp
[a_index
].iov_len
; 
1033         if (a_baseaddr_p 
!= NULL
) { 
1034             *a_baseaddr_p 
= a_uio
->uio_iovs
.kiovp
[a_index
].iov_base
; 
1036         if (a_length_p 
!= NULL
) { 
1037             *a_length_p 
= a_uio
->uio_iovs
.kiovp
[a_index
].iov_len
; 
1045  * uio_calculateresid - runs through all iovecs associated with this 
1046  *      uio_t and calculates (and sets) the residual IO count. 
1048 __private_extern__ 
void uio_calculateresid( uio_t a_uio 
) 
1052         if (a_uio 
== NULL
) { 
1054                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
1055 #endif /* LP64_DEBUG */ 
1059         a_uio
->uio_iovcnt 
= 0; 
1060         if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
1061 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI 
1062                 a_uio
->uio_resid 
= 0; 
1064                 a_uio
->uio_resid_64 
= 0; 
1066                 for ( i 
= 0; i 
< a_uio
->uio_max_iovs
; i
++ ) { 
1067                         if (a_uio
->uio_iovs
.uiovp
[i
].iov_len 
!= 0 && a_uio
->uio_iovs
.uiovp
[i
].iov_base 
!= 0) { 
1068                                 a_uio
->uio_iovcnt
++; 
1069 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI 
1070                                 a_uio
->uio_resid 
+= a_uio
->uio_iovs
.uiovp
[i
].iov_len
; 
1072                                 a_uio
->uio_resid_64 
+= a_uio
->uio_iovs
.uiovp
[i
].iov_len
; 
1078                 a_uio
->uio_resid 
= 0; 
1079                 for ( i 
= 0; i 
< a_uio
->uio_max_iovs
; i
++ ) { 
1080                         if (a_uio
->uio_iovs
.kiovp
[i
].iov_len 
!= 0 && a_uio
->uio_iovs
.kiovp
[i
].iov_base 
!= 0) { 
1081                                 a_uio
->uio_iovcnt
++; 
1082                                 a_uio
->uio_resid 
+= a_uio
->uio_iovs
.kiovp
[i
].iov_len
; 
1090  * uio_update - update the given uio_t for a_count of completed IO. 
1091  *      This call decrements the current iovec length and residual IO value 
1092  *      and increments the current iovec base address and offset value.  
1093  *      If the current iovec length is 0 then advance to the next 
1095  *      If the a_count passed in is 0, than only do the advancement 
1096  *      over any 0 length iovec's. 
1098 void uio_update( uio_t a_uio
, user_size_t a_count 
) 
1101         if (a_uio 
== NULL
) { 
1102                 panic("%s :%d - invalid uio_t\n", __FILE__
, __LINE__
);  
1104         if (UIO_IS_32_BIT_SPACE(a_uio
) && a_count 
> 0xFFFFFFFFull
) { 
1105                 panic("%s :%d - invalid count value \n", __FILE__
, __LINE__
);  
1107 #endif /* LP64_DEBUG */ 
1109         if (a_uio 
== NULL 
|| a_uio
->uio_iovcnt 
< 1) { 
1113         if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
1115                  * if a_count == 0, then we are asking to skip over 
1119                         if (a_count 
> a_uio
->uio_iovs
.uiovp
->iov_len
) { 
1120                                 a_uio
->uio_iovs
.uiovp
->iov_base 
+= a_uio
->uio_iovs
.uiovp
->iov_len
; 
1121                                 a_uio
->uio_iovs
.uiovp
->iov_len 
= 0; 
1124                                 a_uio
->uio_iovs
.uiovp
->iov_base 
+= a_count
; 
1125                                 a_uio
->uio_iovs
.uiovp
->iov_len 
-= a_count
; 
1127 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI 
1128                         if (a_uio
->uio_resid 
< 0) { 
1129                                 a_uio
->uio_resid 
= 0; 
1131                         if (a_count 
> (user_size_t
)a_uio
->uio_resid
) { 
1132                                 a_uio
->uio_offset 
+= a_uio
->uio_resid
; 
1133                                 a_uio
->uio_resid 
= 0; 
1136                                 a_uio
->uio_offset 
+= a_count
; 
1137                                 a_uio
->uio_resid 
-= a_count
; 
1140                         if (a_uio
->uio_resid_64 
< 0) { 
1141                                 a_uio
->uio_resid_64 
= 0; 
1143                         if (a_count 
> (user_size_t
)a_uio
->uio_resid_64
) { 
1144                                 a_uio
->uio_offset 
+= a_uio
->uio_resid_64
; 
1145                                 a_uio
->uio_resid_64 
= 0; 
1148                                 a_uio
->uio_offset 
+= a_count
; 
1149                                 a_uio
->uio_resid_64 
-= a_count
; 
1154                  * advance to next iovec if current one is totally consumed 
1156                 while (a_uio
->uio_iovcnt 
> 0 && a_uio
->uio_iovs
.uiovp
->iov_len 
== 0) { 
1157                         a_uio
->uio_iovcnt
--; 
1158                         if (a_uio
->uio_iovcnt 
> 0) { 
1159                                 a_uio
->uio_iovs
.uiovp
++; 
1165                  * if a_count == 0, then we are asking to skip over 
1169                         if (a_count 
> a_uio
->uio_iovs
.kiovp
->iov_len
) { 
1170                                 a_uio
->uio_iovs
.kiovp
->iov_base 
+= a_uio
->uio_iovs
.kiovp
->iov_len
; 
1171                                 a_uio
->uio_iovs
.kiovp
->iov_len 
= 0; 
1174                                 a_uio
->uio_iovs
.kiovp
->iov_base 
+= a_count
; 
1175                                 a_uio
->uio_iovs
.kiovp
->iov_len 
-= a_count
; 
1177                         if (a_uio
->uio_resid 
< 0) { 
1178                                 a_uio
->uio_resid 
= 0; 
1180                         if (a_count 
> (user_size_t
)a_uio
->uio_resid
) { 
1181                                 a_uio
->uio_offset 
+= a_uio
->uio_resid
; 
1182                                 a_uio
->uio_resid 
= 0; 
1185                                 a_uio
->uio_offset 
+= a_count
; 
1186                                 a_uio
->uio_resid 
-= a_count
; 
1190                  * advance to next iovec if current one is totally consumed 
1192                 while (a_uio
->uio_iovcnt 
> 0 && a_uio
->uio_iovs
.kiovp
->iov_len 
== 0) { 
1193                         a_uio
->uio_iovcnt
--; 
1194                         if (a_uio
->uio_iovcnt 
> 0) { 
1195                                 a_uio
->uio_iovs
.kiovp
++; 
1204  * uio_duplicate - allocate a new uio and make a copy of the given uio_t. 
1207 uio_t 
uio_duplicate( uio_t a_uio 
) 
1212         if (a_uio 
== NULL
) { 
1216         my_uio 
= (uio_t
) kalloc(a_uio
->uio_size
); 
1218                 panic("%s :%d - allocation failed\n", __FILE__
, __LINE__
);  
1221         bcopy((void *)a_uio
, (void *)my_uio
, a_uio
->uio_size
); 
1222         /* need to set our iovec pointer to point to first active iovec */ 
1223         if (my_uio
->uio_max_iovs 
> 0) { 
1224                 my_uio
->uio_iovs
.uiovp 
= (struct user_iovec 
*) 
1225                         (((uint8_t *)my_uio
) + sizeof(struct uio
)); 
1227                 /* advance to first nonzero iovec */ 
1228                 if (my_uio
->uio_iovcnt 
> 0) { 
1229                         for ( i 
= 0; i 
< my_uio
->uio_max_iovs
; i
++ ) { 
1230                                 if (UIO_IS_64_BIT_SPACE(a_uio
)) { 
1231                                         if (my_uio
->uio_iovs
.uiovp
->iov_len 
!= 0) { 
1234                                         my_uio
->uio_iovs
.uiovp
++; 
1237                                         if (my_uio
->uio_iovs
.kiovp
->iov_len 
!= 0) { 
1240                                         my_uio
->uio_iovs
.kiovp
++;