]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_subr.c
8ffe0738014b7ccfdb04ad809ab06078c93f3a44
[apple/xnu.git] / bsd / kern / kern_subr.c
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
31 /*
32 * Copyright (c) 1982, 1986, 1991, 1993
33 * The Regents of the University of California. All rights reserved.
34 * (c) UNIX System Laboratories, Inc.
35 * All or some portions of this file are derived from material licensed
36 * to the University of California by American Telephone and Telegraph
37 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
38 * the permission of UNIX System Laboratories, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
69 */
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc_internal.h>
74 #include <sys/malloc.h>
75 #include <sys/queue.h>
76 #include <vm/pmap.h>
77 #include <sys/uio_internal.h>
78 #include <kern/kalloc.h>
79
80 #include <kdebug.h>
81
82 #include <sys/kdebug.h>
83 #define DBG_UIO_COPYOUT 16
84 #define DBG_UIO_COPYIN 17
85
86 #if DEBUG
87 #include <kern/simple_lock.h>
88
89 static int uio_t_count = 0;
90 #endif /* DEBUG */
91
92
/*
 * uiomove - move 'n' bytes between kernel buffer 'cp' and the address
 * space described by 'uio'.  Thin wrapper that widens the kernel
 * address and defers to uiomove64().  Returns 0 or an errno.
 *
 * NOTE(review): the (unsigned int) cast truncates 'cp' to 32 bits
 * before widening to addr64_t -- consistent with the LP64todo markers
 * in this file; confirm before use on an LP64 kernel.
 */
int
uiomove(cp, n, uio)
	register caddr_t cp;
	register int n;
	register uio_t uio;
{
	return uiomove64((addr64_t)((unsigned int)cp), n, uio);
}
101
// LP64todo - fix this! 'n' should be int64_t?
/*
 * uiomove64 - move up to 'n' bytes between the kernel address 'cp' and
 * the space described by 'uio'.  Direction comes from uio->uio_rw:
 * UIO_READ copies from 'cp' out to the uio, UIO_WRITE copies from the
 * uio into 'cp'.  The current iovec, residual count and uio_offset are
 * advanced as data moves.  Returns 0, or the first error from the
 * underlying copy primitive (EFAULT for the physical-copy cases).
 */
int
uiomove64(addr64_t cp, int n, register struct uio *uio)
{
#if LP64KERN
	register uint64_t acnt;		/* bytes moved this iteration */
#else
	register u_int acnt;
#endif
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		/* skip exhausted iovecs; otherwise move at most the
		 * smaller of the iovec length and the bytes remaining */
		acnt = uio_iov_len(uio);
		if (acnt == 0) {
			uio_next_iov(uio);
			uio->uio_iovcnt--;
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;

		switch (uio->uio_segflg) {

		/* 64-bit user (virtual) addresses: copyin/copyout */
		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);

				error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.iov64p->iov_base, acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);

				error = copyin(uio->uio_iovs.iov64p->iov_base, CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);
			}
			if (error)
				return (error);
			break;

		/* 32-bit / legacy user (virtual) addresses */
		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);

				error = copyout( CAST_DOWN(caddr_t, cp), CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);

				error = copyin(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);
			}
			if (error)
				return (error);
			break;

		/* kernel virtual addresses: plain kernel-to-kernel copy */
		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), (caddr_t)uio->uio_iovs.iov32p->iov_base,
						   acnt);
			else
				error = copywithin((caddr_t)uio->uio_iovs.iov32p->iov_base, CAST_DOWN(caddr_t, cp),
						   acnt);
			break;

		/* 'cp' is a physical address; use copypv with 64-bit user iovec */
		case UIO_PHYS_USERSPACE64:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov64p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);

				error = copypv(uio->uio_iovs.iov64p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);
			}
			if (error)
				return (error);
			break;

		/* physical 'cp', 32-bit / legacy user iovec */
		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, (addr64_t)uio->uio_iovs.iov32p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);

				error = copypv((addr64_t)uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);
			}
			if (error)
				return (error);
			break;

		/* physical 'cp', kernel virtual iovec: copypv with cppvKmap */
		case UIO_PHYS_SYSSPACE32:
		case UIO_PHYS_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov32p->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);

				error = copypv(uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);
			}
			if (error)
				return (error);
			break;

		default:
			break;
		}
		/* account for the bytes moved this pass */
		uio_iov_base_add(uio, acnt);
#if LP64KERN
		uio_iov_len_add(uio, -((int64_t)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int64_t)acnt)));
#else
		uio_iov_len_add(uio, -((int)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int)acnt)));
#endif
		uio->uio_offset += acnt;
		cp += acnt;
		n -= acnt;
	}
	return (error);
}
307
308 /*
309 * Give next character to user as result of read.
310 */
311 int
312 ureadc(c, uio)
313 register int c;
314 register struct uio *uio;
315 {
316 if (uio_resid(uio) <= 0)
317 panic("ureadc: non-positive resid");
318 again:
319 if (uio->uio_iovcnt == 0)
320 panic("ureadc: non-positive iovcnt");
321 if (uio_iov_len(uio) <= 0) {
322 uio->uio_iovcnt--;
323 uio_next_iov(uio);
324 goto again;
325 }
326 switch (uio->uio_segflg) {
327
328 case UIO_USERSPACE32:
329 case UIO_USERSPACE:
330 if (subyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
331 return (EFAULT);
332 break;
333
334 case UIO_USERSPACE64:
335 if (subyte((user_addr_t)uio->uio_iovs.iov64p->iov_base, c) < 0)
336 return (EFAULT);
337 break;
338
339 case UIO_SYSSPACE32:
340 case UIO_SYSSPACE:
341 *((caddr_t)uio->uio_iovs.iov32p->iov_base) = c;
342 break;
343
344 case UIO_USERISPACE32:
345 case UIO_USERISPACE:
346 if (suibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
347 return (EFAULT);
348 break;
349
350 default:
351 break;
352 }
353 uio_iov_base_add(uio, 1);
354 uio_iov_len_add(uio, -1);
355 uio_setresid(uio, (uio_resid(uio) - 1));
356 uio->uio_offset++;
357 return (0);
358 }
359
#if defined(vax) || defined(ppc)
/* unused except by ct.c, other oddities XXX */
/*
 * uwritec - fetch the next character written in by the user from 'uio'.
 * Advances the current iovec, residual count and uio_offset by one.
 * Returns the character (0..255) or -1 when nothing can be fetched.
 */
int
uwritec(uio_t uio)
{
	int c = 0;

	if (uio_resid(uio) <= 0)
		return (-1);

	/* step past any zero-length iovecs; bail if none remain */
	for (;;) {
		if (uio->uio_iovcnt <= 0)
			panic("uwritec: non-positive iovcnt");
		if (uio_iov_len(uio) != 0)
			break;
		uio_next_iov(uio);
		if (--uio->uio_iovcnt == 0)
			return (-1);
	}

	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
		c = fubyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	case UIO_USERSPACE64:
		c = fubyte((user_addr_t)uio->uio_iovs.iov64p->iov_base);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		c = *((caddr_t)uio->uio_iovs.iov32p->iov_base) & 0377;
		break;

	case UIO_USERISPACE32:
	case UIO_USERISPACE:
		c = fuibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	default:
		c = 0;	/* avoid uninitialized variable warning */
		panic("uwritec: bogus uio_segflg");
		break;
	}

	/* fubyte/fuibyte report a fault as a negative value */
	if (c < 0)
		return (-1);

	uio_iov_base_add(uio, 1);
	uio_iov_len_add(uio, -1);
	uio_setresid(uio, (uio_resid(uio) - 1));
	uio->uio_offset++;
	return (c);
}
#endif /* vax || ppc */
418
419 /*
420 * General routine to allocate a hash table.
421 */
422 void *
423 hashinit(elements, type, hashmask)
424 int elements, type;
425 u_long *hashmask;
426 {
427 long hashsize;
428 LIST_HEAD(generic, generic) *hashtbl;
429 int i;
430
431 if (elements <= 0)
432 panic("hashinit: bad cnt");
433 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
434 continue;
435 hashsize >>= 1;
436 MALLOC(hashtbl, struct generic *,
437 (u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK|M_ZERO);
438 if (hashtbl != NULL) {
439 for (i = 0; i < hashsize; i++)
440 LIST_INIT(&hashtbl[i]);
441 *hashmask = hashsize - 1;
442 }
443 return (hashtbl);
444 }
445
446 /*
447 * uio_resid - return the residual IO value for the given uio_t
448 */
449 user_ssize_t uio_resid( uio_t a_uio )
450 {
451 #if DEBUG
452 if (a_uio == NULL) {
453 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
454 }
455 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
456 /* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
457 /* } */
458 #endif /* DEBUG */
459
460 /* return 0 if there are no active iovecs */
461 if (a_uio == NULL) {
462 return( 0 );
463 }
464
465 if (UIO_IS_64_BIT_SPACE(a_uio)) {
466 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
467 return( (user_ssize_t)a_uio->uio_resid );
468 #else
469 return( a_uio->uio_resid_64 );
470 #endif
471 }
472 return( (user_ssize_t)a_uio->uio_resid );
473 }
474
475 /*
476 * uio_setresid - set the residual IO value for the given uio_t
477 */
478 void uio_setresid( uio_t a_uio, user_ssize_t a_value )
479 {
480 #if DEBUG
481 if (a_uio == NULL) {
482 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
483 }
484 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
485 /* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
486 /* } */
487 #endif /* DEBUG */
488
489 if (a_uio == NULL) {
490 return;
491 }
492
493 if (UIO_IS_64_BIT_SPACE(a_uio)) {
494 #if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
495 a_uio->uio_resid = (int)a_value;
496 #else
497 a_uio->uio_resid_64 = a_value;
498 #endif
499 }
500 else {
501 a_uio->uio_resid = (int)a_value;
502 }
503 return;
504 }
505
#if 0 // obsolete
/* NOTE(review): this whole section is compiled out (#if 0) and retained
 * for reference only -- see the WARNING comments below about these
 * calls going away. */
/*
 * uio_proc_t - return the proc_t for the given uio_t
 * WARNING - This call is going away. Find another way to get the proc_t!!
 */
__private_extern__ proc_t uio_proc_t( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return( NULL );
	}
	return( a_uio->uio_procp );
}

/*
 * uio_setproc_t - set the residual IO value for the given uio_t
 * WARNING - This call is going away.
 */
__private_extern__ void uio_setproc_t( uio_t a_uio, proc_t a_proc_t )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

	a_uio->uio_procp = a_proc_t;
	return;
}
#endif // obsolete
543
544 /*
545 * uio_curriovbase - return the base address of the current iovec associated
546 * with the given uio_t. May return 0.
547 */
548 user_addr_t uio_curriovbase( uio_t a_uio )
549 {
550 #if LP64_DEBUG
551 if (a_uio == NULL) {
552 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
553 }
554 #endif /* LP64_DEBUG */
555
556 if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
557 return(0);
558 }
559
560 if (UIO_IS_64_BIT_SPACE(a_uio)) {
561 return(a_uio->uio_iovs.uiovp->iov_base);
562 }
563 return((user_addr_t)((uintptr_t)a_uio->uio_iovs.kiovp->iov_base));
564
565 }
566
567 /*
568 * uio_curriovlen - return the length value of the current iovec associated
569 * with the given uio_t.
570 */
571 user_size_t uio_curriovlen( uio_t a_uio )
572 {
573 #if LP64_DEBUG
574 if (a_uio == NULL) {
575 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
576 }
577 #endif /* LP64_DEBUG */
578
579 if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
580 return(0);
581 }
582
583 if (UIO_IS_64_BIT_SPACE(a_uio)) {
584 return(a_uio->uio_iovs.uiovp->iov_len);
585 }
586 return((user_size_t)a_uio->uio_iovs.kiovp->iov_len);
587 }
588
589 /*
590 * uio_setcurriovlen - set the length value of the current iovec associated
591 * with the given uio_t.
592 */
593 __private_extern__ void uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
594 {
595 #if LP64_DEBUG
596 if (a_uio == NULL) {
597 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
598 }
599 #endif /* LP64_DEBUG */
600
601 if (a_uio == NULL) {
602 return;
603 }
604
605 if (UIO_IS_64_BIT_SPACE(a_uio)) {
606 a_uio->uio_iovs.uiovp->iov_len = a_value;
607 }
608 else {
609 #if LP64_DEBUG
610 if (a_value > 0xFFFFFFFFull) {
611 panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
612 }
613 #endif /* LP64_DEBUG */
614 a_uio->uio_iovs.kiovp->iov_len = (size_t)a_value;
615 }
616 return;
617 }
618
619 /*
620 * uio_iovcnt - return count of active iovecs for the given uio_t
621 */
622 int uio_iovcnt( uio_t a_uio )
623 {
624 #if LP64_DEBUG
625 if (a_uio == NULL) {
626 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
627 }
628 #endif /* LP64_DEBUG */
629
630 if (a_uio == NULL) {
631 return(0);
632 }
633
634 return( a_uio->uio_iovcnt );
635 }
636
637 /*
638 * uio_offset - return the current offset value for the given uio_t
639 */
640 off_t uio_offset( uio_t a_uio )
641 {
642 #if LP64_DEBUG
643 if (a_uio == NULL) {
644 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
645 }
646 #endif /* LP64_DEBUG */
647
648 if (a_uio == NULL) {
649 return(0);
650 }
651 return( a_uio->uio_offset );
652 }
653
654 /*
655 * uio_setoffset - set the current offset value for the given uio_t
656 */
657 void uio_setoffset( uio_t a_uio, off_t a_offset )
658 {
659 #if LP64_DEBUG
660 if (a_uio == NULL) {
661 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
662 }
663 #endif /* LP64_DEBUG */
664
665 if (a_uio == NULL) {
666 return;
667 }
668 a_uio->uio_offset = a_offset;
669 return;
670 }
671
672 /*
673 * uio_rw - return the read / write flag for the given uio_t
674 */
675 int uio_rw( uio_t a_uio )
676 {
677 #if LP64_DEBUG
678 if (a_uio == NULL) {
679 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
680 }
681 #endif /* LP64_DEBUG */
682
683 if (a_uio == NULL) {
684 return(-1);
685 }
686 return( a_uio->uio_rw );
687 }
688
689 /*
690 * uio_setrw - set the read / write flag for the given uio_t
691 */
692 void uio_setrw( uio_t a_uio, int a_value )
693 {
694 if (a_uio == NULL) {
695 #if LP64_DEBUG
696 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
697 #endif /* LP64_DEBUG */
698 return;
699 }
700
701 #if LP64_DEBUG
702 if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
703 panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
704 }
705 #endif /* LP64_DEBUG */
706
707 if (a_value == UIO_READ || a_value == UIO_WRITE) {
708 a_uio->uio_rw = a_value;
709 }
710 return;
711 }
712
713 /*
714 * uio_isuserspace - return non zero value if the address space
715 * flag is for a user address space (could be 32 or 64 bit).
716 */
717 int uio_isuserspace( uio_t a_uio )
718 {
719 if (a_uio == NULL) {
720 #if LP64_DEBUG
721 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
722 #endif /* LP64_DEBUG */
723 return(0);
724 }
725
726 if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
727 return( 1 );
728 }
729 return( 0 );
730 }
731
732
733 /*
734 * uio_create - create an uio_t.
735 * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t
736 * is not fully initialized until all iovecs are added using uio_addiov calls.
737 * a_iovcount is the maximum number of iovecs you may add.
738 */
739 uio_t uio_create( int a_iovcount, /* number of iovecs */
740 off_t a_offset, /* current offset */
741 int a_spacetype, /* type of address space */
742 int a_iodirection ) /* read or write flag */
743 {
744 void * my_buf_p;
745 int my_size;
746 uio_t my_uio;
747
748 my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
749 my_buf_p = kalloc(my_size);
750 my_uio = uio_createwithbuffer( a_iovcount,
751 a_offset,
752 a_spacetype,
753 a_iodirection,
754 my_buf_p,
755 my_size );
756 if (my_uio != 0) {
757 /* leave a note that we allocated this uio_t */
758 my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
759 #if DEBUG
760 hw_atomic_add(&uio_t_count, 1);
761 #endif
762 }
763
764 return( my_uio );
765 }
766
767
768 /*
769 * uio_createwithbuffer - create an uio_t.
770 * Create a uio_t using the given buffer. The uio_t
771 * is not fully initialized until all iovecs are added using uio_addiov calls.
772 * a_iovcount is the maximum number of iovecs you may add.
773 * This call may fail if the given buffer is not large enough.
774 */
775 __private_extern__ uio_t
776 uio_createwithbuffer( int a_iovcount, /* number of iovecs */
777 off_t a_offset, /* current offset */
778 int a_spacetype, /* type of address space */
779 int a_iodirection, /* read or write flag */
780 void *a_buf_p, /* pointer to a uio_t buffer */
781 int a_buffer_size ) /* size of uio_t buffer */
782 {
783 uio_t my_uio = (uio_t) a_buf_p;
784 int my_size;
785
786 my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
787 if (a_buffer_size < my_size) {
788 #if DEBUG
789 panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__);
790 #endif /* DEBUG */
791 return( NULL );
792 }
793 my_size = a_buffer_size;
794
795 #if DEBUG
796 if (my_uio == 0) {
797 panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
798 }
799 if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
800 panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
801 }
802 if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
803 panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
804 }
805 if (a_iovcount > UIO_MAXIOV) {
806 panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__);
807 }
808 #endif /* DEBUG */
809
810 bzero(my_uio, my_size);
811 my_uio->uio_size = my_size;
812
813 /* we use uio_segflg to indicate if the uio_t is the new format or */
814 /* old (pre LP64 support) legacy format */
815 switch (a_spacetype) {
816 case UIO_USERSPACE:
817 my_uio->uio_segflg = UIO_USERSPACE32;
818 case UIO_SYSSPACE:
819 my_uio->uio_segflg = UIO_SYSSPACE32;
820 case UIO_PHYS_USERSPACE:
821 my_uio->uio_segflg = UIO_PHYS_USERSPACE32;
822 case UIO_PHYS_SYSSPACE:
823 my_uio->uio_segflg = UIO_PHYS_SYSSPACE32;
824 default:
825 my_uio->uio_segflg = a_spacetype;
826 break;
827 }
828
829 if (a_iovcount > 0) {
830 my_uio->uio_iovs.uiovp = (struct user_iovec *)
831 (((uint8_t *)my_uio) + sizeof(struct uio));
832 }
833 else {
834 my_uio->uio_iovs.uiovp = NULL;
835 }
836
837 my_uio->uio_max_iovs = a_iovcount;
838 my_uio->uio_offset = a_offset;
839 my_uio->uio_rw = a_iodirection;
840 my_uio->uio_flags = UIO_FLAGS_INITED;
841
842 return( my_uio );
843 }
844
845 /*
846 * uio_spacetype - return the address space type for the given uio_t
847 */
848 int uio_spacetype( uio_t a_uio )
849 {
850 if (a_uio == NULL) {
851 #if LP64_DEBUG
852 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
853 #endif /* LP64_DEBUG */
854 return(-1);
855 }
856
857 return( a_uio->uio_segflg );
858 }
859
860 /*
861 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
862 * This returns the location of the iovecs within the uio.
863 * NOTE - for compatibility mode we just return the current value in uio_iovs
864 * which will increase as the IO is completed and is NOT embedded within the
865 * uio, it is a seperate array of one or more iovecs.
866 */
867 struct user_iovec * uio_iovsaddr( uio_t a_uio )
868 {
869 struct user_iovec * my_addr;
870
871 if (a_uio == NULL) {
872 return(NULL);
873 }
874
875 if (a_uio->uio_segflg == UIO_USERSPACE || a_uio->uio_segflg == UIO_SYSSPACE) {
876 /* we need this for compatibility mode. */
877 my_addr = (struct user_iovec *) a_uio->uio_iovs.iovp;
878 }
879 else {
880 my_addr = (struct user_iovec *) (((uint8_t *)a_uio) + sizeof(struct uio));
881 }
882 return(my_addr);
883 }
884
885 /*
886 * uio_reset - reset an uio_t.
887 * Reset the given uio_t to initial values. The uio_t is not fully initialized
888 * until all iovecs are added using uio_addiov calls.
889 * The a_iovcount value passed in the uio_create is the maximum number of
890 * iovecs you may add.
891 */
892 void uio_reset( uio_t a_uio,
893 off_t a_offset, /* current offset */
894 int a_spacetype, /* type of address space */
895 int a_iodirection ) /* read or write flag */
896 {
897 vm_size_t my_size;
898 int my_max_iovs;
899 u_int32_t my_old_flags;
900
901 #if LP64_DEBUG
902 if (a_uio == NULL) {
903 panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
904 }
905 if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
906 panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
907 }
908 if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
909 panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
910 }
911 #endif /* LP64_DEBUG */
912
913 if (a_uio == NULL) {
914 return;
915 }
916
917 my_size = a_uio->uio_size;
918 my_old_flags = a_uio->uio_flags;
919 my_max_iovs = a_uio->uio_max_iovs;
920 bzero(a_uio, my_size);
921 a_uio->uio_size = my_size;
922 a_uio->uio_segflg = a_spacetype;
923 if (my_max_iovs > 0) {
924 a_uio->uio_iovs.uiovp = (struct user_iovec *)
925 (((uint8_t *)a_uio) + sizeof(struct uio));
926 }
927 else {
928 a_uio->uio_iovs.uiovp = NULL;
929 }
930 a_uio->uio_max_iovs = my_max_iovs;
931 a_uio->uio_offset = a_offset;
932 a_uio->uio_rw = a_iodirection;
933 a_uio->uio_flags = my_old_flags;
934
935 return;
936 }
937
/*
 * uio_free - free a uio_t allocated via uio_init. this also frees all
 * associated iovecs.
 */
void uio_free( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	/* only free buffers this module allocated (uio_create set
	 * UIO_FLAGS_WE_ALLOCED); caller-supplied buffers passed to
	 * uio_createwithbuffer remain the caller's responsibility */
	if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
#if DEBUG
		if ((int)(hw_atomic_sub(&uio_t_count, 1)) < 0) {
			panic("%s :%d - uio_t_count has gone negative\n", __FILE__, __LINE__);
		}
#endif
		/* one allocation covered the uio and its iovec array */
		kfree(a_uio, a_uio->uio_size);
	}


}
961
/*
 * uio_addiov - add an iovec to the given uio_t. You may call this up to
 * the a_iovcount number that was passed to uio_create. This call will
 * increment the residual IO count as iovecs are added to the uio_t.
 * returns 0 if add was successful else non zero.
 */
int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
	int			i;

	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return(-1);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		/* claim the first free slot (zero base and length) in the
		 * embedded user_iovec array */
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) {
				a_uio->uio_iovs.uiovp[i].iov_len = a_length;
				a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr;
				a_uio->uio_iovcnt++;
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
				a_uio->uio_resid += a_length;
#else
				a_uio->uio_resid_64 += a_length;
#endif
				return( 0 );
			}
		}
	}
	else {
		/* 32-bit space: same first-free-slot search over the
		 * kernel_iovec array, narrowing base and length to 32 bits */
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) {
				a_uio->uio_iovs.kiovp[i].iov_len = (u_int32_t)a_length;
				a_uio->uio_iovs.kiovp[i].iov_base = (u_int32_t)((uintptr_t)a_baseaddr);
				a_uio->uio_iovcnt++;
				a_uio->uio_resid += a_length;
				return( 0 );
			}
		}
	}

	/* no free iovec slot left */
	return( -1 );
}
1008
1009 /*
1010 * uio_getiov - get iovec data associated with the given uio_t. Use
1011 * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
1012 * a_baseaddr_p and a_length_p may be NULL.
1013 * returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
1014 * returns 0 when data is returned.
1015 */
1016 int uio_getiov( uio_t a_uio,
1017 int a_index,
1018 user_addr_t * a_baseaddr_p,
1019 user_size_t * a_length_p )
1020 {
1021 if (a_uio == NULL) {
1022 #if DEBUG
1023 panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
1024 #endif /* DEBUG */
1025 return(-1);
1026 }
1027 if ( a_index < 0 || a_index >= a_uio->uio_iovcnt) {
1028 return(-1);
1029 }
1030
1031 if (UIO_IS_64_BIT_SPACE(a_uio)) {
1032 if (a_baseaddr_p != NULL) {
1033 *a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base;
1034 }
1035 if (a_length_p != NULL) {
1036 *a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len;
1037 }
1038 }
1039 else {
1040 if (a_baseaddr_p != NULL) {
1041 *a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base;
1042 }
1043 if (a_length_p != NULL) {
1044 *a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len;
1045 }
1046 }
1047
1048 return( 0 );
1049 }
1050
/*
 * uio_calculateresid - runs through all iovecs associated with this
 *	uio_t and calculates (and sets) the residual IO count.
 * Also activates all iovec slots (uio_iovcnt = uio_max_iovs) and then
 * positions the current iovec pointer at the first non-empty entry.
 */
__private_extern__ void uio_calculateresid( uio_t a_uio )
{
	int			i;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

	a_uio->uio_iovcnt = a_uio->uio_max_iovs;
	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		a_uio->uio_resid = 0;
#else
		a_uio->uio_resid_64 = 0;
#endif
		/* sum the lengths of all populated iovecs */
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
				a_uio->uio_resid += a_uio->uio_iovs.uiovp[i].iov_len;
#else
				a_uio->uio_resid_64 += a_uio->uio_iovs.uiovp[i].iov_len;
#endif
			}
		}

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		a_uio->uio_resid = 0;
		/* 32-bit space: same summation over the kernel_iovec array */
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) {
				a_uio->uio_resid += a_uio->uio_iovs.kiovp[i].iov_len;
			}
		}

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}

	return;
}
1110
/*
 * uio_update - update the given uio_t for a_count of completed IO.
 * This call decrements the current iovec length and residual IO value
 * and increments the current iovec base address and offset value. 
 * If the current iovec length is 0 then advance to the next
 * iovec (if any).
 * If the a_count passed in is 0, then only do the advancement
 * over any 0 length iovec's.
 */
void uio_update( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); 
	}
	/* a 32-bit space uio cannot describe more than 4GB of transfer */
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__); 
	}
#endif /* LP64_DEBUG */

	/* nothing to update when there are no active iovecs */
	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			/* consume at most the remainder of the current iovec;
			 * an oversized a_count simply exhausts it */
			if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
				a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
				a_uio->uio_iovs.uiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.uiovp->iov_base += a_count;
				a_uio->uio_iovs.uiovp->iov_len -= a_count;
			}
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
			/* clamp the residual at zero and never advance the offset
			 * past the end of the described transfer */
			if (a_uio->uio_resid < 0) {
				a_uio->uio_resid = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid) {
				a_uio->uio_offset += a_uio->uio_resid;
				a_uio->uio_resid = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid -= a_count;
			}
#else
			if (a_uio->uio_resid_64 < 0) {
				a_uio->uio_resid_64 = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
#endif // LP64todo
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			/* same consume/clamp logic as the 64-bit branch, via kiovp */
			if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
				a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
				a_uio->uio_iovs.kiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.kiovp->iov_base += a_count;
				a_uio->uio_iovs.kiovp->iov_len -= a_count;
			}
			if (a_uio->uio_resid < 0) {
				a_uio->uio_resid = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid) {
				a_uio->uio_offset += a_uio->uio_resid;
				a_uio->uio_resid = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}
	return;
}
1223
1224
1225 /*
1226 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
1227 * may return NULL.
1228 */
1229 uio_t uio_duplicate( uio_t a_uio )
1230 {
1231 uio_t my_uio;
1232 int i;
1233
1234 if (a_uio == NULL) {
1235 return(NULL);
1236 }
1237
1238 my_uio = (uio_t) kalloc(a_uio->uio_size);
1239 if (my_uio == 0) {
1240 panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
1241 }
1242
1243 bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
1244 /* need to set our iovec pointer to point to first active iovec */
1245 if (my_uio->uio_max_iovs > 0) {
1246 my_uio->uio_iovs.uiovp = (struct user_iovec *)
1247 (((uint8_t *)my_uio) + sizeof(struct uio));
1248
1249 /* advance to first nonzero iovec */
1250 if (my_uio->uio_iovcnt > 0) {
1251 for ( i = 0; i < my_uio->uio_max_iovs; i++ ) {
1252 if (UIO_IS_64_BIT_SPACE(a_uio)) {
1253 if (my_uio->uio_iovs.uiovp->iov_len != 0) {
1254 break;
1255 }
1256 my_uio->uio_iovs.uiovp++;
1257 }
1258 else {
1259 if (my_uio->uio_iovs.kiovp->iov_len != 0) {
1260 break;
1261 }
1262 my_uio->uio_iovs.kiovp++;
1263 }
1264 }
1265 }
1266 }
1267
1268 return(my_uio);
1269 }
1270