/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <sys/uio_internal.h>
#include <kern/kalloc.h>

#include <kdebug.h>

#include <sys/kdebug.h>
#define DBG_UIO_COPYOUT 16
#define DBG_UIO_COPYIN  17

#if DEBUG
#include <kern/simple_lock.h>

static uint32_t	uio_t_count = 0;
#endif /* DEBUG */

#define IS_VALID_UIO_SEGFLG(segflg)	\
	( (segflg) == UIO_USERSPACE || \
	  (segflg) == UIO_SYSSPACE || \
	  (segflg) == UIO_USERSPACE32 || \
	  (segflg) == UIO_USERSPACE64 || \
	  (segflg) == UIO_SYSSPACE32 || \
	  (segflg) == UIO_USERISPACE || \
	  (segflg) == UIO_PHYS_USERSPACE || \
	  (segflg) == UIO_PHYS_SYSSPACE || \
	  (segflg) == UIO_USERISPACE32 || \
	  (segflg) == UIO_PHYS_USERSPACE32 || \
	  (segflg) == UIO_USERISPACE64 || \
	  (segflg) == UIO_PHYS_USERSPACE64 )

/*
 * Returns:	0			Success
 *	uiomove64:EFAULT
 *
 * Notes:	The first argument should be a caddr_t, but const poisoning
 *		for typedef'ed types doesn't work in gcc.
 */
int
uiomove(const char * cp, int n, uio_t uio)
{
	return uiomove64((const addr64_t)(uintptr_t)cp, n, uio);
}

/*
 * Returns:	0			Success
 *		EFAULT
 *	copyout:EFAULT
 *	copyin:EFAULT
 *	copywithin:EFAULT
 *	copypv:EFAULT
 */
int
uiomove64(const addr64_t c_cp, int n, struct uio *uio)
{
	addr64_t cp = c_cp;
#if LP64KERN
	uint64_t acnt;
#else
	u_int acnt;
#endif
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		uio_update(uio, 0);
		acnt = uio_curriovlen(uio);
		if (acnt == 0) {
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0,0);

				error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.uiovp->iov_base, acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0,0);

				error = copyin(uio->uio_iovs.uiovp->iov_base, CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0,0);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base),
						   acnt);
			else
				error = copywithin(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base), CAST_DOWN(caddr_t, cp),
						   acnt);
			break;

		case UIO_PHYS_USERSPACE64:
		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.uiovp->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1,0);

				error = copypv(uio->uio_iovs.uiovp->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1,0);
			}
			if (error)
				return (error);
			break;

		case UIO_PHYS_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.kiovp->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2,0);

				error = copypv(uio->uio_iovs.kiovp->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2,0);
			}
			if (error)
				return (error);
			break;

		default:
			break;
		}
		uio_update(uio, acnt);
		cp += acnt;
		n -= acnt;
	}
	return (error);
}
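
/*
 * Usage sketch for uiomove(): a hypothetical handler that moves at most
 * kbuf_len bytes between a kernel buffer and the space described by the
 * caller's uio.  The buffer and its length are illustrative assumptions;
 * uiomove() itself honors uio_rw (copyout for UIO_READ, copyin for
 * UIO_WRITE) and advances the uio as it goes.
 */
#if 0
static int
example_copy_buffer(char *kbuf, int kbuf_len, uio_t uio)
{
	int len = kbuf_len;

	/* never move more than the residual count left in the uio */
	if ((user_ssize_t)len > uio_resid(uio)) {
		len = (int)uio_resid(uio);
	}
	return uiomove(kbuf, len, uio);
}
#endif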

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	if (uio_resid(uio) <= 0)
		panic("ureadc: non-positive resid");
	uio_update(uio, 0);
	if (uio->uio_iovcnt == 0)
		panic("ureadc: non-positive iovcnt");
	if (uio_curriovlen(uio) <= 0)
		panic("ureadc: non-positive iovlen");

	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
	case UIO_USERISPACE32:
	case UIO_USERISPACE:
	case UIO_USERSPACE64:
	case UIO_USERISPACE64:
		if (subyte((user_addr_t)uio->uio_iovs.uiovp->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		*(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base)) = c;
		break;

	default:
		break;
	}
	uio_update(uio, 1);
	return (0);
}

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(int elements, int type, u_long *hashmask)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad cnt");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	MALLOC(hashtbl, struct generic *,
	       hashsize * sizeof(*hashtbl), type, M_WAITOK|M_ZERO);
	if (hashtbl != NULL) {
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&hashtbl[i]);
		*hashmask = hashsize - 1;
	}
	return (hashtbl);
}
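
/*
 * Usage sketch for hashinit(): allocate a power-of-two bucket array sized
 * for roughly 64 elements and index it by masking a key with the returned
 * hashmask.  The entry type, key field and M_TEMP allocation type are
 * illustrative assumptions.
 */
#if 0
struct example_entry {
	LIST_ENTRY(example_entry)	link;
	u_long				key;
};
LIST_HEAD(example_head, example_entry);

static struct example_head	*example_tbl;
static u_long			example_mask;

static void
example_hash_setup(void)
{
	example_tbl = hashinit(64, M_TEMP, &example_mask);
	/* the bucket for a given key is example_tbl[key & example_mask] */
}
#endif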

/*
 * uio_resid - return the residual IO value for the given uio_t
 */
user_ssize_t uio_resid( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		printf("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/* 	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/* 		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/* 	} */
#endif /* DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return( 0 );
	}

	return( a_uio->uio_resid_64 );
}

/*
 * uio_setresid - set the residual IO value for the given uio_t
 */
void uio_setresid( uio_t a_uio, user_ssize_t a_value )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/* 	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/* 		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/* 	} */
#endif /* DEBUG */

	if (a_uio == NULL) {
		return;
	}

	a_uio->uio_resid_64 = a_value;
	return;
}

/*
 * uio_curriovbase - return the base address of the current iovec associated
 *	with the given uio_t.  May return 0.
 */
user_addr_t uio_curriovbase( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_base);
	}
	return((user_addr_t)a_uio->uio_iovs.kiovp->iov_base);

}

/*
 * uio_curriovlen - return the length value of the current iovec associated
 *	with the given uio_t.
 */
user_size_t uio_curriovlen( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_len);
	}
	return((user_size_t)a_uio->uio_iovs.kiovp->iov_len);
}

/*
 * uio_setcurriovlen - set the length value of the current iovec associated
 *	with the given uio_t.
 */
__private_extern__ void uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		a_uio->uio_iovs.uiovp->iov_len = a_value;
	}
	else {
#if LP64_DEBUG
		if (a_value > 0xFFFFFFFFull) {
			panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
		}
#endif /* LP64_DEBUG */
		a_uio->uio_iovs.kiovp->iov_len = (size_t)a_value;
	}
	return;
}

/*
 * uio_iovcnt - return count of active iovecs for the given uio_t
 */
int uio_iovcnt( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}

	return( a_uio->uio_iovcnt );
}

/*
 * uio_offset - return the current offset value for the given uio_t
 */
off_t uio_offset( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}
	return( a_uio->uio_offset );
}

/*
 * uio_setoffset - set the current offset value for the given uio_t
 */
void uio_setoffset( uio_t a_uio, off_t a_offset )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}
	a_uio->uio_offset = a_offset;
	return;
}

/*
 * uio_rw - return the read / write flag for the given uio_t
 */
int uio_rw( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(-1);
	}
	return( a_uio->uio_rw );
}

/*
 * uio_setrw - set the read / write flag for the given uio_t
 */
void uio_setrw( uio_t a_uio, int a_value )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

#if LP64_DEBUG
	if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
		panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_value == UIO_READ || a_value == UIO_WRITE) {
		a_uio->uio_rw = a_value;
	}
	return;
}

/*
 * uio_isuserspace - return non zero value if the address space
 * flag is for a user address space (could be 32 or 64 bit).
 */
int uio_isuserspace( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(0);
	}

	if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
		return( 1 );
	}
	return( 0 );
}


/*
 * uio_create - create an uio_t.
 * 	Space is allocated to hold up to a_iovcount number of iovecs.  The uio_t
 * 	is not fully initialized until all iovecs are added using uio_addiov calls.
 * 	a_iovcount is the maximum number of iovecs you may add.
 */
uio_t uio_create( int a_iovcount,		/* number of iovecs */
		  off_t a_offset,		/* current offset */
		  int a_spacetype,		/* type of address space */
		  int a_iodirection )		/* read or write flag */
{
	void *		my_buf_p;
	size_t		my_size;
	uio_t		my_uio;

	my_size = UIO_SIZEOF(a_iovcount);
	my_buf_p = kalloc(my_size);
	my_uio = uio_createwithbuffer( a_iovcount,
				       a_offset,
				       a_spacetype,
				       a_iodirection,
				       my_buf_p,
				       my_size );
	if (my_uio != 0) {
		/* leave a note that we allocated this uio_t */
		my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
#if DEBUG
		(void)hw_atomic_add(&uio_t_count, 1);
#endif
	}

	return( my_uio );
}
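
/*
 * Usage sketch for uio_create()/uio_addiov(): build a single-iovec uio that
 * describes a 64-bit user buffer, copy a kernel buffer out to it, and free
 * the uio.  The user address, length and kernel buffer are illustrative
 * assumptions.
 */
#if 0
static int
example_copy_to_user(user_addr_t ubuf, user_size_t ulen, char *kbuf)
{
	uio_t	auio;
	int	error;

	auio = uio_create(1, 0, UIO_USERSPACE64, UIO_READ);
	if (auio == NULL) {
		return (ENOMEM);
	}
	if (uio_addiov(auio, ubuf, ulen) != 0) {
		uio_free(auio);
		return (EINVAL);
	}
	error = uiomove(kbuf, (int)uio_resid(auio), auio);
	uio_free(auio);
	return (error);
}
#endif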


/*
 * uio_createwithbuffer - create an uio_t.
 * 	Create a uio_t using the given buffer.  The uio_t
 * 	is not fully initialized until all iovecs are added using uio_addiov calls.
 * 	a_iovcount is the maximum number of iovecs you may add.
 *	This call may fail if the given buffer is not large enough.
 */
__private_extern__ uio_t
uio_createwithbuffer( int a_iovcount,		/* number of iovecs */
		      off_t a_offset,		/* current offset */
		      int a_spacetype,		/* type of address space */
		      int a_iodirection,	/* read or write flag */
		      void *a_buf_p,		/* pointer to a uio_t buffer */
		      size_t a_buffer_size )	/* size of uio_t buffer */
{
	uio_t		my_uio = (uio_t) a_buf_p;
	size_t		my_size;

	my_size = UIO_SIZEOF(a_iovcount);
	if (a_buffer_size < my_size) {
#if DEBUG
		panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return( NULL );
	}
	my_size = a_buffer_size;

#if DEBUG
	if (my_uio == 0) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
	if (a_iovcount > UIO_MAXIOV) {
		panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	bzero(my_uio, my_size);
	my_uio->uio_size = my_size;

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		my_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		my_uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		my_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		my_uio->uio_segflg = a_spacetype;
		break;
	}

	if (a_iovcount > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));
	}
	else {
		my_uio->uio_iovs.uiovp = NULL;
	}

	my_uio->uio_max_iovs = a_iovcount;
	my_uio->uio_offset = a_offset;
	my_uio->uio_rw = a_iodirection;
	my_uio->uio_flags = UIO_FLAGS_INITED;

	return( my_uio );
}
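
/*
 * Usage sketch for uio_createwithbuffer(): build a uio in caller-provided
 * storage sized with UIO_SIZEOF() so no separate allocation is needed.  The
 * address space type, offset and direction chosen here are illustrative
 * assumptions.
 */
#if 0
static int
example_stack_uio(user_addr_t ubuf, user_size_t ulen)
{
	char	uio_buf[UIO_SIZEOF(1)];
	uio_t	auio;

	auio = uio_createwithbuffer(1, 0, UIO_USERSPACE64, UIO_READ,
				    &uio_buf[0], sizeof(uio_buf));
	if (auio == NULL) {
		return (EINVAL);
	}
	return uio_addiov(auio, ubuf, ulen);
}
#endif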

/*
 * uio_spacetype - return the address space type for the given uio_t
 */
__private_extern__ int uio_spacetype( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(-1);
	}

	return( a_uio->uio_segflg );
}

/*
 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
 * This returns the location of the iovecs within the uio.
 * NOTE - for compatibility mode we just return the current value in uio_iovs
 * which will increase as the IO is completed and is NOT embedded within the
 * uio, it is a separate array of one or more iovecs.
 */
__private_extern__ struct user_iovec * uio_iovsaddr( uio_t a_uio )
{
	struct user_iovec *	my_addr;

	if (a_uio == NULL) {
		return(NULL);
	}

	if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
		/* we need this for compatibility mode. */
		my_addr = (struct user_iovec *) a_uio->uio_iovs.uiovp;
	}
	else {
#if DEBUG
		panic("uio_iovsaddr called for UIO_SYSSPACE request");
#endif
		my_addr = 0;
	}
	return(my_addr);
}

/*
 * uio_reset - reset an uio_t.
 * 	Reset the given uio_t to initial values.  The uio_t is not fully initialized
 * 	until all iovecs are added using uio_addiov calls.
 * 	The a_iovcount value passed in the uio_create is the maximum number of
 * 	iovecs you may add.
 */
void uio_reset( uio_t a_uio,
		off_t a_offset,		/* current offset */
		int a_spacetype,	/* type of address space */
		int a_iodirection )	/* read or write flag */
{
	vm_size_t	my_size;
	int		my_max_iovs;
	u_int32_t	my_old_flags;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	my_size = a_uio->uio_size;
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	bzero(a_uio, my_size);
	a_uio->uio_size = my_size;

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		a_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		a_uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		a_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		a_uio->uio_segflg = a_spacetype;
		break;
	}

	if (my_max_iovs > 0) {
		a_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)a_uio) + sizeof(struct uio));
	}
	else {
		a_uio->uio_iovs.uiovp = NULL;
	}

	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}

/*
 * uio_free - free a uio_t allocated via uio_create.  this also frees all
 * 	associated iovecs.
 */
void uio_free( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
#if DEBUG
		if (hw_atomic_sub(&uio_t_count, 1) == UINT_MAX)
			panic("%s :%d - uio_t_count underflow\n", __FILE__, __LINE__);
#endif
		kfree(a_uio, a_uio->uio_size);
	}


}

/*
 * uio_addiov - add an iovec to the given uio_t.  You may call this up to
 * 	the a_iovcount number that was passed to uio_create.  This call will
 * 	increment the residual IO count as iovecs are added to the uio_t.
 * 	returns 0 if add was successful else non zero.
 */
int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
	int	i;

	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return(-1);
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) {
				a_uio->uio_iovs.uiovp[i].iov_len = a_length;
				a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr;
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 += a_length;
				return( 0 );
			}
		}
	}
	else {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) {
				a_uio->uio_iovs.kiovp[i].iov_len = (u_int64_t)a_length;
				a_uio->uio_iovs.kiovp[i].iov_base = (u_int64_t)a_baseaddr;
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 += a_length;
				return( 0 );
			}
		}
	}

	return( -1 );
}

/*
 * uio_getiov - get iovec data associated with the given uio_t.  Use
 *  a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
 *  a_baseaddr_p and a_length_p may be NULL.
 * 	returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
 * 	returns 0 when data is returned.
 */
int uio_getiov( uio_t a_uio,
		int a_index,
		user_addr_t * a_baseaddr_p,
		user_size_t * a_length_p )
{
	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return(-1);
	}
	if ( a_index < 0 || a_index >= a_uio->uio_iovcnt) {
		return(-1);
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len;
		}
	}
	else {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len;
		}
	}

	return( 0 );
}
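
/*
 * Usage sketch for uio_getiov(): walk the active iovecs of a uio and total
 * their lengths.  Purely illustrative; for a freshly built uio the total
 * matches uio_resid().
 */
#if 0
static user_size_t
example_total_iov_len(uio_t a_uio)
{
	user_size_t	total = 0;
	user_addr_t	base;
	user_size_t	len;
	int		i;

	for (i = 0; i < uio_iovcnt(a_uio); i++) {
		if (uio_getiov(a_uio, i, &base, &len) == 0) {
			total += len;
		}
	}
	return (total);
}
#endif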

/*
 * uio_calculateresid - runs through all iovecs associated with this
 *	uio_t and calculates (and sets) the residual IO count.
 */
__private_extern__ void uio_calculateresid( uio_t a_uio )
{
	int	i;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

	a_uio->uio_iovcnt = a_uio->uio_max_iovs;
	if (UIO_IS_USER_SPACE(a_uio)) {
		a_uio->uio_resid_64 = 0;
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) {
				a_uio->uio_resid_64 += a_uio->uio_iovs.uiovp[i].iov_len;
			}
		}

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		a_uio->uio_resid_64 = 0;
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) {
				a_uio->uio_resid_64 += a_uio->uio_iovs.kiovp[i].iov_len;
			}
		}

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}

	return;
}

/*
 * uio_update - update the given uio_t for a_count of completed IO.
 *	This call decrements the current iovec length and residual IO value
 *	and increments the current iovec base address and offset value.
 *	If the current iovec length is 0 then advance to the next
 *	iovec (if any).
 * 	If the a_count passed in is 0, then only do the advancement
 *	over any 0 length iovec's.
 */
void uio_update( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
				a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
				a_uio->uio_iovs.uiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.uiovp->iov_base += a_count;
				a_uio->uio_iovs.uiovp->iov_len -= a_count;
			}
			if (a_uio->uio_resid_64 < 0) {
				a_uio->uio_resid_64 = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
				a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
				a_uio->uio_iovs.kiovp->iov_len = 0;
			}
			else {
				a_uio->uio_iovs.kiovp->iov_base += a_count;
				a_uio->uio_iovs.kiovp->iov_len -= a_count;
			}
			if (a_uio->uio_resid_64 < 0) {
				a_uio->uio_resid_64 = 0;
			}
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			}
			else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}
	return;
}
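
/*
 * Usage sketch for uio_update(): account for bytes that were moved outside
 * of uiomove() (for example by a DMA engine) by advancing the uio by the
 * amount actually transferred.  The transfer length is an illustrative
 * assumption; uiomove() callers do not need this since uiomove() updates
 * the uio itself.
 */
#if 0
static void
example_account_transfer(uio_t a_uio, user_size_t bytes_done)
{
	/* clamp to what the uio still describes */
	if ((user_ssize_t)bytes_done > uio_resid(a_uio)) {
		bytes_done = (user_size_t)uio_resid(a_uio);
	}
	/* advances iov_base/iov_len, uio_offset, and uio_resid */
	uio_update(a_uio, bytes_done);
}
#endif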

/*
 * uio_pushback - undo uncommitted I/O by subtracting from the
 * 	current base address and offset, and incrementing the residual
 * 	IO. If the UIO was previously exhausted, this call will panic.
 *	New code should not use this functionality.
 */
__private_extern__ void uio_pushback( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_count == 0) {
		return;
	}

	if (a_uio->uio_iovcnt < 1) {
		panic("Invalid uio for pushback");
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		a_uio->uio_iovs.uiovp->iov_base -= a_count;
		a_uio->uio_iovs.uiovp->iov_len += a_count;
	}
	else {
		a_uio->uio_iovs.kiovp->iov_base -= a_count;
		a_uio->uio_iovs.kiovp->iov_len += a_count;
	}

	a_uio->uio_offset -= a_count;
	a_uio->uio_resid_64 += a_count;

	return;
}


/*
 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
 *	may return NULL.
 */
uio_t uio_duplicate( uio_t a_uio )
{
	uio_t		my_uio;
	int		i;

	if (a_uio == NULL) {
		return(NULL);
	}

	my_uio = (uio_t) kalloc(a_uio->uio_size);
	if (my_uio == 0) {
		panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
	}

	bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
	/* need to set our iovec pointer to point to first active iovec */
	if (my_uio->uio_max_iovs > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));

		/* advance to first nonzero iovec */
		if (my_uio->uio_iovcnt > 0) {
			for ( i = 0; i < my_uio->uio_max_iovs; i++ ) {
				if (UIO_IS_USER_SPACE(a_uio)) {
					if (my_uio->uio_iovs.uiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.uiovp++;
				}
				else {
					if (my_uio->uio_iovs.kiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.kiovp++;
				}
			}
		}
	}

	my_uio->uio_flags = UIO_FLAGS_WE_ALLOCED | UIO_FLAGS_INITED;

	return(my_uio);
}

int copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct user_iovec *dst)
{
	size_t size_of_iovec = ( spacetype == UIO_USERSPACE64 ? sizeof(struct user64_iovec) : sizeof(struct user32_iovec));
	int error;
	int i;

	// copyin to the front of "dst", without regard for putting records in the right places
	error = copyin(uaddr, dst, count * size_of_iovec);
	if (error)
		return (error);

	// now, unpack the entries in reverse order, so we don't overwrite anything
	for (i = count - 1; i >= 0; i--) {
		if (spacetype == UIO_USERSPACE64) {
			struct user64_iovec iovec = ((struct user64_iovec *)dst)[i];
			dst[i].iov_base = iovec.iov_base;
			dst[i].iov_len = iovec.iov_len;
		} else {
			struct user32_iovec iovec = ((struct user32_iovec *)dst)[i];
			dst[i].iov_base = iovec.iov_base;
			dst[i].iov_len = iovec.iov_len;
		}
	}

	return (0);
}
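
/*
 * Usage sketch for copyin_user_iovec_array(): fetch a small iovec array
 * from a 64-bit user process and unpack it in place into kernel user_iovec
 * records.  The user address, the fixed limit of 4 entries, and the caller
 * are illustrative assumptions.
 */
#if 0
static int
example_fetch_iovs(user_addr_t uiovp, int iovcnt)
{
	struct user_iovec	kiovs[4];

	if (iovcnt <= 0 || iovcnt > 4) {
		return (EINVAL);
	}
	return copyin_user_iovec_array(uiovp, UIO_USERSPACE64, iovcnt, kiovs);
}
#endif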