/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <sys/uio_internal.h>
#include <kern/kalloc.h>

#include <kdebug.h>

#include <sys/kdebug.h>
#define DBG_UIO_COPYOUT 16
#define DBG_UIO_COPYIN  17

#if DEBUG
#include <kern/simple_lock.h>

static int uio_t_count = 0;
#endif /* DEBUG */


int
uiomove(cp, n, uio)
	register caddr_t cp;
	register int n;
	register uio_t uio;
{
	return uiomove64((addr64_t)((unsigned int)cp), n, uio);
}

// LP64todo - fix this! 'n' should be int64_t?
int
uiomove64(addr64_t cp, int n, register struct uio *uio)
{
#if LP64KERN
	register uint64_t acnt;
#else
	register u_int acnt;
#endif
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		acnt = uio_iov_len(uio);
		if (acnt == 0) {
			uio_next_iov(uio);
			uio->uio_iovcnt--;
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);

				error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.iov64p->iov_base, acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);

				error = copyin(uio->uio_iovs.iov64p->iov_base, CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);
			}
			if (error)
				return (error);
			break;

		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);

				error = copyout( CAST_DOWN(caddr_t, cp), CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);

				error = copyin(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), (caddr_t)uio->uio_iovs.iov32p->iov_base,
						   acnt);
			else
				error = copywithin((caddr_t)uio->uio_iovs.iov32p->iov_base, CAST_DOWN(caddr_t, cp),
						   acnt);
			break;

		case UIO_PHYS_USERSPACE64:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov64p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);

				error = copypv(uio->uio_iovs.iov64p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);
			}
			if (error)
				return (error);
			break;

		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, (addr64_t)uio->uio_iovs.iov32p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);

				error = copypv((addr64_t)uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);
			}
			if (error)
				return (error);
			break;

		case UIO_PHYS_SYSSPACE32:
		case UIO_PHYS_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov32p->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error)	/* Copy physical to virtual */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);
			}
			else
			{
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);

				error = copypv(uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					(int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);
			}
			if (error)
				return (error);
			break;

		default:
			break;
		}
		uio_iov_base_add(uio, acnt);
#if LP64KERN
		uio_iov_len_add(uio, -((int64_t)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int64_t)acnt)));
#else
		uio_iov_len_add(uio, -((int)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int)acnt)));
#endif
		uio->uio_offset += acnt;
		cp += acnt;
		n -= acnt;
	}
	return (error);
}
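
/*
 * Illustrative sketch (not part of the original kern_subr.c): a typical
 * consumer of uiomove() is a driver read routine that copies a kernel
 * buffer out to the caller's buffers.  uiomove() walks the iovecs and
 * updates the uio itself, so a single call moves up to 'n' bytes.  The
 * function and buffer names here are hypothetical.
 */
#if 0	/* usage sketch only -- not compiled */
static int
example_read(caddr_t kern_buf, int kern_len, uio_t uio)
{
	/* move at most what we have and what the caller can still accept */
	return (uiomove(kern_buf, MIN(kern_len, (int)uio_resid(uio)), uio));
}
#endif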

/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
	register int c;
	register struct uio *uio;
{
	if (uio_resid(uio) <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt == 0)
		panic("ureadc: non-positive iovcnt");
	if (uio_iov_len(uio) <= 0) {
		uio->uio_iovcnt--;
		uio_next_iov(uio);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
		if (subyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
			return (EFAULT);
		break;

	case UIO_USERSPACE64:
		if (subyte((user_addr_t)uio->uio_iovs.iov64p->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		*((caddr_t)uio->uio_iovs.iov32p->iov_base) = c;
		break;

	case UIO_USERISPACE32:
	case UIO_USERISPACE:
		if (suibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
			return (EFAULT);
		break;

	default:
		break;
	}
	uio_iov_base_add(uio, 1);
	uio_iov_len_add(uio, -1);
	uio_setresid(uio, (uio_resid(uio) - 1));
	uio->uio_offset++;
	return (0);
}

#if defined(vax) || defined(ppc)
/* unused except by ct.c, other oddities XXX */
/*
 * Get next character written in by user from uio.
 */
int
uwritec(uio)
	uio_t uio;
{
	register int c = 0;

	if (uio_resid(uio) <= 0)
		return (-1);
again:
	if (uio->uio_iovcnt <= 0)
		panic("uwritec: non-positive iovcnt");

	if (uio_iov_len(uio) == 0) {
		uio_next_iov(uio);
		if (--uio->uio_iovcnt == 0)
			return (-1);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
		c = fubyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	case UIO_USERSPACE64:
		c = fubyte((user_addr_t)uio->uio_iovs.iov64p->iov_base);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		c = *((caddr_t)uio->uio_iovs.iov32p->iov_base) & 0377;
		break;

	case UIO_USERISPACE32:
	case UIO_USERISPACE:
		c = fuibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	default:
		c = 0;	/* avoid uninitialized variable warning */
		panic("uwritec: bogus uio_segflg");
		break;
	}
	if (c < 0)
		return (-1);
	uio_iov_base_add(uio, 1);
	uio_iov_len_add(uio, -1);
	uio_setresid(uio, (uio_resid(uio) - 1));
	uio->uio_offset++;
	return (c);
}
#endif /* vax || ppc */

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, hashmask)
	int elements, type;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad cnt");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	MALLOC(hashtbl, struct generic *,
		(u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK|M_ZERO);
	if (hashtbl != NULL) {
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&hashtbl[i]);
		*hashmask = hashsize - 1;
	}
	return (hashtbl);
}
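
/*
 * Illustrative sketch (not part of the original kern_subr.c): hashinit()
 * returns an array of LIST_HEADs whose size is the largest power of two
 * at or below 'elements', and stores (size - 1) in *hashmask so callers
 * can mask a hash value into a bucket index.  The entry type and the
 * M_TEMP malloc type below are hypothetical.
 */
#if 0	/* usage sketch only -- not compiled */
struct example_entry {
	LIST_ENTRY(example_entry) link;
	u_long key;
};
static LIST_HEAD(example_head, example_entry) *example_tbl;
static u_long example_mask;

static void
example_setup(u_long hashval)
{
	struct example_head *bucket;

	example_tbl = hashinit(64, M_TEMP, &example_mask);
	if (example_tbl == NULL)
		return;
	/* select a bucket by masking the hash value into an index */
	bucket = &example_tbl[hashval & example_mask];
	(void)bucket;
}
#endif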

/*
 * uio_resid - return the residual IO value for the given uio_t
 */
user_ssize_t uio_resid( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/*	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/*		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/*	} */
#endif /* DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return( 0 );
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		return( (user_ssize_t)a_uio->uio_resid );
#else
		return( a_uio->uio_resid_64 );
#endif
	}
	return( (user_ssize_t)a_uio->uio_resid );
}

/*
 * uio_setresid - set the residual IO value for the given uio_t
 */
void uio_setresid( uio_t a_uio, user_ssize_t a_value )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
/*	if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/*		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/*	} */
#endif /* DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		a_uio->uio_resid = (int)a_value;
#else
		a_uio->uio_resid_64 = a_value;
#endif
	}
	else {
		a_uio->uio_resid = (int)a_value;
	}
	return;
}

#if 0 // obsolete
/*
 * uio_proc_t - return the proc_t for the given uio_t
 * WARNING - This call is going away.  Find another way to get the proc_t!!
 */
__private_extern__ proc_t uio_proc_t( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	/* return 0 if there are no active iovecs */
	if (a_uio == NULL) {
		return( NULL );
	}
	return( a_uio->uio_procp );
}

/*
 * uio_setproc_t - set the proc_t for the given uio_t
 * WARNING - This call is going away.
 */
__private_extern__ void uio_setproc_t( uio_t a_uio, proc_t a_proc_t )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

	a_uio->uio_procp = a_proc_t;
	return;
}
#endif // obsolete

/*
 * uio_curriovbase - return the base address of the current iovec associated
 * with the given uio_t.  May return 0.
 */
user_addr_t uio_curriovbase( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_base);
	}
	return((user_addr_t)((uintptr_t)a_uio->uio_iovs.kiovp->iov_base));
}

/*
 * uio_curriovlen - return the length value of the current iovec associated
 * with the given uio_t.
 */
user_size_t uio_curriovlen( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return(0);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		return(a_uio->uio_iovs.uiovp->iov_len);
	}
	return((user_size_t)a_uio->uio_iovs.kiovp->iov_len);
}

/*
 * uio_setcurriovlen - set the length value of the current iovec associated
 * with the given uio_t.
 */
__private_extern__ void uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		a_uio->uio_iovs.uiovp->iov_len = a_value;
	}
	else {
#if LP64_DEBUG
		if (a_value > 0xFFFFFFFFull) {
			panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
		}
#endif /* LP64_DEBUG */
		a_uio->uio_iovs.kiovp->iov_len = (size_t)a_value;
	}
	return;
}

/*
 * uio_iovcnt - return count of active iovecs for the given uio_t
 */
int uio_iovcnt( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}

	return( a_uio->uio_iovcnt );
}

/*
 * uio_offset - return the current offset value for the given uio_t
 */
off_t uio_offset( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(0);
	}
	return( a_uio->uio_offset );
}

/*
 * uio_setoffset - set the current offset value for the given uio_t
 */
void uio_setoffset( uio_t a_uio, off_t a_offset )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}
	a_uio->uio_offset = a_offset;
	return;
}

/*
 * uio_rw - return the read / write flag for the given uio_t
 */
int uio_rw( uio_t a_uio )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return(-1);
	}
	return( a_uio->uio_rw );
}

/*
 * uio_setrw - set the read / write flag for the given uio_t
 */
void uio_setrw( uio_t a_uio, int a_value )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

#if LP64_DEBUG
	if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
		panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_value == UIO_READ || a_value == UIO_WRITE) {
		a_uio->uio_rw = a_value;
	}
	return;
}

/*
 * uio_isuserspace - return non zero value if the address space
 * flag is for a user address space (could be 32 or 64 bit).
 */
int uio_isuserspace( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(0);
	}

	if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
		return( 1 );
	}
	return( 0 );
}


/*
 * uio_create - create an uio_t.
 * Space is allocated to hold up to a_iovcount number of iovecs.  The uio_t
 * is not fully initialized until all iovecs are added using uio_addiov calls.
 * a_iovcount is the maximum number of iovecs you may add.
 */
uio_t uio_create( int a_iovcount,		/* number of iovecs */
		  off_t a_offset,		/* current offset */
		  int a_spacetype,		/* type of address space */
		  int a_iodirection )		/* read or write flag */
{
	void *		my_buf_p;
	int		my_size;
	uio_t		my_uio;

	my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
	my_buf_p = kalloc(my_size);
	my_uio = uio_createwithbuffer( a_iovcount,
					a_offset,
					a_spacetype,
					a_iodirection,
					my_buf_p,
					my_size );
	if (my_uio != 0) {
		/* leave a note that we allocated this uio_t */
		my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
#if DEBUG
		hw_atomic_add(&uio_t_count, 1);
#endif
	}

	return( my_uio );
}
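
/*
 * Illustrative sketch (not part of the original kern_subr.c): typical
 * life cycle of a uio_t built with uio_create().  The user address and
 * length are hypothetical, and UIO_USERSPACE64 is assumed; a real caller
 * would pick the space type to match the requesting process.
 */
#if 0	/* usage sketch only -- not compiled */
static int
example_copy_to_user(user_addr_t user_buf, user_size_t user_len, caddr_t kern_buf)
{
	uio_t	uio;
	int	error;

	uio = uio_create(1, 0, UIO_USERSPACE64, UIO_READ);
	if (uio == NULL)
		return (ENOMEM);
	if (uio_addiov(uio, user_buf, user_len) != 0) {
		uio_free(uio);
		return (EINVAL);
	}
	error = uiomove(kern_buf, (int)user_len, uio);	/* decrements uio_resid */
	uio_free(uio);
	return (error);
}
#endif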


/*
 * uio_createwithbuffer - create an uio_t.
 * Create a uio_t using the given buffer.  The uio_t
 * is not fully initialized until all iovecs are added using uio_addiov calls.
 * a_iovcount is the maximum number of iovecs you may add.
 * This call may fail if the given buffer is not large enough.
 */
__private_extern__ uio_t
uio_createwithbuffer( int a_iovcount,		/* number of iovecs */
		      off_t a_offset,		/* current offset */
		      int a_spacetype,		/* type of address space */
		      int a_iodirection,	/* read or write flag */
		      void *a_buf_p,		/* pointer to a uio_t buffer */
		      int a_buffer_size )	/* size of uio_t buffer */
{
	uio_t		my_uio = (uio_t) a_buf_p;
	int		my_size;

	my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
	if (a_buffer_size < my_size) {
#if DEBUG
		panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return( NULL );
	}
	my_size = a_buffer_size;

#if DEBUG
	if (my_uio == 0) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
	if (a_iovcount > UIO_MAXIOV) {
		panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	bzero(my_uio, my_size);
	my_uio->uio_size = my_size;

	/* we use uio_segflg to indicate if the uio_t is the new format or */
	/* old (pre LP64 support) legacy format */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		my_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE:
		my_uio->uio_segflg = UIO_SYSSPACE32;
		break;
	case UIO_PHYS_USERSPACE:
		my_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	case UIO_PHYS_SYSSPACE:
		my_uio->uio_segflg = UIO_PHYS_SYSSPACE32;
		break;
	default:
		my_uio->uio_segflg = a_spacetype;
		break;
	}

	if (a_iovcount > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));
	}
	else {
		my_uio->uio_iovs.uiovp = NULL;
	}

	my_uio->uio_max_iovs = a_iovcount;
	my_uio->uio_offset = a_offset;
	my_uio->uio_rw = a_iodirection;
	my_uio->uio_flags = UIO_FLAGS_INITED;

	return( my_uio );
}

/*
 * uio_spacetype - return the address space type for the given uio_t
 */
int uio_spacetype( uio_t a_uio )
{
	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return(-1);
	}

	return( a_uio->uio_segflg );
}

/*
 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
 * This returns the location of the iovecs within the uio.
 * NOTE - for compatibility mode we just return the current value in uio_iovs,
 * which advances as the IO completes; it is NOT embedded within the uio but
 * is a separate array of one or more iovecs.
 */
struct user_iovec * uio_iovsaddr( uio_t a_uio )
{
	struct user_iovec *	my_addr;

	if (a_uio == NULL) {
		return(NULL);
	}

	if (a_uio->uio_segflg == UIO_USERSPACE || a_uio->uio_segflg == UIO_SYSSPACE) {
		/* we need this for compatibility mode. */
		my_addr = (struct user_iovec *) a_uio->uio_iovs.iovp;
	}
	else {
		my_addr = (struct user_iovec *) (((uint8_t *)a_uio) + sizeof(struct uio));
	}
	return(my_addr);
}

/*
 * uio_reset - reset an uio_t.
 * Reset the given uio_t to initial values.  The uio_t is not fully initialized
 * until all iovecs are added using uio_addiov calls.
 * The a_iovcount value passed in the uio_create is the maximum number of
 * iovecs you may add.
 */
void uio_reset( uio_t a_uio,
		off_t a_offset,		/* current offset */
		int a_spacetype,	/* type of address space */
		int a_iodirection )	/* read or write flag */
{
	vm_size_t	my_size;
	int		my_max_iovs;
	u_int32_t	my_old_flags;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	my_size = a_uio->uio_size;
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	bzero(a_uio, my_size);
	a_uio->uio_size = my_size;
	a_uio->uio_segflg = a_spacetype;
	if (my_max_iovs > 0) {
		a_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)a_uio) + sizeof(struct uio));
	}
	else {
		a_uio->uio_iovs.uiovp = NULL;
	}
	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}

/*
 * uio_free - free a uio_t allocated via uio_create.  This also frees all
 * associated iovecs (they live in the same allocation as the uio_t).
 */
void uio_free( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__);
	}
#endif /* DEBUG */

	if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
#if DEBUG
		if ((int)(hw_atomic_sub(&uio_t_count, 1)) < 0) {
			panic("%s :%d - uio_t_count has gone negative\n", __FILE__, __LINE__);
		}
#endif
		kfree(a_uio, a_uio->uio_size);
	}
}

/*
 * uio_addiov - add an iovec to the given uio_t.  You may call this up to
 * the a_iovcount number that was passed to uio_create.  This call will
 * increment the residual IO count as iovecs are added to the uio_t.
 * returns 0 if add was successful else non zero.
 */
int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
	int	i;

	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return(-1);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) {
				a_uio->uio_iovs.uiovp[i].iov_len = a_length;
				a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr;
				a_uio->uio_iovcnt++;
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
				a_uio->uio_resid += a_length;
#else
				a_uio->uio_resid_64 += a_length;
#endif
				return( 0 );
			}
		}
	}
	else {
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) {
				a_uio->uio_iovs.kiovp[i].iov_len = (u_int32_t)a_length;
				a_uio->uio_iovs.kiovp[i].iov_base = (u_int32_t)((uintptr_t)a_baseaddr);
				a_uio->uio_iovcnt++;
				a_uio->uio_resid += a_length;
				return( 0 );
			}
		}
	}

	return( -1 );
}

/*
 * uio_getiov - get iovec data associated with the given uio_t.  Use
 * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
 * a_baseaddr_p and a_length_p may be NULL.
 * returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
 * returns 0 when data is returned.
 */
int uio_getiov( uio_t a_uio,
		int a_index,
		user_addr_t * a_baseaddr_p,
		user_size_t * a_length_p )
{
	if (a_uio == NULL) {
#if DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
		return(-1);
	}
	if ( a_index < 0 || a_index >= a_uio->uio_iovcnt) {
		return(-1);
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len;
		}
	}
	else {
		if (a_baseaddr_p != NULL) {
			*a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base;
		}
		if (a_length_p != NULL) {
			*a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len;
		}
	}

	return( 0 );
}
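
/*
 * Illustrative sketch (not part of the original kern_subr.c): walking
 * every active iovec in a uio_t with uio_getiov().
 */
#if 0	/* usage sketch only -- not compiled */
static void
example_dump_iovs(uio_t uio)
{
	user_addr_t	base;
	user_size_t	len;
	int		i;

	for (i = 0; i < uio_iovcnt(uio); i++) {
		if (uio_getiov(uio, i, &base, &len) == 0) {
			printf("iov[%d]: base 0x%llx len %llu\n", i,
			    (unsigned long long)base, (unsigned long long)len);
		}
	}
}
#endif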

/*
 * uio_calculateresid - runs through all iovecs associated with this
 * uio_t and calculates (and sets) the residual IO count.
 */
__private_extern__ void uio_calculateresid( uio_t a_uio )
{
	int	i;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
		return;
	}

	a_uio->uio_iovcnt = 0;
	if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		a_uio->uio_resid = 0;
#else
		a_uio->uio_resid_64 = 0;
#endif
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) {
				a_uio->uio_iovcnt++;
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
				a_uio->uio_resid += a_uio->uio_iovs.uiovp[i].iov_len;
#else
				a_uio->uio_resid_64 += a_uio->uio_iovs.uiovp[i].iov_len;
#endif
			}
		}
	}
	else {
		a_uio->uio_resid = 0;
		for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
			if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) {
				a_uio->uio_iovcnt++;
				a_uio->uio_resid += a_uio->uio_iovs.kiovp[i].iov_len;
			}
		}
	}
	return;
}

/*
 * uio_update - update the given uio_t for a_count of completed IO.
 * This call decrements the current iovec length and residual IO value
 * and increments the current iovec base address and offset value.
 * If the current iovec length is 0 then advance to the next
 * iovec (if any).
 */
void uio_update( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return;
	}

	if (UIO_IS_64_BIT_SPACE(a_uio)) {
		if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
			a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
			a_uio->uio_iovs.uiovp->iov_len = 0;
		}
		else {
			a_uio->uio_iovs.uiovp->iov_base += a_count;
			a_uio->uio_iovs.uiovp->iov_len -= a_count;
		}
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
		if (a_uio->uio_resid < 0) {
			a_uio->uio_resid = 0;
		}
		if (a_count > (user_size_t)a_uio->uio_resid) {
			a_uio->uio_offset += a_uio->uio_resid;
			a_uio->uio_resid = 0;
		}
		else {
			a_uio->uio_offset += a_count;
			a_uio->uio_resid -= a_count;
		}
#else
		if (a_uio->uio_resid_64 < 0) {
			a_uio->uio_resid_64 = 0;
		}
		if (a_count > (user_size_t)a_uio->uio_resid_64) {
			a_uio->uio_offset += a_uio->uio_resid_64;
			a_uio->uio_resid_64 = 0;
		}
		else {
			a_uio->uio_offset += a_count;
			a_uio->uio_resid_64 -= a_count;
		}
#endif // LP64todo

		/* advance to next iovec if current one is totally consumed */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.uiovp++;
			}
		}
	}
	else {
		if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
			a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
			a_uio->uio_iovs.kiovp->iov_len = 0;
		}
		else {
			a_uio->uio_iovs.kiovp->iov_base += a_count;
			a_uio->uio_iovs.kiovp->iov_len -= a_count;
		}
		if (a_uio->uio_resid < 0) {
			a_uio->uio_resid = 0;
		}
		if (a_count > (user_size_t)a_uio->uio_resid) {
			a_uio->uio_offset += a_uio->uio_resid;
			a_uio->uio_resid = 0;
		}
		else {
			a_uio->uio_offset += a_count;
			a_uio->uio_resid -= a_count;
		}

		/* advance to next iovec if current one is totally consumed */
		while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				a_uio->uio_iovs.kiovp++;
			}
		}
	}
	return;
}
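
/*
 * Illustrative sketch (not part of the original kern_subr.c): a consumer
 * that processes the current iovec directly and then reports how much
 * completed via uio_update().  example_transfer() is hypothetical.
 */
#if 0	/* usage sketch only -- not compiled */
extern user_size_t example_transfer(user_addr_t base, user_size_t len);	/* hypothetical */

static void
example_drain(uio_t uio)
{
	while (uio_resid(uio) > 0 && uio_iovcnt(uio) > 0) {
		user_size_t done;

		done = example_transfer(uio_curriovbase(uio), uio_curriovlen(uio));
		if (done == 0)
			break;
		/* advances iov_base/iov_len, resid, and offset, and moves to
		   the next iovec once the current one is fully consumed */
		uio_update(uio, done);
	}
}
#endif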


/*
 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
 * may return NULL.
 */
uio_t uio_duplicate( uio_t a_uio )
{
	uio_t	my_uio;
	int	i;

	if (a_uio == NULL) {
		return(NULL);
	}

	my_uio = (uio_t) kalloc(a_uio->uio_size);
	if (my_uio == 0) {
		panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
	}

	bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
	/* need to set our iovec pointer to point to first active iovec */
	if (my_uio->uio_max_iovs > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));

		/* advance to first nonzero iovec */
		if (my_uio->uio_iovcnt > 0) {
			for ( i = 0; i < my_uio->uio_max_iovs; i++ ) {
				if (UIO_IS_64_BIT_SPACE(a_uio)) {
					if (my_uio->uio_iovs.uiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.uiovp++;
				}
				else {
					if (my_uio->uio_iovs.kiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.kiovp++;
				}
			}
		}
	}

	return(my_uio);
}