/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *    The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)kern_subr.c    8.3 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <sys/uio_internal.h>
#include <kern/kalloc.h>

#include <kdebug.h>

#include <sys/kdebug.h>
#define DBG_UIO_COPYOUT 16
#define DBG_UIO_COPYIN  17

#if DEBUG
#include <kern/simple_lock.h>

static int uio_t_count = 0;
#endif /* DEBUG */

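/*
 * uiomove - move n bytes between the kernel buffer cp and the area
 * described by uio.  The direction comes from uio->uio_rw: UIO_READ
 * copies out of cp toward the uio destination, UIO_WRITE copies from
 * the uio source into cp.  This is a thin wrapper that widens cp to
 * 64 bits and calls uiomove64.
 */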
int
uiomove(cp, n, uio)
    register caddr_t cp;
    register int n;
    register uio_t uio;
{
    return uiomove64((addr64_t)((unsigned int)cp), n, uio);
}

// LP64todo - fix this! 'n' should be int64_t?
int
uiomove64(addr64_t cp, int n, register struct uio *uio)
{
#if LP64KERN
    register uint64_t acnt;
#else
    register u_int acnt;
#endif
    int error = 0;

#if DIAGNOSTIC
    if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
        panic("uiomove: mode");
#endif

#if LP64_DEBUG
    if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
        panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    while (n > 0 && uio_resid(uio)) {
        acnt = uio_iov_len(uio);
        if (acnt == 0) {
            uio_next_iov(uio);
            uio->uio_iovcnt--;
            continue;
        }
        if (n > 0 && acnt > (uint64_t)n)
            acnt = n;

        switch (uio->uio_segflg) {

        case UIO_USERSPACE64:
        case UIO_USERISPACE64:
            // LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
            if (uio->uio_rw == UIO_READ)
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
                    (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);

                error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.iov64p->iov_base, acnt );

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
                    (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);
            }
            else
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
                    (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);

                error = copyin(uio->uio_iovs.iov64p->iov_base, CAST_DOWN(caddr_t, cp), acnt);

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
                    (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);
            }
            if (error)
                return (error);
            break;

        case UIO_USERSPACE32:
        case UIO_USERISPACE32:
        case UIO_USERSPACE:
        case UIO_USERISPACE:
            if (uio->uio_rw == UIO_READ)
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
                    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);

                error = copyout( CAST_DOWN(caddr_t, cp), CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), acnt );

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
                    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);
            }
            else
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
                    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);

                error = copyin(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), CAST_DOWN(caddr_t, cp), acnt);

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
                    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);
            }
            if (error)
                return (error);
            break;

        case UIO_SYSSPACE32:
        case UIO_SYSSPACE:
            if (uio->uio_rw == UIO_READ)
                error = copywithin(CAST_DOWN(caddr_t, cp), (caddr_t)uio->uio_iovs.iov32p->iov_base,
                        acnt);
            else
                error = copywithin((caddr_t)uio->uio_iovs.iov32p->iov_base, CAST_DOWN(caddr_t, cp),
                        acnt);
            break;

        case UIO_PHYS_USERSPACE64:
            if (uio->uio_rw == UIO_READ)
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
                    (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);

                error = copypv((addr64_t)cp, uio->uio_iovs.iov64p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
                if (error)    /* Copy physical to virtual */
                    error = EFAULT;

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
                    (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);
            }
            else
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
                    (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);

                error = copypv(uio->uio_iovs.iov64p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
                if (error)    /* Copy virtual to physical */
                    error = EFAULT;

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
                    (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);
            }
            if (error)
                return (error);
            break;

        case UIO_PHYS_USERSPACE32:
        case UIO_PHYS_USERSPACE:
            if (uio->uio_rw == UIO_READ)
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
                    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);

                error = copypv((addr64_t)cp, (addr64_t)uio->uio_iovs.iov32p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
                if (error)    /* Copy physical to virtual */
                    error = EFAULT;

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
                    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);
            }
            else
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
                    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);

                error = copypv((addr64_t)uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
                if (error)    /* Copy virtual to physical */
                    error = EFAULT;

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
                    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);
            }
            if (error)
                return (error);
            break;

        case UIO_PHYS_SYSSPACE32:
        case UIO_PHYS_SYSSPACE:
            if (uio->uio_rw == UIO_READ)
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
                    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);

                error = copypv((addr64_t)cp, uio->uio_iovs.iov32p->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
                if (error)    /* Copy physical to virtual */
                    error = EFAULT;

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
                    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);
            }
            else
            {
                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
                    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);

                error = copypv(uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
                if (error)    /* Copy virtual to physical */
                    error = EFAULT;

                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
                    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);
            }
            if (error)
                return (error);
            break;

        default:
            break;
        }
        uio_iov_base_add(uio, acnt);
#if LP64KERN
        uio_iov_len_add(uio, -((int64_t)acnt));
        uio_setresid(uio, (uio_resid(uio) - ((int64_t)acnt)));
#else
        uio_iov_len_add(uio, -((int)acnt));
        uio_setresid(uio, (uio_resid(uio) - ((int)acnt)));
#endif
        uio->uio_offset += acnt;
        cp += acnt;
        n -= acnt;
    }
    return (error);
}

/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
    register int c;
    register struct uio *uio;
{
    if (uio_resid(uio) <= 0)
        panic("ureadc: non-positive resid");
again:
    if (uio->uio_iovcnt == 0)
        panic("ureadc: non-positive iovcnt");
    if (uio_iov_len(uio) <= 0) {
        uio->uio_iovcnt--;
        uio_next_iov(uio);
        goto again;
    }
    switch (uio->uio_segflg) {

    case UIO_USERSPACE32:
    case UIO_USERSPACE:
        if (subyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
            return (EFAULT);
        break;

    case UIO_USERSPACE64:
        if (subyte((user_addr_t)uio->uio_iovs.iov64p->iov_base, c) < 0)
            return (EFAULT);
        break;

    case UIO_SYSSPACE32:
    case UIO_SYSSPACE:
        *((caddr_t)uio->uio_iovs.iov32p->iov_base) = c;
        break;

    case UIO_USERISPACE32:
    case UIO_USERISPACE:
        if (suibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
            return (EFAULT);
        break;

    default:
        break;
    }
    uio_iov_base_add(uio, 1);
    uio_iov_len_add(uio, -1);
    uio_setresid(uio, (uio_resid(uio) - 1));
    uio->uio_offset++;
    return (0);
}

#if defined(vax) || defined(ppc)
/* unused except by ct.c, other oddities XXX */
/*
 * Get next character written in by user from uio.
 */
int
uwritec(uio)
    uio_t uio;
{
    register int c = 0;

    if (uio_resid(uio) <= 0)
        return (-1);
again:
    if (uio->uio_iovcnt <= 0)
        panic("uwritec: non-positive iovcnt");

    if (uio_iov_len(uio) == 0) {
        uio_next_iov(uio);
        if (--uio->uio_iovcnt == 0)
            return (-1);
        goto again;
    }
    switch (uio->uio_segflg) {

    case UIO_USERSPACE32:
    case UIO_USERSPACE:
        c = fubyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
        break;

    case UIO_USERSPACE64:
        c = fubyte((user_addr_t)uio->uio_iovs.iov64p->iov_base);
        break;

    case UIO_SYSSPACE32:
    case UIO_SYSSPACE:
        c = *((caddr_t)uio->uio_iovs.iov32p->iov_base) & 0377;
        break;

    case UIO_USERISPACE32:
    case UIO_USERISPACE:
        c = fuibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
        break;

    default:
        c = 0;    /* avoid uninitialized variable warning */
        panic("uwritec: bogus uio_segflg");
        break;
    }
    if (c < 0)
        return (-1);
    uio_iov_base_add(uio, 1);
    uio_iov_len_add(uio, -1);
    uio_setresid(uio, (uio_resid(uio) - 1));
    uio->uio_offset++;
    return (c);
}
#endif /* vax || ppc */

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, hashmask)
    int elements, type;
    u_long *hashmask;
{
    long hashsize;
    LIST_HEAD(generic, generic) *hashtbl;
    int i;

    if (elements <= 0)
        panic("hashinit: bad cnt");
    for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
        continue;
    hashsize >>= 1;
    MALLOC(hashtbl, struct generic *,
        (u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK|M_ZERO);
    if (hashtbl != NULL) {
        for (i = 0; i < hashsize; i++)
            LIST_INIT(&hashtbl[i]);
        *hashmask = hashsize - 1;
    }
    return (hashtbl);
}
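
/*
 * Example usage (editor's illustrative sketch, not part of the original
 * source; myhead, myentry, my_id and bucket are hypothetical): size a
 * chain table for roughly 64 elements, then mask a hash value to pick a
 * bucket.  hashinit rounds the element count down to a power of two, so
 * the returned mask is always (table size - 1).
 *
 *    u_long myhashmask;
 *    LIST_HEAD(myhead, myentry) *myhashtbl;
 *
 *    myhashtbl = hashinit(64, M_TEMP, &myhashmask);
 *    if (myhashtbl != NULL)
 *        bucket = &myhashtbl[my_id & myhashmask];
 */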

/*
 * uio_resid - return the residual IO value for the given uio_t
 */
user_ssize_t uio_resid( uio_t a_uio )
{
#if DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
/*    if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/*        panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/*    } */
#endif /* DEBUG */

    /* return 0 if there are no active iovecs */
    if (a_uio == NULL) {
        return( 0 );
    }

    if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
        return( (user_ssize_t)a_uio->uio_resid );
#else
        return( a_uio->uio_resid_64 );
#endif
    }
    return( (user_ssize_t)a_uio->uio_resid );
}

/*
 * uio_setresid - set the residual IO value for the given uio_t
 */
void uio_setresid( uio_t a_uio, user_ssize_t a_value )
{
#if DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
/*    if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
/*        panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */
/*    } */
#endif /* DEBUG */

    if (a_uio == NULL) {
        return;
    }

    if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
        a_uio->uio_resid = (int)a_value;
#else
        a_uio->uio_resid_64 = a_value;
#endif
    }
    else {
        a_uio->uio_resid = (int)a_value;
    }
    return;
}

#if 0 // obsolete
/*
 * uio_proc_t - return the proc_t for the given uio_t
 * WARNING - This call is going away. Find another way to get the proc_t!!
 */
__private_extern__ proc_t uio_proc_t( uio_t a_uio )
{
#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    /* return 0 if there are no active iovecs */
    if (a_uio == NULL) {
        return( NULL );
    }
    return( a_uio->uio_procp );
}

/*
 * uio_setproc_t - set the proc_t for the given uio_t
 * WARNING - This call is going away.
 */
__private_extern__ void uio_setproc_t( uio_t a_uio, proc_t a_proc_t )
{
    if (a_uio == NULL) {
#if LP64_DEBUG
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
        return;
    }

    a_uio->uio_procp = a_proc_t;
    return;
}
#endif // obsolete

/*
 * uio_curriovbase - return the base address of the current iovec associated
 * with the given uio_t.  May return 0.
 */
user_addr_t uio_curriovbase( uio_t a_uio )
{
#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
        return(0);
    }

    if (UIO_IS_64_BIT_SPACE(a_uio)) {
        return(a_uio->uio_iovs.uiovp->iov_base);
    }
    return((user_addr_t)((uintptr_t)a_uio->uio_iovs.kiovp->iov_base));
}

/*
 * uio_curriovlen - return the length value of the current iovec associated
 * with the given uio_t.
 */
user_size_t uio_curriovlen( uio_t a_uio )
{
#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
        return(0);
    }

    if (UIO_IS_64_BIT_SPACE(a_uio)) {
        return(a_uio->uio_iovs.uiovp->iov_len);
    }
    return((user_size_t)a_uio->uio_iovs.kiovp->iov_len);
}

/*
 * uio_setcurriovlen - set the length value of the current iovec associated
 * with the given uio_t.
 */
__private_extern__ void uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
{
#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_uio == NULL) {
        return;
    }

    if (UIO_IS_64_BIT_SPACE(a_uio)) {
        a_uio->uio_iovs.uiovp->iov_len = a_value;
    }
    else {
#if LP64_DEBUG
        if (a_value > 0xFFFFFFFFull) {
            panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
        }
#endif /* LP64_DEBUG */
        a_uio->uio_iovs.kiovp->iov_len = (size_t)a_value;
    }
    return;
}

/*
 * uio_iovcnt - return count of active iovecs for the given uio_t
 */
int uio_iovcnt( uio_t a_uio )
{
#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_uio == NULL) {
        return(0);
    }

    return( a_uio->uio_iovcnt );
}

/*
 * uio_offset - return the current offset value for the given uio_t
 */
off_t uio_offset( uio_t a_uio )
{
#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_uio == NULL) {
        return(0);
    }
    return( a_uio->uio_offset );
}

/*
 * uio_setoffset - set the current offset value for the given uio_t
 */
void uio_setoffset( uio_t a_uio, off_t a_offset )
{
#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_uio == NULL) {
        return;
    }
    a_uio->uio_offset = a_offset;
    return;
}

/*
 * uio_rw - return the read / write flag for the given uio_t
 */
int uio_rw( uio_t a_uio )
{
#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_uio == NULL) {
        return(-1);
    }
    return( a_uio->uio_rw );
}

/*
 * uio_setrw - set the read / write flag for the given uio_t
 */
void uio_setrw( uio_t a_uio, int a_value )
{
    if (a_uio == NULL) {
#if LP64_DEBUG
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
        return;
    }

#if LP64_DEBUG
    if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
        panic("%s :%d - invalid a_value\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_value == UIO_READ || a_value == UIO_WRITE) {
        a_uio->uio_rw = a_value;
    }
    return;
}

/*
 * uio_isuserspace - return a non-zero value if the address space
 * flag is for a user address space (could be 32 or 64 bit).
 */
int uio_isuserspace( uio_t a_uio )
{
    if (a_uio == NULL) {
#if LP64_DEBUG
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
        return(0);
    }

    if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
        return( 1 );
    }
    return( 0 );
}

/*
 * uio_create - create an uio_t.
 * Space is allocated to hold up to a_iovcount number of iovecs.  The uio_t
 * is not fully initialized until all iovecs are added using uio_addiov calls.
 * a_iovcount is the maximum number of iovecs you may add.
 */
uio_t uio_create( int a_iovcount,       /* number of iovecs */
                  off_t a_offset,       /* current offset */
                  int a_spacetype,      /* type of address space */
                  int a_iodirection )   /* read or write flag */
{
    void *  my_buf_p;
    int     my_size;
    uio_t   my_uio;

    my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
    my_buf_p = kalloc(my_size);
    my_uio = uio_createwithbuffer( a_iovcount,
                                   a_offset,
                                   a_spacetype,
                                   a_iodirection,
                                   my_buf_p,
                                   my_size );
    if (my_uio != 0) {
        /* leave a note that we allocated this uio_t */
        my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
#if DEBUG
        hw_atomic_add(&uio_t_count, 1);
#endif
    }

    return( my_uio );
}
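
/*
 * Example usage (editor's illustrative sketch, not part of the original
 * source; my_vp, my_ctx, my_buffer, my_length and file_offset are
 * hypothetical): read into a kernel buffer through the uio KPI, then
 * release the uio_t.
 *
 *    uio_t auio;
 *    int error = 0;
 *
 *    auio = uio_create(1, file_offset, UIO_SYSSPACE, UIO_READ);
 *    if (auio == NULL)
 *        return (ENOMEM);
 *    if (uio_addiov(auio, CAST_USER_ADDR_T(my_buffer), my_length) == 0)
 *        error = VNOP_READ(my_vp, auio, 0, my_ctx);
 *    uio_free(auio);
 */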

/*
 * uio_createwithbuffer - create an uio_t.
 * Create a uio_t using the given buffer.  The uio_t
 * is not fully initialized until all iovecs are added using uio_addiov calls.
 * a_iovcount is the maximum number of iovecs you may add.
 * This call may fail if the given buffer is not large enough.
 */
__private_extern__ uio_t
uio_createwithbuffer( int a_iovcount,       /* number of iovecs */
                      off_t a_offset,       /* current offset */
                      int a_spacetype,      /* type of address space */
                      int a_iodirection,    /* read or write flag */
                      void *a_buf_p,        /* pointer to a uio_t buffer */
                      int a_buffer_size )   /* size of uio_t buffer */
{
    uio_t my_uio = (uio_t) a_buf_p;
    int   my_size;

    my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
    if (a_buffer_size < my_size) {
#if DEBUG
        panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__);
#endif /* DEBUG */
        return( NULL );
    }
    my_size = a_buffer_size;

#if DEBUG
    if (my_uio == 0) {
        panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__);
    }
    if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
        panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
    }
    if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
        panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
    }
    if (a_iovcount > UIO_MAXIOV) {
        panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__);
    }
#endif /* DEBUG */

    bzero(my_uio, my_size);
    my_uio->uio_size = my_size;

    /* we use uio_segflg to indicate if the uio_t is the new format or */
    /* old (pre LP64 support) legacy format */
    switch (a_spacetype) {
    case UIO_USERSPACE:
        my_uio->uio_segflg = UIO_USERSPACE32;
        break;
    case UIO_SYSSPACE:
        my_uio->uio_segflg = UIO_SYSSPACE32;
        break;
    case UIO_PHYS_USERSPACE:
        my_uio->uio_segflg = UIO_PHYS_USERSPACE32;
        break;
    case UIO_PHYS_SYSSPACE:
        my_uio->uio_segflg = UIO_PHYS_SYSSPACE32;
        break;
    default:
        my_uio->uio_segflg = a_spacetype;
        break;
    }

    if (a_iovcount > 0) {
        my_uio->uio_iovs.uiovp = (struct user_iovec *)
            (((uint8_t *)my_uio) + sizeof(struct uio));
    }
    else {
        my_uio->uio_iovs.uiovp = NULL;
    }

    my_uio->uio_max_iovs = a_iovcount;
    my_uio->uio_offset = a_offset;
    my_uio->uio_rw = a_iodirection;
    my_uio->uio_flags = UIO_FLAGS_INITED;

    return( my_uio );
}

/*
 * uio_spacetype - return the address space type for the given uio_t
 */
int uio_spacetype( uio_t a_uio )
{
    if (a_uio == NULL) {
#if LP64_DEBUG
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
        return(-1);
    }

    return( a_uio->uio_segflg );
}

/*
 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
 * This returns the location of the iovecs within the uio.
 * NOTE - for compatibility mode we just return the current value in uio_iovs
 * which will increase as the IO is completed and is NOT embedded within the
 * uio; it is a separate array of one or more iovecs.
 */
struct user_iovec * uio_iovsaddr( uio_t a_uio )
{
    struct user_iovec * my_addr;

    if (a_uio == NULL) {
        return(NULL);
    }

    if (a_uio->uio_segflg == UIO_USERSPACE || a_uio->uio_segflg == UIO_SYSSPACE) {
        /* we need this for compatibility mode. */
        my_addr = (struct user_iovec *) a_uio->uio_iovs.iovp;
    }
    else {
        my_addr = (struct user_iovec *) (((uint8_t *)a_uio) + sizeof(struct uio));
    }
    return(my_addr);
}

/*
 * uio_reset - reset an uio_t.
 * Reset the given uio_t to initial values.  The uio_t is not fully initialized
 * until all iovecs are added using uio_addiov calls.
 * The a_iovcount value passed in the uio_create is the maximum number of
 * iovecs you may add.
 */
void uio_reset( uio_t a_uio,
                off_t a_offset,        /* current offset */
                int a_spacetype,       /* type of address space */
                int a_iodirection )    /* read or write flag */
{
    vm_size_t my_size;
    int       my_max_iovs;
    u_int32_t my_old_flags;

#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
    if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
        panic("%s :%d - invalid address space type\n", __FILE__, __LINE__);
    }
    if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
        panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_uio == NULL) {
        return;
    }

    my_size = a_uio->uio_size;
    my_old_flags = a_uio->uio_flags;
    my_max_iovs = a_uio->uio_max_iovs;
    bzero(a_uio, my_size);
    a_uio->uio_size = my_size;
    a_uio->uio_segflg = a_spacetype;
    if (my_max_iovs > 0) {
        a_uio->uio_iovs.uiovp = (struct user_iovec *)
            (((uint8_t *)a_uio) + sizeof(struct uio));
    }
    else {
        a_uio->uio_iovs.uiovp = NULL;
    }
    a_uio->uio_max_iovs = my_max_iovs;
    a_uio->uio_offset = a_offset;
    a_uio->uio_rw = a_iodirection;
    a_uio->uio_flags = my_old_flags;

    return;
}

/*
 * uio_free - free a uio_t allocated via uio_create.  This also frees all
 * associated iovecs.
 */
void uio_free( uio_t a_uio )
{
#if DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__);
    }
#endif /* DEBUG */

    if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
#if DEBUG
        if ((int)(hw_atomic_sub(&uio_t_count, 1)) < 0) {
            panic("%s :%d - uio_t_count has gone negative\n", __FILE__, __LINE__);
        }
#endif
        kfree(a_uio, a_uio->uio_size);
    }
}

/*
 * uio_addiov - add an iovec to the given uio_t.  You may call this up to
 * the a_iovcount number that was passed to uio_create.  This call will
 * increment the residual IO count as iovecs are added to the uio_t.
 * Returns 0 if the add was successful, else non-zero.
 */
int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
    int i;

    if (a_uio == NULL) {
#if DEBUG
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
        return(-1);
    }

    if (UIO_IS_64_BIT_SPACE(a_uio)) {
        for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
            if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) {
                a_uio->uio_iovs.uiovp[i].iov_len = a_length;
                a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr;
                a_uio->uio_iovcnt++;
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
                a_uio->uio_resid += a_length;
#else
                a_uio->uio_resid_64 += a_length;
#endif
                return( 0 );
            }
        }
    }
    else {
        for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
            if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) {
                a_uio->uio_iovs.kiovp[i].iov_len = (u_int32_t)a_length;
                a_uio->uio_iovs.kiovp[i].iov_base = (u_int32_t)((uintptr_t)a_baseaddr);
                a_uio->uio_iovcnt++;
                a_uio->uio_resid += a_length;
                return( 0 );
            }
        }
    }

    return( -1 );
}

/*
 * uio_getiov - get iovec data associated with the given uio_t.  Use
 * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
 * a_baseaddr_p and a_length_p may be NULL.
 * Returns -1 when a_index is out of range or the uio_t is invalid;
 * returns 0 when data is returned.
 */
int uio_getiov( uio_t a_uio,
                int a_index,
                user_addr_t * a_baseaddr_p,
                user_size_t * a_length_p )
{
    if (a_uio == NULL) {
#if DEBUG
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* DEBUG */
        return(-1);
    }
    if (a_index < 0 || a_index >= a_uio->uio_iovcnt) {
        return(-1);
    }

    if (UIO_IS_64_BIT_SPACE(a_uio)) {
        if (a_baseaddr_p != NULL) {
            *a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base;
        }
        if (a_length_p != NULL) {
            *a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len;
        }
    }
    else {
        if (a_baseaddr_p != NULL) {
            *a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base;
        }
        if (a_length_p != NULL) {
            *a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len;
        }
    }

    return( 0 );
}
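
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * walking the active iovecs of a uio_t without consuming them.
 *
 *    user_addr_t base;
 *    user_size_t len;
 *    int i;
 *
 *    for (i = 0; i < uio_iovcnt(a_uio); i++) {
 *        if (uio_getiov(a_uio, i, &base, &len) == 0)
 *            printf("iov %d: base 0x%llx len %llu\n", i,
 *                (unsigned long long)base, (unsigned long long)len);
 *    }
 */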

/*
 * uio_calculateresid - runs through all iovecs associated with this
 * uio_t and calculates (and sets) the residual IO count.
 */
__private_extern__ void uio_calculateresid( uio_t a_uio )
{
    int i;

    if (a_uio == NULL) {
#if LP64_DEBUG
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
#endif /* LP64_DEBUG */
        return;
    }

    a_uio->uio_iovcnt = 0;
    if (UIO_IS_64_BIT_SPACE(a_uio)) {
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
        a_uio->uio_resid = 0;
#else
        a_uio->uio_resid_64 = 0;
#endif
        for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
            if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) {
                a_uio->uio_iovcnt++;
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
                a_uio->uio_resid += a_uio->uio_iovs.uiovp[i].iov_len;
#else
                a_uio->uio_resid_64 += a_uio->uio_iovs.uiovp[i].iov_len;
#endif
            }
        }
    }
    else {
        a_uio->uio_resid = 0;
        for ( i = 0; i < a_uio->uio_max_iovs; i++ ) {
            if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) {
                a_uio->uio_iovcnt++;
                a_uio->uio_resid += a_uio->uio_iovs.kiovp[i].iov_len;
            }
        }
    }
    return;
}

/*
 * uio_update - update the given uio_t for a_count of completed IO.
 * This call decrements the current iovec length and residual IO value
 * and increments the current iovec base address and offset value.
 * If the current iovec length is 0, then advance to the next
 * iovec (if any).
 * If the a_count passed in is 0, then only do the advancement
 * over any 0 length iovecs.
 */
void uio_update( uio_t a_uio, user_size_t a_count )
{
#if LP64_DEBUG
    if (a_uio == NULL) {
        panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
    }
    if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
        panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
    }
#endif /* LP64_DEBUG */

    if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
        return;
    }

    if (UIO_IS_64_BIT_SPACE(a_uio)) {
        /*
         * if a_count == 0, then we are asking to skip over
         * any empty iovs
         */
        if (a_count) {
            if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
                a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
                a_uio->uio_iovs.uiovp->iov_len = 0;
            }
            else {
                a_uio->uio_iovs.uiovp->iov_base += a_count;
                a_uio->uio_iovs.uiovp->iov_len -= a_count;
            }
#if 1 // LP64todo - remove this temp workaround once we go live with uio KPI
            if (a_uio->uio_resid < 0) {
                a_uio->uio_resid = 0;
            }
            if (a_count > (user_size_t)a_uio->uio_resid) {
                a_uio->uio_offset += a_uio->uio_resid;
                a_uio->uio_resid = 0;
            }
            else {
                a_uio->uio_offset += a_count;
                a_uio->uio_resid -= a_count;
            }
#else
            if (a_uio->uio_resid_64 < 0) {
                a_uio->uio_resid_64 = 0;
            }
            if (a_count > (user_size_t)a_uio->uio_resid_64) {
                a_uio->uio_offset += a_uio->uio_resid_64;
                a_uio->uio_resid_64 = 0;
            }
            else {
                a_uio->uio_offset += a_count;
                a_uio->uio_resid_64 -= a_count;
            }
#endif // LP64todo
        }
        /*
         * advance to next iovec if current one is totally consumed
         */
        while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
            a_uio->uio_iovcnt--;
            if (a_uio->uio_iovcnt > 0) {
                a_uio->uio_iovs.uiovp++;
            }
        }
    }
    else {
        /*
         * if a_count == 0, then we are asking to skip over
         * any empty iovs
         */
        if (a_count) {
            if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
                a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
                a_uio->uio_iovs.kiovp->iov_len = 0;
            }
            else {
                a_uio->uio_iovs.kiovp->iov_base += a_count;
                a_uio->uio_iovs.kiovp->iov_len -= a_count;
            }
            if (a_uio->uio_resid < 0) {
                a_uio->uio_resid = 0;
            }
            if (a_count > (user_size_t)a_uio->uio_resid) {
                a_uio->uio_offset += a_uio->uio_resid;
                a_uio->uio_resid = 0;
            }
            else {
                a_uio->uio_offset += a_count;
                a_uio->uio_resid -= a_count;
            }
        }
        /*
         * advance to next iovec if current one is totally consumed
         */
        while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
            a_uio->uio_iovcnt--;
            if (a_uio->uio_iovcnt > 0) {
                a_uio->uio_iovs.kiovp++;
            }
        }
    }
    return;
}
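
/*
 * Example (editor's illustrative sketch, not part of the original source;
 * sc, MY_MAX_XFER and my_device_xfer are hypothetical): a driver that
 * consumes a uio in bounded chunks, calling uio_update after each
 * completed transfer.
 *
 *    while (uio_resid(uio) > 0 && error == 0) {
 *        user_size_t chunk = MIN(uio_curriovlen(uio), MY_MAX_XFER);
 *
 *        error = my_device_xfer(sc, uio_curriovbase(uio), chunk,
 *            uio_rw(uio));
 *        if (error == 0)
 *            uio_update(uio, chunk);
 *    }
 */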

/*
 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
 * May return NULL.
 */
uio_t uio_duplicate( uio_t a_uio )
{
    uio_t my_uio;
    int   i;

    if (a_uio == NULL) {
        return(NULL);
    }

    my_uio = (uio_t) kalloc(a_uio->uio_size);
    if (my_uio == 0) {
        panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
    }

    bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
    /* need to set our iovec pointer to point to first active iovec */
    if (my_uio->uio_max_iovs > 0) {
        my_uio->uio_iovs.uiovp = (struct user_iovec *)
            (((uint8_t *)my_uio) + sizeof(struct uio));

        /* advance to first nonzero iovec */
        if (my_uio->uio_iovcnt > 0) {
            for ( i = 0; i < my_uio->uio_max_iovs; i++ ) {
                if (UIO_IS_64_BIT_SPACE(a_uio)) {
                    if (my_uio->uio_iovs.uiovp->iov_len != 0) {
                        break;
                    }
                    my_uio->uio_iovs.uiovp++;
                }
                else {
                    if (my_uio->uio_iovs.kiovp->iov_len != 0) {
                        break;
                    }
                    my_uio->uio_iovs.kiovp++;
                }
            }
        }
    }

    return(my_uio);
}