+ if (a_uio == NULL) {
+#if DEBUG
+ panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
+#endif
+ return -1;
+ }
+
+ if (os_add_overflow(a_length, a_uio->uio_resid_64, &resid)) {
+#if DEBUG
+ panic("%s :%d - invalid length %lu\n", __FILE__, __LINE__, (unsigned long)a_length);
+#endif
+ return -1;
+ }
+
+ if (UIO_IS_USER_SPACE(a_uio)) {
+ for (i = 0; i < a_uio->uio_max_iovs; i++) {
+ if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) {
+ a_uio->uio_iovs.uiovp[i].iov_len = a_length;
+ a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr;
+ a_uio->uio_iovcnt++;
+ a_uio->uio_resid_64 = resid;
+ return 0;
+ }
+ }
+ } else {
+ for (i = 0; i < a_uio->uio_max_iovs; i++) {
+ if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) {
+ a_uio->uio_iovs.kiovp[i].iov_len = (u_int64_t)a_length;
+ a_uio->uio_iovs.kiovp[i].iov_base = (u_int64_t)a_baseaddr;
+ a_uio->uio_iovcnt++;
+ a_uio->uio_resid_64 = resid;
+ return 0;
+ }
+ }
+ }
+
+ return -1;
+}
+
+/*
+ * uio_getiov - get iovec data associated with the given uio_t. Use
+ * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
+ * a_baseaddr_p and a_length_p may be NULL.
+ * returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
+ * returns 0 when data is returned.
+ */
+int
+uio_getiov( uio_t a_uio,
+ int a_index,
+ user_addr_t * a_baseaddr_p,
+ user_size_t * a_length_p )
+{
+ /* Reject a NULL uio; DEBUG kernels treat this as a programming error. */
+ if (a_uio == NULL) {
+#if DEBUG
+ panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
+#endif /* DEBUG */
+ return -1;
+ }
+ /* a_index must name an active iovec: 0 .. uio_iovcnt - 1. */
+ if (a_index < 0 || a_index >= a_uio->uio_iovcnt) {
+ return -1;
+ }
+
+ if (UIO_IS_USER_SPACE(a_uio)) {
+ /* User-space uio: iovecs already use user_addr_t / user_size_t. */
+ if (a_baseaddr_p != NULL) {
+ *a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base;
+ }
+ if (a_length_p != NULL) {
+ *a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len;
+ }
+ } else {
+ /* Kernel-space uio: cast the kernel iovec fields to the user types. */
+ if (a_baseaddr_p != NULL) {
+ *a_baseaddr_p = (user_addr_t)a_uio->uio_iovs.kiovp[a_index].iov_base;
+ }
+ if (a_length_p != NULL) {
+ *a_length_p = (user_size_t)a_uio->uio_iovs.kiovp[a_index].iov_len;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * uio_calculateresid - runs through all iovecs associated with this
+ * uio_t and calculates (and sets) the residual IO count.
+ */
+__private_extern__ int
+uio_calculateresid( uio_t a_uio )
+{
+ int i;
+ u_int64_t resid = 0;
+
+ if (a_uio == NULL) {
+#if LP64_DEBUG
+ panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
+#endif /* LP64_DEBUG */
+ return EINVAL;
+ }
+
+ /* Consider every iovec slot active again, then sum their lengths. */
+ a_uio->uio_iovcnt = a_uio->uio_max_iovs;
+ if (UIO_IS_USER_SPACE(a_uio)) {
+ a_uio->uio_resid_64 = 0;
+ for (i = 0; i < a_uio->uio_max_iovs; i++) {
+ /* Only iovecs with both a non-zero length and base contribute. */
+ if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) {
+ /* Reject any single iovec, or running total, above LONG_MAX.
+ * On this error path uio_resid_64 is left at 0 (set above). */
+ if (a_uio->uio_iovs.uiovp[i].iov_len > LONG_MAX) {
+ return EINVAL;
+ }
+ resid += a_uio->uio_iovs.uiovp[i].iov_len;
+ if (resid > LONG_MAX) {
+ return EINVAL;
+ }
+ }
+ }
+ a_uio->uio_resid_64 = (user_size_t)resid;
+
+ /* position to first non zero length iovec (4235922) */
+ while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
+ a_uio->uio_iovcnt--;
+ if (a_uio->uio_iovcnt > 0) {
+ a_uio->uio_iovs.uiovp++;
+ }
+ }
+ } else {
+ /* Kernel-space variant: identical accounting over kiovp entries. */
+ a_uio->uio_resid_64 = 0;
+ for (i = 0; i < a_uio->uio_max_iovs; i++) {
+ if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) {
+ if (a_uio->uio_iovs.kiovp[i].iov_len > LONG_MAX) {
+ return EINVAL;
+ }
+ resid += a_uio->uio_iovs.kiovp[i].iov_len;
+ if (resid > LONG_MAX) {
+ return EINVAL;
+ }
+ }
+ }
+ a_uio->uio_resid_64 = (user_size_t)resid;
+
+ /* position to first non zero length iovec (4235922) */
+ while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
+ a_uio->uio_iovcnt--;
+ if (a_uio->uio_iovcnt > 0) {
+ a_uio->uio_iovs.kiovp++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * uio_update - update the given uio_t for a_count of completed IO.
+ * This call decrements the current iovec length and residual IO value
+ * and increments the current iovec base address and offset value.
+ * If the current iovec length is 0 then advance to the next
+ * iovec (if any).
+ * If the a_count passed in is 0, then only do the advancement
+ * over any 0 length iovec's.
+ */
+void
+uio_update( uio_t a_uio, user_size_t a_count )
+{
+#if LP64_DEBUG
+ if (a_uio == NULL) {
+ panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
+ }
+ if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
+ panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
+ }
+#endif /* LP64_DEBUG */
+
+ /* Silently ignore a NULL or already-exhausted uio on release kernels. */
+ if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
+ return;
+ }
+
+ if (UIO_IS_USER_SPACE(a_uio)) {
+ /*
+ * if a_count == 0, then we are asking to skip over
+ * any empty iovs
+ */
+ if (a_count) {
+ /* Clamp to the current iovec: consuming more than iov_len
+ * only drains this one iovec; the remainder is NOT carried
+ * into the next iovec. */
+ if (a_count > a_uio->uio_iovs.uiovp->iov_len) {
+ a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len;
+ a_uio->uio_iovs.uiovp->iov_len = 0;
+ } else {
+ a_uio->uio_iovs.uiovp->iov_base += a_count;
+ a_uio->uio_iovs.uiovp->iov_len -= a_count;
+ }
+ /* Likewise clamp the offset/resid update so resid never
+ * underflows below zero. */
+ if (a_count > (user_size_t)a_uio->uio_resid_64) {
+ a_uio->uio_offset += a_uio->uio_resid_64;
+ a_uio->uio_resid_64 = 0;
+ } else {
+ a_uio->uio_offset += a_count;
+ a_uio->uio_resid_64 -= a_count;
+ }
+ }
+ /*
+ * advance to next iovec if current one is totally consumed
+ */
+ while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) {
+ a_uio->uio_iovcnt--;
+ if (a_uio->uio_iovcnt > 0) {
+ a_uio->uio_iovs.uiovp++;
+ }
+ }
+ } else {
+ /*
+ * if a_count == 0, then we are asking to skip over
+ * any empty iovs
+ */
+ if (a_count) {
+ /* Kernel-space branch: same clamped accounting on kiovp. */
+ if (a_count > a_uio->uio_iovs.kiovp->iov_len) {
+ a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len;
+ a_uio->uio_iovs.kiovp->iov_len = 0;
+ } else {
+ a_uio->uio_iovs.kiovp->iov_base += a_count;
+ a_uio->uio_iovs.kiovp->iov_len -= a_count;
+ }
+ if (a_count > (user_size_t)a_uio->uio_resid_64) {
+ a_uio->uio_offset += a_uio->uio_resid_64;
+ a_uio->uio_resid_64 = 0;
+ } else {
+ a_uio->uio_offset += a_count;
+ a_uio->uio_resid_64 -= a_count;
+ }
+ }
+ /*
+ * advance to next iovec if current one is totally consumed
+ */
+ while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.kiovp->iov_len == 0) {
+ a_uio->uio_iovcnt--;
+ if (a_uio->uio_iovcnt > 0) {
+ a_uio->uio_iovs.kiovp++;
+ }
+ }
+ }
+ return;
+}
+
+/*
+ * uio_pushback - undo uncommitted I/O by subtracting from the
+ * current base address and offset, and incrementing the residual
+ * IO. If the UIO was previously exhausted, this call will panic.
+ * New code should not use this functionality.
+ */
+__private_extern__ void
+uio_pushback( uio_t a_uio, user_size_t a_count )
+{
+#if LP64_DEBUG
+ if (a_uio == NULL) {
+ panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
+ }
+ if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
+ panic("%s :%d - invalid count value \n", __FILE__, __LINE__);
+ }
+#endif /* LP64_DEBUG */
+
+ /* Nothing to undo for a NULL uio or a zero-byte pushback. */
+ if (a_uio == NULL || a_count == 0) {
+ return;
+ }
+
+ /* Pushing back into a fully exhausted uio is always a bug. */
+ if (a_uio->uio_iovcnt < 1) {
+ panic("Invalid uio for pushback");
+ }
+
+ /* Rewind the current iovec by a_count bytes.
+ * NOTE(review): no range check here — caller must guarantee a_count
+ * does not exceed what was previously consumed from this iovec,
+ * otherwise iov_base is rewound past the iovec's true start. */
+ if (UIO_IS_USER_SPACE(a_uio)) {
+ a_uio->uio_iovs.uiovp->iov_base -= a_count;
+ a_uio->uio_iovs.uiovp->iov_len += a_count;
+ } else {
+ a_uio->uio_iovs.kiovp->iov_base -= a_count;
+ a_uio->uio_iovs.kiovp->iov_len += a_count;
+ }
+
+ /* Mirror the rewind in the global offset / residual counters. */
+ a_uio->uio_offset -= a_count;
+ a_uio->uio_resid_64 += a_count;
+
+ return;
+}
+
+
+/*
+ * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
+ * may return NULL.
+ */
+uio_t
+uio_duplicate( uio_t a_uio )
+{
+ uio_t my_uio;
+ int i;
+
+ if (a_uio == NULL) {
+ return NULL;
+ }
+
+ /* uio_size covers the uio header plus its inline iovec array, so a
+ * single allocation + bcopy clones everything. Allocation failure
+ * panics rather than returning NULL. */
+ my_uio = (uio_t) kalloc(a_uio->uio_size);
+ if (my_uio == 0) {
+ panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
+ }
+
+ bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
+ /* need to set our iovec pointer to point to first active iovec */
+ if (my_uio->uio_max_iovs > 0) {
+ /* The iovec array sits immediately after the struct uio header.
+ * Assigning via uiovp appears to cover the kernel case too —
+ * presumably uio_iovs is a union of uiovp/kiovp; confirm against
+ * the struct definition. */
+ my_uio->uio_iovs.uiovp = (struct user_iovec *)
+ (((uint8_t *)my_uio) + sizeof(struct uio));
+
+ /* advance to first nonzero iovec */
+ if (my_uio->uio_iovcnt > 0) {
+ for (i = 0; i < my_uio->uio_max_iovs; i++) {
+ if (UIO_IS_USER_SPACE(a_uio)) {
+ if (my_uio->uio_iovs.uiovp->iov_len != 0) {
+ break;
+ }
+ my_uio->uio_iovs.uiovp++;
+ } else {
+ if (my_uio->uio_iovs.kiovp->iov_len != 0) {
+ break;
+ }
+ my_uio->uio_iovs.kiovp++;
+ }
+ }
+ }
+ }
+
+ /* The copy is always owned by us: replace (not OR into) the copied
+ * flags so only WE_ALLOCED|INITED survive; any other flag bits from
+ * the original are intentionally dropped. */
+ my_uio->uio_flags = UIO_FLAGS_WE_ALLOCED | UIO_FLAGS_INITED;
+#if DEBUG
+ os_atomic_inc(&uio_t_count, relaxed);
+#endif
+
+
+ return my_uio;
+}
+
+int
+copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct user_iovec *dst)
+{
+ size_t size_of_iovec = (spacetype == UIO_USERSPACE64 ? sizeof(struct user64_iovec) : sizeof(struct user32_iovec));
+ int error;
+ int i;
+
+ // copyin to the front of "dst", without regard for putting records in the right places
+ error = copyin(uaddr, dst, count * size_of_iovec);
+ if (error) {
+ return error;
+ }
+
+ // now, unpack the entries in reverse order, so we don't overwrite anything
+ for (i = count - 1; i >= 0; i--) {
+ if (spacetype == UIO_USERSPACE64) {
+ struct user64_iovec iovec = ((struct user64_iovec *)dst)[i];
+ dst[i].iov_base = (user_addr_t)iovec.iov_base;
+ dst[i].iov_len = (user_size_t)iovec.iov_len;
+ } else {
+ struct user32_iovec iovec = ((struct user32_iovec *)dst)[i];
+ dst[i].iov_base = iovec.iov_base;
+ dst[i].iov_len = iovec.iov_len;
+ }
+ }
+
+ return 0;