/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * sendfile(2) and related extensions:
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/file_internal.h>
#include <sys/vnode_internal.h>
#include <sys/malloc.h>
#include <sys/mcache.h>
#include <kern/locks.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/uio_internal.h>
#include <sys/kauth.h>
#include <kern/task.h>

#include <security/audit/audit.h>

#include <sys/kdebug.h>
#include <sys/sysproto.h>
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>

#if CONFIG_MACF_SOCKET_SUBSET
#include <security/mac_framework.h>
#endif /* MAC_SOCKET_SUBSET */
#define	f_flag		f_fglob->fg_flag
#define	f_type		f_fglob->fg_ops->fo_type
#define	f_msgcount	f_fglob->fg_msgcount
#define	f_cred		f_fglob->fg_cred
#define	f_ops		f_fglob->fg_ops
#define	f_offset	f_fglob->fg_offset
#define	f_data		f_fglob->fg_data

#define	DBG_LAYER_IN_BEG	NETDBG_CODE(DBG_NETSOCK, 0)
#define	DBG_LAYER_IN_END	NETDBG_CODE(DBG_NETSOCK, 2)
#define	DBG_LAYER_OUT_BEG	NETDBG_CODE(DBG_NETSOCK, 1)
#define	DBG_LAYER_OUT_END	NETDBG_CODE(DBG_NETSOCK, 3)
#define	DBG_FNC_SENDMSG		NETDBG_CODE(DBG_NETSOCK, (1 << 8) | 1)
#define	DBG_FNC_SENDTO		NETDBG_CODE(DBG_NETSOCK, (2 << 8) | 1)
#define	DBG_FNC_SENDIT		NETDBG_CODE(DBG_NETSOCK, (3 << 8) | 1)
#define	DBG_FNC_RECVFROM	NETDBG_CODE(DBG_NETSOCK, (5 << 8))
#define	DBG_FNC_RECVMSG		NETDBG_CODE(DBG_NETSOCK, (6 << 8))
#define	DBG_FNC_RECVIT		NETDBG_CODE(DBG_NETSOCK, (7 << 8))
#define	DBG_FNC_SENDFILE	NETDBG_CODE(DBG_NETSOCK, (10 << 8))
#define	DBG_FNC_SENDFILE_WAIT	NETDBG_CODE(DBG_NETSOCK, ((10 << 8) | 1))
#define	DBG_FNC_SENDFILE_READ	NETDBG_CODE(DBG_NETSOCK, ((10 << 8) | 2))
#define	DBG_FNC_SENDFILE_SEND	NETDBG_CODE(DBG_NETSOCK, ((10 << 8) | 3))
#define	DBG_FNC_SENDMSG_X	NETDBG_CODE(DBG_NETSOCK, (11 << 8))
#define	DBG_FNC_RECVMSG_X	NETDBG_CODE(DBG_NETSOCK, (12 << 8))
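
/*
 * Illustrative note (not from the original sources): each DBG_FNC_* code
 * above is handed to KERNEL_DEBUG() paired with DBG_FUNC_START/DBG_FUNC_END,
 * so one syscall shows up as a start/end pair in a kdebug trace.  A minimal
 * sketch of the pattern used throughout this file:
 *
 *	KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_START, 0, 0, 0, 0, 0);
 *	... do the work, computing `error' ...
 *	KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, error, 0, 0, 0, 0);
 */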
/* TODO: should be in header file */
int falloc_locked(proc_t, struct fileproc **, int *, vfs_context_t, int);

static int sendit(struct proc *, int, struct user_msghdr *, uio_t, int,
    int32_t *);
static int recvit(struct proc *, int, struct user_msghdr *, uio_t,
    user_addr_t, int32_t *);
static int connectit(struct socket *, struct sockaddr *);
static int getsockaddr(struct socket *, struct sockaddr **, user_addr_t,
    size_t, boolean_t);
static int getsockaddr_s(struct socket *, struct sockaddr_storage *,
    user_addr_t, size_t, boolean_t);
static int getsockaddrlist(struct socket *, struct sockaddr_list **,
    user_addr_t, socklen_t, boolean_t);
#if SENDFILE
static void alloc_sendpkt(int, size_t, unsigned int *, struct mbuf **,
    boolean_t);
#endif /* SENDFILE */
static int connectx_nocancel(struct proc *, struct connectx_args *, int *);
static int connectitx(struct socket *, struct sockaddr_list **,
    struct sockaddr_list **, struct proc *, uint32_t, associd_t, connid_t *);
static int peeloff_nocancel(struct proc *, struct peeloff_args *, int *);
static int disconnectx_nocancel(struct proc *, struct disconnectx_args *,
    int *);
static int socket_common(struct proc *, int, int, int, pid_t, int32_t *, int);

static int internalize_user_msghdr_array(const void *, int, int, u_int,
    struct user_msghdr_x *, struct uio **);
static u_int externalize_user_msghdr_array(void *, int, int, u_int,
    const struct user_msghdr_x *, struct uio **);

static void free_uio_array(struct uio **, u_int);
static int uio_array_is_valid(struct uio **, u_int);
/*
 * System call interface to the socket abstraction.
 */

extern const struct fileops socketops;
/*
 * Returns:	0			Success
 *		EACCES			Mandatory Access Control failure
 *	socreate:EAFNOSUPPORT
 *	socreate:EPROTOTYPE
 *	socreate:EPROTONOSUPPORT
 *	socreate:???			[other protocol families, IPSEC]
 */
int
socket(struct proc *p,
    struct socket_args *uap,
    int32_t *retval)
{
	return (socket_common(p, uap->domain, uap->type, uap->protocol,
	    proc_selfpid(), retval, 0));
}

int
socket_delegate(struct proc *p,
    struct socket_delegate_args *uap,
    int32_t *retval)
{
	return socket_common(p, uap->domain, uap->type, uap->protocol,
	    uap->epid, retval, 1);
}

static int
socket_common(struct proc *p,
    int domain, int type, int protocol,
    pid_t epid, int32_t *retval, int delegate)
{
	struct socket *so;
	struct fileproc *fp;
	int fd, error;

	AUDIT_ARG(socket, domain, type, protocol);
#if CONFIG_MACF_SOCKET_SUBSET
	if ((error = mac_socket_check_create(kauth_cred_get(), domain,
	    type, protocol)) != 0)
		return (error);
#endif /* MAC_SOCKET_SUBSET */

	if (delegate) {
		error = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0);
		if (error)
			return (EACCES);
	}

	error = falloc(p, &fp, &fd, vfs_context_current());
	if (error) {
		return (error);
	}
	fp->f_flag = FREAD|FWRITE;
	fp->f_ops = &socketops;

	if (delegate)
		error = socreate_delegate(domain, &so, type, protocol, epid);
	else
		error = socreate(domain, &so, type, protocol);

	if (error) {
		fp_free(p, fd, fp);
	} else {
		fp->f_data = (caddr_t)so;

		proc_fdlock(p);
		procfdtbl_releasefd(p, fd, NULL);

		fp_drop(p, fd, fp, 1);
		proc_fdunlock(p);

		*retval = fd;
	}
	return (error);
}
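
/*
 * Illustrative user-space sketch (not part of the kernel build): how the
 * domain/type/protocol triple passed to socket(2) reaches socket_common()
 * above.  The example_socket() helper name is hypothetical and error
 * handling is intentionally minimal.
 *
 *	#include <sys/socket.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	example_socket(void)
 *	{
 *		int s = socket(PF_INET, SOCK_STREAM, 0);
 *		if (s == -1) {
 *			perror("socket");
 *			return (-1);
 *		}
 *		close(s);
 *		return (0);
 *	}
 */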
/*
 * Returns:	0			Success
 *		EDESTADDRREQ		Destination address required
 *		EBADF			Bad file descriptor
 *		EACCES			Mandatory Access Control failure
 *	file_socket:ENOTSOCK
 *	getsockaddr:ENAMETOOLONG	Filename too long
 *	getsockaddr:EINVAL		Invalid argument
 *	getsockaddr:ENOMEM		Not enough space
 *	getsockaddr:EFAULT		Bad address
 */
/* ARGSUSED */
int
bind(__unused proc_t p, struct bind_args *uap, __unused int32_t *retval)
{
	struct sockaddr_storage ss;
	struct sockaddr *sa = NULL;
	struct socket *so;
	boolean_t want_free = TRUE;
	int error;

	AUDIT_ARG(fd, uap->s);
	error = file_socket(uap->s, &so);
	if (error != 0)
		return (error);
	if (so == NULL) {
		error = EBADF;
		goto out;
	}
	if (uap->name == USER_ADDR_NULL) {
		error = EDESTADDRREQ;
		goto out;
	}
	if (uap->namelen > sizeof (ss)) {
		error = getsockaddr(so, &sa, uap->name, uap->namelen, TRUE);
	} else {
		error = getsockaddr_s(so, &ss, uap->name, uap->namelen, TRUE);
		if (error == 0) {
			sa = (struct sockaddr *)&ss;
			want_free = FALSE;
		}
	}
	if (error != 0)
		goto out;
	AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()), sa);
#if CONFIG_MACF_SOCKET_SUBSET
	if ((error = mac_socket_check_bind(kauth_cred_get(), so, sa)) == 0)
		error = sobindlock(so, sa, 1);	/* will lock socket */
#else
	error = sobindlock(so, sa, 1);		/* will lock socket */
#endif /* MAC_SOCKET_SUBSET */
	if (want_free && sa != NULL)
		FREE(sa, M_SONAME);
out:
	file_drop(uap->s);
	return (error);
}
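
/*
 * Illustrative user-space sketch (not part of the kernel build): a bind(2)
 * call whose address is small enough to take the getsockaddr_s() stack path
 * above.  The example_bind() helper name is hypothetical.
 *
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int
 *	example_bind(int s, in_port_t port)
 *	{
 *		struct sockaddr_in sin;
 *
 *		memset(&sin, 0, sizeof (sin));
 *		sin.sin_len = sizeof (sin);
 *		sin.sin_family = AF_INET;
 *		sin.sin_port = htons(port);
 *		sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *		return (bind(s, (struct sockaddr *)&sin, sizeof (sin)));
 *	}
 */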
/*
 * Returns:	0			Success
 *		EACCES			Mandatory Access Control failure
 *	file_socket:ENOTSOCK
 *	solisten:EOPNOTSUPP
 */
int
listen(__unused struct proc *p, struct listen_args *uap,
    __unused int32_t *retval)
{
	struct socket *so;
	int error;

	AUDIT_ARG(fd, uap->s);
	error = file_socket(uap->s, &so);
	if (error)
		return (error);
	if (so != NULL)
#if CONFIG_MACF_SOCKET_SUBSET
	{
		error = mac_socket_check_listen(kauth_cred_get(), so);
		if (error == 0)
			error = solisten(so, uap->backlog);
	}
#else
		error = solisten(so, uap->backlog);
#endif /* MAC_SOCKET_SUBSET */
	else
		error = EBADF;

	file_drop(uap->s);
	return (error);
}
/*
 * Returns:	fp_getfsock:EBADF	Bad file descriptor
 *		fp_getfsock:EOPNOTSUPP	...
 *	xlate => :ENOTSOCK		Socket operation on non-socket
 *	:EFAULT				Bad address on copyin/copyout
 *	:EBADF				Bad file descriptor
 *	:EOPNOTSUPP			Operation not supported on socket
 *	:EINVAL				Invalid argument
 *	:EWOULDBLOCK			Operation would block
 *	:ECONNABORTED			Connection aborted
 *	:EINTR				Interrupted function
 *	:EACCES				Mandatory Access Control failure
 *	falloc_locked:ENFILE		Too many files open in system
 *	falloc_locked:EMFILE		Too many open files
 *	falloc_locked:ENOMEM		Not enough space
 */
int
accept_nocancel(struct proc *p, struct accept_nocancel_args *uap,
    int32_t *retval)
{
	struct fileproc *fp;
	struct sockaddr *sa = NULL;
	socklen_t namelen;
	int error;
	struct socket *head, *so = NULL;
	lck_mtx_t *mutex_held;
	int fd = uap->s;
	int newfd;
	short fflag;		/* type must match fp->f_flag */

	AUDIT_ARG(fd, uap->s);
	if (uap->name) {
		error = copyin(uap->anamelen, (caddr_t)&namelen,
		    sizeof (socklen_t));
		if (error)
			return (error);
	}
	error = fp_getfsock(p, fd, &fp, &head);
	if (error) {
		if (error == EOPNOTSUPP)
			error = ENOTSOCK;
		return (error);
	}
#if CONFIG_MACF_SOCKET_SUBSET
	if ((error = mac_socket_check_accept(kauth_cred_get(), head)) != 0)
		goto out;
#endif /* MAC_SOCKET_SUBSET */

	socket_lock(head, 1);

	if (head->so_proto->pr_getlock != NULL) {
		mutex_held = (*head->so_proto->pr_getlock)(head, 0);
	} else {
		mutex_held = head->so_proto->pr_domain->dom_mtx;
	}

	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		if ((head->so_proto->pr_flags & PR_CONNREQUIRED) == 0) {
			error = EOPNOTSUPP;
		} else {
			/* POSIX: The socket is not accepting connections */
			error = EINVAL;
		}
		socket_unlock(head, 1);
		goto out;
	}
	if ((head->so_state & SS_NBIO) && head->so_comp.tqh_first == NULL) {
		socket_unlock(head, 1);
		error = EWOULDBLOCK;
		goto out;
	}
	while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
		if (head->so_state & SS_CANTRCVMORE) {
			head->so_error = ECONNABORTED;
			break;
		}
		if (head->so_usecount < 1)
			panic("accept: head=%p refcount=%d\n", head,
			    head->so_usecount);
		error = msleep((caddr_t)&head->so_timeo, mutex_held,
		    PSOCK | PCATCH, "accept", 0);
		if (head->so_usecount < 1)
			panic("accept: 2 head=%p refcount=%d\n", head,
			    head->so_usecount);
		if ((head->so_state & SS_DRAINING)) {
			error = ECONNABORTED;
		}
		if (error) {
			socket_unlock(head, 1);
			goto out;
		}
	}
	if (head->so_error) {
		error = head->so_error;
		head->so_error = 0;
		socket_unlock(head, 1);
		goto out;
	}

	/*
	 * At this point we know that there is at least one connection
	 * ready to be accepted. Remove it from the queue prior to
	 * allocating the file descriptor for it since falloc() may
	 * block allowing another process to accept the connection
	 * instead.
	 */
	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
	so = TAILQ_FIRST(&head->so_comp);
	TAILQ_REMOVE(&head->so_comp, so, so_list);

	/* unlock head to avoid deadlock with select, keep a ref on head */
	socket_unlock(head, 0);

#if CONFIG_MACF_SOCKET_SUBSET
	/*
	 * Pass the pre-accepted socket to the MAC framework. This is
	 * cheaper than allocating a file descriptor for the socket,
	 * calling the protocol accept callback, and possibly freeing
	 * the file descriptor should the MAC check fail.
	 */
	if ((error = mac_socket_check_accepted(kauth_cred_get(), so)) != 0) {
		socket_lock(so, 1);
		so->so_state &= ~(SS_NOFDREF | SS_COMP);
		socket_unlock(so, 1);
		soclose(so);
		/* Drop reference on listening socket */
		sodereference(head);
		goto out;
	}
#endif /* MAC_SOCKET_SUBSET */

	/*
	 * Pass the pre-accepted socket to any interested socket filter(s).
	 * Upon failure, the socket would have been closed by the callee.
	 */
	if (so->so_filt != NULL && (error = soacceptfilter(so)) != 0) {
		/* Drop reference on listening socket */
		sodereference(head);
		/* Propagate socket filter's error code to the caller */
		goto out;
	}

	fflag = fp->f_flag;
	error = falloc(p, &fp, &newfd, vfs_context_current());
	if (error) {
		/*
		 * Probably ran out of file descriptors.
		 *
		 * <rdar://problem/8554930>
		 * Don't put this back on the socket like we used to, that
		 * just causes the client to spin. Drop the socket.
		 */
		socket_lock(so, 1);
		so->so_state &= ~(SS_NOFDREF | SS_COMP);
		socket_unlock(so, 1);
		soclose(so);
		sodereference(head);
		goto out;
	}
	*retval = newfd;
	fp->f_flag = fflag;
	fp->f_ops = &socketops;
	fp->f_data = (caddr_t)so;

	socket_lock(head, 0);
	so->so_state &= ~SS_COMP;
	so->so_head = NULL;

	/* Sync socket non-blocking/async state with file flags */
	if (fp->f_flag & FNONBLOCK) {
		so->so_state |= SS_NBIO;
	} else {
		so->so_state &= ~SS_NBIO;
	}

	if (fp->f_flag & FASYNC) {
		so->so_state |= SS_ASYNC;
		so->so_rcv.sb_flags |= SB_ASYNC;
		so->so_snd.sb_flags |= SB_ASYNC;
	} else {
		so->so_state &= ~SS_ASYNC;
		so->so_rcv.sb_flags &= ~SB_ASYNC;
		so->so_snd.sb_flags &= ~SB_ASYNC;
	}

	(void) soacceptlock(so, &sa, 0);
	socket_unlock(head, 1);

	AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()), sa);

	if (uap->name) {
		socklen_t sa_len;

		/* save sa_len before it is destroyed */
		sa_len = sa->sa_len;
		namelen = MIN(namelen, sa_len);
		error = copyout(sa, uap->name, namelen);
		if (!error)
			/* return the actual, untruncated address length */
			namelen = sa_len;
		error = copyout((caddr_t)&namelen, uap->anamelen,
		    sizeof (socklen_t));
	}
	if (sa != NULL)
		FREE(sa, M_SONAME);

	/*
	 * If the socket has been marked as inactive by sosetdefunct(),
	 * disallow further operations on it.
	 */
	if (so->so_flags & SOF_DEFUNCT) {
		sodefunct(current_proc(), so,
		    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL);
	}
	socket_unlock(so, 1);

	proc_fdlock(p);
	procfdtbl_releasefd(p, newfd, NULL);
	fp_drop(p, newfd, fp, 1);
	proc_fdunlock(p);

out:
	file_drop(fd);
	return (error);
}

int
accept(struct proc *p, struct accept_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (accept_nocancel(p, (struct accept_nocancel_args *)uap,
	    retval));
}
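
/*
 * Illustrative user-space sketch (not part of the kernel build): the
 * listen/accept pattern served by accept_nocancel() above.  On a
 * non-blocking listener with an empty completion queue, the kernel's
 * EWOULDBLOCK path is what the caller observes as errno.  The
 * example_accept() helper name is hypothetical.
 *
 *	#include <sys/socket.h>
 *
 *	int
 *	example_accept(int listener)
 *	{
 *		struct sockaddr_storage ss;
 *		socklen_t sslen = sizeof (ss);
 *
 *		if (listen(listener, 5) == -1)
 *			return (-1);
 *		return (accept(listener, (struct sockaddr *)&ss, &sslen));
 *	}
 */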
/*
 * Returns:	0			Success
 *		EBADF			Bad file descriptor
 *		EALREADY		Connection already in progress
 *		EINPROGRESS		Operation in progress
 *		ECONNABORTED		Connection aborted
 *		EINTR			Interrupted function
 *		EACCES			Mandatory Access Control failure
 *	file_socket:ENOTSOCK
 *	getsockaddr:ENAMETOOLONG	Filename too long
 *	getsockaddr:EINVAL		Invalid argument
 *	getsockaddr:ENOMEM		Not enough space
 *	getsockaddr:EFAULT		Bad address
 *	soconnectlock:EOPNOTSUPP
 *	soconnectlock:EISCONN
 *	soconnectlock:???		[depends on protocol, filters]
 *
 * Imputed:	so_error		error may be set from so_error, which
 *					may have been set by soconnectlock.
 */
int
connect(struct proc *p, struct connect_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (connect_nocancel(p, (struct connect_nocancel_args *)uap,
	    retval));
}

int
connect_nocancel(proc_t p, struct connect_nocancel_args *uap,
    int32_t *retval)
{
#pragma unused(p, retval)
	struct socket *so;
	struct sockaddr_storage ss;
	struct sockaddr *sa = NULL;
	int error;
	int fd = uap->s;
	boolean_t dgram;

	AUDIT_ARG(fd, uap->s);
	error = file_socket(fd, &so);
	if (error != 0)
		return (error);
	if (so == NULL) {
		error = EBADF;
		goto out;
	}

	/*
	 * Ask getsockaddr{_s} to not translate AF_UNSPEC to AF_INET
	 * if this is a datagram socket; translate for other types.
	 */
	dgram = (so->so_type == SOCK_DGRAM);

	/* Get socket address now before we obtain socket lock */
	if (uap->namelen > sizeof (ss)) {
		error = getsockaddr(so, &sa, uap->name, uap->namelen, !dgram);
	} else {
		error = getsockaddr_s(so, &ss, uap->name, uap->namelen,
		    !dgram);
		if (error == 0)
			sa = (struct sockaddr *)&ss;
	}
	if (error != 0)
		goto out;

	error = connectit(so, sa);

	if (sa != NULL && sa != SA(&ss))
		FREE(sa, M_SONAME);
	if (error == ERESTART)
		error = EINTR;
out:
	file_drop(fd);
	return (error);
}
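
/*
 * Illustrative user-space sketch (not part of the kernel build): a
 * non-blocking connect(2); EINPROGRESS/EALREADY from the error table above
 * are exactly what such a caller has to handle.  The example_connect_nb()
 * helper name is hypothetical.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <sys/socket.h>
 *
 *	int
 *	example_connect_nb(int s, const struct sockaddr *sa, socklen_t salen)
 *	{
 *		(void) fcntl(s, F_SETFL, O_NONBLOCK);
 *		if (connect(s, sa, salen) == -1 && errno != EINPROGRESS)
 *			return (-1);
 *		return (0);	// completion observed later, e.g. via select()
 *	}
 */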
static int
connectx_nocancel(struct proc *p, struct connectx_args *uap, int *retval)
{
#pragma unused(p, retval)
	struct sockaddr_list *src_sl = NULL, *dst_sl = NULL;
	struct socket *so;
	int error, fd = uap->s;
	boolean_t dgram;
	connid_t cid = CONNID_ANY;

	AUDIT_ARG(fd, uap->s);
	error = file_socket(fd, &so);
	if (error != 0)
		return (error);
	if (so == NULL) {
		error = EBADF;
		goto out;
	}

	/*
	 * XXX Workaround to ensure connectx does not fail because
	 * of unreaped so_error.
	 */
	so->so_error = 0;

	/*
	 * Ask getsockaddr{_s} to not translate AF_UNSPEC to AF_INET
	 * if this is a datagram socket; translate for other types.
	 */
	dgram = (so->so_type == SOCK_DGRAM);

	/*
	 * Get socket address(es) now before we obtain socket lock; use
	 * sockaddr_list for src address for convenience, if present,
	 * even though it won't hold more than one.
	 */
	if (uap->src != USER_ADDR_NULL && (error = getsockaddrlist(so,
	    &src_sl, uap->src, uap->srclen, dgram)) != 0)
		goto out;

	error = getsockaddrlist(so, &dst_sl, uap->dsts, uap->dstlen, dgram);
	if (error != 0)
		goto out;

	VERIFY(dst_sl != NULL &&
	    !TAILQ_EMPTY(&dst_sl->sl_head) && dst_sl->sl_cnt > 0);

	error = connectitx(so, &src_sl, &dst_sl, p, uap->ifscope,
	    uap->aid, &cid);
	if (error == ERESTART)
		error = EINTR;

	if (uap->cid != USER_ADDR_NULL)
		(void) copyout(&cid, uap->cid, sizeof (cid));

out:
	if (src_sl != NULL)
		sockaddrlist_free(src_sl);
	if (dst_sl != NULL)
		sockaddrlist_free(dst_sl);
	file_drop(fd);
	return (error);
}

int
connectx(struct proc *p, struct connectx_args *uap, int *retval)
{
	/*
	 * Due to similarity with a POSIX interface, define as
	 * an unofficial cancellation point.
	 */
	__pthread_testcancel(1);
	return (connectx_nocancel(p, uap, retval));
}
static int
connectit(struct socket *so, struct sockaddr *sa)
{
	int error;

	AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()), sa);
#if CONFIG_MACF_SOCKET_SUBSET
	if ((error = mac_socket_check_connect(kauth_cred_get(), so, sa)) != 0)
		return (error);
#endif /* MAC_SOCKET_SUBSET */

	socket_lock(so, 1);
	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
		error = EALREADY;
		goto out;
	}
	error = soconnectlock(so, sa, 0);
	if (error != 0) {
		so->so_state &= ~SS_ISCONNECTING;
		goto out;
	}
	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
		error = EINPROGRESS;
		goto out;
	}
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		lck_mtx_t *mutex_held;

		if (so->so_proto->pr_getlock != NULL)
			mutex_held = (*so->so_proto->pr_getlock)(so, 0);
		else
			mutex_held = so->so_proto->pr_domain->dom_mtx;
		error = msleep((caddr_t)&so->so_timeo, mutex_held,
		    PSOCK | PCATCH, __func__, 0);
		if (so->so_state & SS_DRAINING) {
			error = ECONNABORTED;
		}
		if (error != 0)
			break;
	}
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
out:
	socket_unlock(so, 1);
	return (error);
}
static int
connectitx(struct socket *so, struct sockaddr_list **src_sl,
    struct sockaddr_list **dst_sl, struct proc *p, uint32_t ifscope,
    associd_t aid, connid_t *pcid)
{
	struct sockaddr_entry *se;
	int error;

	VERIFY(dst_sl != NULL && *dst_sl != NULL);

	TAILQ_FOREACH(se, &(*dst_sl)->sl_head, se_link) {
		VERIFY(se->se_addr != NULL);
		AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()),
		    se->se_addr);
#if CONFIG_MACF_SOCKET_SUBSET
		if ((error = mac_socket_check_connect(kauth_cred_get(),
		    so, se->se_addr)) != 0)
			return (error);
#endif /* MAC_SOCKET_SUBSET */
	}

	socket_lock(so, 1);
	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
		error = EALREADY;
		goto out;
	}
	error = soconnectxlocked(so, src_sl, dst_sl, p, ifscope,
	    aid, pcid, 0, NULL, 0);
	if (error != 0) {
		so->so_state &= ~SS_ISCONNECTING;
		goto out;
	}
	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
		error = EINPROGRESS;
		goto out;
	}
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		lck_mtx_t *mutex_held;

		if (so->so_proto->pr_getlock != NULL)
			mutex_held = (*so->so_proto->pr_getlock)(so, 0);
		else
			mutex_held = so->so_proto->pr_domain->dom_mtx;
		error = msleep((caddr_t)&so->so_timeo, mutex_held,
		    PSOCK | PCATCH, __func__, 0);
		if (so->so_state & SS_DRAINING) {
			error = ECONNABORTED;
		}
		if (error != 0)
			break;
	}
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
out:
	socket_unlock(so, 1);
	return (error);
}
int
peeloff(struct proc *p, struct peeloff_args *uap, int *retval)
{
	/*
	 * Due to similarity with a POSIX interface, define as
	 * an unofficial cancellation point.
	 */
	__pthread_testcancel(1);
	return (peeloff_nocancel(p, uap, retval));
}

static int
peeloff_nocancel(struct proc *p, struct peeloff_args *uap, int *retval)
{
	struct fileproc *fp;
	struct socket *mp_so, *so = NULL;
	int newfd, fd = uap->s;
	short fflag;		/* type must match fp->f_flag */
	int error;

	error = fp_getfsock(p, fd, &fp, &mp_so);
	if (error) {
		if (error == EOPNOTSUPP)
			error = ENOTSOCK;
		return (error);
	}
	if (mp_so == NULL) {
		error = EBADF;
		goto out;
	}

	socket_lock(mp_so, 1);
	error = sopeelofflocked(mp_so, uap->aid, &so);
	if (error != 0) {
		socket_unlock(mp_so, 1);
		goto out;
	}
	socket_unlock(mp_so, 0);	/* keep ref on mp_so for us */

	fflag = fp->f_flag;
	error = falloc(p, &fp, &newfd, vfs_context_current());
	if (error) {
		/* drop this socket (probably ran out of file descriptors) */
		soclose(so);
		sodereference(mp_so);	/* our mp_so ref */
		goto out;
	}

	fp->f_flag = fflag;
	fp->f_ops = &socketops;
	fp->f_data = (caddr_t)so;

	/*
	 * If the socket has been marked as inactive by sosetdefunct(),
	 * disallow further operations on it.
	 */
	if (so->so_flags & SOF_DEFUNCT) {
		sodefunct(current_proc(), so,
		    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL);
	}

	proc_fdlock(p);
	procfdtbl_releasefd(p, newfd, NULL);
	fp_drop(p, newfd, fp, 1);
	proc_fdunlock(p);

	sodereference(mp_so);		/* our mp_so ref */
	*retval = newfd;

out:
	file_drop(fd);
	return (error);
}
int
disconnectx(struct proc *p, struct disconnectx_args *uap, int *retval)
{
	/*
	 * Due to similarity with a POSIX interface, define as
	 * an unofficial cancellation point.
	 */
	__pthread_testcancel(1);
	return (disconnectx_nocancel(p, uap, retval));
}

static int
disconnectx_nocancel(struct proc *p, struct disconnectx_args *uap,
    int *retval)
{
#pragma unused(p, retval)
	struct socket *so;
	int fd = uap->s;
	int error;

	error = file_socket(fd, &so);
	if (error != 0)
		return (error);
	if (so == NULL) {
		error = EBADF;
		goto out;
	}

	error = sodisconnectx(so, uap->aid, uap->cid);
out:
	file_drop(fd);
	return (error);
}
/*
 * Returns:	0			Success
 *	socreate:EAFNOSUPPORT
 *	socreate:EPROTOTYPE
 *	socreate:EPROTONOSUPPORT
 *	socreate:???			[other protocol families, IPSEC]
 *	soconnect2:EPROTOTYPE
 *	soconnect2:???			[other protocol families]
 */
int
socketpair(struct proc *p, struct socketpair_args *uap,
    __unused int32_t *retval)
{
	struct fileproc *fp1, *fp2;
	struct socket *so1, *so2;
	int fd, error, sv[2];

	AUDIT_ARG(socket, uap->domain, uap->type, uap->protocol);
	error = socreate(uap->domain, &so1, uap->type, uap->protocol);
	if (error)
		return (error);

	error = socreate(uap->domain, &so2, uap->type, uap->protocol);
	if (error)
		goto free1;

	error = falloc(p, &fp1, &fd, vfs_context_current());
	if (error) {
		goto free2;
	}
	fp1->f_flag = FREAD|FWRITE;
	fp1->f_ops = &socketops;
	fp1->f_data = (caddr_t)so1;
	sv[0] = fd;

	error = falloc(p, &fp2, &fd, vfs_context_current());
	if (error) {
		goto free3;
	}
	fp2->f_flag = FREAD|FWRITE;
	fp2->f_ops = &socketops;
	fp2->f_data = (caddr_t)so2;
	sv[1] = fd;

	error = soconnect2(so1, so2);
	if (error) {
		goto free4;
	}
	if (uap->type == SOCK_DGRAM) {
		/*
		 * Datagram socket connection is asymmetric.
		 */
		error = soconnect2(so2, so1);
		if (error) {
			goto free4;
		}
	}

	if ((error = copyout(sv, uap->rsv, 2 * sizeof (int))) != 0)
		goto free4;

	proc_fdlock(p);
	procfdtbl_releasefd(p, sv[0], NULL);
	procfdtbl_releasefd(p, sv[1], NULL);
	fp_drop(p, sv[0], fp1, 1);
	fp_drop(p, sv[1], fp2, 1);
	proc_fdunlock(p);

	return (0);
free4:
	fp_free(p, sv[1], fp2);
free3:
	fp_free(p, sv[0], fp1);
free2:
	(void) soclose(so2);
free1:
	(void) soclose(so1);
	return (error);
}
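
/*
 * Illustrative user-space sketch (not part of the kernel build): the usual
 * socketpair(2) call serviced above; sv[] receives the two connected
 * descriptors copied out via uap->rsv.
 *
 *	#include <sys/socket.h>
 *
 *	int sv[2];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1)
 *		return (-1);
 */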
/*
 * Returns:	0			Success
 *		EACCES			Mandatory Access Control failure
 *	file_socket:ENOTSOCK
 *	getsockaddr:ENAMETOOLONG	Filename too long
 *	getsockaddr:EINVAL		Invalid argument
 *	getsockaddr:ENOMEM		Not enough space
 *	getsockaddr:EFAULT		Bad address
 *	<pru_sosend>:EACCES[TCP]
 *	<pru_sosend>:EADDRINUSE[TCP]
 *	<pru_sosend>:EADDRNOTAVAIL[TCP]
 *	<pru_sosend>:EAFNOSUPPORT[TCP]
 *	<pru_sosend>:EAGAIN[TCP]
 *	<pru_sosend>:EBADF
 *	<pru_sosend>:ECONNRESET[TCP]
 *	<pru_sosend>:EFAULT
 *	<pru_sosend>:EHOSTUNREACH[TCP]
 *	<pru_sosend>:EINTR
 *	<pru_sosend>:EINVAL
 *	<pru_sosend>:EISCONN[AF_INET]
 *	<pru_sosend>:EMSGSIZE[TCP]
 *	<pru_sosend>:ENETDOWN[TCP]
 *	<pru_sosend>:ENETUNREACH[TCP]
 *	<pru_sosend>:ENOBUFS
 *	<pru_sosend>:ENOMEM[TCP]
 *	<pru_sosend>:ENOTCONN[AF_INET]
 *	<pru_sosend>:EOPNOTSUPP
 *	<pru_sosend>:EPERM[TCP]
 *	<pru_sosend>:EPIPE
 *	<pru_sosend>:EWOULDBLOCK
 *	<pru_sosend>:???[TCP]		[ignorable: mostly IPSEC/firewall/DLIL]
 *	<pru_sosend>:???[AF_INET]	[whatever a filter author chooses]
 *	<pru_sosend>:???		[value from so_error]
 */
static int
sendit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop,
    int flags, int32_t *retval)
{
	struct mbuf *control = NULL;
	struct sockaddr_storage ss;
	struct sockaddr *to = NULL;
	boolean_t want_free = TRUE;
	int error;
	struct socket *so;
	user_ssize_t len;

	KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_START, 0, 0, 0, 0, 0);

	error = file_socket(s, &so);
	if (error) {
		KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_END, error, 0, 0, 0, 0);
		return (error);
	}
	if (so == NULL) {
		error = EBADF;
		goto out;
	}

	if (mp->msg_name != USER_ADDR_NULL) {
		if (mp->msg_namelen > sizeof (ss)) {
			error = getsockaddr(so, &to, mp->msg_name,
			    mp->msg_namelen, TRUE);
		} else {
			error = getsockaddr_s(so, &ss, mp->msg_name,
			    mp->msg_namelen, TRUE);
			if (error == 0) {
				to = (struct sockaddr *)&ss;
				want_free = FALSE;
			}
		}
		if (error != 0)
			goto out;
		AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()),
		    to);
	}
	if (mp->msg_control != USER_ADDR_NULL) {
		if (mp->msg_controllen < sizeof (struct cmsghdr)) {
			error = EINVAL;
			goto bad;
		}
		error = sockargs(&control, mp->msg_control,
		    mp->msg_controllen, MT_CONTROL);
		if (error != 0)
			goto bad;
	}

#if CONFIG_MACF_SOCKET_SUBSET
	/*
	 * We check the state without holding the socket lock;
	 * if a race condition occurs, it would simply result
	 * in an extra call to the MAC check function.
	 */
	if (to != NULL &&
	    !(so->so_state & SS_DEFUNCT) &&
	    (error = mac_socket_check_send(kauth_cred_get(), so, to)) != 0)
		goto bad;
#endif /* MAC_SOCKET_SUBSET */

	len = uio_resid(uiop);
	error = so->so_proto->pr_usrreqs->pru_sosend(so, to, uiop, 0,
	    control, flags);
	if (error != 0) {
		if (uio_resid(uiop) != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Generation of SIGPIPE can be controlled per socket */
		if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE))
			psignal(p, SIGPIPE);
	}
	if (error == 0)
		*retval = (int)(len - uio_resid(uiop));
bad:
	if (to != NULL && want_free)
		FREE(to, M_SONAME);
out:
	file_drop(s);

	KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_END, error, 0, 0, 0, 0);

	return (error);
}
/*
 * Returns:	0			Success
 *	sendit:???			[see sendit definition in this file]
 *	write:???			[4056224: applicable for pipes]
 */
int
sendto(struct proc *p, struct sendto_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (sendto_nocancel(p, (struct sendto_nocancel_args *)uap,
	    retval));
}

int
sendto_nocancel(struct proc *p,
    struct sendto_nocancel_args *uap,
    int32_t *retval)
{
	struct user_msghdr msg;
	int error;
	uio_t auio = NULL;

	KERNEL_DEBUG(DBG_FNC_SENDTO | DBG_FUNC_START, 0, 0, 0, 0, 0);
	AUDIT_ARG(fd, uap->s);

	auio = uio_create(1, 0,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_WRITE);
	if (auio == NULL) {
		return (ENOBUFS);
	}
	uio_addiov(auio, uap->buf, uap->len);

	msg.msg_name = uap->to;
	msg.msg_namelen = uap->tolen;
	/* no need to set up msg_iov.  sendit uses uio_t we send it */
	msg.msg_iov = 0;
	msg.msg_iovlen = 0;
	msg.msg_control = 0;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	error = sendit(p, uap->s, &msg, auio, uap->flags, retval);

	if (auio != NULL) {
		uio_free(auio);
	}

	KERNEL_DEBUG(DBG_FNC_SENDTO | DBG_FUNC_END, error, *retval, 0, 0, 0);

	return (error);
}
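
/*
 * Illustrative user-space sketch (not part of the kernel build): a plain
 * sendto(2).  uap->buf/uap->len become the single iovec added to the uio
 * above, and uap->to/uap->tolen become msg_name/msg_namelen.
 *
 *	#include <sys/socket.h>
 *
 *	ssize_t
 *	example_sendto(int s, const void *buf, size_t len,
 *	    const struct sockaddr *to, socklen_t tolen)
 *	{
 *		return (sendto(s, buf, len, 0, to, tolen));
 *	}
 */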
/*
 * Returns:	0			Success
 *	sendit:???			[see sendit definition in this file]
 */
int
sendmsg(struct proc *p, struct sendmsg_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (sendmsg_nocancel(p, (struct sendmsg_nocancel_args *)uap,
	    retval));
}

int
sendmsg_nocancel(struct proc *p, struct sendmsg_nocancel_args *uap,
    int32_t *retval)
{
	struct user32_msghdr msg32;
	struct user64_msghdr msg64;
	struct user_msghdr user_msg;
	caddr_t msghdrp;
	int size_of_msghdr;
	int error;
	uio_t auio = NULL;
	struct user_iovec *iovp;

	KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_START, 0, 0, 0, 0, 0);
	AUDIT_ARG(fd, uap->s);
	if (IS_64BIT_PROCESS(p)) {
		msghdrp = (caddr_t)&msg64;
		size_of_msghdr = sizeof (msg64);
	} else {
		msghdrp = (caddr_t)&msg32;
		size_of_msghdr = sizeof (msg32);
	}
	error = copyin(uap->msg, msghdrp, size_of_msghdr);
	if (error) {
		KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, error, 0, 0, 0, 0);
		return (error);
	}

	if (IS_64BIT_PROCESS(p)) {
		user_msg.msg_flags = msg64.msg_flags;
		user_msg.msg_controllen = msg64.msg_controllen;
		user_msg.msg_control = msg64.msg_control;
		user_msg.msg_iovlen = msg64.msg_iovlen;
		user_msg.msg_iov = msg64.msg_iov;
		user_msg.msg_namelen = msg64.msg_namelen;
		user_msg.msg_name = msg64.msg_name;
	} else {
		user_msg.msg_flags = msg32.msg_flags;
		user_msg.msg_controllen = msg32.msg_controllen;
		user_msg.msg_control = msg32.msg_control;
		user_msg.msg_iovlen = msg32.msg_iovlen;
		user_msg.msg_iov = msg32.msg_iov;
		user_msg.msg_namelen = msg32.msg_namelen;
		user_msg.msg_name = msg32.msg_name;
	}

	if (user_msg.msg_iovlen <= 0 || user_msg.msg_iovlen > UIO_MAXIOV) {
		KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, EMSGSIZE,
		    0, 0, 0, 0);
		return (EMSGSIZE);
	}

	/* allocate a uio large enough to hold the number of iovecs passed */
	auio = uio_create(user_msg.msg_iovlen, 0,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_WRITE);
	if (auio == NULL) {
		error = ENOBUFS;
		goto done;
	}

	if (user_msg.msg_iovlen) {
		/*
		 * get location of iovecs within the uio.
		 * then copyin the iovecs from user space.
		 */
		iovp = uio_iovsaddr(auio);
		if (iovp == NULL) {
			error = ENOBUFS;
			goto done;
		}
		error = copyin_user_iovec_array(user_msg.msg_iov,
		    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
		    user_msg.msg_iovlen, iovp);
		if (error)
			goto done;
		user_msg.msg_iov = CAST_USER_ADDR_T(iovp);

		/* finish setup of uio_t */
		error = uio_calculateresid(auio);
		if (error)
			goto done;
	} else {
		user_msg.msg_iov = 0;
	}

	/* msg_flags is ignored for send */
	user_msg.msg_flags = 0;

	error = sendit(p, uap->s, &user_msg, auio, uap->flags, retval);
done:
	if (auio != NULL)
		uio_free(auio);
	KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, error, 0, 0, 0, 0);
	return (error);
}
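
/*
 * Illustrative user-space sketch (not part of the kernel build): a gathered
 * write through sendmsg(2), which is what exercises the iovec copyin and
 * uio_calculateresid() path above.  The example_sendmsg() helper name is
 * hypothetical.
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	ssize_t
 *	example_sendmsg(int s, struct sockaddr *to, socklen_t tolen)
 *	{
 *		struct iovec iov[2];
 *		struct msghdr msg;
 *
 *		iov[0].iov_base = "hello ";
 *		iov[0].iov_len = 6;
 *		iov[1].iov_base = "world";
 *		iov[1].iov_len = 5;
 *
 *		memset(&msg, 0, sizeof (msg));
 *		msg.msg_name = to;
 *		msg.msg_namelen = tolen;
 *		msg.msg_iov = iov;
 *		msg.msg_iovlen = 2;
 *		return (sendmsg(s, &msg, 0));
 *	}
 */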
int
sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval)
{
	int error = 0;
	struct user_msghdr_x *user_msg = NULL;
	struct uio **uiop = NULL;
	struct socket *so;
	u_int i;
	struct sockaddr *to = NULL;
	struct mbuf *control = NULL;
	user_ssize_t len_before = 0, len_after;
	int need_drop = 0;
	size_t size_of_msghdr;
	void *umsgp = NULL;
	u_int uiocnt;

	KERNEL_DEBUG(DBG_FNC_SENDMSG_X | DBG_FUNC_START, 0, 0, 0, 0, 0);

	error = file_socket(uap->s, &so);
	if (error) {
		goto out;
	}
	need_drop = 1;
	if (so == NULL) {
		error = EBADF;
		goto out;
	}
	if (so->so_proto->pr_usrreqs->pru_sosend_list == NULL) {
		printf("%s no pru_sosend_list\n", __func__);
		error = EOPNOTSUPP;
		goto out;
	}

	/*
	 * Input parameter range check
	 */
	if (uap->cnt == 0 || uap->cnt > UIO_MAXIOV) {
		error = EINVAL;
		goto out;
	}
	user_msg = _MALLOC(uap->cnt * sizeof(struct user_msghdr_x),
	    M_TEMP, M_WAITOK | M_ZERO);
	if (user_msg == NULL) {
		printf("%s _MALLOC() user_msg failed\n", __func__);
		error = ENOMEM;
		goto out;
	}
	uiop = _MALLOC(uap->cnt * sizeof(struct uio *),
	    M_TEMP, M_WAITOK | M_ZERO);
	if (uiop == NULL) {
		printf("%s _MALLOC() uiop failed\n", __func__);
		error = ENOMEM;
		goto out;
	}

	size_of_msghdr = IS_64BIT_PROCESS(p) ?
	    sizeof(struct user64_msghdr_x) : sizeof(struct user32_msghdr_x);

	umsgp = _MALLOC(uap->cnt * size_of_msghdr,
	    M_TEMP, M_WAITOK | M_ZERO);
	if (umsgp == NULL) {
		printf("%s _MALLOC() user_msg failed\n", __func__);
		error = ENOMEM;
		goto out;
	}
	error = copyin(uap->msgp, umsgp, uap->cnt * size_of_msghdr);
	if (error) {
		printf("%s copyin() failed\n", __func__);
		goto out;
	}
	error = internalize_user_msghdr_array(umsgp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    UIO_WRITE, uap->cnt, user_msg, uiop);
	if (error) {
		printf("%s copyin_user_msghdr_array() failed\n", __func__);
		goto out;
	}
	/*
	 * Make sure the size of each message iovec and
	 * the aggregate size of all the iovec is valid
	 */
	if (uio_array_is_valid(uiop, uap->cnt) == 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Sanity check on passed arguments
	 */
	for (i = 0; i < uap->cnt; i++) {
		struct user_msghdr_x *mp = &user_msg[i];

		/*
		 * No flags on send message
		 */
		if (mp->msg_flags != 0) {
			error = EINVAL;
			goto out;
		}
		/*
		 * No support for address or ancillary data (yet)
		 */
		if (mp->msg_name != USER_ADDR_NULL || mp->msg_namelen != 0) {
			error = EINVAL;
			goto out;
		}
		if (mp->msg_control != USER_ADDR_NULL ||
		    mp->msg_controllen != 0) {
			error = EINVAL;
			goto out;
		}
	}
#if CONFIG_MACF_SOCKET_SUBSET
	/*
	 * We check the state without holding the socket lock;
	 * if a race condition occurs, it would simply result
	 * in an extra call to the MAC check function.
	 *
	 * Note: The following check is never true taken with the
	 * current limitation that we do not accept to pass an address,
	 * this is effectively placeholder code. If we add support for
	 * addresses, we will have to check every address.
	 */
	if (to != NULL &&
	    !(so->so_state & SS_DEFUNCT) &&
	    (error = mac_socket_check_send(kauth_cred_get(), so, to)) != 0)
		goto out;
#endif /* MAC_SOCKET_SUBSET */

	len_before = uio_array_resid(uiop, uap->cnt);

	error = so->so_proto->pr_usrreqs->pru_sosend_list(so, to, uiop,
	    uap->cnt, 0, control, uap->flags);

	len_after = uio_array_resid(uiop, uap->cnt);

	if (error != 0) {
		if (len_after != len_before && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Generation of SIGPIPE can be controlled per socket */
		if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE))
			psignal(p, SIGPIPE);
	}
	if (error == 0) {
		uiocnt = externalize_user_msghdr_array(umsgp,
		    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
		    UIO_WRITE, uap->cnt, user_msg, uiop);

		*retval = (int)(uiocnt);
	}
out:
	if (need_drop)
		file_drop(uap->s);
	if (umsgp != NULL)
		_FREE(umsgp, M_TEMP);
	if (uiop != NULL) {
		free_uio_array(uiop, uap->cnt);
		_FREE(uiop, M_TEMP);
	}
	if (user_msg != NULL)
		_FREE(user_msg, M_TEMP);

	KERNEL_DEBUG(DBG_FNC_SENDMSG_X | DBG_FUNC_END, error, 0, 0, 0, 0);

	return (error);
}
/*
 * Returns:	0			Success
 *		EACCES			Mandatory Access Control failure
 *	<pru_soreceive>:ENOBUFS
 *	<pru_soreceive>:ENOTCONN
 *	<pru_soreceive>:EWOULDBLOCK
 *	<pru_soreceive>:EFAULT
 *	<pru_soreceive>:EINTR
 *	<pru_soreceive>:EBADF
 *	<pru_soreceive>:EINVAL
 *	<pru_soreceive>:EMSGSIZE
 *	<pru_soreceive>:???
 *
 * Notes:	Additional return values from calls through <pru_soreceive>
 *		depend on protocols other than TCP or AF_UNIX, which are
 *		documented above.
 */
static int
recvit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop,
    user_addr_t namelenp, int32_t *retval)
{
	ssize_t len;
	int error;
	struct mbuf *m, *control = 0;
	user_addr_t ctlbuf;
	struct socket *so;
	struct sockaddr *fromsa = 0;
	struct fileproc *fp;

	KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_START, 0, 0, 0, 0, 0);

	if ((error = fp_lookup(p, s, &fp, 1))) {
		KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, error, 0, 0, 0, 0);
		return (error);
	}
	if (fp->f_type != DTYPE_SOCKET) {
		fp_drop(p, s, fp, 1);
		return (ENOTSOCK);
	}

	so = (struct socket *)fp->f_data;
	if (so == NULL) {
		fp_drop(p, s, fp, 1);
		return (EBADF);
	}

#if CONFIG_MACF_SOCKET_SUBSET
	/*
	 * We check the state without holding the socket lock;
	 * if a race condition occurs, it would simply result
	 * in an extra call to the MAC check function.
	 */
	if (!(so->so_state & SS_DEFUNCT) &&
	    !(so->so_state & SS_ISCONNECTED) &&
	    !(so->so_proto->pr_flags & PR_CONNREQUIRED) &&
	    (error = mac_socket_check_receive(kauth_cred_get(), so)) != 0)
		goto out1;
#endif /* MAC_SOCKET_SUBSET */
	if (uio_resid(uiop) < 0) {
		KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, EINVAL, 0, 0, 0, 0);
		error = EINVAL;
		goto out1;
	}

	len = uio_resid(uiop);
	error = so->so_proto->pr_usrreqs->pru_soreceive(so, &fromsa, uiop,
	    (struct mbuf **)0, mp->msg_control ? &control : (struct mbuf **)0,
	    &mp->msg_flags);
	if (fromsa)
		AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()),
		    fromsa);
	if (error) {
		if (uio_resid(uiop) != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	if (error)
		goto out;

	*retval = len - uio_resid(uiop);

	if (mp->msg_name) {
		socklen_t sa_len = 0;

		len = mp->msg_namelen;
		if (len <= 0 || fromsa == 0) {
			len = 0;
		} else {
#ifndef	MIN
#define	MIN(a, b) ((a) > (b) ? (b) : (a))
#endif
			sa_len = fromsa->sa_len;
			len = MIN((unsigned int)len, sa_len);
			error = copyout(fromsa, mp->msg_name, (unsigned)len);
			if (error)
				goto out;
		}
		mp->msg_namelen = sa_len;
		/* return the actual, untruncated address length */
		if (namelenp &&
		    (error = copyout((caddr_t)&sa_len, namelenp,
		    sizeof (int)))) {
			goto out;
		}
	}

	if (mp->msg_control) {
		len = mp->msg_controllen;
		m = control;
		mp->msg_controllen = 0;
		ctlbuf = mp->msg_control;

		while (m && len > 0) {
			unsigned int tocopy;
			struct cmsghdr *cp = mtod(m, struct cmsghdr *);
			int cp_size = CMSG_ALIGN(cp->cmsg_len);
			int buflen = m->m_len;

			while (buflen > 0 && len > 0) {
				/*
				 * SCM_TIMESTAMP hack because struct timeval
				 * has a different size for 32-bit and 64-bit
				 * processes.
				 */
				if (cp->cmsg_level == SOL_SOCKET &&
				    cp->cmsg_type == SCM_TIMESTAMP) {
					unsigned char tmp_buffer[CMSG_SPACE(sizeof(struct user64_timeval))];
					struct cmsghdr *tmp_cp = (struct cmsghdr *)(void *)tmp_buffer;
					int tmp_space;
					struct timeval *tv = (struct timeval *)(void *)CMSG_DATA(cp);

					tmp_cp->cmsg_level = SOL_SOCKET;
					tmp_cp->cmsg_type = SCM_TIMESTAMP;

					if (proc_is64bit(p)) {
						struct user64_timeval *tv64 = (struct user64_timeval *)(void *)CMSG_DATA(tmp_cp);

						tv64->tv_sec = tv->tv_sec;
						tv64->tv_usec = tv->tv_usec;

						tmp_cp->cmsg_len = CMSG_LEN(sizeof(struct user64_timeval));
						tmp_space = CMSG_SPACE(sizeof(struct user64_timeval));
					} else {
						struct user32_timeval *tv32 = (struct user32_timeval *)(void *)CMSG_DATA(tmp_cp);

						tv32->tv_sec = tv->tv_sec;
						tv32->tv_usec = tv->tv_usec;

						tmp_cp->cmsg_len = CMSG_LEN(sizeof(struct user32_timeval));
						tmp_space = CMSG_SPACE(sizeof(struct user32_timeval));
					}
					if (len >= tmp_space) {
						tocopy = tmp_space;
					} else {
						mp->msg_flags |= MSG_CTRUNC;
						tocopy = len;
					}
					error = copyout(tmp_buffer, ctlbuf,
					    tocopy);
					if (error)
						goto out;
				} else {
					if (cp_size > buflen) {
						panic("cp_size > buflen, something wrong with alignment!");
					}
					if (len >= cp_size) {
						tocopy = cp_size;
					} else {
						mp->msg_flags |= MSG_CTRUNC;
						tocopy = len;
					}
					error = copyout((caddr_t) cp, ctlbuf,
					    tocopy);
					if (error)
						goto out;
				}

				ctlbuf += tocopy;
				len -= tocopy;

				buflen -= cp_size;
				cp = (struct cmsghdr *)(void *)
				    ((unsigned char *) cp + cp_size);
				cp_size = CMSG_ALIGN(cp->cmsg_len);
			}

			m = m->m_next;
		}
		mp->msg_controllen = ctlbuf - mp->msg_control;
	}
out:
	if (fromsa)
		FREE(fromsa, M_SONAME);
	if (control)
		m_freem(control);
	KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, error, 0, 0, 0, 0);
out1:
	fp_drop(p, s, fp, 0);
	return (error);
}
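
/*
 * Illustrative user-space sketch (not part of the kernel build): reading
 * the SCM_TIMESTAMP control message whose 32/64-bit layout the loop above
 * normalizes.  SO_TIMESTAMP must have been enabled on the socket first; the
 * example_recv_timestamp() helper name is hypothetical.
 *
 *	#include <sys/socket.h>
 *	#include <sys/time.h>
 *	#include <string.h>
 *
 *	int
 *	example_recv_timestamp(int s, void *buf, size_t len,
 *	    struct timeval *tv)
 *	{
 *		char cbuf[CMSG_SPACE(sizeof (struct timeval))];
 *		struct iovec iov = { buf, len };
 *		struct msghdr msg;
 *		struct cmsghdr *cm;
 *
 *		memset(&msg, 0, sizeof (msg));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof (cbuf);
 *		if (recvmsg(s, &msg, 0) == -1)
 *			return (-1);
 *		for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *		    cm = CMSG_NXTHDR(&msg, cm)) {
 *			if (cm->cmsg_level == SOL_SOCKET &&
 *			    cm->cmsg_type == SCM_TIMESTAMP)
 *				memcpy(tv, CMSG_DATA(cm), sizeof (*tv));
 *		}
 *		return (0);
 *	}
 */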
/*
 * Returns:	0			Success
 *	read:???			[4056224: applicable for pipes]
 *
 * Notes:	The read entry point is only called as part of support for
 *		binary backward compatibility; new code should use read
 *		instead of recv or recvfrom when attempting to read data.
 *
 *		For full documentation of the return codes from recvit, see
 *		the block header for the recvit function.
 */
int
recvfrom(struct proc *p, struct recvfrom_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (recvfrom_nocancel(p, (struct recvfrom_nocancel_args *)uap,
	    retval));
}

int
recvfrom_nocancel(struct proc *p, struct recvfrom_nocancel_args *uap,
    int32_t *retval)
{
	struct user_msghdr msg;
	int error;
	uio_t auio = NULL;

	KERNEL_DEBUG(DBG_FNC_RECVFROM | DBG_FUNC_START, 0, 0, 0, 0, 0);
	AUDIT_ARG(fd, uap->s);

	if (uap->fromlenaddr) {
		error = copyin(uap->fromlenaddr,
		    (caddr_t)&msg.msg_namelen, sizeof (msg.msg_namelen));
		if (error)
			return (error);
	} else {
		msg.msg_namelen = 0;
	}
	msg.msg_name = uap->from;
	auio = uio_create(1, 0,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_READ);
	if (auio == NULL) {
		return (ENOBUFS);
	}

	uio_addiov(auio, uap->buf, uap->len);
	/* no need to set up msg_iov.  recvit uses uio_t we send it */
	msg.msg_iov = 0;
	msg.msg_iovlen = 0;
	msg.msg_control = 0;
	msg.msg_controllen = 0;
	msg.msg_flags = uap->flags;
	error = recvit(p, uap->s, &msg, auio, uap->fromlenaddr, retval);
	if (auio != NULL) {
		uio_free(auio);
	}

	KERNEL_DEBUG(DBG_FNC_RECVFROM | DBG_FUNC_END, error, 0, 0, 0, 0);

	return (error);
}
/*
 * Returns:	0			Success
 *
 * Notes:	For full documentation of the return codes from recvit, see
 *		the block header for the recvit function.
 */
int
recvmsg(struct proc *p, struct recvmsg_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (recvmsg_nocancel(p, (struct recvmsg_nocancel_args *)uap,
	    retval));
}

int
recvmsg_nocancel(struct proc *p, struct recvmsg_nocancel_args *uap,
    int32_t *retval)
{
	struct user32_msghdr msg32;
	struct user64_msghdr msg64;
	struct user_msghdr user_msg;
	caddr_t msghdrp;
	int size_of_msghdr;
	user_addr_t uiov;
	int error;
	uio_t auio = NULL;
	struct user_iovec *iovp;

	KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_START, 0, 0, 0, 0, 0);
	AUDIT_ARG(fd, uap->s);
	if (IS_64BIT_PROCESS(p)) {
		msghdrp = (caddr_t)&msg64;
		size_of_msghdr = sizeof (msg64);
	} else {
		msghdrp = (caddr_t)&msg32;
		size_of_msghdr = sizeof (msg32);
	}
	error = copyin(uap->msg, msghdrp, size_of_msghdr);
	if (error) {
		KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_END, error, 0, 0, 0, 0);
		return (error);
	}

	/* only need to copy if user process is not 64-bit */
	if (IS_64BIT_PROCESS(p)) {
		user_msg.msg_flags = msg64.msg_flags;
		user_msg.msg_controllen = msg64.msg_controllen;
		user_msg.msg_control = msg64.msg_control;
		user_msg.msg_iovlen = msg64.msg_iovlen;
		user_msg.msg_iov = msg64.msg_iov;
		user_msg.msg_namelen = msg64.msg_namelen;
		user_msg.msg_name = msg64.msg_name;
	} else {
		user_msg.msg_flags = msg32.msg_flags;
		user_msg.msg_controllen = msg32.msg_controllen;
		user_msg.msg_control = msg32.msg_control;
		user_msg.msg_iovlen = msg32.msg_iovlen;
		user_msg.msg_iov = msg32.msg_iov;
		user_msg.msg_namelen = msg32.msg_namelen;
		user_msg.msg_name = msg32.msg_name;
	}

	if (user_msg.msg_iovlen <= 0 || user_msg.msg_iovlen > UIO_MAXIOV) {
		KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_END, EMSGSIZE,
		    0, 0, 0, 0);
		return (EMSGSIZE);
	}

	user_msg.msg_flags = uap->flags;

	/* allocate a uio large enough to hold the number of iovecs passed */
	auio = uio_create(user_msg.msg_iovlen, 0,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    UIO_READ);
	if (auio == NULL) {
		error = ENOMEM;
		goto done;
	}

	/*
	 * get location of iovecs within the uio.  then copyin the iovecs from
	 * user space.
	 */
	iovp = uio_iovsaddr(auio);
	if (iovp == NULL) {
		error = ENOMEM;
		goto done;
	}
	uiov = user_msg.msg_iov;
	user_msg.msg_iov = CAST_USER_ADDR_T(iovp);
	error = copyin_user_iovec_array(uiov,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    user_msg.msg_iovlen, iovp);
	if (error)
		goto done;

	/* finish setup of uio_t */
	error = uio_calculateresid(auio);
	if (error)
		goto done;

	error = recvit(p, uap->s, &user_msg, auio, 0, retval);
	if (!error) {
		user_msg.msg_iov = uiov;

		if (IS_64BIT_PROCESS(p)) {
			msg64.msg_flags = user_msg.msg_flags;
			msg64.msg_controllen = user_msg.msg_controllen;
			msg64.msg_control = user_msg.msg_control;
			msg64.msg_iovlen = user_msg.msg_iovlen;
			msg64.msg_iov = user_msg.msg_iov;
			msg64.msg_namelen = user_msg.msg_namelen;
			msg64.msg_name = user_msg.msg_name;
		} else {
			msg32.msg_flags = user_msg.msg_flags;
			msg32.msg_controllen = user_msg.msg_controllen;
			msg32.msg_control = user_msg.msg_control;
			msg32.msg_iovlen = user_msg.msg_iovlen;
			msg32.msg_iov = user_msg.msg_iov;
			msg32.msg_namelen = user_msg.msg_namelen;
			msg32.msg_name = user_msg.msg_name;
		}
		error = copyout(msghdrp, uap->msg, size_of_msghdr);
	}
done:
	if (auio != NULL)
		uio_free(auio);
	KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_END, error, 0, 0, 0, 0);
	return (error);
}
int
recvmsg_x(struct proc *p, struct recvmsg_x_args *uap, user_ssize_t *retval)
{
	int error = EOPNOTSUPP;
	struct user_msghdr_x *user_msg = NULL;
	struct uio **uiop = NULL;
	struct socket *so;
	user_ssize_t len_before = 0, len_after;
	int need_drop = 0;
	size_t size_of_msghdr;
	void *umsgp = NULL;
	u_int i;
	u_int uiocnt;

	KERNEL_DEBUG(DBG_FNC_RECVMSG_X | DBG_FUNC_START, 0, 0, 0, 0, 0);

	error = file_socket(uap->s, &so);
	if (error) {
		goto out;
	}
	need_drop = 1;
	if (so == NULL) {
		error = EBADF;
		goto out;
	}
	if (so->so_proto->pr_usrreqs->pru_soreceive_list == NULL) {
		printf("%s no pru_soreceive_list\n", __func__);
		error = EOPNOTSUPP;
		goto out;
	}

	/*
	 * Input parameter range check
	 */
	if (uap->cnt == 0 || uap->cnt > UIO_MAXIOV) {
		error = EINVAL;
		goto out;
	}
	user_msg = _MALLOC(uap->cnt * sizeof(struct user_msghdr_x),
	    M_TEMP, M_WAITOK | M_ZERO);
	if (user_msg == NULL) {
		printf("%s _MALLOC() user_msg failed\n", __func__);
		error = ENOMEM;
		goto out;
	}
	uiop = _MALLOC(uap->cnt * sizeof(struct uio *),
	    M_TEMP, M_WAITOK | M_ZERO);
	if (uiop == NULL) {
		printf("%s _MALLOC() uiop failed\n", __func__);
		error = ENOMEM;
		goto out;
	}

	size_of_msghdr = IS_64BIT_PROCESS(p) ?
	    sizeof(struct user64_msghdr_x) : sizeof(struct user32_msghdr_x);

	umsgp = _MALLOC(uap->cnt * size_of_msghdr, M_TEMP, M_WAITOK | M_ZERO);
	if (umsgp == NULL) {
		printf("%s _MALLOC() user_msg failed\n", __func__);
		error = ENOMEM;
		goto out;
	}
	error = copyin(uap->msgp, umsgp, uap->cnt * size_of_msghdr);
	if (error) {
		printf("%s copyin() failed\n", __func__);
		goto out;
	}
	error = internalize_user_msghdr_array(umsgp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    UIO_READ, uap->cnt, user_msg, uiop);
	if (error) {
		printf("%s copyin_user_msghdr_array() failed\n", __func__);
		goto out;
	}
	/*
	 * Make sure the size of each message iovec and
	 * the aggregate size of all the iovec is valid
	 */
	if (uio_array_is_valid(uiop, uap->cnt) == 0) {
		error = EINVAL;
		goto out;
	}
	/*
	 * Sanity check on passed arguments
	 */
	for (i = 0; i < uap->cnt; i++) {
		struct user_msghdr_x *mp = &user_msg[i];

		if (mp->msg_flags != 0) {
			error = EINVAL;
			goto out;
		}
		/*
		 * No support for address or ancillary data (yet)
		 */
		if (mp->msg_name != USER_ADDR_NULL || mp->msg_namelen != 0) {
			error = EINVAL;
			goto out;
		}
		if (mp->msg_control != USER_ADDR_NULL ||
		    mp->msg_controllen != 0) {
			error = EINVAL;
			goto out;
		}
	}
#if CONFIG_MACF_SOCKET_SUBSET
	/*
	 * We check the state without holding the socket lock;
	 * if a race condition occurs, it would simply result
	 * in an extra call to the MAC check function.
	 */
	if (!(so->so_state & SS_DEFUNCT) &&
	    !(so->so_state & SS_ISCONNECTED) &&
	    !(so->so_proto->pr_flags & PR_CONNREQUIRED) &&
	    (error = mac_socket_check_receive(kauth_cred_get(), so)) != 0)
		goto out;
#endif /* MAC_SOCKET_SUBSET */

	len_before = uio_array_resid(uiop, uap->cnt);

	error = so->so_proto->pr_usrreqs->pru_soreceive_list(so, NULL, uiop,
	    uap->cnt, (struct mbuf **)0, NULL, NULL);

	len_after = uio_array_resid(uiop, uap->cnt);

	if (error) {
		if (len_after != len_before && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		else
			goto out;
	}

	uiocnt = externalize_user_msghdr_array(umsgp,
	    IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32,
	    UIO_READ, uap->cnt, user_msg, uiop);

	error = copyout(umsgp, uap->msgp, uap->cnt * size_of_msghdr);
	if (error) {
		printf("%s copyout() failed\n", __func__);
		goto out;
	}
	*retval = (int)(uiocnt);
out:
	if (need_drop)
		file_drop(uap->s);
	if (umsgp != NULL)
		_FREE(umsgp, M_TEMP);
	if (uiop != NULL) {
		free_uio_array(uiop, uap->cnt);
		_FREE(uiop, M_TEMP);
	}
	if (user_msg != NULL)
		_FREE(user_msg, M_TEMP);

	KERNEL_DEBUG(DBG_FNC_RECVMSG_X | DBG_FUNC_END, error, 0, 0, 0, 0);

	return (error);
}
/*
 * Returns:	0			Success
 *	file_socket:ENOTSOCK
 *	soshutdown:ENOTCONN
 *	soshutdown:EADDRNOTAVAIL[TCP]
 *	soshutdown:ENOBUFS[TCP]
 *	soshutdown:EMSGSIZE[TCP]
 *	soshutdown:EHOSTUNREACH[TCP]
 *	soshutdown:ENETUNREACH[TCP]
 *	soshutdown:ENETDOWN[TCP]
 *	soshutdown:ENOMEM[TCP]
 *	soshutdown:EACCES[TCP]
 *	soshutdown:EMSGSIZE[TCP]
 *	soshutdown:ENOBUFS[TCP]
 *	soshutdown:???[TCP]		[ignorable: mostly IPSEC/firewall/DLIL]
 *	soshutdown:???			[other protocol families]
 */
/* ARGSUSED */
int
shutdown(__unused struct proc *p, struct shutdown_args *uap,
    __unused int32_t *retval)
{
	struct socket *so;
	int error;

	AUDIT_ARG(fd, uap->s);
	error = file_socket(uap->s, &so);
	if (error)
		return (error);
	if (so == NULL) {
		error = EBADF;
		goto out;
	}
	error = soshutdown((struct socket *)so, uap->how);
out:
	file_drop(uap->s);
	return (error);
}
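
/*
 * Illustrative user-space sketch (not part of the kernel build): a
 * half-close via shutdown(2), which maps to soshutdown() above with
 * uap->how == SHUT_WR.
 *
 *	#include <sys/socket.h>
 *
 *	if (shutdown(s, SHUT_WR) == -1)
 *		return (-1);
 */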
/*
 * Returns:	0			Success
 *		EACCES			Mandatory Access Control failure
 *	file_socket:ENOTSOCK
 *	sosetopt:ENOPROTOOPT
 *	sosetopt:EOPNOTSUPP[AF_UNIX]
 */
/* ARGSUSED */
int
setsockopt(struct proc *p, struct setsockopt_args *uap,
    __unused int32_t *retval)
{
	struct socket *so;
	struct sockopt sopt;
	int error;

	AUDIT_ARG(fd, uap->s);
	if (uap->val == 0 && uap->valsize != 0)
		return (EFAULT);
	/* No bounds checking on size (it's unsigned) */

	error = file_socket(uap->s, &so);
	if (error)
		return (error);

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_val = uap->val;
	sopt.sopt_valsize = uap->valsize;
	sopt.sopt_p = p;

	if (so == NULL) {
		error = EINVAL;
		goto out;
	}
#if CONFIG_MACF_SOCKET_SUBSET
	if ((error = mac_socket_check_setsockopt(kauth_cred_get(), so,
	    &sopt)) != 0)
		goto out;
#endif /* MAC_SOCKET_SUBSET */
	error = sosetoptlock(so, &sopt, 1);	/* will lock socket */
out:
	file_drop(uap->s);
	return (error);
}
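
/*
 * Illustrative user-space sketch (not part of the kernel build): setting
 * SO_NOSIGPIPE, the per-socket switch consulted by sendit() above before it
 * raises SIGPIPE on EPIPE.
 *
 *	#include <sys/socket.h>
 *
 *	int on = 1;
 *
 *	if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof (on)) == -1)
 *		return (-1);
 */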
/*
 * Returns:	0			Success
 *		EACCES			Mandatory Access Control failure
 *	file_socket:ENOTSOCK
 */
int
getsockopt(struct proc *p, struct getsockopt_args *uap,
    __unused int32_t *retval)
{
	int error;
	socklen_t valsize;
	struct sockopt sopt;
	struct socket *so;

	error = file_socket(uap->s, &so);
	if (error)
		return (error);
	if (uap->val) {
		error = copyin(uap->avalsize, (caddr_t)&valsize,
		    sizeof (valsize));
		if (error)
			goto out;
		/* No bounds checking on size (it's unsigned) */
	} else {
		valsize = 0;
	}
	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_val = uap->val;
	sopt.sopt_valsize = (size_t)valsize;	/* checked non-negative above */
	sopt.sopt_p = p;

	if (so == NULL) {
		error = EBADF;
		goto out;
	}
#if CONFIG_MACF_SOCKET_SUBSET
	if ((error = mac_socket_check_getsockopt(kauth_cred_get(), so,
	    &sopt)) != 0)
		goto out;
#endif /* MAC_SOCKET_SUBSET */
	error = sogetoptlock((struct socket *)so, &sopt, 1);	/* will lock */
	if (error == 0) {
		valsize = sopt.sopt_valsize;
		error = copyout((caddr_t)&valsize, uap->avalsize,
		    sizeof (valsize));
	}
out:
	file_drop(uap->s);
	return (error);
}
/*
 * Returns:	0			Success
 *	file_socket:ENOTSOCK
 *	<pru_sockaddr>:ENOBUFS[TCP]
 *	<pru_sockaddr>:ECONNRESET[TCP]
 *	<pru_sockaddr>:EINVAL[AF_UNIX]
 *	<sf_getsockname>:???
 */
/* ARGSUSED */
int
getsockname(__unused struct proc *p, struct getsockname_args *uap,
    __unused int32_t *retval)
{
	struct socket *so;
	struct sockaddr *sa;
	socklen_t len;
	socklen_t sa_len;
	int error;

	error = file_socket(uap->fdes, &so);
	if (error)
		return (error);
	if (so == NULL) {
		error = EBADF;
		goto out;
	}
	error = copyin(uap->alen, (caddr_t)&len, sizeof (socklen_t));
	if (error)
		goto out;

	sa = 0;
	socket_lock(so, 1);
	error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &sa);
	if (error == 0) {
		error = sflt_getsockname(so, &sa);
		if (error == EJUSTRETURN)
			error = 0;
	}
	socket_unlock(so, 1);
	if (error)
		goto bad;
	if (sa == 0) {
		len = 0;
		goto gotnothing;
	}

	sa_len = sa->sa_len;
	len = MIN(len, sa_len);
	error = copyout((caddr_t)sa, uap->asa, len);
	if (error)
		goto bad;
	/* return the actual, untruncated address length */
	len = sa_len;
gotnothing:
	error = copyout((caddr_t)&len, uap->alen, sizeof (socklen_t));
bad:
	if (sa)
		FREE(sa, M_SONAME);
out:
	file_drop(uap->fdes);
	return (error);
}
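
/*
 * Illustrative user-space sketch (not part of the kernel build): reading
 * back the locally bound address; as in the kernel code above, the
 * returned length may exceed the supplied buffer, in which case the
 * copied-out address was truncated.
 *
 *	#include <sys/socket.h>
 *
 *	struct sockaddr_storage ss;
 *	socklen_t len = sizeof (ss);
 *
 *	if (getsockname(s, (struct sockaddr *)&ss, &len) == -1)
 *		return (-1);
 */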
/*
 * Get name of peer for connected socket.
 *
 * Returns:	0			Success
 *	file_socket:ENOTSOCK
 *	<pru_peeraddr>:???
 *	<sf_getpeername>:???
 */
/* ARGSUSED */
int
getpeername(__unused struct proc *p, struct getpeername_args *uap,
    __unused int32_t *retval)
{
	struct socket *so;
	struct sockaddr *sa;
	socklen_t len;
	socklen_t sa_len;
	int error;

	error = file_socket(uap->fdes, &so);
	if (error)
		return (error);
	if (so == NULL) {
		error = EBADF;
		goto out;
	}

	socket_lock(so, 1);

	if ((so->so_state & (SS_CANTRCVMORE | SS_CANTSENDMORE)) ==
	    (SS_CANTRCVMORE | SS_CANTSENDMORE)) {
		/* the socket has been shutdown, no more getpeername's */
		socket_unlock(so, 1);
		error = EINVAL;
		goto out;
	}

	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		socket_unlock(so, 1);
		error = ENOTCONN;
		goto out;
	}
	error = copyin(uap->alen, (caddr_t)&len, sizeof (socklen_t));
	if (error) {
		socket_unlock(so, 1);
		goto out;
	}

	sa = 0;
	error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, &sa);
	if (error == 0) {
		error = sflt_getpeername(so, &sa);
		if (error == EJUSTRETURN)
			error = 0;
	}
	socket_unlock(so, 1);
	if (error)
		goto bad;
	if (sa == 0) {
		len = 0;
		goto gotnothing;
	}

	sa_len = sa->sa_len;
	len = MIN(len, sa_len);
	error = copyout(sa, uap->asa, len);
	if (error)
		goto bad;
	/* return the actual, untruncated address length */
	len = sa_len;
gotnothing:
	error = copyout((caddr_t)&len, uap->alen, sizeof (socklen_t));
bad:
	if (sa) FREE(sa, M_SONAME);
out:
	file_drop(uap->fdes);
	return (error);
}
int
sockargs(struct mbuf **mp, user_addr_t data, int buflen, int type)
{
	struct sockaddr *sa;
	struct mbuf *m;
	int error;
	size_t alloc_buflen = (size_t)buflen;

	if (alloc_buflen > INT_MAX/2)
		return (EINVAL);

	/*
	 * The fd's in the buffer must expand to be pointers,
	 * thus we need twice as much space.
	 */
	if (type == MT_CONTROL)
		alloc_buflen = ((buflen - sizeof(struct cmsghdr)) * 2) +
		    sizeof(struct cmsghdr);

	if (alloc_buflen > MLEN) {
		if (type == MT_SONAME && alloc_buflen <= 112)
			alloc_buflen = MLEN;	/* unix domain compat. hack */
		else if (alloc_buflen > MCLBYTES)
			return (EINVAL);
	}
	m = m_get(M_WAIT, type);
	if (m == NULL)
		return (ENOBUFS);
	if (alloc_buflen > MLEN) {
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (ENOBUFS);
		}
	}
	/*
	 * K64: We still copyin the original buflen because it gets expanded
	 * later and we lie about the size of the mbuf because it only
	 * affects unp_* functions.
	 */
	m->m_len = buflen;
	error = copyin(data, mtod(m, caddr_t), (u_int)buflen);
	if (error) {
		(void) m_free(m);
	} else {
		*mp = m;
		if (type == MT_SONAME) {
			sa = mtod(m, struct sockaddr *);
			sa->sa_len = buflen;
		}
	}
	return (error);
}
/*
 * Given a user_addr_t of length len, allocate and fill out a *sa.
 *
 * Returns:	0			Success
 *		ENAMETOOLONG		Filename too long
 *		EINVAL			Invalid argument
 *		ENOMEM			Not enough space
 *		copyin:EFAULT		Bad address
 */
static int
getsockaddr(struct socket *so, struct sockaddr **namp, user_addr_t uaddr,
    size_t len, boolean_t translate_unspec)
{
	struct sockaddr *sa;
	int error;

	if (len > SOCK_MAXADDRLEN)
		return (ENAMETOOLONG);

	if (len < offsetof(struct sockaddr, sa_data[0]))
		return (EINVAL);

	MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK | M_ZERO);
	if (sa == NULL) {
		return (ENOMEM);
	}
	error = copyin(uaddr, (caddr_t)sa, len);
	if (error) {
		FREE(sa, M_SONAME);
	} else {
		/*
		 * Force sa_family to AF_INET on AF_INET sockets to handle
		 * legacy applications that use AF_UNSPEC (0).  On all other
		 * sockets we leave it unchanged and let the lower layer
		 * handle it.
		 */
		if (translate_unspec && sa->sa_family == AF_UNSPEC &&
		    SOCK_CHECK_DOM(so, PF_INET) &&
		    len == sizeof (struct sockaddr_in))
			sa->sa_family = AF_INET;

		sa->sa_len = len;
		*namp = sa;
	}
	return (error);
}
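
/*
 * Illustrative sketch (not from the original sources): the kind of legacy
 * caller the AF_UNSPEC translation above exists for -- an AF_INET socket
 * handed a sockaddr_in whose family field was left as 0 (AF_UNSPEC).
 *
 *	struct sockaddr_in sin;
 *
 *	memset(&sin, 0, sizeof (sin));	// sin_family stays AF_UNSPEC (0)
 *	sin.sin_len = sizeof (sin);
 *	sin.sin_port = htons(80);
 *	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	// connect(s, (struct sockaddr *)&sin, sizeof (sin)) still works on
 *	// a PF_INET socket because getsockaddr() rewrites the family to
 *	// AF_INET.
 */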
static int
getsockaddr_s(struct socket *so, struct sockaddr_storage *ss,
    user_addr_t uaddr, size_t len, boolean_t translate_unspec)
{
	int error;

	if (ss == NULL || uaddr == USER_ADDR_NULL ||
	    len < offsetof(struct sockaddr, sa_data[0]))
		return (EINVAL);

	/*
	 * sockaddr_storage size is less than SOCK_MAXADDRLEN,
	 * so the check here is inclusive.
	 */
	if (len > sizeof (*ss))
		return (ENAMETOOLONG);

	bzero(ss, sizeof (*ss));
	error = copyin(uaddr, (caddr_t)ss, len);
	if (error == 0) {
		/*
		 * Force sa_family to AF_INET on AF_INET sockets to handle
		 * legacy applications that use AF_UNSPEC (0).  On all other
		 * sockets we leave it unchanged and let the lower layer
		 * handle it.
		 */
		if (translate_unspec && ss->ss_family == AF_UNSPEC &&
		    SOCK_CHECK_DOM(so, PF_INET) &&
		    len == sizeof (struct sockaddr_in))
			ss->ss_family = AF_INET;

		ss->ss_len = len;
	}
	return (error);
}
/*
 * Hard limit on the number of source and/or destination addresses
 * that can be specified by an application.
 */
#define	SOCKADDRLIST_MAX_ENTRIES	64

static int
getsockaddrlist(struct socket *so, struct sockaddr_list **slp,
    user_addr_t uaddr, socklen_t uaddrlen, boolean_t xlate_unspec)
{
	struct sockaddr_list *sl;
	int error = 0;

	*slp = NULL;

	if (uaddr == USER_ADDR_NULL || uaddrlen == 0)
		return (EINVAL);

	sl = sockaddrlist_alloc(M_WAITOK);
	if (sl == NULL)
		return (ENOMEM);

	VERIFY(sl->sl_cnt == 0);
	while (uaddrlen > 0 && sl->sl_cnt < SOCKADDRLIST_MAX_ENTRIES) {
		struct sockaddr_storage ss;
		struct sockaddr_entry *se;
		struct sockaddr *sa;

		if (uaddrlen < sizeof (struct sockaddr)) {
			error = EINVAL;
			break;
		}

		bzero(&ss, sizeof (ss));
		error = copyin(uaddr, (caddr_t)&ss, sizeof (struct sockaddr));
		if (error != 0)
			break;

		/* getsockaddr does the same but we need them now */
		if (uaddrlen < ss.ss_len ||
		    ss.ss_len < offsetof(struct sockaddr, sa_data[0])) {
			error = EINVAL;
			break;
		} else if (ss.ss_len > sizeof (ss)) {
			/*
			 * sockaddr_storage size is less than SOCK_MAXADDRLEN,
			 * so the check here is inclusive.  We could use the
			 * latter instead, but seems like an overkill for now.
			 */
			error = ENAMETOOLONG;
			break;
		}

		se = sockaddrentry_alloc(M_WAITOK);
		if (se == NULL)
			break;

		sockaddrlist_insert(sl, se);

		error = getsockaddr(so, &sa, uaddr, ss.ss_len, xlate_unspec);
		if (error != 0)
			break;

		VERIFY(sa != NULL && sa->sa_len == ss.ss_len);
		se->se_addr = sa;

		VERIFY(((signed)uaddrlen - ss.ss_len) >= 0);
		uaddrlen -= ss.ss_len;
		uaddr += ss.ss_len;
	}

	if (error != 0)
		sockaddrlist_free(sl);
	else
		*slp = sl;

	return (error);
}
static int
internalize_user_msghdr_array(const void *src, int spacetype, int direction,
    u_int count, struct user_msghdr_x *dst, struct uio **uiop)
{
	int error = 0;
	u_int i;

	for (i = 0; i < count; i++) {
		uio_t auio;
		struct user_iovec *iovp;
		struct user_msghdr_x *user_msg = &dst[i];

		if (spacetype == UIO_USERSPACE64) {
			struct user64_msghdr_x *msghdr64;

			msghdr64 = ((struct user64_msghdr_x *)src) + i;

			user_msg->msg_name = msghdr64->msg_name;
			user_msg->msg_namelen = msghdr64->msg_namelen;
			user_msg->msg_iov = msghdr64->msg_iov;
			user_msg->msg_iovlen = msghdr64->msg_iovlen;
			user_msg->msg_control = msghdr64->msg_control;
			user_msg->msg_controllen = msghdr64->msg_controllen;
			user_msg->msg_flags = msghdr64->msg_flags;
			user_msg->msg_datalen = msghdr64->msg_datalen;
		} else {
			struct user32_msghdr_x *msghdr32;

			msghdr32 = ((struct user32_msghdr_x *)src) + i;

			user_msg->msg_name = msghdr32->msg_name;
			user_msg->msg_namelen = msghdr32->msg_namelen;
			user_msg->msg_iov = msghdr32->msg_iov;
			user_msg->msg_iovlen = msghdr32->msg_iovlen;
			user_msg->msg_control = msghdr32->msg_control;
			user_msg->msg_controllen = msghdr32->msg_controllen;
			user_msg->msg_flags = msghdr32->msg_flags;
			user_msg->msg_datalen = msghdr32->msg_datalen;
		}

		if (user_msg->msg_iovlen <= 0 ||
		    user_msg->msg_iovlen > UIO_MAXIOV) {
			error = EMSGSIZE;
			goto done;
		}
		auio = uio_create(user_msg->msg_iovlen, 0, spacetype,
		    direction);
		if (auio == NULL) {
			error = ENOMEM;
			goto done;
		}
		uiop[i] = auio;

		if (user_msg->msg_iovlen) {
			iovp = uio_iovsaddr(auio);
			if (iovp == NULL) {
				error = ENOMEM;
				goto done;
			}
			error = copyin_user_iovec_array(user_msg->msg_iov,
			    spacetype, user_msg->msg_iovlen, iovp);
			if (error)
				goto done;
			user_msg->msg_iov = CAST_USER_ADDR_T(iovp);

			error = uio_calculateresid(auio);
			if (error)
				goto done;
			user_msg->msg_datalen = uio_resid(auio);
		} else {
			user_msg->msg_datalen = 0;
		}
	}
done:
	return (error);
}
u_int
externalize_user_msghdr_array(void *dst, int spacetype, int direction,
    u_int count, const struct user_msghdr_x *src, struct uio **uiop)
{
#pragma unused(direction)
	u_int i;
	int seenlast = 0;
	u_int retcnt = 0;

	for (i = 0; i < count; i++) {
		const struct user_msghdr_x *user_msg = &src[i];
		uio_t auio = uiop[i];
		user_ssize_t len = user_msg->msg_datalen - uio_resid(auio);

		if (user_msg->msg_datalen != 0 && len == 0)
			seenlast = 1;

		if (seenlast == 0)
			retcnt++;

		if (spacetype == UIO_USERSPACE64) {
			struct user64_msghdr_x *msghdr64;

			msghdr64 = ((struct user64_msghdr_x *)dst) + i;

			msghdr64->msg_flags = user_msg->msg_flags;
			msghdr64->msg_datalen = len;
		} else {
			struct user32_msghdr_x *msghdr32;

			msghdr32 = ((struct user32_msghdr_x *)dst) + i;

			msghdr32->msg_flags = user_msg->msg_flags;
			msghdr32->msg_datalen = len;
		}
	}
	return (retcnt);
}
void
free_uio_array(struct uio **uiop, u_int count)
{
	u_int i;

	for (i = 0; i < count; i++) {
		if (uiop[i] != NULL)
			uio_free(uiop[i]);
	}
}
__private_extern__ user_ssize_t
uio_array_resid(struct uio **uiop, u_int count)
{
	user_ssize_t len = 0;
	u_int i;

	for (i = 0; i < count; i++) {
		struct uio *auio = uiop[i];

		if (auio != NULL)
			len += uio_resid(auio);
	}
	return (len);
}
int
uio_array_is_valid(struct uio **uiop, u_int count)
{
	user_ssize_t len = 0;
	u_int i;

	for (i = 0; i < count; i++) {
		struct uio *auio = uiop[i];

		if (auio != NULL) {
			user_ssize_t resid = uio_resid(auio);

			/*
			 * Sanity check on the validity of the iovec:
			 * no point in going over sb_max
			 */
			if (resid < 0 || (u_int32_t)resid > sb_max)
				return (0);

			len += resid;
			if (len < 0 || (u_int32_t)len > sb_max)
				return (0);
		}
	}
	return (1);
}
#define	SFUIOBUFS 64

/* Macros to compute the number of mbufs needed depending on cluster size */
#define	HOWMANY_16K(n)	((((unsigned int)(n) - 1) >> (PGSHIFT + 2)) + 1)
#define	HOWMANY_4K(n)	((((unsigned int)(n) - 1) >> PGSHIFT) + 1)

/* Upper send limit in bytes (SFUIOBUFS * PAGESIZE) */
#define	SENDFILE_MAX_BYTES	(SFUIOBUFS << PGSHIFT)

/* Upper send limit in the number of mbuf clusters */
#define	SENDFILE_MAX_16K	HOWMANY_16K(SENDFILE_MAX_BYTES)
#define	SENDFILE_MAX_4K		HOWMANY_4K(SENDFILE_MAX_BYTES)
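/*
 * Worked example (editorial), assuming PGSHIFT == 12, i.e. 4 KB pages:
 *   SENDFILE_MAX_BYTES = 64 << 12 = 256 KB per pass;
 *   HOWMANY_4K(10000)  = ((10000 - 1) >> 12) + 1 = 3 clusters;
 *   HOWMANY_16K(10000) = ((10000 - 1) >> 14) + 1 = 1 cluster;
 *   SENDFILE_MAX_4K == 64 and SENDFILE_MAX_16K == 16 under the same
 *   assumption.
 */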
static void
alloc_sendpkt(int how, size_t pktlen, unsigned int *maxchunks,
    struct mbuf **m, boolean_t jumbocl)
{
	unsigned int needed;

	if (pktlen == 0)
		panic("%s: pktlen (%ld) must be non-zero\n", __func__, pktlen);

	/*
	 * Try to allocate for the whole thing.  Since we want full control
	 * over the buffer size and be able to accept partial result, we can't
	 * use mbuf_allocpacket().  The logic below is similar to sosend().
	 */
	*m = NULL;
	if (pktlen > MBIGCLBYTES && jumbocl) {
		needed = MIN(SENDFILE_MAX_16K, HOWMANY_16K(pktlen));
		*m = m_getpackets_internal(&needed, 1, how, 0, M16KCLBYTES);
	}
	if (*m == NULL) {
		needed = MIN(SENDFILE_MAX_4K, HOWMANY_4K(pktlen));
		*m = m_getpackets_internal(&needed, 1, how, 0, MBIGCLBYTES);
	}

	/*
	 * Our previous attempt(s) at allocation had failed; the system
	 * may be short on mbufs, and we want to block until they are
	 * available.  This time, ask just for 1 mbuf and don't return
	 * until we get it.
	 */
	if (*m == NULL) {
		needed = 1;
		*m = m_getpackets_internal(&needed, 1, M_WAIT, 1, MBIGCLBYTES);
	}
	if (*m == NULL)
		panic("%s: blocking allocation returned NULL\n", __func__);

	*maxchunks = needed;
}
/*
 * int sendfile(int fd, int s, off_t offset, off_t *nbytes,
 *	 struct sf_hdtr *hdtr, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only '*nbytes' of the file or until EOF if
 * *nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into *nbytes.
 */
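/*
 * Minimal userland sketch of the interface documented above (editorial
 * illustration, not part of the original source; the helper name is
 * hypothetical).  It sends an entire regular file over a connected
 * SOCK_STREAM socket and reports how many bytes went out.  Compiled out.
 */
#if 0
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

static int
send_whole_file(int sock, const char *path, off_t *sent)
{
	int fd, err;
	off_t len = 0;			/* 0 means "send until EOF" */

	if ((fd = open(path, O_RDONLY)) < 0)
		return (-1);
	/* No headers/trailers (hdtr == NULL); flags must be 0. */
	err = sendfile(fd, sock, 0, &len, NULL, 0);
	*sent = len;			/* on return, bytes actually sent */
	close(fd);
	return (err);
}
#endif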
int
sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval)
{
	struct fileproc *fp;
	struct vnode *vp;
	struct socket *so;
	struct writev_nocancel_args nuap;
	user_ssize_t writev_retval;
	struct user_sf_hdtr user_hdtr;
	struct user32_sf_hdtr user32_hdtr;
	struct user64_sf_hdtr user64_hdtr;
	off_t off, xfsize;
	off_t nbytes = 0, sbytes = 0;
	int error = 0;
	size_t sizeof_hdtr;
	off_t file_size;
	struct vfs_context context = *vfs_context_current();
#define	ENXIO_10146739_DBG(err_str) {				\
	if (error == ENXIO) {					\
		printf(err_str, __func__,			\
		    "File a radar related to rdar://10146739 \n"); \
	}							\
}

	KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE | DBG_FUNC_START), uap->s,
	    0, 0, 0, 0);

	AUDIT_ARG(fd, uap->fd);
	AUDIT_ARG(value32, uap->s);

	/*
	 * Do argument checking.  Must be a regular file in, stream
	 * type and connected socket out, positive offset.
	 */
	if ((error = fp_getfvp(p, uap->fd, &fp, &vp))) {
		ENXIO_10146739_DBG("%s: fp_getfvp error. %s");
		goto done;
	}
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto done1;
	}
	if (vnode_isreg(vp) == 0) {
		error = ENOTSUP;
		goto done1;
	}
	error = file_socket(uap->s, &so);
	if (error) {
		ENXIO_10146739_DBG("%s: file_socket error. %s");
		goto done1;
	}
	if (so == NULL) {
		error = EBADF;
		goto done2;
	}
	if (so->so_type != SOCK_STREAM) {
		error = EINVAL;
		goto done2;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto done2;
	}
	if (uap->offset < 0) {
		error = EINVAL;
		goto done2;
	}
	if (uap->nbytes == USER_ADDR_NULL) {
		error = EINVAL;
		goto done2;
	}
	if (uap->flags != 0) {
		error = EINVAL;
		goto done2;
	}

	context.vc_ucred = fp->f_fglob->fg_cred;
#if CONFIG_MACF_SOCKET_SUBSET
	/* JMM - fetch connected sockaddr? */
	error = mac_socket_check_send(context.vc_ucred, so, NULL);
	if (error)
		goto done2;
#endif

	/*
	 * Get number of bytes to send.
	 * Should it apply to the size of the header and trailer?
	 * JMM - error handling?
	 */
	copyin(uap->nbytes, &nbytes, sizeof (off_t));
	/*
	 * If specified, get the pointer to the sf_hdtr struct for
	 * any headers/trailers.
	 */
	if (uap->hdtr != USER_ADDR_NULL) {
		caddr_t hdtrp;

		bzero(&user_hdtr, sizeof (user_hdtr));
		if (IS_64BIT_PROCESS(p)) {
			hdtrp = (caddr_t)&user64_hdtr;
			sizeof_hdtr = sizeof (user64_hdtr);
		} else {
			hdtrp = (caddr_t)&user32_hdtr;
			sizeof_hdtr = sizeof (user32_hdtr);
		}
		error = copyin(uap->hdtr, hdtrp, sizeof_hdtr);
		if (error)
			goto done2;
		if (IS_64BIT_PROCESS(p)) {
			user_hdtr.headers = user64_hdtr.headers;
			user_hdtr.hdr_cnt = user64_hdtr.hdr_cnt;
			user_hdtr.trailers = user64_hdtr.trailers;
			user_hdtr.trl_cnt = user64_hdtr.trl_cnt;
		} else {
			user_hdtr.headers = user32_hdtr.headers;
			user_hdtr.hdr_cnt = user32_hdtr.hdr_cnt;
			user_hdtr.trailers = user32_hdtr.trailers;
			user_hdtr.trl_cnt = user32_hdtr.trl_cnt;
		}

		/*
		 * Send any headers.  Wimp out and use writev(2).
		 */
		if (user_hdtr.headers != USER_ADDR_NULL) {
			bzero(&nuap, sizeof (struct writev_args));
			nuap.fd = uap->s;
			nuap.iovp = user_hdtr.headers;
			nuap.iovcnt = user_hdtr.hdr_cnt;
			error = writev_nocancel(p, &nuap, &writev_retval);
			if (error) {
				ENXIO_10146739_DBG("%s: writev_nocancel error. %s");
				goto done2;
			}
			sbytes += writev_retval;
		}
	}
	/*
	 * Get the file size for 2 reasons:
	 *  1. We don't want to allocate more mbufs than necessary
	 *  2. We don't want to read past the end of file
	 */
	if ((error = vnode_size(vp, &file_size, vfs_context_current())) != 0) {
		ENXIO_10146739_DBG("%s: vnode_size error. %s");
		goto done2;
	}

	/*
	 * Simply read file data into a chain of mbufs that is used with
	 * scatter/gather reads.  We're not (yet?) set up to use zero-copy
	 * external mbufs that point to the file pages.
	 */
	socket_lock(so, 1);
	error = sblock(&so->so_snd, SBL_WAIT);
	if (error) {
		socket_unlock(so, 1);
		goto done2;
	}
	for (off = uap->offset; ; off += xfsize, sbytes += xfsize) {
		mbuf_t		m0 = NULL, m;
		unsigned int	nbufs = SFUIOBUFS, i;
		uio_t		auio;
		char		uio_buf[UIO_SIZEOF(SFUIOBUFS)];	/* 1 KB !!! */
		size_t		uiolen;
		user_ssize_t	rlen;
		off_t		pgoff;
		size_t		pktlen;
		boolean_t	jumbocl;
		/*
		 * Calculate the amount to transfer.
		 * Align to round number of pages.
		 * Not to exceed send socket buffer,
		 * the EOF, or the passed in nbytes.
		 */
		xfsize = sbspace(&so->so_snd);

		if (xfsize <= 0) {
			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
				goto done3;
			} else if ((so->so_state & SS_NBIO)) {
				error = EAGAIN;
				goto done3;
			} else {
				xfsize = PAGE_SIZE;
			}
		}

		if (xfsize > SENDFILE_MAX_BYTES)
			xfsize = SENDFILE_MAX_BYTES;
		else if (xfsize > PAGE_SIZE)
			xfsize = trunc_page(xfsize);
		pgoff = off & PAGE_MASK_64;
		if (pgoff > 0 && PAGE_SIZE - pgoff < xfsize)
			xfsize = PAGE_SIZE_64 - pgoff;
		if (nbytes && xfsize > (nbytes - sbytes))
			xfsize = nbytes - sbytes;
		if (xfsize <= 0)
			break;
		if (off + xfsize > file_size)
			xfsize = file_size - off;
		if (xfsize <= 0)
			break;
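		/*
		 * Worked example of the clamping above (editorial, assuming
		 * 4 KB pages): with sbspace() == 200000, xfsize is first
		 * truncated to trunc_page(200000) = 196608; if off == 0x1800
		 * then pgoff == 0x800, so this pass is further clamped to
		 * 4096 - 0x800 = 2048 bytes so that subsequent passes start
		 * page-aligned.
		 */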
		/*
		 * Attempt to use larger than system page-size clusters for
		 * large writes only if there is a jumbo cluster pool and
		 * if the socket is marked accordingly.
		 */
		jumbocl = sosendjcl && njcl > 0 &&
		    ((so->so_flags & SOF_MULTIPAGES) || sosendjcl_ignore_capab);

		socket_unlock(so, 0);
		alloc_sendpkt(M_WAIT, xfsize, &nbufs, &m0, jumbocl);
		pktlen = mbuf_pkthdr_maxlen(m0);
		if (pktlen < (size_t)xfsize)
			xfsize = pktlen;

		auio = uio_createwithbuffer(nbufs, off, UIO_SYSSPACE,
		    UIO_READ, &uio_buf[0], sizeof (uio_buf));
		if (auio == NULL) {
			printf("sendfile failed. nbufs = %d. %s", nbufs,
			    "File a radar related to rdar://10146739.\n");
			mbuf_freem(m0);
			error = ENXIO;
			socket_lock(so, 0);
			goto done3;
		}

		for (i = 0, m = m0, uiolen = 0;
		    i < nbufs && m != NULL && uiolen < (size_t)xfsize;
		    i++, m = mbuf_next(m)) {
			size_t mlen = mbuf_maxlen(m);

			if (mlen + uiolen > (size_t)xfsize)
				mlen = xfsize - uiolen;
			mbuf_setlen(m, mlen);
			uio_addiov(auio, CAST_USER_ADDR_T(mbuf_datastart(m)),
			    mlen);
			uiolen += mlen;
		}

		if (xfsize != uio_resid(auio))
			printf("sendfile: xfsize: %lld != uio_resid(auio): "
			    "%lld\n", xfsize, (long long)uio_resid(auio));

		KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_READ | DBG_FUNC_START),
		    uap->s, (unsigned int)((xfsize >> 32) & 0x0ffffffff),
		    (unsigned int)(xfsize & 0x0ffffffff), 0, 0);
		error = fo_read(fp, auio, FOF_OFFSET, &context);
		socket_lock(so, 0);
		if (error != 0) {
			if (uio_resid(auio) != xfsize && (error == ERESTART ||
			    error == EINTR || error == EWOULDBLOCK)) {
				error = 0;
			} else {
				ENXIO_10146739_DBG("%s: fo_read error. %s");
				mbuf_freem(m0);
				goto done3;
			}
		}
		xfsize -= uio_resid(auio);
		KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_READ | DBG_FUNC_END),
		    uap->s, (unsigned int)((xfsize >> 32) & 0x0ffffffff),
		    (unsigned int)(xfsize & 0x0ffffffff), 0, 0);

		if (xfsize == 0) {
			//printf("sendfile: fo_read 0 bytes, EOF\n");
			mbuf_freem(m0);
			break;
		}
		if (xfsize + off > file_size)
			printf("sendfile: xfsize: %lld + off: %lld > file_size:"
			    "%lld\n", xfsize, off, file_size);
		for (i = 0, m = m0, rlen = 0;
		    i < nbufs && m != NULL && rlen < xfsize;
		    i++, m = mbuf_next(m)) {
			size_t mlen = mbuf_maxlen(m);

			if (rlen + mlen > (size_t)xfsize)
				mlen = xfsize - rlen;
			mbuf_setlen(m, mlen);

			rlen += mlen;
		}
		mbuf_pkthdr_setlen(m0, xfsize);
retry_space:
		/*
		 * Make sure that the socket is still able to take more data.
		 * CANTSENDMORE being true usually means that the connection
		 * was closed.  so_error is true when an error was sensed after
		 * a previous send.
		 * The state is checked after the page mapping and buffer
		 * allocation above since those operations may block and make
		 * any socket checks stale.  From this point forward, nothing
		 * blocks before the pru_send (or more accurately, any blocking
		 * results in a loop back to here to re-check).
		 */
		if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
			} else {
				error = so->so_error;
				so->so_error = 0;
			}
			m_freem(m0);
			ENXIO_10146739_DBG("%s: Unexpected socket error. %s");
			goto done3;
		}
		/*
		 * Wait for socket space to become available.  We do this just
		 * after checking the connection state above in order to avoid
		 * a race condition with sbwait().
		 */
		if (sbspace(&so->so_snd) < (long)so->so_snd.sb_lowat) {
			if (so->so_state & SS_NBIO) {
				m_freem(m0);
				error = EAGAIN;
				goto done3;
			}
			KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_WAIT |
			    DBG_FUNC_START), uap->s, 0, 0, 0, 0);
			error = sbwait(&so->so_snd);
			KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_WAIT |
			    DBG_FUNC_END), uap->s, 0, 0, 0, 0);
			/*
			 * An error from sbwait usually indicates that we've
			 * been interrupted by a signal.  If we've sent anything
			 * then return bytes sent, otherwise return the error.
			 */
			if (error) {
				m_freem(m0);
				goto done3;
			}
			goto retry_space;
		}
		{
			struct mbuf *control = NULL;

			/*
			 * Socket filter processing
			 */
			error = sflt_data_out(so, NULL, &m0, &control, 0);
			if (error) {
				if (error == EJUSTRETURN) {
					error = 0;
					continue;
				}
				ENXIO_10146739_DBG("%s: sflt_data_out error. %s");
				goto done3;
			}
			/*
			 * End Socket filter processing
			 */
			KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_SEND |
			    DBG_FUNC_START), uap->s, 0, 0, 0, 0);
			error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m0,
			    0, control, p);
			KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_SEND |
			    DBG_FUNC_END), uap->s, 0, 0, 0, 0);
			if (error) {
				ENXIO_10146739_DBG("%s: pru_send error. %s");
				goto done3;
			}
		}
	}
	sbunlock(&so->so_snd, FALSE);	/* will unlock socket */
	/*
	 * Send trailers.  Wimp out and use writev(2).
	 */
	if (uap->hdtr != USER_ADDR_NULL &&
	    user_hdtr.trailers != USER_ADDR_NULL) {
		bzero(&nuap, sizeof (struct writev_args));
		nuap.fd = uap->s;
		nuap.iovp = user_hdtr.trailers;
		nuap.iovcnt = user_hdtr.trl_cnt;
		error = writev_nocancel(p, &nuap, &writev_retval);
		if (error) {
			ENXIO_10146739_DBG("%s: writev_nocancel error. %s");
			goto done2;
		}
		sbytes += writev_retval;
	}
done2:
	file_drop(uap->s);
done1:
	file_drop(uap->fd);
done:
	if (uap->nbytes != USER_ADDR_NULL) {
		/* XXX this appears bogus for some early failure conditions */
		copyout(&sbytes, uap->nbytes, sizeof (off_t));
	}
	KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE | DBG_FUNC_END), uap->s,
	    (unsigned int)((sbytes >> 32) & 0x0ffffffff),
	    (unsigned int)(sbytes & 0x0ffffffff), error, 0);
	return (error);

done3:
	sbunlock(&so->so_snd, FALSE);	/* will unlock socket */
	goto done2;
}
#endif /* SENDFILE */