/* apple/xnu: bsd/kern/kpi_socket.c (git.saurik.com mirror, blob 8aff81de855fabf815c993af4792d89e5e6f79a1) */
/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/uio_internal.h>
#include <kern/lock.h>

extern void *memcpy(void *, const void *, size_t);
extern int soclose_locked(struct socket *so);

errno_t sock_send_internal(
    socket_t             sock,
    const struct msghdr  *msg,
    mbuf_t               data,
    int                  flags,
    size_t               *sentlen);

errno_t
sock_accept(
    socket_t         sock,
    struct sockaddr  *from,
    int              fromlen,
    int              flags,
    sock_upcall      callback,
    void             *cookie,
    socket_t         *new_sock)
{
    struct sockaddr  *sa;
    struct socket    *new_so;
    lck_mtx_t        *mutex_held;
    int              dosocklock;
    errno_t          error = 0;

    if (sock == NULL || new_sock == NULL) return EINVAL;
    socket_lock(sock, 1);
    if ((sock->so_options & SO_ACCEPTCONN) == 0) {
        socket_unlock(sock, 1);
        return EINVAL;
    }
    if ((flags & ~(MSG_DONTWAIT)) != 0) {
        socket_unlock(sock, 1);
        return ENOTSUP;
    }
    if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) &&
        sock->so_comp.tqh_first == NULL) {
        socket_unlock(sock, 1);
        return EWOULDBLOCK;
    }

    if (sock->so_proto->pr_getlock != NULL) {
        mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
        dosocklock = 1;
    }
    else {
        mutex_held = sock->so_proto->pr_domain->dom_mtx;
        dosocklock = 0;
    }

    while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
        if (sock->so_state & SS_CANTRCVMORE) {
            sock->so_error = ECONNABORTED;
            break;
        }
        error = msleep((caddr_t)&sock->so_timeo, mutex_held,
                       PSOCK | PCATCH, "sock_accept", 0);
        if (error) {
            socket_unlock(sock, 1);
            return (error);
        }
    }
    if (sock->so_error) {
        error = sock->so_error;
        sock->so_error = 0;
        socket_unlock(sock, 1);
        return (error);
    }

    new_so = TAILQ_FIRST(&sock->so_comp);
    TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
    sock->so_qlen--;
    socket_unlock(sock, 1); /* release the head */

    if (dosocklock) {
        lck_mtx_assert(new_so->so_proto->pr_getlock(new_so, 0),
                       LCK_MTX_ASSERT_NOTOWNED);
        socket_lock(new_so, 1);
    }

    new_so->so_state &= ~SS_COMP;
    new_so->so_head = NULL;
    soacceptlock(new_so, &sa, 0);

    if (callback) {
        new_so->so_upcall = callback;
        new_so->so_upcallarg = cookie;
        new_so->so_rcv.sb_flags |= SB_UPCALL;
    }

    if (sa && from) {
        if (fromlen > sa->sa_len) fromlen = sa->sa_len;
        memcpy(from, sa, fromlen);
    }
    if (sa) FREE(sa, M_SONAME);

    *new_sock = new_so;
    if (dosocklock)
        socket_unlock(new_so, 1);
    return error;
}
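
/*
 * Illustrative caller-side sketch: one way a kext might drain a listening
 * socket_t with sock_accept() from <sys/kpi_socket.h>. The function name
 * example_accept_all, the handle_new_connection callback, and the assumption
 * that `listener` was already set up (socket/bind/listen) are illustrative
 * assumptions, not part of this file's API.
 */
static void
example_accept_all(socket_t listener,
    void (*handle_new_connection)(socket_t so, void *ctx), void *ctx)
{
    struct sockaddr_storage from;
    socket_t                child;
    errno_t                 err;

    for (;;) {
        /* MSG_DONTWAIT: return EWOULDBLOCK instead of sleeping in msleep(). */
        err = sock_accept(listener, (struct sockaddr *)&from,
                          (int)sizeof(from), MSG_DONTWAIT, NULL, NULL, &child);
        if (err != 0)
            break;  /* typically EWOULDBLOCK once the completion queue is empty */
        /* The callback now owns `child` and must eventually sock_close() it. */
        handle_new_connection(child, ctx);
    }
}
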
errno_t
sock_bind(
    socket_t               sock,
    const struct sockaddr  *to)
{
    if (sock == NULL || to == NULL) return EINVAL;

    return sobind(sock, (struct sockaddr *)to);
}
errno_t
sock_connect(
    socket_t               sock,
    const struct sockaddr  *to,
    int                    flags)
{
    int        error = 0;
    lck_mtx_t  *mutex_held;

    if (sock == NULL || to == NULL) return EINVAL;

    socket_lock(sock, 1);

    if ((sock->so_state & SS_ISCONNECTING) &&
        ((sock->so_state & SS_NBIO) != 0 ||
         (flags & MSG_DONTWAIT) != 0)) {
        socket_unlock(sock, 1);
        return EALREADY;
    }
    error = soconnectlock(sock, (struct sockaddr *)to, 0);
    if (!error) {
        if ((sock->so_state & SS_ISCONNECTING) &&
            ((sock->so_state & SS_NBIO) != 0 || (flags & MSG_DONTWAIT) != 0)) {
            socket_unlock(sock, 1);
            return EINPROGRESS;
        }

        if (sock->so_proto->pr_getlock != NULL)
            mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
        else
            mutex_held = sock->so_proto->pr_domain->dom_mtx;

        while ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
            error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH,
                           "sock_connect", 0);
            if (error)
                break;
        }

        if (error == 0) {
            error = sock->so_error;
            sock->so_error = 0;
        }
    }
    else {
        sock->so_state &= ~SS_ISCONNECTING;
    }
    socket_unlock(sock, 1);
    return error;
}
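
/*
 * Illustrative caller-side sketch: a blocking TCP connect built on the KPI
 * wrappers above. example_connect_tcp and the caller-supplied `to` address
 * are assumptions; only sock_socket(), sock_connect() and sock_close() from
 * <sys/kpi_socket.h> are relied on.
 */
static errno_t
example_connect_tcp(const struct sockaddr *to, socket_t *out_so)
{
    socket_t so = NULL;
    errno_t  err;

    err = sock_socket(to->sa_family, SOCK_STREAM, 0, NULL, NULL, &so);
    if (err != 0)
        return err;

    /* flags == 0: sleep (msleep above) until connected or an error occurs. */
    err = sock_connect(so, to, 0);
    if (err != 0) {
        sock_close(so);
        return err;
    }
    *out_so = so;
    return 0;
}
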
errno_t
sock_connectwait(
    socket_t              sock,
    const struct timeval  *tv)
{
    lck_mtx_t        *mutex_held;
    errno_t          retval = 0;
    struct timespec  ts;

    socket_lock(sock, 1);

    // Check if we're already connected or if we've already errored out
    if ((sock->so_state & SS_ISCONNECTING) == 0 || sock->so_error) {
        if (sock->so_error) {
            retval = sock->so_error;
            sock->so_error = 0;
        }
        else {
            if ((sock->so_state & SS_ISCONNECTED) != 0)
                retval = 0;
            else
                retval = EINVAL;
        }
        goto done;
    }

    // copied translation from timeval to hertz from SO_RCVTIMEO handling
    if (tv->tv_sec < 0 || tv->tv_sec > SHRT_MAX / hz ||
        tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
        retval = EDOM;
        goto done;
    }

    ts.tv_sec = tv->tv_sec;
    ts.tv_nsec = (tv->tv_usec * NSEC_PER_USEC);
    if ( (ts.tv_sec + (ts.tv_nsec/NSEC_PER_SEC))/100 > SHRT_MAX) {
        retval = EDOM;
        goto done;
    }

    if (sock->so_proto->pr_getlock != NULL)
        mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
    else
        mutex_held = sock->so_proto->pr_domain->dom_mtx;

    msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK, "sock_connectwait", &ts);

    // Check if we're still waiting to connect
    if ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
        retval = EINPROGRESS;
        goto done;
    }

    if (sock->so_error) {
        retval = sock->so_error;
        sock->so_error = 0;
    }

done:
    socket_unlock(sock, 1);
    return retval;
}
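
/*
 * Illustrative caller-side sketch: pairing a non-blocking
 * sock_connect(..., MSG_DONTWAIT) with sock_connectwait() to bound how long
 * the caller sleeps. example_connect_timeout is an assumed name; the timeout
 * is supplied by the caller.
 */
static errno_t
example_connect_timeout(socket_t so, const struct sockaddr *to,
    const struct timeval *timeout)
{
    errno_t err;

    err = sock_connect(so, to, MSG_DONTWAIT);
    if (err == 0)
        return 0;           /* connected immediately */
    if (err != EINPROGRESS)
        return err;         /* hard failure */

    /* Sleeps for at most *timeout; EINPROGRESS means it is still connecting. */
    return sock_connectwait(so, timeout);
}
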
errno_t
sock_nointerrupt(
    socket_t  sock,
    int       on)
{
    socket_lock(sock, 1);

    if (on) {
        sock->so_rcv.sb_flags |= SB_NOINTR; // This isn't safe
        sock->so_snd.sb_flags |= SB_NOINTR; // This isn't safe
    }
    else {
        sock->so_rcv.sb_flags &= ~SB_NOINTR; // This isn't safe
        sock->so_snd.sb_flags &= ~SB_NOINTR; // This isn't safe
    }

    socket_unlock(sock, 1);
    return 0;
}
errno_t
sock_getpeername(
    socket_t         sock,
    struct sockaddr  *peername,
    int              peernamelen)
{
    int              error;
    struct sockaddr  *sa = NULL;

    if (sock == NULL || peername == NULL || peernamelen < 0) return EINVAL;
    socket_lock(sock, 1);
    if ((sock->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
        socket_unlock(sock, 1);
        return ENOTCONN;
    }
    error = sock->so_proto->pr_usrreqs->pru_peeraddr(sock, &sa);
    if (error == 0) {
        if (peernamelen > sa->sa_len) peernamelen = sa->sa_len;
        memcpy(peername, sa, peernamelen);
    }
    if (sa) FREE(sa, M_SONAME);
    socket_unlock(sock, 1);
    return error;
}
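
/*
 * Illustrative caller-side sketch: copying the peer address of a connected
 * socket into caller storage. The KPI truncates to sa_len, so a
 * sockaddr_storage always fits. example_peer_family is an assumed name.
 */
static sa_family_t
example_peer_family(socket_t so)
{
    struct sockaddr_storage peer;

    if (sock_getpeername(so, (struct sockaddr *)&peer, (int)sizeof(peer)) != 0)
        return AF_UNSPEC;
    return ((struct sockaddr *)&peer)->sa_family;
}
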
errno_t
sock_getsockname(
    socket_t         sock,
    struct sockaddr  *sockname,
    int              socknamelen)
{
    int              error;
    struct sockaddr  *sa = NULL;

    if (sock == NULL || sockname == NULL || socknamelen < 0) return EINVAL;
    socket_lock(sock, 1);
    error = sock->so_proto->pr_usrreqs->pru_sockaddr(sock, &sa);
    if (error == 0) {
        if (socknamelen > sa->sa_len) socknamelen = sa->sa_len;
        memcpy(sockname, sa, socknamelen);
    }
    if (sa) FREE(sa, M_SONAME);
    socket_unlock(sock, 1);
    return error;
}
errno_t
sock_getsockopt(
    socket_t  sock,
    int       level,
    int       optname,
    void      *optval,
    int       *optlen)
{
    int             error = 0;
    struct sockopt  sopt;

    if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;
    sopt.sopt_dir = SOPT_GET;
    sopt.sopt_level = level;
    sopt.sopt_name = optname;
    sopt.sopt_val = CAST_USER_ADDR_T(optval);
    sopt.sopt_valsize = *optlen;
    sopt.sopt_p = NULL;
    error = sogetopt(sock, &sopt); /* will lock socket */
    if (error == 0) *optlen = sopt.sopt_valsize;
    return error;
}
errno_t
sock_ioctl(
    socket_t       sock,
    unsigned long  request,
    void           *argp)
{
    return soioctl(sock, request, argp, NULL); /* will lock socket */
}
errno_t
sock_setsockopt(
    socket_t    sock,
    int         level,
    int         optname,
    const void  *optval,
    int         optlen)
{
    struct sockopt  sopt;

    if (sock == NULL || optval == NULL) return EINVAL;
    sopt.sopt_dir = SOPT_SET;
    sopt.sopt_level = level;
    sopt.sopt_name = optname;
    sopt.sopt_val = CAST_USER_ADDR_T(optval);
    sopt.sopt_valsize = optlen;
    sopt.sopt_p = NULL;
    return sosetopt(sock, &sopt); /* will lock socket */
}
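
/*
 * Illustrative caller-side sketch: the KPI option calls take kernel-space
 * buffers, so plain ints on the stack work. example_toggle_keepalive is an
 * assumed name; SO_KEEPALIVE is just a representative SOL_SOCKET option.
 */
static errno_t
example_toggle_keepalive(socket_t so, int enable)
{
    int     val = enable ? 1 : 0;
    int     len = (int)sizeof(val);
    errno_t err;

    err = sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &val, (int)sizeof(val));
    if (err != 0)
        return err;

    /* Read it back; on success len holds the size actually returned. */
    return sock_getsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &val, &len);
}
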
errno_t
sock_listen(
    socket_t  sock,
    int       backlog)
{
    if (sock == NULL) return EINVAL;
    return solisten(sock, backlog); /* will lock socket */
}
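
/*
 * Illustrative caller-side sketch: the usual socket/bind/listen sequence
 * expressed with the KPI wrappers in <sys/kpi_socket.h>. example_make_listener
 * is an assumed name; the local address (family, port) is supplied by the
 * caller.
 */
static errno_t
example_make_listener(const struct sockaddr *local, int backlog, socket_t *out)
{
    socket_t so = NULL;
    errno_t  err;

    err = sock_socket(local->sa_family, SOCK_STREAM, 0, NULL, NULL, &so);
    if (err == 0)
        err = sock_bind(so, local);
    if (err == 0)
        err = sock_listen(so, backlog);

    if (err != 0 && so != NULL) {
        sock_close(so);
        so = NULL;
    }
    *out = so;
    return err;
}
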
errno_t
sock_receive_internal(
    socket_t       sock,
    struct msghdr  *msg,
    mbuf_t         *data,
    int            flags,
    size_t         *recvdlen)
{
    uio_t            auio;
    struct mbuf      *control = NULL;
    int              error = 0;
    int              length = 0;
    struct sockaddr  *fromsa;
    char             uio_buf[ UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0) ];

    if (sock == NULL) return EINVAL;

    auio = uio_createwithbuffer(((msg != NULL) ? msg->msg_iovlen : 0),
                                0, UIO_SYSSPACE, UIO_READ,
                                &uio_buf[0], sizeof(uio_buf));
    if (msg && data == NULL) {
        int i;
        struct iovec_32 *tempp = (struct iovec_32 *) msg->msg_iov;

        for (i = 0; i < msg->msg_iovlen; i++) {
            uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
        }
        if (uio_resid(auio) < 0) return EINVAL;
    }
    else {
        uio_setresid(auio, (uio_resid(auio) + *recvdlen));
    }
    length = uio_resid(auio);

    if (msg && msg->msg_control) {
        if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
        if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
        control = m_get(M_NOWAIT, MT_CONTROL);
        if (control == NULL) return ENOMEM;
        memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
        control->m_len = msg->msg_controllen;
    }

    /* let pru_soreceive handle the socket locking */
    error = sock->so_proto->pr_usrreqs->pru_soreceive(sock, &fromsa, auio,
                data, control ? &control : NULL, &flags);
    if (error) goto cleanup;

    if (recvdlen)
        *recvdlen = length - uio_resid(auio);
    if (msg) {
        msg->msg_flags = flags;

        if (msg->msg_name) {
            int salen;
            salen = msg->msg_namelen;
            if (msg->msg_namelen > 0 && fromsa != 0) {
                salen = MIN(salen, fromsa->sa_len);
                memcpy(msg->msg_name, fromsa,
                       msg->msg_namelen > fromsa->sa_len ? fromsa->sa_len : msg->msg_namelen);
            }
        }

        if (msg->msg_control) {
            struct mbuf  *m = control;
            u_char       *ctlbuf = msg->msg_control;
            int          clen = msg->msg_controllen;
            msg->msg_controllen = 0;

            while (m && clen > 0) {
                unsigned int tocopy;
                if (clen >= m->m_len) {
                    tocopy = m->m_len;
                }
                else {
                    msg->msg_flags |= MSG_CTRUNC;
                    tocopy = clen;
                }
                memcpy(ctlbuf, mtod(m, caddr_t), tocopy);
                ctlbuf += tocopy;
                clen -= tocopy;
                m = m->m_next;
            }
            msg->msg_controllen = (u_int32_t)ctlbuf - (u_int32_t)msg->msg_control;
        }
    }

cleanup:
    if (control) m_freem(control);
    if (fromsa) FREE(fromsa, M_SONAME);
    return error;
}
errno_t
sock_receive(
    socket_t       sock,
    struct msghdr  *msg,
    int            flags,
    size_t         *recvdlen)
{
    if ((msg == NULL) ||
        (msg->msg_iovlen < 1) ||
        (msg->msg_iov[0].iov_len == 0) ||
        (msg->msg_iov[0].iov_base == NULL))
        return EINVAL;
    return sock_receive_internal(sock, msg, NULL, flags, recvdlen);
}
errno_t
sock_receivembuf(
    socket_t       sock,
    struct msghdr  *msg,
    mbuf_t         *data,
    int            flags,
    size_t         *recvlen)
{
    if (data == NULL || recvlen == 0 || *recvlen <= 0 || (msg &&
        (msg->msg_iov != NULL || msg->msg_iovlen != 0)))
        return EINVAL;
    return sock_receive_internal(sock, msg, data, flags, recvlen);
}
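
/*
 * Illustrative caller-side sketch: filling a kernel buffer with
 * sock_receive(). The msg_iov points at kernel memory, matching the
 * UIO_SYSSPACE uio built by sock_receive_internal() above. example_read is an
 * assumed name; struct iovec is assumed to come from <sys/uio.h>.
 */
static errno_t
example_read(socket_t so, void *buf, size_t buflen, size_t *gotlen)
{
    struct iovec  iov;
    struct msghdr msg = { 0 };

    iov.iov_base = buf;
    iov.iov_len = buflen;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    /* MSG_WAITALL could be passed in flags to insist on a full buffer. */
    return sock_receive(so, &msg, 0, gotlen);
}
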
errno_t
sock_send_internal(
    socket_t             sock,
    const struct msghdr  *msg,
    mbuf_t               data,
    int                  flags,
    size_t               *sentlen)
{
    uio_t        auio = NULL;
    struct mbuf  *control = NULL;
    int          error = 0;
    int          datalen = 0;
    char         uio_buf[ UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1)) ];

    if (sock == NULL) {
        error = EINVAL;
        goto errorout;
    }

    if (data == 0 && msg != NULL) {
        struct iovec_32 *tempp = (struct iovec_32 *) msg->msg_iov;

        auio = uio_createwithbuffer(msg->msg_iovlen, 0, UIO_SYSSPACE, UIO_WRITE,
                                    &uio_buf[0], sizeof(uio_buf));
        if (tempp != NULL) {
            int i;

            for (i = 0; i < msg->msg_iovlen; i++) {
                uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
            }

            if (uio_resid(auio) < 0) {
                error = EINVAL;
                goto errorout;
            }
        }
    }

    if (auio)
        datalen = uio_resid(auio);
    else
        datalen = data->m_pkthdr.len;

    if (msg && msg->msg_control) {
        if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
        if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
        control = m_get(M_NOWAIT, MT_CONTROL);
        if (control == NULL) {
            error = ENOMEM;
            goto errorout;
        }
        memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
        control->m_len = msg->msg_controllen;
    }

    error = sock->so_proto->pr_usrreqs->pru_sosend(sock, msg ? (struct sockaddr *)msg->msg_name : 0,
                auio, data, control, flags);
    if (error == 0 && sentlen) {
        if (auio)
            *sentlen = datalen - uio_resid(auio);
        else
            *sentlen = datalen;
    }

    return error;

/*
 * In cases where we detect an error before returning, we need to
 * free the mbuf chain if there is one. sosend (and pru_sosend) will
 * free the mbuf chain if they encounter an error.
 */
errorout:
    if (control)
        m_freem(control);
    if (data)
        m_freem(data);
    if (sentlen)
        *sentlen = 0;
    return error;
}
errno_t
sock_send(
    socket_t             sock,
    const struct msghdr  *msg,
    int                  flags,
    size_t               *sentlen)
{
    if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1)
        return EINVAL;
    return sock_send_internal(sock, msg, NULL, flags, sentlen);
}
errno_t
sock_sendmbuf(
    socket_t             sock,
    const struct msghdr  *msg,
    mbuf_t               data,
    int                  flags,
    size_t               *sentlen)
{
    if (data == NULL || (msg &&
        (msg->msg_iov != NULL || msg->msg_iovlen != 0))) {
        if (data)
            m_freem(data);
        return EINVAL;
    }
    return sock_send_internal(sock, msg, data, flags, sentlen);
}
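
/*
 * Illustrative caller-side sketch: the mirror image of the receive sketch,
 * pushing a kernel buffer out with sock_send() and retrying on short writes.
 * example_write_all is an assumed name.
 */
static errno_t
example_write_all(socket_t so, void *buf, size_t len)
{
    size_t  sent, total = 0;
    errno_t err = 0;

    while (total < len) {
        struct iovec  iov;
        struct msghdr msg = { 0 };

        iov.iov_base = (char *)buf + total;
        iov.iov_len = len - total;
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;

        sent = 0;
        err = sock_send(so, &msg, 0, &sent);
        if (err != 0)
            break;
        total += sent;
    }
    return err;
}
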
errno_t
sock_shutdown(
    socket_t  sock,
    int       how)
{
    if (sock == NULL) return EINVAL;
    return soshutdown(sock, how);
}
typedef void (*so_upcall)(struct socket *sock, void *arg, int waitf);

errno_t
sock_socket(
    int          domain,
    int          type,
    int          protocol,
    sock_upcall  callback,
    void         *context,
    socket_t     *new_so)
{
    int error = 0;

    if (new_so == NULL) return EINVAL;
    /* socreate will create an initial so_count */
    error = socreate(domain, new_so, type, protocol);
    if (error == 0 && callback) {
        (*new_so)->so_rcv.sb_flags |= SB_UPCALL;
        (*new_so)->so_upcall = (so_upcall)callback;
        (*new_so)->so_upcallarg = context;
    }
    return error;
}
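
/*
 * Illustrative caller-side sketch: creating a UDP socket whose read-side
 * upcall fires whenever data (or an error) is pending. example_udp_ready and
 * example_udp_open are assumed names; the upcall runs in the stack's context,
 * so it should stay short and must not block.
 */
static void
example_udp_ready(socket_t so, void *cookie, int waitflag)
{
    /* Typically: wake a worker thread that will then call sock_receive(). */
    (void)so; (void)cookie; (void)waitflag;
}

static errno_t
example_udp_open(void *cookie, socket_t *out)
{
    /* Protocol 0 lets the PF_INET domain pick the default for SOCK_DGRAM (UDP). */
    return sock_socket(PF_INET, SOCK_DGRAM, 0, example_udp_ready, cookie, out);
}
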
void
sock_close(
    socket_t  sock)
{
    if (sock == NULL) return;
    soclose(sock);
}
/* Do we want this to be APPLE_PRIVATE API?: YES (LD 12/23/04)*/
void
sock_retain(
    socket_t  sock)
{
    if (sock == NULL) return;
    socket_lock(sock, 1);
    sock->so_retaincnt++;
    sock->so_usecount++;    /* add extra reference for holding the socket */
    socket_unlock(sock, 1);
}
/* Do we want this to be APPLE_PRIVATE API? */
void
sock_release(
    socket_t  sock)
{
    if (sock == NULL) return;
    socket_lock(sock, 1);
    sock->so_retaincnt--;
    if (sock->so_retaincnt < 0)
        panic("sock_release: negative retain count for sock=%x cnt=%x\n",
              sock, sock->so_retaincnt);
    if ((sock->so_retaincnt == 0) && (sock->so_usecount == 2))
        soclose_locked(sock); /* close socket only if the FD is not holding it */
    else
        sock->so_usecount--;  /* remove extra reference holding the socket */
    socket_unlock(sock, 1);
}
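
/*
 * Illustrative caller-side sketch: the retain/release pair keeps a socket
 * alive while a subsystem holds a pointer to it; sock_release() drops the
 * extra use count and may close the socket once no file descriptor holds it.
 * The example_sock_cache type and example_cache_* names are assumptions.
 */
struct example_sock_cache {
    socket_t cached_so;
};

static void
example_cache_store(struct example_sock_cache *cache, socket_t so)
{
    sock_retain(so);            /* pin: bumps so_retaincnt and so_usecount */
    cache->cached_so = so;
}

static void
example_cache_drop(struct example_sock_cache *cache)
{
    if (cache->cached_so != NULL) {
        sock_release(cache->cached_so);   /* unpin; may soclose_locked() */
        cache->cached_so = NULL;
    }
}
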
errno_t
sock_setpriv(
    socket_t  sock,
    int       on)
{
    if (sock == NULL) return EINVAL;
    socket_lock(sock, 1);
    if (on) {
        sock->so_state |= SS_PRIV;
    }
    else {
        sock->so_state &= ~SS_PRIV;
    }
    socket_unlock(sock, 1);
    return 0;
}
int
sock_isconnected(
    socket_t  sock)
{
    int retval;

    socket_lock(sock, 1);
    retval = (sock->so_state & SS_ISCONNECTED) != 0;
    socket_unlock(sock, 1);
    return (retval);
}
int
sock_isnonblocking(
    socket_t  sock)
{
    int retval;

    socket_lock(sock, 1);
    retval = (sock->so_state & SS_NBIO) != 0;
    socket_unlock(sock, 1);
    return (retval);
}
errno_t
sock_gettype(
    socket_t  sock,
    int       *outDomain,
    int       *outType,
    int       *outProtocol)
{
    socket_lock(sock, 1);
    if (outDomain)
        *outDomain = sock->so_proto->pr_domain->dom_family;
    if (outType)
        *outType = sock->so_type;
    if (outProtocol)
        *outProtocol = sock->so_proto->pr_protocol;
    socket_unlock(sock, 1);
    return 0;
}
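
/*
 * Illustrative caller-side sketch: sock_gettype() NULL-checks each
 * out-parameter, so a caller can pass NULL for values it does not need.
 * example_is_tcp_stream is an assumed name.
 */
static int
example_is_tcp_stream(socket_t so)
{
    int domain = 0, type = 0;

    if (sock_gettype(so, &domain, &type, NULL) != 0)
        return 0;
    return (domain == PF_INET || domain == PF_INET6) && type == SOCK_STREAM;
}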