/* Source: apple/xnu (xnu-1504.9.17) — bsd/kern/kpi_socket.c */
1 /*
2 * Copyright (c) 2003-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define __KPI__
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/types.h>
33 #include <sys/socket.h>
34 #include <sys/socketvar.h>
35 #include <sys/param.h>
36 #include <sys/proc.h>
37 #include <sys/errno.h>
38 #include <sys/malloc.h>
39 #include <sys/protosw.h>
40 #include <sys/domain.h>
41 #include <sys/mbuf.h>
42 #include <sys/fcntl.h>
43 #include <sys/filio.h>
44 #include <sys/uio_internal.h>
45 #include <kern/lock.h>
46 #include <netinet/in.h>
47 #include <libkern/OSAtomic.h>
48
49 extern int soclose_locked(struct socket *so);
50 extern void soclose_wait_locked(struct socket *so);
51 extern int so_isdstlocal(struct socket *so);
52
53 errno_t sock_send_internal(
54 socket_t sock,
55 const struct msghdr *msg,
56 mbuf_t data,
57 int flags,
58 size_t *sentlen);
59
60 typedef void (*so_upcall)(struct socket *, caddr_t , int );
61
62
63 errno_t
64 sock_accept(
65 socket_t sock,
66 struct sockaddr *from,
67 int fromlen,
68 int flags,
69 sock_upcall callback,
70 void* cookie,
71 socket_t *new_sock)
72 {
73 struct sockaddr *sa;
74 struct socket *new_so;
75 lck_mtx_t *mutex_held;
76 int dosocklock;
77 errno_t error = 0;
78
79 if (sock == NULL || new_sock == NULL) return EINVAL;
80 socket_lock(sock, 1);
81 if ((sock->so_options & SO_ACCEPTCONN) == 0) {
82 socket_unlock(sock, 1);
83 return EINVAL;
84 }
85 if ((flags & ~(MSG_DONTWAIT)) != 0) {
86 socket_unlock(sock, 1);
87 return ENOTSUP;
88 }
89 if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) &&
90 sock->so_comp.tqh_first == NULL) {
91 socket_unlock(sock, 1);
92 return EWOULDBLOCK;
93 }
94
95 if (sock->so_proto->pr_getlock != NULL) {
96 mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
97 dosocklock = 1;
98 }
99 else {
100 mutex_held = sock->so_proto->pr_domain->dom_mtx;
101 dosocklock = 0;
102 }
103
104 while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
105 if (sock->so_state & SS_CANTRCVMORE) {
106 sock->so_error = ECONNABORTED;
107 break;
108 }
109 error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH, "sock_accept", NULL);
110 if (error) {
111 socket_unlock(sock, 1);
112 return (error);
113 }
114 }
115 if (sock->so_error) {
116 error = sock->so_error;
117 sock->so_error = 0;
118 socket_unlock(sock, 1);
119 return (error);
120 }
121
122 new_so = TAILQ_FIRST(&sock->so_comp);
123 TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
124 sock->so_qlen--;
125
126 /*
127 * Pass the pre-accepted socket to any interested socket filter(s).
128 * Upon failure, the socket would have been closed by the callee.
129 */
130 if (new_so->so_filt != NULL) {
131 /*
132 * Temporarily drop the listening socket's lock before we
133 * hand off control over to the socket filter(s), but keep
134 * a reference so that it won't go away. We'll grab it
135 * again once we're done with the filter(s).
136 */
137 socket_unlock(sock, 0);
138 if ((error = soacceptfilter(new_so)) != 0) {
139 /* Drop reference on listening socket */
140 sodereference(sock);
141 return (error);
142 }
143 socket_lock(sock, 0);
144 }
145
146 if (dosocklock) {
147 lck_mtx_assert(new_so->so_proto->pr_getlock(new_so, 0),
148 LCK_MTX_ASSERT_NOTOWNED);
149 socket_lock(new_so, 1);
150 }
151
152 new_so->so_state &= ~SS_COMP;
153 new_so->so_head = NULL;
154 (void) soacceptlock(new_so, &sa, 0);
155
156 socket_unlock(sock, 1); /* release the head */
157
158 if (callback) {
159 new_so->so_upcall = (so_upcall) callback;
160 new_so->so_upcallarg = cookie;
161 new_so->so_rcv.sb_flags |= SB_UPCALL;
162 #if CONFIG_SOWUPCALL
163 new_so->so_snd.sb_flags |= SB_UPCALL;
164 #endif
165 }
166
167 if (sa && from)
168 {
169 if (fromlen > sa->sa_len) fromlen = sa->sa_len;
170 memcpy(from, sa, fromlen);
171 }
172 if (sa) FREE(sa, M_SONAME);
173
174 /*
175 * If the socket has been marked as inactive by soacceptfilter(),
176 * disallow further operations on it. We explicitly call shutdown
177 * on both data directions to ensure that SS_CANT{RCV,SEND}MORE
178 * states are set for the socket. This would also flush out data
179 * hanging off the receive list of this socket.
180 */
181 if (new_so->so_flags & SOF_DEFUNCT) {
182 (void) soshutdownlock(new_so, SHUT_RD);
183 (void) soshutdownlock(new_so, SHUT_WR);
184 (void) sodisconnectlocked(new_so);
185 }
186
187 *new_sock = new_so;
188 if (dosocklock)
189 socket_unlock(new_so, 1);
190 return error;
191 }
192
193 errno_t
194 sock_bind(
195 socket_t sock,
196 const struct sockaddr *to)
197 {
198 if (sock == NULL || to == NULL) return EINVAL;
199
200 return sobind(sock, (struct sockaddr*)(uintptr_t)to);
201 }
202
/*
 * Initiate a connection to a remote address.
 *
 * If MSG_DONTWAIT is passed (or the socket is non-blocking), returns
 * EALREADY when a connect is already pending and EINPROGRESS when the
 * new connect cannot complete immediately.  Otherwise blocks until the
 * connection completes, fails, or the sleep is interrupted.
 */
errno_t
sock_connect(
	socket_t sock,
	const struct sockaddr *to,
	int flags)
{
	int error = 0;
	lck_mtx_t *mutex_held;

	if (sock == NULL || to == NULL) return EINVAL;

	socket_lock(sock, 1);

	/* A connect is already in flight on a non-blocking socket. */
	if ((sock->so_state & SS_ISCONNECTING) &&
	    ((sock->so_state & SS_NBIO) != 0 ||
	    (flags & MSG_DONTWAIT) != 0)) {
		socket_unlock(sock, 1);
		return EALREADY;
	}
	error = soconnectlock(sock, (struct sockaddr*)(uintptr_t)to, 0);
	if (!error) {
		/* Non-blocking: report in-progress rather than sleeping. */
		if ((sock->so_state & SS_ISCONNECTING) &&
		    ((sock->so_state & SS_NBIO) != 0 || (flags & MSG_DONTWAIT) != 0)) {
			socket_unlock(sock, 1);
			return EINPROGRESS;
		}

		if (sock->so_proto->pr_getlock != NULL)
			mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		else
			mutex_held = sock->so_proto->pr_domain->dom_mtx;

		/* Blocking: wait for the handshake to finish or fail. */
		while ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
			error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH,
			    "sock_connect", NULL);
			if (error)
				break;
		}

		/* Surface any error the protocol posted on the socket. */
		if (error == 0) {
			error = sock->so_error;
			sock->so_error = 0;
		}
	}
	else {
		/* Immediate failure: clear the in-progress state. */
		sock->so_state &= ~SS_ISCONNECTING;
	}
	socket_unlock(sock, 1);
	return error;
}
253
/*
 * Wait up to 'tv' for a pending non-blocking connect to complete.
 *
 * Returns 0 once connected, EINPROGRESS if the timeout expired while
 * still connecting, EDOM for an out-of-range timeout, EINVAL if no
 * connect is pending, or the socket error posted by the protocol.
 *
 * NOTE(review): unlike the other KPI entry points, 'sock' is not
 * NULL-checked here — callers must pass a valid socket; confirm that
 * this is the intended contract.
 */
errno_t
sock_connectwait(
	socket_t sock,
	const struct timeval *tv)
{
	lck_mtx_t * mutex_held;
	errno_t retval = 0;
	struct timespec ts;

	socket_lock(sock, 1);

	// Check if we're already connected or if we've already errored out
	if ((sock->so_state & SS_ISCONNECTING) == 0 || sock->so_error) {
		if (sock->so_error) {
			retval = sock->so_error;
			sock->so_error = 0;
		}
		else {
			/* Already connected -> success; otherwise nothing to wait on. */
			if ((sock->so_state & SS_ISCONNECTED) != 0)
				retval = 0;
			else
				retval = EINVAL;
		}
		goto done;
	}

	// copied translation from timeval to hertz from SO_RCVTIMEO handling
	if (tv->tv_sec < 0 || tv->tv_sec > SHRT_MAX / hz ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
		retval = EDOM;
		goto done;
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = (tv->tv_usec * NSEC_PER_USEC);
	/*
	 * NOTE(review): the '/100' below looks inconsistent with the
	 * SHRT_MAX/hz bound above (presumably a ticks conversion); it is
	 * preserved as-is — verify against the SO_RCVTIMEO handling it
	 * was copied from before changing.
	 */
	if ( (ts.tv_sec + (ts.tv_nsec/NSEC_PER_SEC))/100 > SHRT_MAX) {
		retval = EDOM;
		goto done;
	}

	if (sock->so_proto->pr_getlock != NULL)
		mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
	else
		mutex_held = sock->so_proto->pr_domain->dom_mtx;

	/* Sleep (uninterruptible by signals: no PCATCH) until timeout or wakeup. */
	msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK, "sock_connectwait", &ts);

	// Check if we're still waiting to connect
	if ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
		retval = EINPROGRESS;
		goto done;
	}

	if (sock->so_error) {
		retval = sock->so_error;
		sock->so_error = 0;
	}

done:
	socket_unlock(sock, 1);
	return retval;
}
316
317 errno_t
318 sock_nointerrupt(
319 socket_t sock,
320 int on)
321 {
322 socket_lock(sock, 1);
323
324 if (on) {
325 sock->so_rcv.sb_flags |= SB_NOINTR; // This isn't safe
326 sock->so_snd.sb_flags |= SB_NOINTR; // This isn't safe
327 }
328 else {
329 sock->so_rcv.sb_flags &= ~SB_NOINTR; // This isn't safe
330 sock->so_snd.sb_flags &= ~SB_NOINTR; // This isn't safe
331 }
332
333 socket_unlock(sock, 1);
334
335 return 0;
336 }
337
338 errno_t
339 sock_getpeername(socket_t sock, struct sockaddr *peername, int peernamelen)
340 {
341 int error;
342 struct sockaddr *sa = NULL;
343
344 if (sock == NULL || peername == NULL || peernamelen < 0)
345 return (EINVAL);
346
347 socket_lock(sock, 1);
348 if (!(sock->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING))) {
349 socket_unlock(sock, 1);
350 return (ENOTCONN);
351 }
352 error = sogetaddr_locked(sock, &sa, 1);
353 socket_unlock(sock, 1);
354 if (error == 0) {
355 if (peernamelen > sa->sa_len)
356 peernamelen = sa->sa_len;
357 memcpy(peername, sa, peernamelen);
358 FREE(sa, M_SONAME);
359 }
360 return (error);
361 }
362
363 errno_t
364 sock_getsockname(socket_t sock, struct sockaddr *sockname, int socknamelen)
365 {
366 int error;
367 struct sockaddr *sa = NULL;
368
369 if (sock == NULL || sockname == NULL || socknamelen < 0)
370 return (EINVAL);
371
372 socket_lock(sock, 1);
373 error = sogetaddr_locked(sock, &sa, 0);
374 socket_unlock(sock, 1);
375 if (error == 0) {
376 if (socknamelen > sa->sa_len)
377 socknamelen = sa->sa_len;
378 memcpy(sockname, sa, socknamelen);
379 FREE(sa, M_SONAME);
380 }
381 return (error);
382 }
383
384 __private_extern__ int
385 sogetaddr_locked(struct socket *so, struct sockaddr **psa, int peer)
386 {
387 int error;
388
389 if (so == NULL || psa == NULL)
390 return (EINVAL);
391
392 *psa = NULL;
393 error = peer ? so->so_proto->pr_usrreqs->pru_peeraddr(so, psa) :
394 so->so_proto->pr_usrreqs->pru_sockaddr(so, psa);
395
396 if (error == 0 && *psa == NULL) {
397 error = ENOMEM;
398 } else if (error != 0 && *psa != NULL) {
399 FREE(*psa, M_SONAME);
400 *psa = NULL;
401 }
402 return (error);
403 }
404
405 errno_t
406 sock_getaddr(socket_t sock, struct sockaddr **psa, int peer)
407 {
408 int error;
409
410 if (sock == NULL || psa == NULL)
411 return (EINVAL);
412
413 socket_lock(sock, 1);
414 error = sogetaddr_locked(sock, psa, peer);
415 socket_unlock(sock, 1);
416
417 return (error);
418 }
419
420 void
421 sock_freeaddr(struct sockaddr *sa)
422 {
423 if (sa != NULL)
424 FREE(sa, M_SONAME);
425 }
426
427 errno_t
428 sock_getsockopt(
429 socket_t sock,
430 int level,
431 int optname,
432 void *optval,
433 int *optlen)
434 {
435 int error = 0;
436 struct sockopt sopt;
437
438 if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;
439 sopt.sopt_dir = SOPT_GET;
440 sopt.sopt_level = level;
441 sopt.sopt_name = optname;
442 sopt.sopt_val = CAST_USER_ADDR_T(optval);
443 sopt.sopt_valsize = *optlen;
444 sopt.sopt_p = kernproc;
445 error = sogetopt(sock, &sopt); /* will lock socket */
446 if (error == 0) *optlen = sopt.sopt_valsize;
447 return error;
448 }
449
450 errno_t
451 sock_ioctl(
452 socket_t sock,
453 unsigned long request,
454 void *argp)
455 {
456 return soioctl(sock, request, argp, kernproc); /* will lock socket */
457 }
458
459 errno_t
460 sock_setsockopt(
461 socket_t sock,
462 int level,
463 int optname,
464 const void *optval,
465 int optlen)
466 {
467 struct sockopt sopt;
468
469 if (sock == NULL || optval == NULL) return EINVAL;
470 sopt.sopt_dir = SOPT_SET;
471 sopt.sopt_level = level;
472 sopt.sopt_name = optname;
473 sopt.sopt_val = CAST_USER_ADDR_T(optval);
474 sopt.sopt_valsize = optlen;
475 sopt.sopt_p = kernproc;
476 return sosetopt(sock, &sopt); /* will lock socket */
477 }
478
/*
 * Set the traffic class option (IP_TOS / IPV6_TCLASS) on a connected
 * socket whose destination is local.  Returns ENOTCONN if the socket
 * is not connected, EINVAL for unusable protocol state or a non-INET
 * family, and 0 (without setting anything) when the destination is
 * not local.
 */
errno_t
sock_settclassopt(
	socket_t sock,
	const void *optval,
	size_t optlen) {

	errno_t error = 0;
	struct sockopt sopt;

	if (sock == NULL || optval == NULL || optlen == 0) return EINVAL;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = CAST_USER_ADDR_T(optval);
	sopt.sopt_valsize = optlen;
	sopt.sopt_p = kernproc;

	socket_lock(sock, 1);
	if (!(sock->so_state & SS_ISCONNECTED)) {
		/* If the socket is not connected then we don't know
		 * if the destination is on LAN  or not. Skip
		 * setting traffic class in this case
		 */
		error = ENOTCONN;
		goto out;
	}

	if (sock->so_proto == NULL || sock->so_proto->pr_domain == NULL || sock->so_pcb == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Check if the destination address is LAN or link local address.
	 * We do not want to set traffic class bits if the destination
	 * is not local
	 */
	if (!so_isdstlocal(sock)) {
		/* NOTE(review): non-local destination returns 0 without
		 * setting the option — appears to be intentional. */
		goto out;
	}

	/* Map the address family to the matching option level/name. */
	switch (sock->so_proto->pr_domain->dom_family) {
	case AF_INET:
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_TOS;
		break;
	case AF_INET6:
		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_TCLASS;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Drop our lock; sosetopt() takes the socket lock itself. */
	socket_unlock(sock, 1);
	return sosetopt(sock, &sopt);
out:
	socket_unlock(sock, 1);
	return error;
}
538
539 errno_t
540 sock_gettclassopt(
541 socket_t sock,
542 void *optval,
543 size_t *optlen) {
544
545 errno_t error = 0;
546 struct sockopt sopt;
547
548 if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;
549
550 sopt.sopt_dir = SOPT_GET;
551 sopt.sopt_val = CAST_USER_ADDR_T(optval);
552 sopt.sopt_valsize = *optlen;
553 sopt.sopt_p = kernproc;
554
555 socket_lock(sock, 1);
556 if (sock->so_proto == NULL || sock->so_proto->pr_domain == NULL) {
557 socket_unlock(sock, 1);
558 return EINVAL;
559 }
560
561 switch (sock->so_proto->pr_domain->dom_family) {
562 case AF_INET:
563 sopt.sopt_level = IPPROTO_IP;
564 sopt.sopt_name = IP_TOS;
565 break;
566 case AF_INET6:
567 sopt.sopt_level = IPPROTO_IPV6;
568 sopt.sopt_name = IPV6_TCLASS;
569 break;
570 default:
571 socket_unlock(sock, 1);
572 return EINVAL;
573
574 }
575 socket_unlock(sock, 1);
576 error = sogetopt(sock, &sopt); /* will lock socket */
577 if (error == 0) *optlen = sopt.sopt_valsize;
578 return error;
579 }
580
581 errno_t
582 sock_listen(
583 socket_t sock,
584 int backlog)
585 {
586 if (sock == NULL) return EINVAL;
587 return solisten(sock, backlog); /* will lock socket */
588 }
589
590 static errno_t
591 sock_receive_internal(
592 socket_t sock,
593 struct msghdr *msg,
594 mbuf_t *data,
595 int flags,
596 size_t *recvdlen)
597 {
598 uio_t auio;
599 struct mbuf *control = NULL;
600 int error = 0;
601 int length = 0;
602 struct sockaddr *fromsa;
603 char uio_buf[ UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0) ];
604
605 if (sock == NULL) return EINVAL;
606
607 auio = uio_createwithbuffer(((msg != NULL) ? msg->msg_iovlen : 0),
608 0, UIO_SYSSPACE, UIO_READ,
609 &uio_buf[0], sizeof(uio_buf));
610 if (msg && data == NULL) {
611 int i;
612 struct iovec *tempp = msg->msg_iov;
613
614 for (i = 0; i < msg->msg_iovlen; i++) {
615 uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
616 }
617 if (uio_resid(auio) < 0) return EINVAL;
618 }
619 else {
620 uio_setresid(auio, (uio_resid(auio) + *recvdlen));
621 }
622 length = uio_resid(auio);
623
624 if (recvdlen)
625 *recvdlen = 0;
626
627 /* let pru_soreceive handle the socket locking */
628 error = sock->so_proto->pr_usrreqs->pru_soreceive(sock, &fromsa, auio,
629 data, (msg && msg->msg_control) ? &control : NULL, &flags);
630 if (error) goto cleanup;
631
632 if (recvdlen)
633 *recvdlen = length - uio_resid(auio);
634 if (msg) {
635 msg->msg_flags = flags;
636
637 if (msg->msg_name)
638 {
639 int salen;
640 salen = msg->msg_namelen;
641 if (msg->msg_namelen > 0 && fromsa != 0)
642 {
643 salen = MIN(salen, fromsa->sa_len);
644 memcpy(msg->msg_name, fromsa,
645 msg->msg_namelen > fromsa->sa_len ? fromsa->sa_len : msg->msg_namelen);
646 }
647 }
648
649 if (msg->msg_control)
650 {
651 struct mbuf* m = control;
652 u_char* ctlbuf = msg->msg_control;
653 int clen = msg->msg_controllen;
654 msg->msg_controllen = 0;
655
656 while (m && clen > 0)
657 {
658 unsigned int tocopy;
659 if (clen >= m->m_len)
660 {
661 tocopy = m->m_len;
662 }
663 else
664 {
665 msg->msg_flags |= MSG_CTRUNC;
666 tocopy = clen;
667 }
668 memcpy(ctlbuf, mtod(m, caddr_t), tocopy);
669 ctlbuf += tocopy;
670 clen -= tocopy;
671 m = m->m_next;
672 }
673 msg->msg_controllen = (uintptr_t)ctlbuf - (uintptr_t)msg->msg_control;
674 }
675 }
676
677 cleanup:
678 if (control) m_freem(control);
679 if (fromsa) FREE(fromsa, M_SONAME);
680 return error;
681 }
682
683 errno_t
684 sock_receive(
685 socket_t sock,
686 struct msghdr *msg,
687 int flags,
688 size_t *recvdlen)
689 {
690 if ((msg == NULL) ||
691 (msg->msg_iovlen < 1) ||
692 (msg->msg_iov[0].iov_len == 0) ||
693 (msg->msg_iov[0].iov_base == NULL))
694 return EINVAL;
695 return sock_receive_internal(sock, msg, NULL, flags, recvdlen);
696 }
697
698 errno_t
699 sock_receivembuf(
700 socket_t sock,
701 struct msghdr *msg,
702 mbuf_t *data,
703 int flags,
704 size_t *recvlen)
705 {
706 if (data == NULL || recvlen == 0 || *recvlen <= 0 || (msg &&
707 (msg->msg_iov != NULL || msg->msg_iovlen != 0)))
708 return EINVAL;
709 return sock_receive_internal(sock, msg, data, flags, recvlen);
710 }
711
712 errno_t
713 sock_send_internal(
714 socket_t sock,
715 const struct msghdr *msg,
716 mbuf_t data,
717 int flags,
718 size_t *sentlen)
719 {
720 uio_t auio = NULL;
721 struct mbuf *control = NULL;
722 int error = 0;
723 int datalen = 0;
724 char uio_buf[ UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1)) ];
725
726 if (sock == NULL) {
727 error = EINVAL;
728 goto errorout;
729 }
730
731 if (data == 0 && msg != NULL) {
732 struct iovec *tempp = msg->msg_iov;
733
734 auio = uio_createwithbuffer(msg->msg_iovlen, 0, UIO_SYSSPACE, UIO_WRITE,
735 &uio_buf[0], sizeof(uio_buf));
736 if (tempp != NULL)
737 {
738 int i;
739
740 for (i = 0; i < msg->msg_iovlen; i++) {
741 uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
742 }
743
744 if (uio_resid(auio) < 0) {
745 error = EINVAL;
746 goto errorout;
747 }
748 }
749 }
750
751 if (sentlen)
752 *sentlen = 0;
753
754 if (auio)
755 datalen = uio_resid(auio);
756 else
757 datalen = data->m_pkthdr.len;
758
759 if (msg && msg->msg_control)
760 {
761 if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
762 if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
763 control = m_get(M_NOWAIT, MT_CONTROL);
764 if (control == NULL) {
765 error = ENOMEM;
766 goto errorout;
767 }
768 memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
769 control->m_len = msg->msg_controllen;
770 }
771
772 error = sock->so_proto->pr_usrreqs->pru_sosend(sock, msg != NULL ?
773 (struct sockaddr*)msg->msg_name : NULL, auio, data, control, flags);
774
775 /*
776 * Residual data is possible in the case of IO vectors but not
777 * in the mbuf case since the latter is treated as atomic send.
778 * If pru_sosend() consumed a portion of the iovecs data and
779 * the error returned is transient, treat it as success; this
780 * is consistent with sendit() behavior.
781 */
782 if (auio != NULL && uio_resid(auio) != datalen &&
783 (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
784 error = 0;
785
786 if (error == 0 && sentlen != NULL) {
787 if (auio != NULL)
788 *sentlen = datalen - uio_resid(auio);
789 else
790 *sentlen = datalen;
791 }
792
793 return error;
794
795 /*
796 * In cases where we detect an error before returning, we need to
797 * free the mbuf chain if there is one. sosend (and pru_sosend) will
798 * free the mbuf chain if they encounter an error.
799 */
800 errorout:
801 if (control)
802 m_freem(control);
803 if (data)
804 m_freem(data);
805 if (sentlen)
806 *sentlen = 0;
807 return error;
808 }
809
810 errno_t
811 sock_send(
812 socket_t sock,
813 const struct msghdr *msg,
814 int flags,
815 size_t *sentlen)
816 {
817 if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1)
818 return EINVAL;
819 return sock_send_internal(sock, msg, NULL, flags, sentlen);
820 }
821
822 errno_t
823 sock_sendmbuf(
824 socket_t sock,
825 const struct msghdr *msg,
826 mbuf_t data,
827 int flags,
828 size_t *sentlen)
829 {
830 if (data == NULL || (msg &&
831 (msg->msg_iov != NULL || msg->msg_iovlen != 0))) {
832 if (data)
833 m_freem(data);
834 return EINVAL;
835 }
836 return sock_send_internal(sock, msg, data, flags, sentlen);
837 }
838
839 errno_t
840 sock_shutdown(
841 socket_t sock,
842 int how)
843 {
844 if (sock == NULL) return EINVAL;
845 return soshutdown(sock, how);
846 }
847
848
849 errno_t
850 sock_socket(
851 int domain,
852 int type,
853 int protocol,
854 sock_upcall callback,
855 void* context,
856 socket_t *new_so)
857 {
858 int error = 0;
859 if (new_so == NULL) return EINVAL;
860 /* socreate will create an initial so_count */
861 error = socreate(domain, new_so, type, protocol);
862 if (error == 0 && callback)
863 {
864 (*new_so)->so_rcv.sb_flags |= SB_UPCALL;
865 #if CONFIG_SOWUPCALL
866 (*new_so)->so_snd.sb_flags |= SB_UPCALL;
867 #endif
868 (*new_so)->so_upcall = (so_upcall)callback;
869 (*new_so)->so_upcallarg = context;
870 }
871 return error;
872 }
873
874 void
875 sock_close(
876 socket_t sock)
877 {
878 if (sock == NULL) return;
879 soclose(sock);
880 }
881
882 /* Do we want this to be APPLE_PRIVATE API?: YES (LD 12/23/04)*/
883 void
884 sock_retain(
885 socket_t sock)
886 {
887 if (sock == NULL) return;
888 socket_lock(sock, 1);
889 sock->so_retaincnt++;
890 sock->so_usecount++; /* add extra reference for holding the socket */
891 socket_unlock(sock, 1);
892 }
893
/* Do we want this to be APPLE_PRIVATE API? */
/*
 * Drop a reference taken with sock_retain().  When the last retain
 * goes away and no file descriptor still holds the socket, close it;
 * otherwise just shed the extra use count.
 */
void
sock_release(socket_t sock)
{
	if (sock == NULL)
		return;
	socket_lock(sock, 1);

	/* Let any in-flight upcall finish before tearing anything down. */
	if (sock->so_flags & SOF_UPCALLINUSE)
		soclose_wait_locked(sock);

	sock->so_retaincnt--;
	if (sock->so_retaincnt < 0)
		panic("sock_release: negative retain count for sock=%p "
		    "cnt=%x\n", sock, sock->so_retaincnt);
	/*
	 * usecount == 2 here means only our sock_retain() reference and
	 * the socreate() reference remain, i.e. no FD holds the socket.
	 */
	if ((sock->so_retaincnt == 0) && (sock->so_usecount == 2)) {
		/* close socket only if the FD is not holding it */
		soclose_locked(sock);
	} else {
		/* remove extra reference holding the socket */
		sock->so_usecount--;
	}
	socket_unlock(sock, 1);
}
918
919 errno_t
920 sock_setpriv(
921 socket_t sock,
922 int on)
923 {
924 if (sock == NULL) return EINVAL;
925 socket_lock(sock, 1);
926 if (on)
927 {
928 sock->so_state |= SS_PRIV;
929 }
930 else
931 {
932 sock->so_state &= ~SS_PRIV;
933 }
934 socket_unlock(sock, 1);
935 return 0;
936 }
937
938 int
939 sock_isconnected(
940 socket_t sock)
941 {
942 int retval;
943 socket_lock(sock, 1);
944 retval = (sock->so_state & SS_ISCONNECTED) != 0;
945 socket_unlock(sock, 1);
946 return (retval);
947 }
948
949 int
950 sock_isnonblocking(
951 socket_t sock)
952 {
953 int retval;
954 socket_lock(sock, 1);
955 retval = (sock->so_state & SS_NBIO) != 0;
956 socket_unlock(sock, 1);
957 return (retval);
958 }
959
960 errno_t
961 sock_gettype(
962 socket_t sock,
963 int *outDomain,
964 int *outType,
965 int *outProtocol)
966 {
967 socket_lock(sock, 1);
968 if (outDomain)
969 *outDomain = sock->so_proto->pr_domain->dom_family;
970 if (outType)
971 *outType = sock->so_type;
972 if (outProtocol)
973 *outProtocol = sock->so_proto->pr_protocol;
974 socket_unlock(sock, 1);
975 return 0;
976 }
977
978 /*
979 * Return the listening socket of a pre-accepted socket. It returns the
980 * listener (so_head) value of a given socket. This is intended to be
981 * called by a socket filter during a filter attach (sf_attach) callback.
982 * The value returned by this routine is safe to be used only in the
983 * context of that callback, because we hold the listener's lock across
984 * the sflt_initsock() call.
985 */
986 socket_t
987 sock_getlistener(socket_t sock)
988 {
989 return (sock->so_head);
990 }
991
/*
 * Caller must have ensured socket is valid and won't be going away.
 * Atomically OR 'flags' into so_traffic_mgt_flags.
 */
void
socket_set_traffic_mgt_flags(socket_t sock, u_int32_t flags)
{
	(void) OSBitOrAtomic(flags, &sock->so_traffic_mgt_flags);
}
1000
/*
 * Caller must have ensured socket is valid and won't be going away.
 * Atomically clear 'flags' from so_traffic_mgt_flags (AND with the
 * complement).
 */
void
socket_clear_traffic_mgt_flags(socket_t sock, u_int32_t flags)
{
	(void) OSBitAndAtomic(~flags, &sock->so_traffic_mgt_flags);
}