bsd/kern/kpi_socket.c (xnu-1504.3.12)
/*
 * Copyright (c) 2003-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define __KPI__
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/uio_internal.h>
#include <kern/lock.h>
#include <netinet/in.h>

extern int soclose_locked(struct socket *so);
extern void soclose_wait_locked(struct socket *so);
extern int so_isdstlocal(struct socket *so);

errno_t sock_send_internal(
	socket_t sock,
	const struct msghdr *msg,
	mbuf_t data,
	int flags,
	size_t *sentlen);

typedef void (*so_upcall)(struct socket *, caddr_t, int);


errno_t
sock_accept(
	socket_t sock,
	struct sockaddr *from,
	int fromlen,
	int flags,
	sock_upcall callback,
	void *cookie,
	socket_t *new_sock)
{
	struct sockaddr *sa;
	struct socket *new_so;
	lck_mtx_t *mutex_held;
	int dosocklock;
	errno_t error = 0;

	if (sock == NULL || new_sock == NULL) return EINVAL;
	socket_lock(sock, 1);
	if ((sock->so_options & SO_ACCEPTCONN) == 0) {
		socket_unlock(sock, 1);
		return EINVAL;
	}
	if ((flags & ~(MSG_DONTWAIT)) != 0) {
		socket_unlock(sock, 1);
		return ENOTSUP;
	}
	if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) &&
	    sock->so_comp.tqh_first == NULL) {
		socket_unlock(sock, 1);
		return EWOULDBLOCK;
	}

	if (sock->so_proto->pr_getlock != NULL) {
		mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		dosocklock = 1;
	} else {
		mutex_held = sock->so_proto->pr_domain->dom_mtx;
		dosocklock = 0;
	}

	while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
		if (sock->so_state & SS_CANTRCVMORE) {
			sock->so_error = ECONNABORTED;
			break;
		}
		error = msleep((caddr_t)&sock->so_timeo, mutex_held,
		    PSOCK | PCATCH, "sock_accept", NULL);
		if (error) {
			socket_unlock(sock, 1);
			return (error);
		}
	}
	if (sock->so_error) {
		error = sock->so_error;
		sock->so_error = 0;
		socket_unlock(sock, 1);
		return (error);
	}

	new_so = TAILQ_FIRST(&sock->so_comp);
	TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
	sock->so_qlen--;

	/*
	 * Pass the pre-accepted socket to any interested socket filter(s).
	 * Upon failure, the socket would have been closed by the callee.
	 */
	if (new_so->so_filt != NULL) {
		/*
		 * Temporarily drop the listening socket's lock before we
		 * hand off control over to the socket filter(s), but keep
		 * a reference so that it won't go away. We'll grab it
		 * again once we're done with the filter(s).
		 */
		socket_unlock(sock, 0);
		if ((error = soacceptfilter(new_so)) != 0) {
			/* Drop reference on listening socket */
			sodereference(sock);
			return (error);
		}
		socket_lock(sock, 0);
	}

	if (dosocklock) {
		lck_mtx_assert(new_so->so_proto->pr_getlock(new_so, 0),
		    LCK_MTX_ASSERT_NOTOWNED);
		socket_lock(new_so, 1);
	}

	new_so->so_state &= ~SS_COMP;
	new_so->so_head = NULL;
	(void) soacceptlock(new_so, &sa, 0);

	socket_unlock(sock, 1); /* release the head */

	if (callback) {
		new_so->so_upcall = (so_upcall) callback;
		new_so->so_upcallarg = cookie;
		new_so->so_rcv.sb_flags |= SB_UPCALL;
#if CONFIG_SOWUPCALL
		new_so->so_snd.sb_flags |= SB_UPCALL;
#endif
	}

	if (sa && from) {
		if (fromlen > sa->sa_len) fromlen = sa->sa_len;
		memcpy(from, sa, fromlen);
	}
	if (sa) FREE(sa, M_SONAME);

	/*
	 * If the socket has been marked as inactive by soacceptfilter(),
	 * disallow further operations on it. We explicitly call shutdown
	 * on both data directions to ensure that SS_CANT{RCV,SEND}MORE
	 * states are set for the socket. This would also flush out data
	 * hanging off the receive list of this socket.
	 */
	if (new_so->so_flags & SOF_DEFUNCT) {
		(void) soshutdownlock(new_so, SHUT_RD);
		(void) soshutdownlock(new_so, SHUT_WR);
		(void) sodisconnectlocked(new_so);
	}

	*new_sock = new_so;
	if (dosocklock)
		socket_unlock(new_so, 1);
	return error;
}
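
/*
 * Illustrative sketch (assumption: not part of this file): a kext that
 * owns a listening socket might drain its completed-connection queue
 * without blocking, e.g. from a worker thread woken by the listener's
 * upcall.  drain_accept_queue, conn_upcall and handle_connection are
 * hypothetical names.
 */
#if 0
static void
drain_accept_queue(socket_t listen_so, void *cookie)
{
	socket_t new_so;
	struct sockaddr_in from;
	errno_t err;

	for (;;) {
		bzero(&from, sizeof (from));
		err = sock_accept(listen_so, (struct sockaddr *)&from,
		    sizeof (from), MSG_DONTWAIT, conn_upcall, cookie, &new_so);
		if (err == EWOULDBLOCK)
			break;	/* completion queue is empty */
		if (err != 0)
			break;	/* listener error; handled by the caller */
		handle_connection(new_so, &from);
	}
}
#endif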

errno_t
sock_bind(
	socket_t sock,
	const struct sockaddr *to)
{
	if (sock == NULL || to == NULL) return EINVAL;

	return sobind(sock, (struct sockaddr*)(uintptr_t)to);
}

errno_t
sock_connect(
	socket_t sock,
	const struct sockaddr *to,
	int flags)
{
	int error = 0;
	lck_mtx_t *mutex_held;

	if (sock == NULL || to == NULL) return EINVAL;

	socket_lock(sock, 1);

	if ((sock->so_state & SS_ISCONNECTING) &&
	    ((sock->so_state & SS_NBIO) != 0 ||
	    (flags & MSG_DONTWAIT) != 0)) {
		socket_unlock(sock, 1);
		return EALREADY;
	}
	error = soconnectlock(sock, (struct sockaddr*)(uintptr_t)to, 0);
	if (!error) {
		if ((sock->so_state & SS_ISCONNECTING) &&
		    ((sock->so_state & SS_NBIO) != 0 || (flags & MSG_DONTWAIT) != 0)) {
			socket_unlock(sock, 1);
			return EINPROGRESS;
		}

		if (sock->so_proto->pr_getlock != NULL)
			mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		else
			mutex_held = sock->so_proto->pr_domain->dom_mtx;

		while ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
			error = msleep((caddr_t)&sock->so_timeo, mutex_held,
			    PSOCK | PCATCH, "sock_connect", NULL);
			if (error)
				break;
		}

		if (error == 0) {
			error = sock->so_error;
			sock->so_error = 0;
		}
	} else {
		sock->so_state &= ~SS_ISCONNECTING;
	}
	socket_unlock(sock, 1);
	return error;
}

errno_t
sock_connectwait(
	socket_t sock,
	const struct timeval *tv)
{
	lck_mtx_t *mutex_held;
	errno_t retval = 0;
	struct timespec ts;

	socket_lock(sock, 1);

	// Check if we're already connected or if we've already errored out
	if ((sock->so_state & SS_ISCONNECTING) == 0 || sock->so_error) {
		if (sock->so_error) {
			retval = sock->so_error;
			sock->so_error = 0;
		} else {
			if ((sock->so_state & SS_ISCONNECTED) != 0)
				retval = 0;
			else
				retval = EINVAL;
		}
		goto done;
	}

	// copied translation from timeval to hertz from SO_RCVTIMEO handling
	if (tv->tv_sec < 0 || tv->tv_sec > SHRT_MAX / hz ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
		retval = EDOM;
		goto done;
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = (tv->tv_usec * NSEC_PER_USEC);
	if ( (ts.tv_sec + (ts.tv_nsec/NSEC_PER_SEC))/100 > SHRT_MAX) {
		retval = EDOM;
		goto done;
	}

	if (sock->so_proto->pr_getlock != NULL)
		mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
	else
		mutex_held = sock->so_proto->pr_domain->dom_mtx;

	msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK, "sock_connectwait", &ts);

	// Check if we're still waiting to connect
	if ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
		retval = EINPROGRESS;
		goto done;
	}

	if (sock->so_error) {
		retval = sock->so_error;
		sock->so_error = 0;
	}

done:
	socket_unlock(sock, 1);
	return retval;
}
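
/*
 * Illustrative sketch (assumption: not part of this file): issuing a
 * non-blocking connect with MSG_DONTWAIT and then bounding the wait
 * with sock_connectwait(), instead of sleeping inside sock_connect()
 * for an unbounded time.  connect_with_timeout is a hypothetical name.
 */
#if 0
static errno_t
connect_with_timeout(socket_t so, const struct sockaddr *to, int seconds)
{
	struct timeval tv;
	errno_t err;

	err = sock_connect(so, to, MSG_DONTWAIT);
	if (err != EINPROGRESS)
		return (err);	/* connected immediately, or hard failure */

	tv.tv_sec = seconds;
	tv.tv_usec = 0;
	err = sock_connectwait(so, &tv);
	return (err);		/* 0, EINPROGRESS (still pending) or so_error */
}
#endif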

errno_t
sock_nointerrupt(
	socket_t sock,
	int on)
{
	socket_lock(sock, 1);

	if (on) {
		sock->so_rcv.sb_flags |= SB_NOINTR; // This isn't safe
		sock->so_snd.sb_flags |= SB_NOINTR; // This isn't safe
	} else {
		sock->so_rcv.sb_flags &= ~SB_NOINTR; // This isn't safe
		sock->so_snd.sb_flags &= ~SB_NOINTR; // This isn't safe
	}

	socket_unlock(sock, 1);

	return 0;
}

errno_t
sock_getpeername(socket_t sock, struct sockaddr *peername, int peernamelen)
{
	int error;
	struct sockaddr *sa = NULL;

	if (sock == NULL || peername == NULL || peernamelen < 0)
		return (EINVAL);

	socket_lock(sock, 1);
	if (!(sock->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING))) {
		socket_unlock(sock, 1);
		return (ENOTCONN);
	}
	error = sogetaddr_locked(sock, &sa, 1);
	socket_unlock(sock, 1);
	if (error == 0) {
		if (peernamelen > sa->sa_len)
			peernamelen = sa->sa_len;
		memcpy(peername, sa, peernamelen);
		FREE(sa, M_SONAME);
	}
	return (error);
}

errno_t
sock_getsockname(socket_t sock, struct sockaddr *sockname, int socknamelen)
{
	int error;
	struct sockaddr *sa = NULL;

	if (sock == NULL || sockname == NULL || socknamelen < 0)
		return (EINVAL);

	socket_lock(sock, 1);
	error = sogetaddr_locked(sock, &sa, 0);
	socket_unlock(sock, 1);
	if (error == 0) {
		if (socknamelen > sa->sa_len)
			socknamelen = sa->sa_len;
		memcpy(sockname, sa, socknamelen);
		FREE(sa, M_SONAME);
	}
	return (error);
}

__private_extern__ int
sogetaddr_locked(struct socket *so, struct sockaddr **psa, int peer)
{
	int error;

	if (so == NULL || psa == NULL)
		return (EINVAL);

	*psa = NULL;
	error = peer ? so->so_proto->pr_usrreqs->pru_peeraddr(so, psa) :
	    so->so_proto->pr_usrreqs->pru_sockaddr(so, psa);

	if (error == 0 && *psa == NULL) {
		error = ENOMEM;
	} else if (error != 0 && *psa != NULL) {
		FREE(*psa, M_SONAME);
		*psa = NULL;
	}
	return (error);
}

errno_t
sock_getaddr(socket_t sock, struct sockaddr **psa, int peer)
{
	int error;

	if (sock == NULL || psa == NULL)
		return (EINVAL);

	socket_lock(sock, 1);
	error = sogetaddr_locked(sock, psa, peer);
	socket_unlock(sock, 1);

	return (error);
}

void
sock_freeaddr(struct sockaddr *sa)
{
	if (sa != NULL)
		FREE(sa, M_SONAME);
}

errno_t
sock_getsockopt(
	socket_t sock,
	int level,
	int optname,
	void *optval,
	int *optlen)
{
	int error = 0;
	struct sockopt sopt;

	if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;
	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_val = CAST_USER_ADDR_T(optval);
	sopt.sopt_valsize = *optlen;
	sopt.sopt_p = kernproc;
	error = sogetopt(sock, &sopt); /* will lock socket */
	if (error == 0) *optlen = sopt.sopt_valsize;
	return error;
}

errno_t
sock_ioctl(
	socket_t sock,
	unsigned long request,
	void *argp)
{
	return soioctl(sock, request, argp, kernproc); /* will lock socket */
}

errno_t
sock_setsockopt(
	socket_t sock,
	int level,
	int optname,
	const void *optval,
	int optlen)
{
	struct sockopt sopt;

	if (sock == NULL || optval == NULL) return EINVAL;
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_val = CAST_USER_ADDR_T(optval);
	sopt.sopt_valsize = optlen;
	sopt.sopt_p = kernproc;
	return sosetopt(sock, &sopt); /* will lock socket */
}
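
/*
 * Illustrative sketch (assumption: not part of this file): because
 * sock_getsockopt()/sock_setsockopt() build a struct sockopt with
 * sopt_p set to kernproc, option values are plain kernel buffers.
 * enable_reuseaddr is a hypothetical helper name.
 */
#if 0
static errno_t
enable_reuseaddr(socket_t so)
{
	int one = 1;

	/* kernel-side equivalent of setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, ...) */
	return (sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR,
	    &one, sizeof (one)));
}
#endif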

errno_t
sock_settclassopt(
	socket_t sock,
	const void *optval,
	size_t optlen)
{
	errno_t error = 0;
	struct sockopt sopt;

	if (sock == NULL || optval == NULL || optlen == 0) return EINVAL;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = CAST_USER_ADDR_T(optval);
	sopt.sopt_valsize = optlen;
	sopt.sopt_p = kernproc;

	socket_lock(sock, 1);
	if (!(sock->so_state & SS_ISCONNECTED)) {
		/* If the socket is not connected then we don't know
		 * if the destination is on LAN or not. Skip
		 * setting traffic class in this case
		 */
		error = ENOTCONN;
		goto out;
	}

	if (sock->so_proto == NULL || sock->so_proto->pr_domain == NULL || sock->so_pcb == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Check if the destination address is LAN or link local address.
	 * We do not want to set traffic class bits if the destination
	 * is not local
	 */
	if (!so_isdstlocal(sock)) {
		goto out;
	}

	switch (sock->so_proto->pr_domain->dom_family) {
	case AF_INET:
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_TOS;
		break;
	case AF_INET6:
		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_TCLASS;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	socket_unlock(sock, 1);
	return sosetopt(sock, &sopt);
out:
	socket_unlock(sock, 1);
	return error;
}

errno_t
sock_gettclassopt(
	socket_t sock,
	void *optval,
	size_t *optlen)
{
	errno_t error = 0;
	struct sockopt sopt;

	if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;

	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_val = CAST_USER_ADDR_T(optval);
	sopt.sopt_valsize = *optlen;
	sopt.sopt_p = kernproc;

	socket_lock(sock, 1);
	if (sock->so_proto == NULL || sock->so_proto->pr_domain == NULL) {
		socket_unlock(sock, 1);
		return EINVAL;
	}

	switch (sock->so_proto->pr_domain->dom_family) {
	case AF_INET:
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_TOS;
		break;
	case AF_INET6:
		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_TCLASS;
		break;
	default:
		socket_unlock(sock, 1);
		return EINVAL;
	}
	socket_unlock(sock, 1);
	error = sogetopt(sock, &sopt); /* will lock socket */
	if (error == 0) *optlen = sopt.sopt_valsize;
	return error;
}

errno_t
sock_listen(
	socket_t sock,
	int backlog)
{
	if (sock == NULL) return EINVAL;
	return solisten(sock, backlog); /* will lock socket */
}

static errno_t
sock_receive_internal(
	socket_t sock,
	struct msghdr *msg,
	mbuf_t *data,
	int flags,
	size_t *recvdlen)
{
	uio_t auio;
	struct mbuf *control = NULL;
	int error = 0;
	int length = 0;
	struct sockaddr *fromsa = NULL;	/* initialized for the cleanup path below */
	char uio_buf[ UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0) ];

	if (sock == NULL) return EINVAL;

	auio = uio_createwithbuffer(((msg != NULL) ? msg->msg_iovlen : 0),
	    0, UIO_SYSSPACE, UIO_READ,
	    &uio_buf[0], sizeof(uio_buf));
	if (msg && data == NULL) {
		int i;
		struct iovec *tempp = msg->msg_iov;

		for (i = 0; i < msg->msg_iovlen; i++) {
			uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
		}
		if (uio_resid(auio) < 0) return EINVAL;
	} else {
		uio_setresid(auio, (uio_resid(auio) + *recvdlen));
	}
	length = uio_resid(auio);

	if (recvdlen)
		*recvdlen = 0;

	/* let pru_soreceive handle the socket locking */
	error = sock->so_proto->pr_usrreqs->pru_soreceive(sock, &fromsa, auio,
	    data, (msg && msg->msg_control) ? &control : NULL, &flags);
	if (error) goto cleanup;

	if (recvdlen)
		*recvdlen = length - uio_resid(auio);
	if (msg) {
		msg->msg_flags = flags;

		if (msg->msg_name) {
			int salen;
			salen = msg->msg_namelen;
			if (msg->msg_namelen > 0 && fromsa != 0) {
				salen = MIN(salen, fromsa->sa_len);
				memcpy(msg->msg_name, fromsa,
				    msg->msg_namelen > fromsa->sa_len ? fromsa->sa_len : msg->msg_namelen);
			}
		}

		if (msg->msg_control) {
			struct mbuf *m = control;
			u_char *ctlbuf = msg->msg_control;
			int clen = msg->msg_controllen;
			msg->msg_controllen = 0;

			while (m && clen > 0) {
				unsigned int tocopy;
				if (clen >= m->m_len) {
					tocopy = m->m_len;
				} else {
					msg->msg_flags |= MSG_CTRUNC;
					tocopy = clen;
				}
				memcpy(ctlbuf, mtod(m, caddr_t), tocopy);
				ctlbuf += tocopy;
				clen -= tocopy;
				m = m->m_next;
			}
			msg->msg_controllen = (uintptr_t)ctlbuf - (uintptr_t)msg->msg_control;
		}
	}

cleanup:
	if (control) m_freem(control);
	if (fromsa) FREE(fromsa, M_SONAME);
	return error;
}

errno_t
sock_receive(
	socket_t sock,
	struct msghdr *msg,
	int flags,
	size_t *recvdlen)
{
	if ((msg == NULL) ||
	    (msg->msg_iovlen < 1) ||
	    (msg->msg_iov[0].iov_len == 0) ||
	    (msg->msg_iov[0].iov_base == NULL))
		return EINVAL;
	return sock_receive_internal(sock, msg, NULL, flags, recvdlen);
}
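
/*
 * Illustrative sketch (assumption: not part of this file): receiving
 * into a kernel buffer with sock_receive().  The msghdr carries kernel
 * addresses; sock_receive_internal() wraps them in a UIO_SYSSPACE uio.
 * read_some is a hypothetical helper name.
 */
#if 0
static errno_t
read_some(socket_t so, void *buf, size_t buflen, size_t *gotlen)
{
	struct iovec iov;
	struct msghdr msg;

	bzero(&msg, sizeof (msg));
	iov.iov_base = buf;
	iov.iov_len = buflen;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	/* flags such as MSG_DONTWAIT or MSG_WAITALL pass straight through */
	return (sock_receive(so, &msg, MSG_DONTWAIT, gotlen));
}
#endif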

errno_t
sock_receivembuf(
	socket_t sock,
	struct msghdr *msg,
	mbuf_t *data,
	int flags,
	size_t *recvlen)
{
	if (data == NULL || recvlen == 0 || *recvlen <= 0 || (msg &&
	    (msg->msg_iov != NULL || msg->msg_iovlen != 0)))
		return EINVAL;
	return sock_receive_internal(sock, msg, data, flags, recvlen);
}

errno_t
sock_send_internal(
	socket_t sock,
	const struct msghdr *msg,
	mbuf_t data,
	int flags,
	size_t *sentlen)
{
	uio_t auio = NULL;
	struct mbuf *control = NULL;
	int error = 0;
	int datalen = 0;
	char uio_buf[ UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1)) ];

	if (sock == NULL) {
		error = EINVAL;
		goto errorout;
	}

	if (data == 0 && msg != NULL) {
		struct iovec *tempp = msg->msg_iov;

		auio = uio_createwithbuffer(msg->msg_iovlen, 0, UIO_SYSSPACE, UIO_WRITE,
		    &uio_buf[0], sizeof(uio_buf));
		if (tempp != NULL) {
			int i;

			for (i = 0; i < msg->msg_iovlen; i++) {
				uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
			}

			if (uio_resid(auio) < 0) {
				error = EINVAL;
				goto errorout;
			}
		}
	}

	if (sentlen)
		*sentlen = 0;

	if (auio)
		datalen = uio_resid(auio);
	else
		datalen = data->m_pkthdr.len;

	if (msg && msg->msg_control) {
		if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
		if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
		control = m_get(M_NOWAIT, MT_CONTROL);
		if (control == NULL) {
			error = ENOMEM;
			goto errorout;
		}
		memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
		control->m_len = msg->msg_controllen;
	}

	error = sock->so_proto->pr_usrreqs->pru_sosend(sock, msg != NULL ?
	    (struct sockaddr*)msg->msg_name : NULL, auio, data, control, flags);

	/*
	 * Residual data is possible in the case of IO vectors but not
	 * in the mbuf case since the latter is treated as atomic send.
	 * If pru_sosend() consumed a portion of the iovecs data and
	 * the error returned is transient, treat it as success; this
	 * is consistent with sendit() behavior.
	 */
	if (auio != NULL && uio_resid(auio) != datalen &&
	    (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
		error = 0;

	if (error == 0 && sentlen != NULL) {
		if (auio != NULL)
			*sentlen = datalen - uio_resid(auio);
		else
			*sentlen = datalen;
	}

	return error;

/*
 * In cases where we detect an error before returning, we need to
 * free the mbuf chain if there is one. sosend (and pru_sosend) will
 * free the mbuf chain if they encounter an error.
 */
errorout:
	if (control)
		m_freem(control);
	if (data)
		m_freem(data);
	if (sentlen)
		*sentlen = 0;
	return error;
}

errno_t
sock_send(
	socket_t sock,
	const struct msghdr *msg,
	int flags,
	size_t *sentlen)
{
	if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1)
		return EINVAL;
	return sock_send_internal(sock, msg, NULL, flags, sentlen);
}
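
/*
 * Illustrative sketch (assumption: not part of this file): sending a
 * kernel buffer with sock_send().  For a connected socket msg_name may
 * be left NULL; for an unconnected datagram socket it would point to
 * the destination sockaddr.  write_some is a hypothetical helper name.
 */
#if 0
static errno_t
write_some(socket_t so, const void *buf, size_t buflen, size_t *sentlen)
{
	struct iovec iov;
	struct msghdr msg;

	bzero(&msg, sizeof (msg));
	iov.iov_base = (void *)(uintptr_t)buf;	/* iovec is not const-qualified */
	iov.iov_len = buflen;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	return (sock_send(so, &msg, 0, sentlen));
}
#endif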

errno_t
sock_sendmbuf(
	socket_t sock,
	const struct msghdr *msg,
	mbuf_t data,
	int flags,
	size_t *sentlen)
{
	if (data == NULL || (msg &&
	    (msg->msg_iov != NULL || msg->msg_iovlen != 0))) {
		if (data)
			m_freem(data);
		return EINVAL;
	}
	return sock_send_internal(sock, msg, data, flags, sentlen);
}

errno_t
sock_shutdown(
	socket_t sock,
	int how)
{
	if (sock == NULL) return EINVAL;
	return soshutdown(sock, how);
}


errno_t
sock_socket(
	int domain,
	int type,
	int protocol,
	sock_upcall callback,
	void *context,
	socket_t *new_so)
{
	int error = 0;
	if (new_so == NULL) return EINVAL;
	/* socreate will create an initial so_count */
	error = socreate(domain, new_so, type, protocol);
	if (error == 0 && callback) {
		(*new_so)->so_rcv.sb_flags |= SB_UPCALL;
#if CONFIG_SOWUPCALL
		(*new_so)->so_snd.sb_flags |= SB_UPCALL;
#endif
		(*new_so)->so_upcall = (so_upcall)callback;
		(*new_so)->so_upcallarg = context;
	}
	return error;
}
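
/*
 * Illustrative sketch (assumption: not part of this file): creating a
 * kernel UDP socket whose receive upcall fires whenever data arrives.
 * rx_upcall, make_udp_socket and the cookie are hypothetical; upcalls
 * are invoked from the network stack, so they commonly just signal a
 * worker thread rather than doing socket I/O inline.
 */
#if 0
static void
rx_upcall(socket_t so, void *cookie, __unused int waitf)
{
	/* typically: wake a worker thread that then calls sock_receive() */
}

static errno_t
make_udp_socket(void *cookie, socket_t *out_so)
{
	return (sock_socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP,
	    rx_upcall, cookie, out_so));
}
#endif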

void
sock_close(
	socket_t sock)
{
	if (sock == NULL) return;
	soclose(sock);
}

/* Do we want this to be APPLE_PRIVATE API?: YES (LD 12/23/04) */
void
sock_retain(
	socket_t sock)
{
	if (sock == NULL) return;
	socket_lock(sock, 1);
	sock->so_retaincnt++;
	sock->so_usecount++;	/* add extra reference for holding the socket */
	socket_unlock(sock, 1);
}

/* Do we want this to be APPLE_PRIVATE API? */
void
sock_release(socket_t sock)
{
	if (sock == NULL)
		return;
	socket_lock(sock, 1);

	if (sock->so_flags & SOF_UPCALLINUSE)
		soclose_wait_locked(sock);

	sock->so_retaincnt--;
	if (sock->so_retaincnt < 0)
		panic("sock_release: negative retain count for sock=%p "
		    "cnt=%x\n", sock, sock->so_retaincnt);
	if ((sock->so_retaincnt == 0) && (sock->so_usecount == 2)) {
		/* close socket only if the FD is not holding it */
		soclose_locked(sock);
	} else {
		/* remove extra reference holding the socket */
		sock->so_usecount--;
	}
	socket_unlock(sock, 1);
}

errno_t
sock_setpriv(
	socket_t sock,
	int on)
{
	if (sock == NULL) return EINVAL;
	socket_lock(sock, 1);
	if (on) {
		sock->so_state |= SS_PRIV;
	} else {
		sock->so_state &= ~SS_PRIV;
	}
	socket_unlock(sock, 1);
	return 0;
}

int
sock_isconnected(
	socket_t sock)
{
	int retval;
	socket_lock(sock, 1);
	retval = (sock->so_state & SS_ISCONNECTED) != 0;
	socket_unlock(sock, 1);
	return (retval);
}

int
sock_isnonblocking(
	socket_t sock)
{
	int retval;
	socket_lock(sock, 1);
	retval = (sock->so_state & SS_NBIO) != 0;
	socket_unlock(sock, 1);
	return (retval);
}

errno_t
sock_gettype(
	socket_t sock,
	int *outDomain,
	int *outType,
	int *outProtocol)
{
	socket_lock(sock, 1);
	if (outDomain)
		*outDomain = sock->so_proto->pr_domain->dom_family;
	if (outType)
		*outType = sock->so_type;
	if (outProtocol)
		*outProtocol = sock->so_proto->pr_protocol;
	socket_unlock(sock, 1);
	return 0;
}

/*
 * Return the listening socket of a pre-accepted socket. It returns the
 * listener (so_head) value of a given socket. This is intended to be
 * called by a socket filter during a filter attach (sf_attach) callback.
 * The value returned by this routine is safe to be used only in the
 * context of that callback, because we hold the listener's lock across
 * the sflt_initsock() call.
 */
socket_t
sock_getlistener(socket_t sock)
{
	return (sock->so_head);
}
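
/*
 * Illustrative sketch (assumption: not part of this file): a socket
 * filter's attach callback (sf_attach) is the one context where the
 * comment above says sock_getlistener() may safely be used, e.g. to
 * record which listener a pre-accepted socket came from.  my_attach,
 * my_cookie_t and my_cookie_alloc are hypothetical names.
 */
#if 0
static errno_t
my_attach(void **cookie, socket_t so)
{
	my_cookie_t *c;
	socket_t listener = sock_getlistener(so);

	/* listener is NULL unless 'so' is sitting on a listen queue */
	c = my_cookie_alloc();
	if (c == NULL)
		return (ENOMEM);
	c->is_preaccepted = (listener != NULL);
	*cookie = c;
	return (0);
}
#endif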