bsd/kern/kpi_socket.c (apple/xnu, release xnu-7195.101.1)
1/*
2 * Copyright (c) 2003-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#define __KPI__
30#include <sys/systm.h>
31#include <sys/kernel.h>
32#include <sys/types.h>
33#include <sys/socket.h>
34#include <sys/socketvar.h>
35#include <sys/param.h>
36#include <sys/proc.h>
37#include <sys/errno.h>
38#include <sys/malloc.h>
39#include <sys/protosw.h>
40#include <sys/domain.h>
41#include <sys/mbuf.h>
42#include <sys/mcache.h>
43#include <sys/fcntl.h>
44#include <sys/filio.h>
45#include <sys/uio_internal.h>
46#include <kern/locks.h>
47#include <net/net_api_stats.h>
48#include <netinet/in.h>
49#include <libkern/OSAtomic.h>
50#include <stdbool.h>
51
52static errno_t sock_send_internal(socket_t, const struct msghdr *,
53 mbuf_t, int, size_t *);
54
55#undef sock_accept
56#undef sock_socket
57errno_t sock_accept(socket_t so, struct sockaddr *from, int fromlen,
58 int flags, sock_upcall callback, void *cookie, socket_t *new_so);
59errno_t sock_socket(int domain, int type, int protocol, sock_upcall callback,
60 void *context, socket_t *new_so);
61
62static errno_t sock_accept_common(socket_t sock, struct sockaddr *from,
63 int fromlen, int flags, sock_upcall callback, void *cookie,
64 socket_t *new_sock, bool is_internal);
65static errno_t sock_socket_common(int domain, int type, int protocol,
66 sock_upcall callback, void *context, socket_t *new_so, bool is_internal);
67
68errno_t
69sock_accept_common(socket_t sock, struct sockaddr *from, int fromlen, int flags,
70 sock_upcall callback, void *cookie, socket_t *new_sock, bool is_internal)
71{
72 struct sockaddr *sa;
73 struct socket *new_so;
74 lck_mtx_t *mutex_held;
75 int dosocklock;
76 errno_t error = 0;
77
78 if (sock == NULL || new_sock == NULL) {
79 return EINVAL;
80 }
81
82 socket_lock(sock, 1);
83 if ((sock->so_options & SO_ACCEPTCONN) == 0) {
84 socket_unlock(sock, 1);
85 return EINVAL;
86 }
87 if ((flags & ~(MSG_DONTWAIT)) != 0) {
88 socket_unlock(sock, 1);
89 return ENOTSUP;
90 }
91check_again:
92 if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) &&
93 sock->so_comp.tqh_first == NULL) {
94 socket_unlock(sock, 1);
95 return EWOULDBLOCK;
96 }
97
98 if (sock->so_proto->pr_getlock != NULL) {
99 mutex_held = (*sock->so_proto->pr_getlock)(sock, PR_F_WILLUNLOCK);
100 dosocklock = 1;
101 } else {
102 mutex_held = sock->so_proto->pr_domain->dom_mtx;
103 dosocklock = 0;
104 }
105
106 while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
107 if (sock->so_state & SS_CANTRCVMORE) {
108 sock->so_error = ECONNABORTED;
109 break;
110 }
111 error = msleep((caddr_t)&sock->so_timeo, mutex_held,
112 PSOCK | PCATCH, "sock_accept", NULL);
113 if (error != 0) {
114 socket_unlock(sock, 1);
115 return error;
116 }
117 }
118 if (sock->so_error != 0) {
119 error = sock->so_error;
120 sock->so_error = 0;
121 socket_unlock(sock, 1);
122 return error;
123 }
124
125 so_acquire_accept_list(sock, NULL);
126 if (TAILQ_EMPTY(&sock->so_comp)) {
127 so_release_accept_list(sock);
128 goto check_again;
129 }
130 new_so = TAILQ_FIRST(&sock->so_comp);
131 TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
132 new_so->so_state &= ~SS_COMP;
133 new_so->so_head = NULL;
134 sock->so_qlen--;
135
136 so_release_accept_list(sock);
137
138 /*
139 * Count the accepted socket as an in-kernel socket
140 */
141 new_so->so_flags1 |= SOF1_IN_KERNEL_SOCKET;
142 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_in_kernel_total);
143 if (is_internal) {
144 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_in_kernel_os_total);
145 }
146
147 /*
148 * Pass the pre-accepted socket to any interested socket filter(s).
149 * Upon failure, the callee will already have closed the socket.
150 */
151 if (new_so->so_filt != NULL) {
152 /*
153 * Temporarily drop the listening socket's lock before we
154 * hand off control over to the socket filter(s), but keep
155 * a reference so that it won't go away. We'll grab it
156 * again once we're done with the filter(s).
157 */
158 socket_unlock(sock, 0);
159 if ((error = soacceptfilter(new_so, sock)) != 0) {
160 /* Drop reference on listening socket */
161 sodereference(sock);
162 return error;
163 }
164 socket_lock(sock, 0);
165 }
166
167 if (dosocklock) {
168 LCK_MTX_ASSERT(new_so->so_proto->pr_getlock(new_so, 0),
169 LCK_MTX_ASSERT_NOTOWNED);
170 socket_lock(new_so, 1);
171 }
172
173 (void) soacceptlock(new_so, &sa, 0);
174
175 socket_unlock(sock, 1); /* release the head */
176
177 /* see comments in sock_setupcall() */
178 if (callback != NULL) {
179#if (defined(__arm__) || defined(__arm64__))
180 sock_setupcalls_locked(new_so, callback, cookie, callback, cookie, 0);
181#else /* (defined(__arm__) || defined(__arm64__)) */
182 sock_setupcalls_locked(new_so, callback, cookie, NULL, NULL, 0);
183#endif /* (defined(__arm__) || defined(__arm64__)) */
184 }
185
186 if (sa != NULL && from != NULL) {
187 if (fromlen > sa->sa_len) {
188 fromlen = sa->sa_len;
189 }
190 memcpy(from, sa, fromlen);
191 }
192 if (sa != NULL) {
193 FREE(sa, M_SONAME);
194 }
195
196 /*
197 * If the socket has been marked as inactive by sosetdefunct(),
198 * disallow further operations on it.
199 */
200 if (new_so->so_flags & SOF_DEFUNCT) {
201 (void) sodefunct(current_proc(), new_so,
202 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL);
203 }
204 *new_sock = new_so;
205 if (dosocklock) {
206 socket_unlock(new_so, 1);
207 }
208 return error;
209}
210
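/*
 * Illustrative usage (added sketch, not part of the original source):
 * accepting a queued connection on an in-kernel listening socket. Names
 * such as listen_so, my_upcall and my_cookie are hypothetical.
 *
 *	struct sockaddr_in peer = { 0 };
 *	socket_t child = NULL;
 *	errno_t err;
 *
 *	err = sock_accept(listen_so, (struct sockaddr *)&peer,
 *	    (int)sizeof(peer), MSG_DONTWAIT, my_upcall, my_cookie, &child);
 *	if (err == 0) {
 *		// child is a connected socket; sock_close(child) when done
 *	} else if (err == EWOULDBLOCK) {
 *		// no completed connection was queued on listen_so
 *	}
 */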
211errno_t
212sock_accept(socket_t sock, struct sockaddr *from, int fromlen, int flags,
213 sock_upcall callback, void *cookie, socket_t *new_sock)
214{
215 return sock_accept_common(sock, from, fromlen, flags,
216 callback, cookie, new_sock, false);
217}
218
219errno_t
220sock_accept_internal(socket_t sock, struct sockaddr *from, int fromlen, int flags,
221 sock_upcall callback, void *cookie, socket_t *new_sock)
222{
223 return sock_accept_common(sock, from, fromlen, flags,
224 callback, cookie, new_sock, true);
225}
226
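/*
 * Illustrative usage (added sketch, not part of the original source):
 * binding an in-kernel IPv4 socket to a local port; so is hypothetical.
 *
 *	struct sockaddr_in sin = { 0 };
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(12345);
 *	sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *
 *	errno_t err = sock_bind(so, (const struct sockaddr *)&sin);
 */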
227errno_t
228sock_bind(socket_t sock, const struct sockaddr *to)
229{
230 int error = 0;
231 struct sockaddr *sa = NULL;
232 struct sockaddr_storage ss;
233 boolean_t want_free = TRUE;
234
235 if (sock == NULL || to == NULL) {
236 return EINVAL;
237 }
238
239 if (to->sa_len > sizeof(ss)) {
240 sa = kheap_alloc(KHEAP_TEMP, to->sa_len, Z_WAITOK);
241 if (sa == NULL) {
242 return ENOBUFS;
243 }
244 } else {
245 sa = (struct sockaddr *)&ss;
246 want_free = FALSE;
247 }
248 memcpy(sa, to, to->sa_len);
249
250 error = sobindlock(sock, sa, 1); /* will lock socket */
251
252 if (sa != NULL && want_free == TRUE) {
253 kheap_free(KHEAP_TEMP, sa, sa->sa_len);
254 }
255
256 return error;
257}
258
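/*
 * Illustrative usage (added sketch, not part of the original source): a
 * blocking connect from an in-kernel socket. With flags == 0 on a blocking
 * socket, sock_connect() sleeps until the connection completes or fails.
 *
 *	struct sockaddr_in sin = { 0 };
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(80);
 *	sin.sin_addr.s_addr = htonl(0x7f000001);	// 127.0.0.1, placeholder
 *
 *	errno_t err = sock_connect(so, (const struct sockaddr *)&sin, 0);
 */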
259errno_t
260sock_connect(socket_t sock, const struct sockaddr *to, int flags)
261{
262 int error = 0;
263 lck_mtx_t *mutex_held;
264 struct sockaddr *sa = NULL;
265 struct sockaddr_storage ss;
266 boolean_t want_free = TRUE;
267
268 if (sock == NULL || to == NULL) {
269 return EINVAL;
270 }
271
272 if (to->sa_len > sizeof(ss)) {
273 sa = kheap_alloc(KHEAP_TEMP, to->sa_len,
274 (flags & MSG_DONTWAIT) ? Z_NOWAIT : Z_WAITOK);
275 if (sa == NULL) {
276 return ENOBUFS;
277 }
278 } else {
279 sa = (struct sockaddr *)&ss;
280 want_free = FALSE;
281 }
282 memcpy(sa, to, to->sa_len);
283
284 socket_lock(sock, 1);
285
286 if ((sock->so_state & SS_ISCONNECTING) &&
287 ((sock->so_state & SS_NBIO) != 0 || (flags & MSG_DONTWAIT) != 0)) {
288 error = EALREADY;
289 goto out;
290 }
291 error = soconnectlock(sock, sa, 0);
292 if (!error) {
293 if ((sock->so_state & SS_ISCONNECTING) &&
294 ((sock->so_state & SS_NBIO) != 0 ||
295 (flags & MSG_DONTWAIT) != 0)) {
296 error = EINPROGRESS;
297 goto out;
298 }
299
300 if (sock->so_proto->pr_getlock != NULL) {
301 mutex_held = (*sock->so_proto->pr_getlock)(sock, PR_F_WILLUNLOCK);
302 } else {
303 mutex_held = sock->so_proto->pr_domain->dom_mtx;
304 }
305
306 while ((sock->so_state & SS_ISCONNECTING) &&
307 sock->so_error == 0) {
308 error = msleep((caddr_t)&sock->so_timeo,
309 mutex_held, PSOCK | PCATCH, "sock_connect", NULL);
310 if (error != 0) {
311 break;
312 }
313 }
314
315 if (error == 0) {
316 error = sock->so_error;
317 sock->so_error = 0;
318 }
319 } else {
320 sock->so_state &= ~SS_ISCONNECTING;
321 }
322out:
323 socket_unlock(sock, 1);
324
325 if (sa != NULL && want_free == TRUE) {
326 kheap_free(KHEAP_TEMP, sa, sa->sa_len);
327 }
328
329 return error;
330}
331
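/*
 * Illustrative usage (added sketch, not part of the original source):
 * pairing a non-blocking sock_connect() with sock_connectwait() to bound
 * the wait; sin is a destination address prepared as in the sketch above.
 * EINPROGRESS from either call means the attempt is still outstanding.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	errno_t err;
 *
 *	err = sock_connect(so, (const struct sockaddr *)&sin, MSG_DONTWAIT);
 *	if (err == EINPROGRESS) {
 *		err = sock_connectwait(so, &tv);
 *	}
 */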
332errno_t
333sock_connectwait(socket_t sock, const struct timeval *tv)
334{
335 lck_mtx_t *mutex_held;
336 errno_t retval = 0;
337 struct timespec ts;
338
339 socket_lock(sock, 1);
340
341 /* Check if we're already connected or if we've already errored out */
342 if ((sock->so_state & SS_ISCONNECTING) == 0 || sock->so_error != 0) {
343 if (sock->so_error != 0) {
344 retval = sock->so_error;
345 sock->so_error = 0;
346 } else {
347 if ((sock->so_state & SS_ISCONNECTED) != 0) {
348 retval = 0;
349 } else {
350 retval = EINVAL;
351 }
352 }
353 goto done;
354 }
355
356 /* translation from timeval to hz ticks, copied from the SO_RCVTIMEO handling */
357 if (tv->tv_sec < 0 || tv->tv_sec > SHRT_MAX / hz ||
358 tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
359 retval = EDOM;
360 goto done;
361 }
362
363 ts.tv_sec = tv->tv_sec;
364 ts.tv_nsec = (tv->tv_usec * (integer_t)NSEC_PER_USEC);
365 if ((ts.tv_sec + (ts.tv_nsec / (long)NSEC_PER_SEC)) / 100 > SHRT_MAX) {
366 retval = EDOM;
367 goto done;
368 }
369
370 if (sock->so_proto->pr_getlock != NULL) {
371 mutex_held = (*sock->so_proto->pr_getlock)(sock, PR_F_WILLUNLOCK);
372 } else {
373 mutex_held = sock->so_proto->pr_domain->dom_mtx;
374 }
375
376 msleep((caddr_t)&sock->so_timeo, mutex_held,
377 PSOCK, "sock_connectwait", &ts);
378
379 /* Check if we're still waiting to connect */
380 if ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
381 retval = EINPROGRESS;
382 goto done;
383 }
384
385 if (sock->so_error != 0) {
386 retval = sock->so_error;
387 sock->so_error = 0;
388 }
389
390done:
391 socket_unlock(sock, 1);
392 return retval;
393}
394
395errno_t
396sock_nointerrupt(socket_t sock, int on)
397{
398 socket_lock(sock, 1);
399
400 if (on) {
401 sock->so_rcv.sb_flags |= SB_NOINTR; /* This isn't safe */
402 sock->so_snd.sb_flags |= SB_NOINTR; /* This isn't safe */
403 } else {
404 sock->so_rcv.sb_flags &= ~SB_NOINTR; /* This isn't safe */
405 sock->so_snd.sb_flags &= ~SB_NOINTR; /* This isn't safe */
406 }
407
408 socket_unlock(sock, 1);
409
410 return 0;
411}
412
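/*
 * Illustrative usage (added sketch, not part of the original source):
 * retrieving the peer address of a connected in-kernel socket. At most
 * peernamelen bytes (truncated to the address's sa_len) are copied out.
 *
 *	struct sockaddr_storage ss = { 0 };
 *	errno_t err = sock_getpeername(so, (struct sockaddr *)&ss,
 *	    (int)sizeof(ss));
 */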
413errno_t
414sock_getpeername(socket_t sock, struct sockaddr *peername, int peernamelen)
415{
416 int error;
417 struct sockaddr *sa = NULL;
418
419 if (sock == NULL || peername == NULL || peernamelen < 0) {
420 return EINVAL;
421 }
422
423 socket_lock(sock, 1);
424 if (!(sock->so_state & (SS_ISCONNECTED | SS_ISCONFIRMING))) {
425 socket_unlock(sock, 1);
426 return ENOTCONN;
427 }
428 error = sogetaddr_locked(sock, &sa, 1);
429 socket_unlock(sock, 1);
430 if (error == 0) {
431 if (peernamelen > sa->sa_len) {
432 peernamelen = sa->sa_len;
433 }
434 memcpy(peername, sa, peernamelen);
435 FREE(sa, M_SONAME);
436 }
437 return error;
438}
439
440errno_t
441sock_getsockname(socket_t sock, struct sockaddr *sockname, int socknamelen)
442{
443 int error;
444 struct sockaddr *sa = NULL;
445
446 if (sock == NULL || sockname == NULL || socknamelen < 0) {
447 return EINVAL;
448 }
449
450 socket_lock(sock, 1);
451 error = sogetaddr_locked(sock, &sa, 0);
452 socket_unlock(sock, 1);
453 if (error == 0) {
454 if (socknamelen > sa->sa_len) {
455 socknamelen = sa->sa_len;
456 }
457 memcpy(sockname, sa, socknamelen);
458 FREE(sa, M_SONAME);
459 }
460 return error;
461}
462
463__private_extern__ int
464sogetaddr_locked(struct socket *so, struct sockaddr **psa, int peer)
465{
466 int error;
467
468 if (so == NULL || psa == NULL) {
469 return EINVAL;
470 }
471
472 *psa = NULL;
473 error = peer ? so->so_proto->pr_usrreqs->pru_peeraddr(so, psa) :
474 so->so_proto->pr_usrreqs->pru_sockaddr(so, psa);
475
476 if (error == 0 && *psa == NULL) {
477 error = ENOMEM;
478 } else if (error != 0) {
479 FREE(*psa, M_SONAME);
480 }
481 return error;
482}
483
484errno_t
485sock_getaddr(socket_t sock, struct sockaddr **psa, int peer)
486{
487 int error;
488
489 if (sock == NULL || psa == NULL) {
490 return EINVAL;
491 }
492
493 socket_lock(sock, 1);
494 error = sogetaddr_locked(sock, psa, peer);
495 socket_unlock(sock, 1);
496
497 return error;
498}
499
500void
501sock_freeaddr(struct sockaddr *sa)
502{
503 FREE(sa, M_SONAME);
504}
505
506errno_t
507sock_getsockopt(socket_t sock, int level, int optname, void *optval,
508 int *optlen)
509{
510 int error = 0;
511 struct sockopt sopt;
512
513 if (sock == NULL || optval == NULL || optlen == NULL) {
514 return EINVAL;
515 }
516
517 sopt.sopt_dir = SOPT_GET;
518 sopt.sopt_level = level;
519 sopt.sopt_name = optname;
520 sopt.sopt_val = CAST_USER_ADDR_T(optval);
521 sopt.sopt_valsize = *optlen;
522 sopt.sopt_p = kernproc;
523 error = sogetoptlock(sock, &sopt, 1); /* will lock socket */
524 if (error == 0) {
525 *optlen = (uint32_t)sopt.sopt_valsize;
526 }
527 return error;
528}
529
530errno_t
531sock_ioctl(socket_t sock, unsigned long request, void *argp)
532{
533 return soioctl(sock, request, argp, kernproc); /* will lock socket */
534}
535
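/*
 * Illustrative usage (added sketch, not part of the original source):
 * setting and reading socket options from kernel code. Option values live
 * in kernel memory; both routines run the request as kernproc.
 *
 *	int on = 1;
 *	errno_t err = sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR,
 *	    &on, (int)sizeof(on));
 *
 *	int soerr = 0;
 *	int len = (int)sizeof(soerr);
 *	err = sock_getsockopt(so, SOL_SOCKET, SO_ERROR, &soerr, &len);
 */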
536errno_t
537sock_setsockopt(socket_t sock, int level, int optname, const void *optval,
538 int optlen)
539{
540 struct sockopt sopt;
541
542 if (sock == NULL || optval == NULL) {
543 return EINVAL;
544 }
545
546 sopt.sopt_dir = SOPT_SET;
547 sopt.sopt_level = level;
548 sopt.sopt_name = optname;
549 sopt.sopt_val = CAST_USER_ADDR_T(optval);
550 sopt.sopt_valsize = optlen;
551 sopt.sopt_p = kernproc;
552 return sosetoptlock(sock, &sopt, 1); /* will lock socket */
553}
554
555/*
556 * This follows the recommended mappings between DSCP code points
557 * and WMM access classes.
558 */
559static uint32_t
560so_tc_from_dscp(uint8_t dscp)
561{
562 uint32_t tc;
563
564 if (dscp >= 0x30 && dscp <= 0x3f) {
565 tc = SO_TC_VO;
566 } else if (dscp >= 0x20 && dscp <= 0x2f) {
567 tc = SO_TC_VI;
568 } else if (dscp >= 0x08 && dscp <= 0x17) {
569 tc = SO_TC_BK_SYS;
570 } else {
571 tc = SO_TC_BE;
572 }
573
574 return tc;
575}
576
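/*
 * Illustrative usage (added sketch, not part of the original source): the
 * value handed to sock_settclassopt() is a full TOS/traffic-class byte in
 * an int, with the DSCP code point in the upper six bits (the routine
 * shifts it right by two before mapping it to a traffic class).
 *
 *	int tos = 46 << 2;	// DSCP EF (46) in the upper six bits
 *	errno_t err = sock_settclassopt(so, &tos, sizeof(tos));
 *	// ENOTCONN is returned if the socket is not connected yet.
 */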
577errno_t
578sock_settclassopt(socket_t sock, const void *optval, size_t optlen)
579{
580 errno_t error = 0;
581 struct sockopt sopt;
582 int sotc;
583
584 if (sock == NULL || optval == NULL || optlen != sizeof(int)) {
585 return EINVAL;
586 }
587
588 socket_lock(sock, 1);
589 if (!(sock->so_state & SS_ISCONNECTED)) {
590 /*
591 * If the socket is not connected, we do not know whether
592 * the destination is on the LAN or not. Skip setting the
593 * traffic class in this case.
594 */
595 error = ENOTCONN;
596 goto out;
597 }
598
599 if (sock->so_proto == NULL || sock->so_proto->pr_domain == NULL ||
600 sock->so_pcb == NULL) {
601 error = EINVAL;
602 goto out;
603 }
604
605 /*
606 * Set the socket traffic class based on the passed DSCP code point
607 * regardless of the scope of the destination
608 */
609 sotc = so_tc_from_dscp((uint8_t)((*(const int *)optval) >> 2));
610
611 sopt.sopt_dir = SOPT_SET;
612 sopt.sopt_val = CAST_USER_ADDR_T(&sotc);
613 sopt.sopt_valsize = sizeof(sotc);
614 sopt.sopt_p = kernproc;
615 sopt.sopt_level = SOL_SOCKET;
616 sopt.sopt_name = SO_TRAFFIC_CLASS;
617
618 error = sosetoptlock(sock, &sopt, 0); /* already locked */
619
620 if (error != 0) {
621 printf("%s: sosetopt SO_TRAFFIC_CLASS failed %d\n",
622 __func__, error);
623 goto out;
624 }
625
626 /*
627 * Check whether the destination address is a LAN or link-local address.
628 * We do not want to set traffic class bits if the destination
629 * is not local.
630 */
631 if (!so_isdstlocal(sock)) {
632 goto out;
633 }
634
635 sopt.sopt_dir = SOPT_SET;
636 sopt.sopt_val = CAST_USER_ADDR_T(optval);
637 sopt.sopt_valsize = optlen;
638 sopt.sopt_p = kernproc;
639
640 switch (SOCK_DOM(sock)) {
641 case PF_INET:
642 sopt.sopt_level = IPPROTO_IP;
643 sopt.sopt_name = IP_TOS;
644 break;
645 case PF_INET6:
646 sopt.sopt_level = IPPROTO_IPV6;
647 sopt.sopt_name = IPV6_TCLASS;
648 break;
649 default:
650 error = EINVAL;
651 goto out;
652 }
653
654 error = sosetoptlock(sock, &sopt, 0); /* already locked */
655 socket_unlock(sock, 1);
656 return error;
657out:
658 socket_unlock(sock, 1);
659 return error;
660}
661
662errno_t
663sock_gettclassopt(socket_t sock, void *optval, size_t *optlen)
664{
665 errno_t error = 0;
666 struct sockopt sopt;
667
668 if (sock == NULL || optval == NULL || optlen == NULL) {
669 return EINVAL;
670 }
671
672 sopt.sopt_dir = SOPT_GET;
673 sopt.sopt_val = CAST_USER_ADDR_T(optval);
674 sopt.sopt_valsize = *optlen;
675 sopt.sopt_p = kernproc;
676
677 socket_lock(sock, 1);
678 if (sock->so_proto == NULL || sock->so_proto->pr_domain == NULL) {
679 socket_unlock(sock, 1);
680 return EINVAL;
681 }
682
683 switch (SOCK_DOM(sock)) {
684 case PF_INET:
685 sopt.sopt_level = IPPROTO_IP;
686 sopt.sopt_name = IP_TOS;
687 break;
688 case PF_INET6:
689 sopt.sopt_level = IPPROTO_IPV6;
690 sopt.sopt_name = IPV6_TCLASS;
691 break;
692 default:
693 socket_unlock(sock, 1);
694 return EINVAL;
695 }
696 error = sogetoptlock(sock, &sopt, 0); /* already locked */
697 socket_unlock(sock, 1);
698 if (error == 0) {
699 *optlen = sopt.sopt_valsize;
700 }
701 return error;
702}
703
704errno_t
705sock_listen(socket_t sock, int backlog)
706{
707 if (sock == NULL) {
708 return EINVAL;
709 }
710
711 return solisten(sock, backlog); /* will lock socket */
712}
713
714errno_t
715sock_receive_internal(socket_t sock, struct msghdr *msg, mbuf_t *data,
716 int flags, size_t *recvdlen)
717{
718 uio_t auio;
719 struct mbuf *control = NULL;
720 int error = 0;
721 user_ssize_t length = 0;
722 struct sockaddr *fromsa = NULL;
723 char uio_buf[UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0)];
724
725 if (sock == NULL) {
726 return EINVAL;
727 }
728
729 auio = uio_createwithbuffer(((msg != NULL) ? msg->msg_iovlen : 0),
730 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
731 if (msg != NULL && data == NULL) {
732 int i;
733 struct iovec *tempp = msg->msg_iov;
734
735 for (i = 0; i < msg->msg_iovlen; i++) {
736 uio_addiov(auio,
737 CAST_USER_ADDR_T((tempp + i)->iov_base),
738 (tempp + i)->iov_len);
739 }
740 if (uio_resid(auio) < 0) {
741 return EINVAL;
742 }
743 } else if (recvdlen != NULL) {
744 uio_setresid(auio, (uio_resid(auio) + *recvdlen));
745 }
746 length = uio_resid(auio);
747
748 if (recvdlen != NULL) {
749 *recvdlen = 0;
750 }
751
752 /* let pru_soreceive handle the socket locking */
753 error = sock->so_proto->pr_usrreqs->pru_soreceive(sock, &fromsa, auio,
754 data, (msg && msg->msg_control) ? &control : NULL, &flags);
755 if (error != 0) {
756 goto cleanup;
757 }
758
759 if (recvdlen != NULL) {
760 *recvdlen = length - uio_resid(auio);
761 }
762 if (msg != NULL) {
763 msg->msg_flags = flags;
764
765 if (msg->msg_name != NULL) {
766 int salen;
767 salen = msg->msg_namelen;
768 if (msg->msg_namelen > 0 && fromsa != NULL) {
769 salen = MIN(salen, fromsa->sa_len);
770 memcpy(msg->msg_name, fromsa,
771 msg->msg_namelen > fromsa->sa_len ?
772 fromsa->sa_len : msg->msg_namelen);
773 }
774 }
775
776 if (msg->msg_control != NULL) {
777 struct mbuf *m = control;
778 u_char *ctlbuf = msg->msg_control;
779 int clen = msg->msg_controllen;
780
781 msg->msg_controllen = 0;
782
783 while (m != NULL && clen > 0) {
784 unsigned int tocopy;
785
786 if (clen >= m->m_len) {
787 tocopy = m->m_len;
788 } else {
789 msg->msg_flags |= MSG_CTRUNC;
790 tocopy = clen;
791 }
792 memcpy(ctlbuf, mtod(m, caddr_t), tocopy);
793 ctlbuf += tocopy;
794 clen -= tocopy;
795 m = m->m_next;
796 }
797 msg->msg_controllen =
798 (socklen_t)((uintptr_t)ctlbuf - (uintptr_t)msg->msg_control);
799 }
800 }
801
802cleanup:
803 if (control != NULL) {
804 m_freem(control);
805 }
806 FREE(fromsa, M_SONAME);
807 return error;
808}
809
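/*
 * Illustrative usage (added sketch, not part of the original source): a
 * non-blocking receive into a kernel buffer via the msghdr/iovec form.
 *
 *	char buf[256];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { 0 };
 *	size_t recvd = 0;
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	errno_t err = sock_receive(so, &msg, MSG_DONTWAIT, &recvd);
 *	// on success, recvd holds the number of bytes copied into buf
 */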
810errno_t
811sock_receive(socket_t sock, struct msghdr *msg, int flags, size_t *recvdlen)
812{
813 if ((msg == NULL) || (msg->msg_iovlen < 1) ||
814 (msg->msg_iov[0].iov_len == 0) ||
815 (msg->msg_iov[0].iov_base == NULL)) {
816 return EINVAL;
817 }
818
819 return sock_receive_internal(sock, msg, NULL, flags, recvdlen);
820}
821
822errno_t
823sock_receivembuf(socket_t sock, struct msghdr *msg, mbuf_t *data, int flags,
824 size_t *recvlen)
825{
826 if (data == NULL || recvlen == 0 || *recvlen <= 0 || (msg != NULL &&
827 (msg->msg_iov != NULL || msg->msg_iovlen != 0))) {
828 return EINVAL;
829 }
830
831 return sock_receive_internal(sock, msg, data, flags, recvlen);
832}
833
834errno_t
835sock_send_internal(socket_t sock, const struct msghdr *msg, mbuf_t data,
836 int flags, size_t *sentlen)
837{
838 uio_t auio = NULL;
839 struct mbuf *control = NULL;
840 int error = 0;
841 user_ssize_t datalen = 0;
842 char uio_buf[UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1))];
843
844 if (sock == NULL) {
845 error = EINVAL;
846 goto errorout;
847 }
848
849 if (data == NULL && msg != NULL) {
850 struct iovec *tempp = msg->msg_iov;
851
852 auio = uio_createwithbuffer(msg->msg_iovlen, 0,
853 UIO_SYSSPACE, UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
854 if (tempp != NULL) {
855 int i;
856
857 for (i = 0; i < msg->msg_iovlen; i++) {
858 uio_addiov(auio,
859 CAST_USER_ADDR_T((tempp + i)->iov_base),
860 (tempp + i)->iov_len);
861 }
862
863 if (uio_resid(auio) < 0) {
864 error = EINVAL;
865 goto errorout;
866 }
867 }
868 }
869
870 if (sentlen != NULL) {
871 *sentlen = 0;
872 }
873
874 if (auio != NULL) {
875 datalen = uio_resid(auio);
876 } else {
877 datalen = data->m_pkthdr.len;
878 }
879
880 if (msg != NULL && msg->msg_control) {
881 if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) {
882 error = EINVAL;
883 goto errorout;
884 }
885
886 if ((size_t)msg->msg_controllen > MLEN) {
887 error = EINVAL;
888 goto errorout;
889 }
890
891 control = m_get(M_NOWAIT, MT_CONTROL);
892 if (control == NULL) {
893 error = ENOMEM;
894 goto errorout;
895 }
896 memcpy(mtod(control, caddr_t), msg->msg_control,
897 msg->msg_controllen);
898 control->m_len = msg->msg_controllen;
899 }
900
901 error = sock->so_proto->pr_usrreqs->pru_sosend(sock, msg != NULL ?
902 (struct sockaddr *)msg->msg_name : NULL, auio, data,
903 control, flags);
904
905 /*
906 * Residual data is possible in the case of IO vectors but not
907 * in the mbuf case since the latter is treated as atomic send.
908 * If pru_sosend() consumed a portion of the iovecs data and
909 * the error returned is transient, treat it as success; this
910 * is consistent with sendit() behavior.
911 */
912 if (auio != NULL && uio_resid(auio) != datalen &&
913 (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) {
914 error = 0;
915 }
916
917 if (error == 0 && sentlen != NULL) {
918 if (auio != NULL) {
919 *sentlen = datalen - uio_resid(auio);
920 } else {
921 *sentlen = datalen;
922 }
923 }
924
925 return error;
926
927/*
928 * In cases where we detect an error before returning, we need to
929 * free the mbuf chain if there is one. sosend (and pru_sosend) will
930 * free the mbuf chain if they encounter an error.
931 */
932errorout:
933 if (control) {
934 m_freem(control);
935 }
936 if (data) {
937 m_freem(data);
938 }
939 if (sentlen) {
940 *sentlen = 0;
941 }
942 return error;
943}
944
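/*
 * Illustrative usage (added sketch, not part of the original source):
 * sending a kernel buffer (buf/buflen are hypothetical) via the
 * msghdr/iovec form. sock_sendmbuf() is the alternative when the data is
 * already in an mbuf chain, which it consumes even on error.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *	struct msghdr msg = { 0 };
 *	size_t sent = 0;
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	errno_t err = sock_send(so, &msg, 0, &sent);
 */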
945errno_t
946sock_send(socket_t sock, const struct msghdr *msg, int flags, size_t *sentlen)
947{
948 if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1) {
949 return EINVAL;
950 }
951
952 return sock_send_internal(sock, msg, NULL, flags, sentlen);
953}
954
955errno_t
956sock_sendmbuf(socket_t sock, const struct msghdr *msg, mbuf_t data,
957 int flags, size_t *sentlen)
958{
959 if (data == NULL || (msg != NULL && (msg->msg_iov != NULL ||
960 msg->msg_iovlen != 0))) {
961 if (data != NULL) {
962 m_freem(data);
963 }
964 return EINVAL;
965 }
966 return sock_send_internal(sock, msg, data, flags, sentlen);
967}
968
969errno_t
970sock_shutdown(socket_t sock, int how)
971{
972 if (sock == NULL) {
973 return EINVAL;
974 }
975
976 return soshutdown(sock, how);
977}
978
979errno_t
980sock_socket_common(int domain, int type, int protocol, sock_upcall callback,
981 void *context, socket_t *new_so, bool is_internal)
982{
983 int error = 0;
984
985 if (new_so == NULL) {
986 return EINVAL;
987 }
988
989 /* socreate will create an initial so_count */
990 error = socreate(domain, new_so, type, protocol);
991 if (error == 0) {
992 /*
993 * This is an in-kernel socket
994 */
995 (*new_so)->so_flags1 |= SOF1_IN_KERNEL_SOCKET;
996 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_in_kernel_total);
997 if (is_internal) {
998 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_in_kernel_os_total);
999 }
1000
1001 /* see comments in sock_setupcall() */
1002 if (callback != NULL) {
1003 sock_setupcall(*new_so, callback, context);
1004 }
1005 /*
1006 * last_pid and last_upid should be zero for sockets
1007 * created using sock_socket
1008 */
1009 (*new_so)->last_pid = 0;
1010 (*new_so)->last_upid = 0;
1011 }
1012 return error;
1013}
1014
1015errno_t
1016sock_socket_internal(int domain, int type, int protocol, sock_upcall callback,
1017 void *context, socket_t *new_so)
1018{
1019 return sock_socket_common(domain, type, protocol, callback,
1020 context, new_so, true);
1021}
1022
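/*
 * Illustrative usage (added sketch, not part of the original source):
 * creating and tearing down an in-kernel TCP socket. Passing a callback
 * here installs it through sock_setupcall() (read upcall only on non-ARM
 * platforms, read and write upcalls on arm/arm64, per the comments below).
 *
 *	socket_t so = NULL;
 *	errno_t err = sock_socket(PF_INET, SOCK_STREAM, IPPROTO_TCP,
 *	    NULL, NULL, &so);
 *	if (err == 0) {
 *		// ... bind/connect/send/receive ...
 *		sock_close(so);
 *	}
 */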
1023errno_t
1024sock_socket(int domain, int type, int protocol, sock_upcall callback,
1025 void *context, socket_t *new_so)
1026{
1027 return sock_socket_common(domain, type, protocol, callback,
1028 context, new_so, false);
1029}
1030
1031void
1032sock_close(socket_t sock)
1033{
1034 if (sock == NULL) {
1035 return;
1036 }
1037
1038 soclose(sock);
1039}
1040
1041/* Do we want this to be APPLE_PRIVATE API?: YES (LD 12/23/04) */
1042void
1043sock_retain(socket_t sock)
1044{
1045 if (sock == NULL) {
1046 return;
1047 }
1048
1049 socket_lock(sock, 1);
1050 sock->so_retaincnt++;
1051 sock->so_usecount++; /* add extra reference for holding the socket */
1052 socket_unlock(sock, 1);
1053}
1054
1055/* Do we want this to be APPLE_PRIVATE API? */
1056void
1057sock_release(socket_t sock)
1058{
1059 if (sock == NULL) {
1060 return;
1061 }
1062
1063 socket_lock(sock, 1);
1064 if (sock->so_upcallusecount > 0) {
1065 soclose_wait_locked(sock);
1066 }
1067
1068 sock->so_retaincnt--;
1069 if (sock->so_retaincnt < 0) {
1070 panic("%s: negative retain count (%d) for sock=%p\n",
1071 __func__, sock->so_retaincnt, sock);
1072 /* NOTREACHED */
1073 }
1074 /*
1075 * Check SS_NOFDREF in case a close happened as sock_retain()
1076 * was grabbing the lock
1077 */
1078 if ((sock->so_retaincnt == 0) && (sock->so_usecount == 2) &&
1079 (!(sock->so_state & SS_NOFDREF) ||
1080 (sock->so_flags & SOF_MP_SUBFLOW))) {
1081 /* close socket only if the FD is not holding it */
1082 soclose_locked(sock);
1083 } else {
1084 /* remove extra reference holding the socket */
1085 VERIFY(sock->so_usecount > 1);
1086 sock->so_usecount--;
1087 }
1088 socket_unlock(sock, 1);
1089}
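/*
 * Illustrative pairing (added note, not part of the original source):
 * sock_retain() takes an extra use count so the socket cannot go away
 * underneath the caller; each retain must be balanced by a sock_release(),
 * which may close the socket once no file descriptor is holding it.
 *
 *	sock_retain(so);
 *	// ... so stays valid across blocking operations ...
 *	sock_release(so);
 */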
1090
1091errno_t
1092sock_setpriv(socket_t sock, int on)
1093{
1094 if (sock == NULL) {
1095 return EINVAL;
1096 }
1097
1098 socket_lock(sock, 1);
1099 if (on) {
1100 sock->so_state |= SS_PRIV;
1101 } else {
1102 sock->so_state &= ~SS_PRIV;
1103 }
1104 socket_unlock(sock, 1);
1105 return 0;
1106}
1107
1108int
1109sock_isconnected(socket_t sock)
1110{
1111 int retval;
1112
1113 socket_lock(sock, 1);
1114 retval = ((sock->so_state & SS_ISCONNECTED) ? 1 : 0);
1115 socket_unlock(sock, 1);
1116 return retval;
1117}
1118
1119int
1120sock_isnonblocking(socket_t sock)
1121{
1122 int retval;
1123
1124 socket_lock(sock, 1);
1125 retval = ((sock->so_state & SS_NBIO) ? 1 : 0);
1126 socket_unlock(sock, 1);
1127 return retval;
1128}
1129
1130errno_t
1131sock_gettype(socket_t sock, int *outDomain, int *outType, int *outProtocol)
1132{
1133 socket_lock(sock, 1);
1134 if (outDomain != NULL) {
1135 *outDomain = SOCK_DOM(sock);
1136 }
1137 if (outType != NULL) {
1138 *outType = sock->so_type;
1139 }
1140 if (outProtocol != NULL) {
1141 *outProtocol = SOCK_PROTO(sock);
1142 }
1143 socket_unlock(sock, 1);
1144 return 0;
1145}
1146
1147/*
1148 * Return the listening socket of a pre-accepted socket. It returns the
1149 * listener (so_head) value of a given socket. This is intended to be
1150 * called by a socket filter during a filter attach (sf_attach) callback.
1151 * The value returned by this routine is safe to be used only in the
1152 * context of that callback, because we hold the listener's lock across
1153 * the sflt_initsock() call.
1154 */
1155socket_t
1156sock_getlistener(socket_t sock)
1157{
1158 return sock->so_head;
1159}
1160
1161static inline void
1162sock_set_tcp_stream_priority(socket_t sock)
1163{
1164 if ((SOCK_DOM(sock) == PF_INET || SOCK_DOM(sock) == PF_INET6) &&
1165 SOCK_TYPE(sock) == SOCK_STREAM) {
1166 set_tcp_stream_priority(sock);
1167 }
1168}
1169
1170/*
1171 * Caller must have ensured socket is valid and won't be going away.
1172 */
1173void
1174socket_set_traffic_mgt_flags_locked(socket_t sock, u_int8_t flags)
1175{
1176 u_int32_t soflags1 = 0;
1177
1178 if ((flags & TRAFFIC_MGT_SO_BACKGROUND)) {
1179 soflags1 |= SOF1_TRAFFIC_MGT_SO_BACKGROUND;
1180 }
1181 if ((flags & TRAFFIC_MGT_TCP_RECVBG)) {
1182 soflags1 |= SOF1_TRAFFIC_MGT_TCP_RECVBG;
1183 }
1184
1185 (void) OSBitOrAtomic(soflags1, &sock->so_flags1);
1186
1187 sock_set_tcp_stream_priority(sock);
1188}
1189
1190void
1191socket_set_traffic_mgt_flags(socket_t sock, u_int8_t flags)
1192{
1193 socket_lock(sock, 1);
1194 socket_set_traffic_mgt_flags_locked(sock, flags);
1195 socket_unlock(sock, 1);
1196}
1197
1198/*
1199 * Caller must have ensured socket is valid and won't be going away.
1200 */
1201void
1202socket_clear_traffic_mgt_flags_locked(socket_t sock, u_int8_t flags)
1203{
1204 u_int32_t soflags1 = 0;
1205
1206 if ((flags & TRAFFIC_MGT_SO_BACKGROUND)) {
1207 soflags1 |= SOF1_TRAFFIC_MGT_SO_BACKGROUND;
1208 }
1209 if ((flags & TRAFFIC_MGT_TCP_RECVBG)) {
1210 soflags1 |= SOF1_TRAFFIC_MGT_TCP_RECVBG;
1211 }
1212
1213 (void) OSBitAndAtomic(~soflags1, &sock->so_flags1);
1214
1215 sock_set_tcp_stream_priority(sock);
1216}
1217
1218void
1219socket_clear_traffic_mgt_flags(socket_t sock, u_int8_t flags)
1220{
1221 socket_lock(sock, 1);
1222 socket_clear_traffic_mgt_flags_locked(sock, flags);
1223 socket_unlock(sock, 1);
1224}
1225
1226
1227/*
1228 * Caller must have ensured socket is valid and won't be going away.
1229 */
1230errno_t
1231socket_defunct(struct proc *p, socket_t so, int level)
1232{
1233 errno_t retval;
1234
1235 if (level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC &&
1236 level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL) {
1237 return EINVAL;
1238 }
1239
1240 socket_lock(so, 1);
1241 /*
1242 * SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC level is meant to tear down
1243 * all of mDNSResponder IPC sockets, currently those of AF_UNIX; note
1244 * that this is an implementation artifact of mDNSResponder. We do
1245 * a quick test against the socket buffers for SB_UNIX, since that
1246 * would have been set by unp_attach() at socket creation time.
1247 */
1248 if (level == SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC &&
1249 (so->so_rcv.sb_flags & so->so_snd.sb_flags & SB_UNIX) != SB_UNIX) {
1250 socket_unlock(so, 1);
1251 return EOPNOTSUPP;
1252 }
1253 retval = sosetdefunct(p, so, level, TRUE);
1254 if (retval == 0) {
1255 retval = sodefunct(p, so, level);
1256 }
1257 socket_unlock(so, 1);
1258 return retval;
1259}
1260
1261void
1262sock_setupcalls_locked(socket_t sock, sock_upcall rcallback, void *rcontext,
1263 sock_upcall wcallback, void *wcontext, int locked)
1264{
1265 if (rcallback != NULL) {
1266 sock->so_rcv.sb_flags |= SB_UPCALL;
1267 if (locked) {
1268 sock->so_rcv.sb_flags |= SB_UPCALL_LOCK;
1269 }
1270 sock->so_rcv.sb_upcall = rcallback;
1271 sock->so_rcv.sb_upcallarg = rcontext;
1272 } else {
1273 sock->so_rcv.sb_flags &= ~(SB_UPCALL | SB_UPCALL_LOCK);
1274 sock->so_rcv.sb_upcall = NULL;
1275 sock->so_rcv.sb_upcallarg = NULL;
1276 }
1277
1278 if (wcallback != NULL) {
1279 sock->so_snd.sb_flags |= SB_UPCALL;
1280 if (locked) {
1281 sock->so_snd.sb_flags |= SB_UPCALL_LOCK;
1282 }
1283 sock->so_snd.sb_upcall = wcallback;
1284 sock->so_snd.sb_upcallarg = wcontext;
1285 } else {
1286 sock->so_snd.sb_flags &= ~(SB_UPCALL | SB_UPCALL_LOCK);
1287 sock->so_snd.sb_upcall = NULL;
1288 sock->so_snd.sb_upcallarg = NULL;
1289 }
1290}
1291
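/*
 * Illustrative usage (added sketch, not part of the original source):
 * installing a read upcall. The callback signature shown is assumed to
 * match the sock_upcall typedef in sys/kpi_socket.h. With locked == 0 the
 * SB_UPCALL_LOCK flag is not set, so the upcall should only note that work
 * is pending and return; it must not block.
 *
 *	static void my_upcall(socket_t so, void *cookie, int waitf)
 *	{
 *		// e.g. wake a worker thread or enqueue a thread_call
 *	}
 *
 *	errno_t err = sock_setupcall(so, my_upcall, my_cookie);
 */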
1292errno_t
1293sock_setupcall(socket_t sock, sock_upcall callback, void *context)
1294{
1295 if (sock == NULL) {
1296 return EINVAL;
1297 }
1298
1299 /*
1300 * Note that we don't wait for any in progress upcall to complete.
1301 * On embedded, sock_setupcall() causes both read and write
1302 * callbacks to be set; on desktop, only read callback is set
1303 * to maintain legacy KPI behavior.
1304 *
1305 * The newer sock_setupcalls() KPI should be used instead to set
1306 * the read and write callbacks and their respective parameters.
1307 */
1308 socket_lock(sock, 1);
1309#if (defined(__arm__) || defined(__arm64__))
1310 sock_setupcalls_locked(sock, callback, context, callback, context, 0);
1311#else /* (defined(__arm__) || defined(__arm64__)) */
1312 sock_setupcalls_locked(sock, callback, context, NULL, NULL, 0);
1313#endif /* (defined(__arm__) || defined(__arm64__)) */
1314 socket_unlock(sock, 1);
1315
1316 return 0;
1317}
1318
1319errno_t
1320sock_setupcalls(socket_t sock, sock_upcall rcallback, void *rcontext,
1321 sock_upcall wcallback, void *wcontext)
1322{
1323 if (sock == NULL) {
1324 return EINVAL;
1325 }
1326
1327 /*
1328 * Note that we don't wait for any in progress upcall to complete.
1329 */
1330 socket_lock(sock, 1);
1331 sock_setupcalls_locked(sock, rcallback, rcontext, wcallback, wcontext, 0);
1332 socket_unlock(sock, 1);
1333
1334 return 0;
1335}
1336
1337void
1338sock_catchevents_locked(socket_t sock, sock_evupcall ecallback, void *econtext,
1339 long emask)
1340{
1341 socket_lock_assert_owned(sock);
1342
1343 /*
1344 * Note that we don't wait for any in progress upcall to complete.
1345 */
1346 if (ecallback != NULL) {
1347 sock->so_event = ecallback;
1348 sock->so_eventarg = econtext;
1349 sock->so_eventmask = (uint32_t)emask;
1350 } else {
1351 sock->so_event = sonullevent;
1352 sock->so_eventarg = NULL;
1353 sock->so_eventmask = 0;
1354 }
1355}
1356
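/*
 * Illustrative usage (added sketch, not part of the original source):
 * registering an event upcall. emask selects which socket event hints are
 * delivered (stored in so_eventmask above); passing a NULL callback resets
 * the socket to sonullevent and clears the mask.
 *
 *	errno_t err = sock_catchevents(so, my_event_upcall, my_cookie, emask);
 *	// ... later ...
 *	sock_catchevents(so, NULL, NULL, 0);	// tear the upcall down
 */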
1357errno_t
1358sock_catchevents(socket_t sock, sock_evupcall ecallback, void *econtext,
1359 long emask)
1360{
1361 if (sock == NULL) {
1362 return EINVAL;
1363 }
1364
1365 socket_lock(sock, 1);
1366 sock_catchevents_locked(sock, ecallback, econtext, emask);
1367 socket_unlock(sock, 1);
1368
1369 return 0;
1370}
1371
1372/*
1373 * Returns true if the socket belongs to the kernel.
1374 */
1375int
1376sock_iskernel(socket_t so)
1377{
1378 return so && so->last_pid == 0;
1379}