/*
 * Copyright (c) 1998-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/protosw.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/ev.h>
#include <kern/locks.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <sys/kdebug.h>
#include <libkern/OSAtomic.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#include <mach/vm_param.h>
/* TODO: this should be in a header file somewhere */
extern void postevent(struct socket *, struct sockbuf *, int);

#define	DBG_FNC_SBDROP		NETDBG_CODE(DBG_NETSOCK, 4)
#define	DBG_FNC_SBAPPEND	NETDBG_CODE(DBG_NETSOCK, 5)

static inline void sbcompress(struct sockbuf *, struct mbuf *, struct mbuf *);
static struct socket *sonewconn_internal(struct socket *, int);
static int sbappendaddr_internal(struct sockbuf *, struct sockaddr *,
    struct mbuf *, struct mbuf *);
static int sbappendcontrol_internal(struct sockbuf *, struct mbuf *,
    struct mbuf *);
static void soevent_ifdenied(struct socket *);

/*
 * Primitive routines for operating on sockets and socket buffers
 */
static int soqlimitcompat = 1;
static int soqlencomp = 0;

/*
 * Based on the number of mbuf clusters configured, high_sb_max and sb_max can
 * get scaled up or down to suit that memory configuration. high_sb_max is a
 * higher limit on sb_max that is checked when sb_max gets set through sysctl.
 */
u_int32_t	sb_max = SB_MAX;	/* XXX should be static */
u_int32_t	high_sb_max = SB_MAX;

static u_int32_t sb_efficiency = 8;	/* parameter for sbreserve() */
__private_extern__ int32_t total_sbmb_cnt = 0;

/* Control whether to throttle sockets eligible to be throttled */
__private_extern__ u_int32_t net_io_policy_throttled = 0;
static int sysctl_io_policy_throttled SYSCTL_HANDLER_ARGS;

u_int32_t net_io_policy_log = 0;	/* log socket policy changes */
#if CONFIG_PROC_UUID_POLICY
u_int32_t net_io_policy_uuid = 1;	/* enable UUID socket policy */
#endif /* CONFIG_PROC_UUID_POLICY */
/*
 * Procedures to manipulate state flags of socket
 * and do appropriate wakeups.  Normal sequence from the
 * active (originating) side is that soisconnecting() is
 * called during processing of connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of disconnect() call,
 * and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and soisdisconnected()
 * only, bypassing the in-progress calls when setting up a ``connection''
 * takes no time.
 *
 * From the passive side, a socket is created with
 * two queues of sockets: so_incomp for connections in progress
 * and so_comp for connections already made and awaiting user acceptance.
 * As a protocol is preparing incoming connections, it creates a socket
 * structure queued on so_incomp by calling sonewconn().  When the connection
 * is established, soisconnected() is called, and transfers the
 * socket structure to so_comp, making it available to accept().
 *
 * If a socket is closed with sockets on either
 * so_incomp or so_comp, these sockets are dropped.
 *
 * If higher level protocols are implemented in
 * the kernel, the wakeups done here will sometimes
 * cause software-interrupt process scheduling.
 */
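/*
 * Illustrative sketch of the transitions described above, for a
 * hypothetical connection-oriented protocol (exact call sites vary by
 * protocol):
 *
 *	active (connect side)		passive (listen side)
 *	---------------------		---------------------
 *	soisconnecting(so);		so = sonewconn(head, 0, from);
 *	  ...handshake...		  ...handshake completes...
 *	soisconnected(so);		soisconnected(so);
 *					  (moves so from so_incomp to
 *					   so_comp; accept() picks it up)
 *	soisdisconnecting(so);
 *	soisdisconnected(so);
 */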
void
soisconnecting(struct socket *so)
{
	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;

	sflt_notify(so, sock_evt_connecting, NULL);
}
void
soisconnected(struct socket *so)
{
	struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;

	sflt_notify(so, sock_evt_connected, NULL);

	if (head && (so->so_state & SS_INCOMP)) {
		so->so_state &= ~SS_INCOMP;
		so->so_state |= SS_COMP;
		if (head->so_proto->pr_getlock != NULL) {
			socket_unlock(so, 0);
			socket_lock(head, 1);
		}
		postevent(head, 0, EV_RCONN);
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		sorwakeup(head);
		wakeup_one((caddr_t)&head->so_timeo);
		if (head->so_proto->pr_getlock != NULL) {
			socket_unlock(head, 1);
			socket_lock(so, 0);
		}
	} else {
		postevent(so, 0, EV_WCONN);
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
		soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CONNECTED |
		    SO_FILT_HINT_CONNINFO_UPDATED);
	}
}
void
soisdisconnecting(struct socket *so)
{
	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	soevent(so, SO_FILT_HINT_LOCKED);
	sflt_notify(so, sock_evt_disconnecting, NULL);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisdisconnected(struct socket *so)
{
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED |
	    SO_FILT_HINT_CONNINFO_UPDATED);
	sflt_notify(so, sock_evt_disconnected, NULL);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}
/*
 * This function will issue a wakeup like soisdisconnected but it will not
 * notify the socket filters. This will avoid unlocking the socket
 * in the midst of closing it.
 */
void
sodisconnectwakeup(struct socket *so)
{
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED |
	    SO_FILT_HINT_CONNINFO_UPDATED);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}
/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
 */
static struct socket *
sonewconn_internal(struct socket *head, int connstatus)
{
	int so_qlen, error = 0;
	struct socket *so;
	lck_mtx_t *mutex_held;

	if (head->so_proto->pr_getlock != NULL)
		mutex_held = (*head->so_proto->pr_getlock)(head, 0);
	else
		mutex_held = head->so_proto->pr_domain->dom_mtx;
	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

	if (!soqlencomp) {
		/*
		 * This is the default case; so_qlen represents the
		 * sum of both incomplete and completed queues.
		 */
		so_qlen = head->so_qlen;
	} else {
		/*
		 * When kern.ipc.soqlencomp is set to 1, so_qlen
		 * represents only the completed queue.  Since we
		 * cannot let the incomplete queue goes unbounded
		 * (in case of SYN flood), we cap the incomplete
		 * queue length to at most somaxconn, and use that
		 * as so_qlen so that we fail immediately below.
		 */
		so_qlen = head->so_qlen - head->so_incqlen;
		if (head->so_incqlen > somaxconn)
			so_qlen = somaxconn;
	}

	if (so_qlen >=
	    (soqlimitcompat ? head->so_qlimit : (3 * head->so_qlimit / 2)))
		return ((struct socket *)0);
	so = soalloc(1, SOCK_DOM(head), head->so_type);
	if (so == NULL)
		return ((struct socket *)0);
	/* check if head was closed during the soalloc */
	if (head->so_proto == NULL) {
		sodealloc(so);
		return ((struct socket *)0);
	}

	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	kauth_cred_ref(head->so_cred);
	so->so_cred = head->so_cred;
	so->last_pid = head->last_pid;
	so->last_upid = head->last_upid;
	memcpy(so->last_uuid, head->last_uuid, sizeof (so->last_uuid));
	if (head->so_flags & SOF_DELEGATED) {
		so->e_pid = head->e_pid;
		so->e_upid = head->e_upid;
		memcpy(so->e_uuid, head->e_uuid, sizeof (so->e_uuid));
	}
	/* inherit socket options stored in so_flags */
	so->so_flags = head->so_flags &
	    (SOF_NOSIGPIPE | SOF_NOADDRAVAIL | SOF_REUSESHAREUID |
	    SOF_NOTIFYCONFLICT | SOF_BINDRANDOMPORT | SOF_NPX_SETOPTSHUT |
	    SOF_NODEFUNCT | SOF_PRIVILEGED_TRAFFIC_CLASS | SOF_NOTSENT_LOWAT |
	    SOF_USELRO | SOF_DELEGATED);
	so->so_usecount = 1;
	so->next_lock_lr = 0;
	so->next_unlock_lr = 0;

	so->so_rcv.sb_flags |= SB_RECV;	/* XXX */
	so->so_rcv.sb_so = so->so_snd.sb_so = so;
	TAILQ_INIT(&so->so_evlist);

#if CONFIG_MACF_SOCKET
	mac_socket_label_associate_accept(head, so);
#endif

	/* inherit traffic management properties of listener */
	so->so_traffic_mgt_flags =
	    head->so_traffic_mgt_flags & (TRAFFIC_MGT_SO_BACKGROUND);
	so->so_background_thread = head->so_background_thread;
	so->so_traffic_class = head->so_traffic_class;

	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
		sodealloc(so);
		return ((struct socket *)0);
	}
	so->so_rcv.sb_flags |= (head->so_rcv.sb_flags & SB_USRSIZE);
	so->so_snd.sb_flags |= (head->so_snd.sb_flags & SB_USRSIZE);

	/*
	 * Must be done with head unlocked to avoid deadlock
	 * for protocol with per socket mutexes.
	 */
	if (head->so_proto->pr_unlock)
		socket_unlock(head, 0);
	if (((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL) != 0) ||
	    error) {
		sodealloc(so);
		if (head->so_proto->pr_unlock)
			socket_lock(head, 0);
		return ((struct socket *)0);
	}
	if (head->so_proto->pr_unlock) {
		socket_lock(head, 0);
		/*
		 * Radar 7385998 Recheck that the head is still accepting
		 * to avoid race condition when head is getting closed.
		 */
		if ((head->so_options & SO_ACCEPTCONN) == 0) {
			so->so_state &= ~SS_NOFDREF;
			soclose(so);
			return ((struct socket *)0);
		}
	}

	atomic_add_32(&so->so_proto->pr_domain->dom_refs, 1);

	/* Insert in head appropriate lists */
	so->so_head = head;

	/*
	 * Since this socket is going to be inserted into the incomp
	 * queue, it can be picked up by another thread in
	 * tcp_dropdropablreq to get dropped before it is setup..
	 * To prevent this race, set in-progress flag which can be
	 * cleared later
	 */
	so->so_flags |= SOF_INCOMP_INPROGRESS;

	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_state |= SS_COMP;
	} else {
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_state |= SS_INCOMP;
		head->so_incqlen++;
	}
	head->so_qlen++;

	/* Attach socket filters for this protocol */
	sflt_initsock(so);

	if (connstatus) {
		so->so_state |= connstatus;
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	}
	return (so);
}

struct socket *
sonewconn(struct socket *head, int connstatus, const struct sockaddr *from)
{
	int error = sflt_connectin(head, from);
	if (error) {
		return (NULL);
	}

	return (sonewconn_internal(head, connstatus));
}
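/*
 * Usage sketch (illustrative; the exact call site is protocol-specific):
 * a protocol's input path typically calls sonewconn() with connstatus 0
 * while the handshake is still in progress, and later calls
 * soisconnected() on the returned socket, which is what moves it from
 * so_incomp to so_comp:
 *
 *	struct socket *so = sonewconn(head, 0, from);
 *	if (so == NULL)
 *		return;		... queue full, filter veto or no memory ...
 *	... complete the protocol handshake ...
 *	soisconnected(so);	... now visible to accept() ...
 */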
/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it would normally be applied to a socket when the user
 * informs the system that no more data is to be sent, by the protocol
 * code (in case PRU_SHUTDOWN).  Socantrcvmore indicates that no more data
 * will be received, and will normally be applied to the socket by a
 * protocol when it detects that the peer will send no more data.
 * Data queued for reading in the socket may yet be read.
 */

void
socantsendmore(struct socket *so)
{
	so->so_state |= SS_CANTSENDMORE;
	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CANTSENDMORE);
	sflt_notify(so, sock_evt_cantsendmore, NULL);
	sowwakeup(so);
}

void
socantrcvmore(struct socket *so)
{
	so->so_state |= SS_CANTRCVMORE;
	soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_CANTRCVMORE);
	sflt_notify(so, sock_evt_cantrecvmore, NULL);
	sorwakeup(so);
}
/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{
	boolean_t nointr = (sb->sb_flags & SB_NOINTR);
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;
	lck_mtx_t *mutex_held;
	struct timespec ts;
	int error = 0;

	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 1) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_proto->pr_getlock != NULL)
		mutex_held = (*so->so_proto->pr_getlock)(so, 0);
	else
		mutex_held = so->so_proto->pr_domain->dom_mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

	ts.tv_sec = sb->sb_timeo.tv_sec;
	ts.tv_nsec = sb->sb_timeo.tv_usec * 1000;

	sb->sb_waiters++;
	VERIFY(sb->sb_waiters != 0);

	error = msleep((caddr_t)&sb->sb_cc, mutex_held,
	    nointr ? PSOCK : PSOCK | PCATCH,
	    nointr ? "sbwait_nointr" : "sbwait", &ts);

	VERIFY(sb->sb_waiters != 0);
	sb->sb_waiters--;

	if (so->so_usecount < 1) {
		panic("%s: 2 sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if ((so->so_state & SS_DRAINING) || (so->so_flags & SOF_DEFUNCT)) {
		error = EBADF;
		if (so->so_flags & SOF_DEFUNCT) {
			SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
			    "(%d)\n", __func__, proc_selfpid(),
			    (uint64_t)VM_KERNEL_ADDRPERM(so),
			    SOCK_DOM(so), SOCK_TYPE(so), error));
		}
	}

	return (error);
}
void
sbwakeup(struct sockbuf *sb)
{
	if (sb->sb_waiters > 0)
		wakeup((caddr_t)&sb->sb_cc);
}
/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket has the SS_ASYNC flag set.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	if (so->so_flags & SOF_DEFUNCT) {
		SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] si 0x%x, "
		    "fl 0x%x [%s]\n", __func__, proc_selfpid(),
		    (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
		    SOCK_TYPE(so), (uint32_t)sb->sb_sel.si_flags, sb->sb_flags,
		    (sb->sb_flags & SB_RECV) ? "rcv" : "snd"));
	}

	sb->sb_flags &= ~SB_SEL;
	selwakeup(&sb->sb_sel);
	sbwakeup(sb);
	if (so->so_state & SS_ASYNC) {
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0)
			proc_signal(so->so_pgid, SIGIO);
	}
	if (sb->sb_flags & SB_KNOTE) {
		KNOTE(&sb->sb_sel.si_note, SO_FILT_HINT_LOCKED);
	}
	if (sb->sb_flags & SB_UPCALL) {
		void (*sb_upcall)(struct socket *, void *, int);
		caddr_t sb_upcallarg;

		sb_upcall = sb->sb_upcall;
		sb_upcallarg = sb->sb_upcallarg;
		/* Let close know that we're about to do an upcall */
		so->so_upcallusecount++;

		socket_unlock(so, 0);
		(*sb_upcall)(so, sb_upcallarg, M_DONTWAIT);
		socket_lock(so, 0);

		so->so_upcallusecount--;
		/* Tell close that it's safe to proceed */
		if ((so->so_flags & SOF_CLOSEWAIT) &&
		    so->so_upcallusecount == 0)
			wakeup((caddr_t)&so->so_upcallusecount);
	}
}
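/*
 * Illustrative note: an upcall registered in sb_upcall (for instance via
 * the sock_setupcall() KPI) runs with the socket unlocked, so the
 * so_upcallusecount accounting above is what keeps soclose() from tearing
 * the socket down underneath it.  A hypothetical consumer:
 *
 *	static void
 *	my_upcall(struct socket *so, void *arg, int waitf)
 *	{
 *		... socket is NOT locked here; lock it before
 *		    touching so_rcv/so_snd state ...
 *	}
 */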
/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing select() statements and notification
 * on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field. The upper
 * level routine soreceive() expects the following conventions to be
 * observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling sbrelease() when the socket is destroyed.
 */
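/*
 * Illustrative layout of a receive buffer holding two records, the first
 * carrying a sender name followed by data, per the conventions above:
 *
 *	sb_mb -> [MT_SONAME] --m_next--> [MT_DATA] --m_next--> [MT_DATA]
 *	            |
 *	        m_nextpkt
 *	            v
 *	        [MT_DATA]  <-- sb_lastrecord (its last mbuf == sb_mbtail)
 */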
/*
 * Returns:	0			Success
 *		ENOBUFS
 */
int
soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc)
{
	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	else
		so->so_snd.sb_idealsize = sndcc;

	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	else
		so->so_rcv.sb_idealsize = rcvcc;

	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	so->so_snd.sb_flags &= ~SB_SEL;
	selthreadclear(&so->so_snd.sb_sel);
	sbrelease(&so->so_snd);
bad:
	return (ENOBUFS);
}
/*
 * Allot mbufs to a sockbuf.
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 */
int
sbreserve(struct sockbuf *sb, u_int32_t cc)
{
	if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES))
		return (0);
	sb->sb_hiwat = cc;
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}
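/*
 * Worked example (illustrative numbers): with sb_efficiency = 8 and a
 * request of cc = 64 KB, sbreserve() sets
 *
 *	sb_hiwat = 65536
 *	sb_mbmax = min(65536 * 8, sb_max) = 524288, unless sb_max is lower
 *
 * so sb_mbcnt (which also counts MSIZE and cluster overhead, see
 * sballoc()) does not become the limiting factor when mbufs are
 * reasonably well packed.
 */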
/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
/* WARNING needs to do selthreadclear() before calling this */
void
sbrelease(struct sockbuf *sb)
{
	sbflush(sb);
}
/*
 * Routines to add and remove
 * data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to
 * append new mbufs to a socket buffer, after checking that adequate
 * space is available, comparing the function sbspace() with the amount
 * of data to be added.  sbappendrecord() differs from sbappend() in
 * that data supplied is treated as the beginning of a new record.
 * To place a sender's address, optional access rights, and data in a
 * socket receive buffer, sbappendaddr() should be used.  To place
 * access rights and data in a socket receive buffer, sbappendrights()
 * should be used.  In either case, the new data begins a new record.
 * Note that unlike sbappend() and sbappendrecord(), these routines check
 * for the caller that there will be enough space to store the data.
 * Each fails if there is not enough space, or if it cannot find mbufs
 * to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data
 * awaiting acknowledgement.  Data is normally copied from a socket
 * send buffer in a protocol with m_copy for output to a peer,
 * and then removing the data from the socket buffer with sbdrop()
 * or sbdroprecord() when the data is acknowledged by the peer.
 */
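/*
 * Canonical usage sketch (illustrative): a datagram protocol delivering
 * an incoming packet relies on sbappendaddr() to do the space check,
 * while a stream protocol checks sbspace() itself before sbappend() or
 * sbappendstream():
 *
 *	int error;
 *
 *	if (sbappendaddr(&so->so_rcv, from, m, control, &error) != 0)
 *		sorwakeup(so);
 *	... on failure the chain was freed and error holds the reason ...
 */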
/*
 * Append mbuf chain m to the last record in the
 * socket buffer sb.  The additional space associated
 * the mbuf chain is recorded in sb.  Empty mbufs are
 * discarded and mbufs are compacted where possible.
 */
int
sbappend(struct sockbuf *sb, struct mbuf *m)
{
	struct socket *so = sb->sb_so;

	if (m == NULL || (sb->sb_flags & SB_DROP)) {
		if (m != NULL)
			m_freem(m);
		return (0);
	}

	SBLASTRECORDCHK(sb, "sbappend 1");

	if (sb->sb_lastrecord != NULL && (sb->sb_mbtail->m_flags & M_EOR))
		return (sbappendrecord(sb, m));

	if (sb->sb_flags & SB_RECV) {
		int error = sflt_data_in(so, NULL, &m, NULL, 0);
		SBLASTRECORDCHK(sb, "sbappend 2");
		if (error) {
			if (error != EJUSTRETURN)
				m_freem(m);
			return (0);
		}
	}

	/* If this is the first record, it's also the last record */
	if (sb->sb_lastrecord == NULL)
		sb->sb_lastrecord = m;

	sbcompress(sb, m, sb->sb_mbtail);
	SBLASTRECORDCHK(sb, "sbappend 3");
	return (1);
}
/*
 * Similar to sbappend, except that this is optimized for stream sockets.
 */
int
sbappendstream(struct sockbuf *sb, struct mbuf *m)
{
	struct socket *so = sb->sb_so;

	if (m == NULL || (sb->sb_flags & SB_DROP)) {
		if (m != NULL)
			m_freem(m);
		return (0);
	}

	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) {
		panic("sbappendstream: nexpkt %p || mb %p != lastrecord %p\n",
		    m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);
		/* NOTREACHED */
	}

	SBLASTMBUFCHK(sb, __func__);

	if (sb->sb_flags & SB_RECV) {
		int error = sflt_data_in(so, NULL, &m, NULL, 0);
		SBLASTRECORDCHK(sb, "sbappendstream 1");
		if (error) {
			if (error != EJUSTRETURN)
				m_freem(m);
			return (0);
		}
	}

	sbcompress(sb, m, sb->sb_mbtail);
	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, "sbappendstream 2");
	return (1);
}
void
sbcheck(struct sockbuf *sb)
{
	struct mbuf *m;
	struct mbuf *n = 0;
	u_int32_t len = 0, mbcnt = 0;
	lck_mtx_t *mutex_held;

	if (sb->sb_so->so_proto->pr_getlock != NULL)
		mutex_held = (*sb->sb_so->so_proto->pr_getlock)(sb->sb_so, 0);
	else
		mutex_held = sb->sb_so->so_proto->pr_domain->dom_mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

	for (m = sb->sb_mb; m; m = n) {
		n = m->m_nextpkt;
		for (; m; m = m->m_next) {
			len += m->m_len;
			mbcnt += MSIZE;
			/* XXX pretty sure this is bogus */
			if (m->m_flags & M_EXT)
				mbcnt += m->m_ext.ext_size;
		}
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		panic("cc %ld != %ld || mbcnt %ld != %ld\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
	}
}
void
sblastrecordchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("sblastrecordchk: mb %p lastrecord %p last %p\n",
		    sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("sblastrecordchk from %s", where);
	}
}
void
sblastmbufchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("sblastmbufchk: mb %p mbtail %p last %p\n",
		    sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("sblastmbufchk from %s", where);
	}
}
/*
 * Similar to sbappend, except the mbuf chain begins a new record.
 */
int
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;
	int space = 0;

	if (m0 == NULL || (sb->sb_flags & SB_DROP)) {
		if (m0 != NULL)
			m_freem(m0);
		return (0);
	}

	for (m = m0; m != NULL; m = m->m_next)
		space += m->m_len;

	if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX)) {
		m_freem(m0);
		return (0);
	}

	if (sb->sb_flags & SB_RECV) {
		int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
		    sock_data_filt_flag_record);
		if (error != 0) {
			SBLASTRECORDCHK(sb, "sbappendrecord 1");
			if (error != EJUSTRETURN)
				m_freem(m0);
			return (0);
		}
	}

	/*
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 2");
	if (sb->sb_lastrecord != NULL) {
		sb->sb_lastrecord->m_nextpkt = m0;
	} else {
		sb->sb_mb = m0;
	}
	sb->sb_lastrecord = m0;
	sb->sb_mbtail = m0;

	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 3");
	return (1);
}
/*
 * As above except that OOB data
 * is inserted at the beginning of the sockbuf,
 * but after any other OOB data.
 */
int
sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;
	struct mbuf **mp;

	if (m0 == 0)
		return (0);

	SBLASTRECORDCHK(sb, "sbinsertoob 1");

	if ((sb->sb_flags & SB_RECV) != 0) {
		int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
		    sock_data_filt_flag_oob);

		SBLASTRECORDCHK(sb, "sbinsertoob 2");
		if (error) {
			if (error != EJUSTRETURN) {
				m_freem(m0);
			}
			return (0);
		}
	}

	for (mp = &sb->sb_mb; *mp; mp = &((*mp)->m_nextpkt)) {
		m = *mp;
again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;	/* WANT next train */

		case MT_CONTROL:
			m = m->m_next;
			if (m)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	if (*mp == NULL) {
		/* m0 is actually the new tail */
		sb->sb_lastrecord = m0;
	}
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
	SBLASTRECORDCHK(sb, "sbinsertoob 3");
	return (1);
}
/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 *
 * Returns:	0			No space/out of mbufs
 *		1			Success
 */
static int
sbappendaddr_internal(struct sockbuf *sb, struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *m, *n, *nlast;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");

	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;

	SBLASTRECORDCHK(sb, "sbappendadddr 1");

	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;

	if (sb->sb_lastrecord != NULL) {
		sb->sb_lastrecord->m_nextpkt = m;
	} else {
		sb->sb_mb = m;
	}
	sb->sb_lastrecord = m;
	sb->sb_mbtail = nlast;

	SBLASTMBUFCHK(sb, __func__);
	SBLASTRECORDCHK(sb, "sbappendadddr 2");

	postevent(0, sb, EV_RWBYTES);
	return (1);
}
/*
 * Returns:	0			Error: No space/out of mbufs/etc.
 *		1			Success
 *
 * Imputed:	(*error_out)		errno for error
 *		ENOBUFS
 *	sflt_data_in:???		[whatever a filter author chooses]
 */
int
sbappendaddr(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0,
    struct mbuf *control, int *error_out)
{
	int result = 0;
	boolean_t sb_unix = (sb->sb_flags & SB_UNIX);

	if (error_out)
		*error_out = 0;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddrorfree");

	if (sb->sb_flags & SB_DROP) {
		if (m0 != NULL)
			m_freem(m0);
		if (control != NULL && !sb_unix)
			m_freem(control);
		if (error_out != NULL)
			*error_out = EINVAL;
		return (0);
	}

	/* Call socket data in filters */
	if ((sb->sb_flags & SB_RECV) != 0) {
		int error;

		error = sflt_data_in(sb->sb_so, asa, &m0, &control, 0);
		SBLASTRECORDCHK(sb, __func__);
		if (error) {
			if (error != EJUSTRETURN) {
				if (m0)
					m_freem(m0);
				if (control != NULL && !sb_unix)
					m_freem(control);
				if (error_out)
					*error_out = error;
			}
			return (0);
		}
	}

	result = sbappendaddr_internal(sb, asa, m0, control);
	if (result == 0) {
		if (m0)
			m_freem(m0);
		if (control != NULL && !sb_unix)
			m_freem(control);
		if (error_out)
			*error_out = ENOBUFS;
	}

	return (result);
}
static int
sbappendcontrol_internal(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
	struct mbuf *m, *mlast, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");

	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX))
		return (0);
	n->m_next = m0;		/* concatenate data to control */
	SBLASTRECORDCHK(sb, "sbappendcontrol 1");

	for (m = control; m->m_next != NULL; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;

	if (sb->sb_lastrecord != NULL) {
		sb->sb_lastrecord->m_nextpkt = control;
	} else {
		sb->sb_mb = control;
	}
	sb->sb_lastrecord = control;
	sb->sb_mbtail = mlast;

	SBLASTMBUFCHK(sb, __func__);
	SBLASTRECORDCHK(sb, "sbappendcontrol 2");

	postevent(0, sb, EV_RWBYTES);
	return (1);
}
int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control,
    int *error_out)
{
	int result = 0;
	boolean_t sb_unix = (sb->sb_flags & SB_UNIX);

	if (error_out)
		*error_out = 0;

	if (sb->sb_flags & SB_DROP) {
		if (m0 != NULL)
			m_freem(m0);
		if (control != NULL && !sb_unix)
			m_freem(control);
		if (error_out != NULL)
			*error_out = EINVAL;
		return (0);
	}

	if (sb->sb_flags & SB_RECV) {
		int error;

		error = sflt_data_in(sb->sb_so, NULL, &m0, &control, 0);
		SBLASTRECORDCHK(sb, __func__);
		if (error) {
			if (error != EJUSTRETURN) {
				if (m0)
					m_freem(m0);
				if (control != NULL && !sb_unix)
					m_freem(control);
				if (error_out)
					*error_out = error;
			}
			return (0);
		}
	}

	result = sbappendcontrol_internal(sb, m0, control);
	if (result == 0) {
		if (m0)
			m_freem(m0);
		if (control != NULL && !sb_unix)
			m_freem(control);
		if (error_out)
			*error_out = ENOBUFS;
	}

	return (result);
}
/*
 * Append a contiguous TCP data blob with TCP sequence number as control data
 * as a new msg to the receive socket buffer.
 */
int
sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum,
    int unordered)
{
	struct mbuf *m_eor = NULL;
	u_int32_t data_len = 0;
	int ret = 0;
	struct socket *so = sb->sb_so;

	VERIFY((m->m_flags & M_PKTHDR) && m_pktlen(m) > 0);
	VERIFY(so->so_msg_state != NULL);
	VERIFY(sb->sb_flags & SB_RECV);

	/* Keep the TCP sequence number in the mbuf pkthdr */
	m->m_pkthdr.msg_seq = seqnum;

	/* find last mbuf and set M_EOR */
	for (m_eor = m; ; m_eor = m_eor->m_next) {
		/*
		 * If the msg is unordered, we need to account for
		 * these bytes in receive socket buffer size. Otherwise,
		 * the receive window advertised will shrink because
		 * of the additional unordered bytes added to the
		 * receive buffer.
		 */
		if (unordered) {
			m_eor->m_flags |= M_UNORDERED_DATA;
			data_len += m_eor->m_len;
			so->so_msg_state->msg_uno_bytes += m_eor->m_len;
		} else {
			m_eor->m_flags &= ~M_UNORDERED_DATA;
		}
		if (m_eor->m_next == NULL)
			break;
	}

	/* set EOR flag at end of byte blob */
	m_eor->m_flags |= M_EOR;

	/* expand the receive socket buffer to allow unordered data */
	if (unordered && !sbreserve(sb, sb->sb_hiwat + data_len)) {
		/*
		 * Could not allocate memory for unordered data, it
		 * means this packet will have to be delivered in order
		 */
		printf("%s: could not reserve space for unordered data\n",
		    __func__);
	}

	ret = sbappendrecord(sb, m);
	return (ret);
}
/*
 * TCP streams have message based out of order delivery support, or have
 * Multipath TCP support, or are regular TCP sockets
 */
int
sbappendstream_rcvdemux(struct socket *so, struct mbuf *m, uint32_t seqnum,
    int unordered)
{
	int ret = 0;

	if ((m != NULL) && (m_pktlen(m) <= 0)) {
		m_freem(m);
		return (ret);
	}

	if (so->so_flags & SOF_ENABLE_MSGS) {
		ret = sbappendmsgstream_rcv(&so->so_rcv, m, seqnum, unordered);
	}
#if MPTCP
	else if (so->so_flags & SOF_MPTCP_TRUE) {
		ret = sbappendmptcpstream_rcv(&so->so_rcv, m);
	}
#endif /* MPTCP */
	else {
		ret = sbappendstream(&so->so_rcv, m);
	}
	return (ret);
}
#if MPTCP
int
sbappendmptcpstream_rcv(struct sockbuf *sb, struct mbuf *m)
{
	struct socket *so = sb->sb_so;

	VERIFY(m == NULL || (m->m_flags & M_PKTHDR));
	/* SB_NOCOMPRESS must be set prevent loss of M_PKTHDR data */
	VERIFY((sb->sb_flags & (SB_RECV|SB_NOCOMPRESS)) ==
	    (SB_RECV|SB_NOCOMPRESS));

	if (m == NULL || m_pktlen(m) == 0 || (sb->sb_flags & SB_DROP) ||
	    (so->so_state & SS_CANTRCVMORE)) {
		if (m != NULL)
			m_freem(m);
		return (0);
	}
	/* the socket is not closed, so SOF_MP_SUBFLOW must be set */
	VERIFY(so->so_flags & SOF_MP_SUBFLOW);

	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) {
		panic("%s: nexpkt %p || mb %p != lastrecord %p\n", __func__,
		    m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);
		/* NOTREACHED */
	}

	SBLASTMBUFCHK(sb, __func__);

	mptcp_adj_rmap(so, m);

	/* No filter support (SB_RECV) on mptcp subflow sockets */

	sbcompress(sb, m, sb->sb_mbtail);
	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, __func__);
	return (1);
}
#endif /* MPTCP */
/*
 * Append message to send socket buffer based on priority.
 */
int
sbappendmsg_snd(struct sockbuf *sb, struct mbuf *m)
{
	struct socket *so = sb->sb_so;
	struct msg_priq *priq;
	int set_eor = 0;

	VERIFY(so->so_msg_state != NULL);

	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord))
		panic("sbappendstream: nexpkt %p || mb %p != lastrecord %p\n",
		    m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord);

	SBLASTMBUFCHK(sb, __func__);

	if (m == NULL || (sb->sb_flags & SB_DROP) || so->so_msg_state == NULL) {
		if (m != NULL)
			m_freem(m);
		return (0);
	}

	priq = &so->so_msg_state->msg_priq[m->m_pkthdr.msg_pri];

	/* note if we need to propogate M_EOR to the last mbuf */
	if (m->m_flags & M_EOR) {
		set_eor = 1;

		/* Reset M_EOR from the first mbuf */
		m->m_flags &= ~(M_EOR);
	}

	if (priq->msgq_head == NULL) {
		VERIFY(priq->msgq_tail == NULL && priq->msgq_lastmsg == NULL);
		priq->msgq_head = priq->msgq_lastmsg = m;
	} else {
		VERIFY(priq->msgq_tail->m_next == NULL);

		/* Check if the last message has M_EOR flag set */
		if (priq->msgq_tail->m_flags & M_EOR) {
			/* Insert as a new message */
			priq->msgq_lastmsg->m_nextpkt = m;

			/* move the lastmsg pointer */
			priq->msgq_lastmsg = m;
		} else {
			/* Append to the existing message */
			priq->msgq_tail->m_next = m;
		}
	}

	/* Update accounting and the queue tail pointer */

	while (m->m_next != NULL) {
		sballoc(sb, m);
		priq->msgq_bytes += m->m_len;
		m = m->m_next;
	}
	sballoc(sb, m);
	priq->msgq_bytes += m->m_len;

	if (set_eor) {
		m->m_flags |= M_EOR;

		/*
		 * Since the user space can not write a new msg
		 * without completing the previous one, we can
		 * reset this flag to start sending again.
		 */
		priq->msgq_flags &= ~(MSGQ_MSG_NOTDONE);
	}

	priq->msgq_tail = m;

	SBLASTRECORDCHK(sb, "sbappendstream 2");
	postevent(0, sb, EV_RWBYTES);
	return (1);
}
/*
 * Pull data from priority queues to the serial snd queue
 * right before sending.
 */
void
sbpull_unordered_data(struct socket *so, int32_t off, int32_t len)
{
	int32_t topull, i;
	struct msg_priq *priq = NULL;

	VERIFY(so->so_msg_state != NULL);

	topull = (off + len) - so->so_msg_state->msg_serial_bytes;

	i = MSG_PRI_MAX;
	while (i >= MSG_PRI_MIN && topull > 0) {
		struct mbuf *m = NULL, *mqhead = NULL, *mend = NULL;
		priq = &so->so_msg_state->msg_priq[i];
		if ((priq->msgq_flags & MSGQ_MSG_NOTDONE) &&
		    priq->msgq_head == NULL) {
			/*
			 * We were in the middle of sending
			 * a message and we have not seen the
			 * end of it.
			 */
			VERIFY(priq->msgq_lastmsg == NULL &&
			    priq->msgq_tail == NULL);
			return;
		}
		if (priq->msgq_head != NULL) {
			int32_t bytes = 0, topull_tmp = topull;
			/*
			 * We found a msg while scanning the priority
			 * queue from high to low priority.
			 */
			m = priq->msgq_head;
			mqhead = m;
			mend = m;

			/*
			 * Move bytes from the priority queue to the
			 * serial queue. Compute the number of bytes
			 * being added.
			 */
			while (mqhead->m_next != NULL && topull_tmp > 0) {
				bytes += mqhead->m_len;
				topull_tmp -= mqhead->m_len;
				mend = mqhead;
				mqhead = mqhead->m_next;
			}

			if (mqhead->m_next == NULL) {
				/*
				 * If we have only one more mbuf left,
				 * move the last mbuf of this message to
				 * serial queue and set the head of the
				 * queue to be the next message.
				 */
				bytes += mqhead->m_len;
				mend = mqhead;
				mqhead = m->m_nextpkt;
				if (!(mend->m_flags & M_EOR)) {
					/*
					 * We have not seen the end of
					 * this message, so we can not
					 * reset the flag.
					 */
					priq->msgq_flags |= MSGQ_MSG_NOTDONE;
				} else {
					mend->m_flags &= ~(M_EOR);
				}
			} else {
				/* propogate the next msg pointer */
				mqhead->m_nextpkt = m->m_nextpkt;
			}
			priq->msgq_head = mqhead;

			/*
			 * if the lastmsg pointer points to
			 * the mbuf that is being dequeued, update
			 * it to point to the new head.
			 */
			if (priq->msgq_lastmsg == m)
				priq->msgq_lastmsg = priq->msgq_head;

			m->m_nextpkt = NULL;
			mend->m_next = NULL;

			if (priq->msgq_head == NULL) {
				/* Moved all messages, update tail */
				priq->msgq_tail = NULL;
				VERIFY(priq->msgq_lastmsg == NULL);
			}

			/* Move it to serial sb_mb queue */
			if (so->so_snd.sb_mb == NULL) {
				so->so_snd.sb_mb = m;
			} else {
				so->so_snd.sb_mbtail->m_next = m;
			}

			priq->msgq_bytes -= bytes;
			VERIFY(priq->msgq_bytes >= 0);
			sbwakeup(&so->so_snd);

			so->so_msg_state->msg_serial_bytes += bytes;
			so->so_snd.sb_mbtail = mend;
			so->so_snd.sb_lastrecord = so->so_snd.sb_mb;

			topull =
			    (off + len) - so->so_msg_state->msg_serial_bytes;

			if (priq->msgq_flags & MSGQ_MSG_NOTDONE)
				break;
		} else {
			i--;
		}
	}
	sblastrecordchk(&so->so_snd, "sbpull_unordered_data");
	sblastmbufchk(&so->so_snd, "sbpull_unordered_data");
}
/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.
 */
static inline void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor = 0, compress = (!(sb->sb_flags & SB_NOCOMPRESS));
	struct mbuf *o;

	if (m == NULL) {
		/* There is nothing to compress; just update the tail */
		for (; n->m_next != NULL; n = n->m_next)
			;
		sb->sb_mbtail = n;
		goto done;
	}

	while (m != NULL) {
		eor |= m->m_flags & M_EOR;
		if (compress && m->m_len == 0 && (eor == 0 ||
		    (((o = m->m_next) || (o = n)) && o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (compress && n != NULL && (n->m_flags & M_EOR) == 0 &&
#ifndef __APPLE__
		    M_WRITABLE(n) &&
#endif
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
			    m->m_type != MT_OOBDATA) {
				/* XXX: Probably don't need */
				sb->sb_ctl += m->m_len;
			}
			m = m_free(m);
			continue;
		}
		if (n != NULL)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor != 0) {
		if (n != NULL)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
done:
	SBLASTMBUFCHK(sb, __func__);
	postevent(0, sb, EV_RWBYTES);
}
void
sb_empty_assert(struct sockbuf *sb, const char *where)
{
	if (!(sb->sb_cc == 0 && sb->sb_mb == NULL && sb->sb_mbcnt == 0 &&
	    sb->sb_mbtail == NULL && sb->sb_lastrecord == NULL)) {
		panic("%s: sb %p so %p cc %d mbcnt %d mb %p mbtail %p "
		    "lastrecord %p\n", where, sb, sb->sb_so, sb->sb_cc,
		    sb->sb_mbcnt, sb->sb_mb, sb->sb_mbtail,
		    sb->sb_lastrecord);
		/* NOTREACHED */
	}
}
static void
sbflush_priq(struct msg_priq *priq)
{
	struct mbuf *m;

	m = priq->msgq_head;
	if (m != NULL)
		m_freem_list(m);
	priq->msgq_head = priq->msgq_tail = priq->msgq_lastmsg = NULL;
	priq->msgq_bytes = priq->msgq_flags = 0;
}
/*
 * Free all mbufs in a sockbuf.
 * Check that all resources are reclaimed.
 */
void
sbflush(struct sockbuf *sb)
{
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;
#ifdef notyet
	lck_mtx_t *mutex_held;
#endif
	u_int32_t i;

	/* so_usecount may be 0 if we get here from sofreelastref() */
	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 0) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}
#ifdef notyet
	/*
	 * XXX: This code is currently commented out, because we may get here
	 * as part of sofreelastref(), and at that time, pr_getlock() may no
	 * longer be able to return us the lock; this will be fixed in future.
	 */
	if (so->so_proto->pr_getlock != NULL)
		mutex_held = (*so->so_proto->pr_getlock)(so, 0);
	else
		mutex_held = so->so_proto->pr_domain->dom_mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
#endif
	/*
	 * Obtain lock on the socket buffer (SB_LOCK).  This is required
	 * to prevent the socket buffer from being unexpectedly altered
	 * while it is used by another thread in socket send/receive.
	 *
	 * sblock() must not fail here, hence the assertion.
	 */
	(void) sblock(sb, SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT);
	VERIFY(sb->sb_flags & SB_LOCK);

	while (sb->sb_mbcnt > 0) {
		/*
		 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever. Panic instead.
		 */
		if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		sbdrop(sb, (int)sb->sb_cc);
	}

	if (!(sb->sb_flags & SB_RECV) && (so->so_flags & SOF_ENABLE_MSGS)) {
		VERIFY(so->so_msg_state != NULL);
		for (i = MSG_PRI_MIN; i <= MSG_PRI_MAX; ++i) {
			sbflush_priq(&so->so_msg_state->msg_priq[i]);
		}
		so->so_msg_state->msg_serial_bytes = 0;
		so->so_msg_state->msg_uno_bytes = 0;
	}

	sb_empty_assert(sb, __func__);
	postevent(0, sb, EV_RWBYTES);

	sbunlock(sb, TRUE);	/* keep socket locked */
}
/*
 * Drop data from (the front of) a sockbuf.
 * use m_freem_list to free the mbuf structures
 * under a single lock... this is done by pruning
 * the top of the tree from the body by keeping track
 * of where we get to in the tree and then zeroing the
 * two pertinent pointers m_nextpkt and m_next
 * the socket buffer is then updated to point at the new
 * top of the tree and the pruned area is released via
 * m_freem_list.
 */
void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *m, *free_list, *ml;
	struct mbuf *next, *last;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
#if MPTCP
	if ((m != NULL) && (len > 0) &&
	    (!(sb->sb_flags & SB_RECV)) &&
	    ((sb->sb_so->so_flags & SOF_MP_SUBFLOW) ||
	    ((SOCK_CHECK_DOM(sb->sb_so, PF_MULTIPATH)) &&
	    (SOCK_CHECK_PROTO(sb->sb_so, IPPROTO_TCP))))) {
		mptcp_preproc_sbdrop(m, (unsigned int)len);
	}
#endif /* MPTCP */
	KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_START), sb, len, 0, 0, 0);

	free_list = last = m;
	ml = (struct mbuf *)0;

	while (len > 0) {
		if (m == 0) {
			if (next == 0) {
				/*
				 * temporarily replacing this panic with printf
				 * because it occurs occasionally when closing
				 * a socket when there is no harm in ignoring
				 * it. This problem will be investigated
				 * further.
				 */
				/* panic("sbdrop"); */
				printf("sbdrop - count not zero\n");
				len = 0;
				/*
				 * zero the counts. if we have no mbufs,
				 * we have no data (PR-2986815)
				 */
				sb->sb_cc = 0;
				sb->sb_mbcnt = 0;
				if (!(sb->sb_flags & SB_RECV) &&
				    (sb->sb_so->so_flags & SOF_ENABLE_MSGS)) {
					sb->sb_so->so_msg_state->
					    msg_serial_bytes = 0;
				}
				break;
			}
			m = last = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
			    m->m_type != MT_OOBDATA)
				sb->sb_ctl -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);

		ml = m;
		m = m->m_next;
	}
	while (m && m->m_len == 0) {
		sbfree(sb, m);

		ml = m;
		m = m->m_next;
	}
	if (ml) {
		ml->m_next = (struct mbuf *)0;
		last->m_nextpkt = (struct mbuf *)0;
		m_freem_list(free_list);
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else {
		sb->sb_mb = next;
	}

	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part
	 * makes sure sb_lastrecord is up-to-date if we dropped
	 * part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL) {
		sb->sb_lastrecord = m;
	}

	postevent(0, sb, EV_RWBYTES);

	KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_END), sb, 0, 0, 0, 0);
}
/*
 * Drop a record off the front of a sockbuf
 * and move the next record to the front.
 */
void
sbdroprecord(struct sockbuf *sb)
{
	struct mbuf *m, *mn;

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			MFREE(m, mn);
			m = mn;
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
	postevent(0, sb, EV_RWBYTES);
}
/*
 * Create a "control" mbuf containing the specified data
 * with the specified type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MLEN)
		return ((struct mbuf *)NULL);
	if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL)
		return ((struct mbuf *)NULL);
	cp = mtod(m, struct cmsghdr *);
	VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
	/* XXX check size? */
	(void) memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
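/*
 * Usage sketch (illustrative): building a control record for a received
 * datagram, e.g. the hop limit for an IPv6 socket, and queueing it along
 * with the data; the value being copied is hypothetical.
 *
 *	struct mbuf *control;
 *	int hlim = ...;
 *
 *	control = sbcreatecontrol((caddr_t)&hlim, sizeof (hlim),
 *	    IPV6_HOPLIMIT, IPPROTO_IPV6);
 *	if (control != NULL)
 *		(void) sbappendaddr(&so->so_rcv, from, m, control, NULL);
 */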
struct mbuf **
sbcreatecontrol_mbuf(caddr_t p, int size, int type, int level,
    struct mbuf **mp)
{
	struct mbuf *m;
	struct cmsghdr *cp;

	if (*mp == NULL) {
		*mp = sbcreatecontrol(p, size, type, level);
		return (mp);
	}

	if (CMSG_SPACE((u_int)size) + (*mp)->m_len > MLEN) {
		mp = &(*mp)->m_next;
		*mp = sbcreatecontrol(p, size, type, level);
		return (mp);
	}

	m = *mp;

	cp = (struct cmsghdr *)(void *)(mtod(m, char *) + m->m_len);
	/* CMSG_SPACE ensures 32-bit alignment */
	VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
	m->m_len += CMSG_SPACE(size);

	/* XXX check size? */
	(void) memcpy(CMSG_DATA(cp), p, size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;

	return (mp);
}
/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
int
pru_abort_notsupp(struct socket *so)
{
#pragma unused(so)
	return (EOPNOTSUPP);
}

int
pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
{
#pragma unused(so, nam)
	return (EOPNOTSUPP);
}

int
pru_attach_notsupp(struct socket *so, int proto, struct proc *p)
{
#pragma unused(so, proto, p)
	return (EOPNOTSUPP);
}

int
pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(so, nam, p)
	return (EOPNOTSUPP);
}

int
pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(so, nam, p)
	return (EOPNOTSUPP);
}

int
pru_connect2_notsupp(struct socket *so1, struct socket *so2)
{
#pragma unused(so1, so2)
	return (EOPNOTSUPP);
}

int
pru_connectx_notsupp(struct socket *so, struct sockaddr_list **src_sl,
    struct sockaddr_list **dst_sl, struct proc *p, uint32_t ifscope,
    associd_t aid, connid_t *pcid, uint32_t flags, void *arg,
    uint32_t arglen)
{
#pragma unused(so, src_sl, dst_sl, p, ifscope, aid, pcid, flags, arg, arglen)
	return (EOPNOTSUPP);
}

int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, cmd, data, ifp, p)
	return (EOPNOTSUPP);
}

int
pru_detach_notsupp(struct socket *so)
{
#pragma unused(so)
	return (EOPNOTSUPP);
}

int
pru_disconnect_notsupp(struct socket *so)
{
#pragma unused(so)
	return (EOPNOTSUPP);
}

int
pru_disconnectx_notsupp(struct socket *so, associd_t aid, connid_t cid)
{
#pragma unused(so, aid, cid)
	return (EOPNOTSUPP);
}

int
pru_listen_notsupp(struct socket *so, struct proc *p)
{
#pragma unused(so, p)
	return (EOPNOTSUPP);
}

int
pru_peeloff_notsupp(struct socket *so, associd_t aid, struct socket **psop)
{
#pragma unused(so, aid, psop)
	return (EOPNOTSUPP);
}

int
pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
{
#pragma unused(so, nam)
	return (EOPNOTSUPP);
}

int
pru_rcvd_notsupp(struct socket *so, int flags)
{
#pragma unused(so, flags)
	return (EOPNOTSUPP);
}

int
pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
{
#pragma unused(so, m, flags)
	return (EOPNOTSUPP);
}

int
pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct proc *p)
{
#pragma unused(so, flags, m, addr, control, p)
	return (EOPNOTSUPP);
}

/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
int
pru_sense_null(struct socket *so, void *ub, int isstat64)
{
	if (isstat64 != 0) {
		struct stat64 *sb64;

		sb64 = (struct stat64 *)ub;
		sb64->st_blksize = so->so_snd.sb_hiwat;
	} else {
		struct stat *sb;

		sb = (struct stat *)ub;
		sb->st_blksize = so->so_snd.sb_hiwat;
	}

	return (0);
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags)
{
#pragma unused(so, addr, uio, top, control, flags)
	return (EOPNOTSUPP);
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
#pragma unused(so, paddr, uio, mp0, controlp, flagsp)
	return (EOPNOTSUPP);
}

int
pru_shutdown_notsupp(struct socket *so)
{
#pragma unused(so)
	return (EOPNOTSUPP);
}

int
pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
{
#pragma unused(so, nam)
	return (EOPNOTSUPP);
}

int
pru_sopoll_notsupp(struct socket *so, int events, kauth_cred_t cred, void *wql)
{
#pragma unused(so, events, cred, wql)
	return (EOPNOTSUPP);
}

int
pru_socheckopt_null(struct socket *so, struct sockopt *sopt)
{
#pragma unused(so, sopt)
	/*
	 * Allow all options for set/get by default.
	 */
	return (0);
}

void
pru_sanitize(struct pr_usrreqs *pru)
{
#define	DEFAULT(foo, bar)	if ((foo) == NULL) (foo) = (bar)
	DEFAULT(pru->pru_abort, pru_abort_notsupp);
	DEFAULT(pru->pru_accept, pru_accept_notsupp);
	DEFAULT(pru->pru_attach, pru_attach_notsupp);
	DEFAULT(pru->pru_bind, pru_bind_notsupp);
	DEFAULT(pru->pru_connect, pru_connect_notsupp);
	DEFAULT(pru->pru_connect2, pru_connect2_notsupp);
	DEFAULT(pru->pru_connectx, pru_connectx_notsupp);
	DEFAULT(pru->pru_control, pru_control_notsupp);
	DEFAULT(pru->pru_detach, pru_detach_notsupp);
	DEFAULT(pru->pru_disconnect, pru_disconnect_notsupp);
	DEFAULT(pru->pru_disconnectx, pru_disconnectx_notsupp);
	DEFAULT(pru->pru_listen, pru_listen_notsupp);
	DEFAULT(pru->pru_peeloff, pru_peeloff_notsupp);
	DEFAULT(pru->pru_peeraddr, pru_peeraddr_notsupp);
	DEFAULT(pru->pru_rcvd, pru_rcvd_notsupp);
	DEFAULT(pru->pru_rcvoob, pru_rcvoob_notsupp);
	DEFAULT(pru->pru_send, pru_send_notsupp);
	DEFAULT(pru->pru_sense, pru_sense_null);
	DEFAULT(pru->pru_shutdown, pru_shutdown_notsupp);
	DEFAULT(pru->pru_sockaddr, pru_sockaddr_notsupp);
	DEFAULT(pru->pru_sopoll, pru_sopoll_notsupp);
	DEFAULT(pru->pru_soreceive, pru_soreceive_notsupp);
	DEFAULT(pru->pru_sosend, pru_sosend_notsupp);
	DEFAULT(pru->pru_socheckopt, pru_socheckopt_null);
#undef DEFAULT
}
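/*
 * Usage sketch (illustrative): a protocol fills in only the entry points
 * it implements and lets pru_sanitize() plug the holes with the stubs
 * above, so no pr_usrreqs slot is ever a NULL pointer; the myproto_*
 * names are hypothetical.
 *
 *	static struct pr_usrreqs myproto_usrreqs = {
 *		.pru_attach =	myproto_attach,
 *		.pru_detach =	myproto_detach,
 *		.pru_send =	myproto_send,
 *	};
 *	...
 *	pru_sanitize(&myproto_usrreqs);
 *	... every other slot now returns EOPNOTSUPP (or the null default)
 */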
/*
 * The following are macros on BSD and functions on Darwin
 */

/*
 * Do we need to notify the other side when I/O is possible?
 */
int
sb_notify(struct sockbuf *sb)
{
	return (sb->sb_waiters > 0 ||
	    (sb->sb_flags & (SB_SEL|SB_ASYNC|SB_UPCALL|SB_KNOTE)));
}

/*
 * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
 * This is problematical if the fields are unsigned, as the space might
 * still be negative (cc > hiwat or mbcnt > mbmax).  Should detect
 * overflow and return 0.
 */
int
sbspace(struct sockbuf *sb)
{
	int space = imin((int)(sb->sb_hiwat - sb->sb_cc),
	    (int)(sb->sb_mbmax - sb->sb_mbcnt));
	if (space < 0)
		space = 0;

	return (space);
}
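/*
 * Worked example (illustrative numbers): with sb_hiwat = 8192,
 * sb_cc = 6000, sb_mbmax = 65536 and sb_mbcnt = 64000, sbspace() returns
 * imin(8192 - 6000, 65536 - 64000) = 1536; had either difference gone
 * negative, the result is clamped to 0 rather than returned negative.
 */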
/*
 * If this socket has priority queues, check if there is enough
 * space in the priority queue for this msg.
 */
int
msgq_sbspace(struct socket *so, struct mbuf *control)
{
	int space = 0, error;
	u_int32_t msgpri;

	VERIFY(so->so_type == SOCK_STREAM && SOCK_PROTO(so) == IPPROTO_TCP &&
	    so->so_msg_state != NULL);
	error = tcp_get_msg_priority(control, &msgpri);
	if (error)
		return (0);
	space = (so->so_snd.sb_idealsize / MSG_PRI_COUNT) -
	    so->so_msg_state->msg_priq[msgpri].msgq_bytes;
	if (space < 0)
		space = 0;
	return (space);
}
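/*
 * Worked example (illustrative): with sb_idealsize = 128 KB and
 * MSG_PRI_COUNT priority levels, each queue is budgeted
 * sb_idealsize / MSG_PRI_COUNT bytes; a message of priority p fits only
 * while msg_priq[p].msgq_bytes remains below that per-queue budget.
 */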
/* do we have to send all at once on a socket? */
int
sosendallatonce(struct socket *so)
{
	return (so->so_proto->pr_flags & PR_ATOMIC);
}

/* can we read something from so? */
int
soreadable(struct socket *so)
{
	return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
	    (so->so_state & SS_CANTRCVMORE) ||
	    so->so_comp.tqh_first || so->so_error);
}

/* can we write something to so? */
int
sowriteable(struct socket *so)
{
	return ((!so_wait_for_if_feedback(so) &&
	    sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat &&
	    ((so->so_state & SS_ISCONNECTED) ||
	    (so->so_proto->pr_flags & PR_CONNREQUIRED) == 0)) ||
	    (so->so_state & SS_CANTSENDMORE) ||
	    so->so_error);
}
/* adjust counters in sb reflecting allocation of m */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{
	u_int32_t cnt = 1;

	sb->sb_cc += m->m_len;
	if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
	    m->m_type != MT_OOBDATA)
		sb->sb_ctl += m->m_len;
	sb->sb_mbcnt += MSIZE;

	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt += m->m_ext.ext_size;
		cnt += (m->m_ext.ext_size >> MSIZESHIFT);
	}
	OSAddAtomic(cnt, &total_sbmb_cnt);
	VERIFY(total_sbmb_cnt > 0);
}
/* adjust counters in sb reflecting freeing of m */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{
	int cnt = -1;

	sb->sb_cc -= m->m_len;
	if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
	    m->m_type != MT_OOBDATA)
		sb->sb_ctl -= m->m_len;
	sb->sb_mbcnt -= MSIZE;
	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt -= m->m_ext.ext_size;
		cnt -= (m->m_ext.ext_size >> MSIZESHIFT);
	}
	OSAddAtomic(cnt, &total_sbmb_cnt);
	VERIFY(total_sbmb_cnt >= 0);
}
/*
 * Set lock on sockbuf sb; sleep if lock is already held.
 * Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
 * Returns error without lock if sleep is interrupted.
 */
int
sblock(struct sockbuf *sb, uint32_t flags)
{
	boolean_t nointr = ((sb->sb_flags & SB_NOINTR) || (flags & SBL_NOINTR));
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;
	void *wchan;
	int error = 0;

	VERIFY((flags & SBL_VALID) == flags);

	/* so_usecount may be 0 if we get here from sofreelastref() */
	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 0) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if ((sb->sb_flags & SB_LOCK) && !(flags & SBL_WAIT))
		return (EWOULDBLOCK);
	/*
	 * We may get here from sorflush(), in which case "sb" may not
	 * point to the real socket buffer.  Use the actual socket buffer
	 * address from the socket instead.
	 */
	wchan = (sb->sb_flags & SB_RECV) ?
	    &so->so_rcv.sb_flags : &so->so_snd.sb_flags;

	while (sb->sb_flags & SB_LOCK) {
		lck_mtx_t *mutex_held;

		/*
		 * XXX: This code should be moved up above outside of this loop;
		 * however, we may get here as part of sofreelastref(), and
		 * at that time pr_getlock() may no longer be able to return
		 * us the lock.  This will be fixed in future.
		 */
		if (so->so_proto->pr_getlock != NULL)
			mutex_held = (*so->so_proto->pr_getlock)(so, 0);
		else
			mutex_held = so->so_proto->pr_domain->dom_mtx;

		lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

		sb->sb_wantlock++;
		VERIFY(sb->sb_wantlock != 0);

		error = msleep(wchan, mutex_held,
		    nointr ? PSOCK : PSOCK | PCATCH,
		    nointr ? "sb_lock_nointr" : "sb_lock", NULL);

		VERIFY(sb->sb_wantlock != 0);
		sb->sb_wantlock--;

		if (error == 0 && (so->so_flags & SOF_DEFUNCT) &&
		    !(flags & SBL_IGNDEFUNCT)) {
			error = EBADF;
			SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
			    "(%d)\n", __func__, proc_selfpid(),
			    (uint64_t)VM_KERNEL_ADDRPERM(so),
			    SOCK_DOM(so), SOCK_TYPE(so), error));
		}

		if (error != 0)
			return (error);
	}

	sb->sb_flags |= SB_LOCK;
	return (0);
}
/*
 * Release lock on sockbuf sb.
 */
void
sbunlock(struct sockbuf *sb, boolean_t keeplocked)
{
	void *lr_saved = __builtin_return_address(0);
	struct socket *so = sb->sb_so;

	/* so_usecount may be 0 if we get here from sofreelastref() */
	if (so == NULL) {
		panic("%s: null so, sb=%p sb_flags=0x%x lr=%p\n",
		    __func__, sb, sb->sb_flags, lr_saved);
		/* NOTREACHED */
	} else if (so->so_usecount < 0) {
		panic("%s: sb=%p sb_flags=0x%x sb_so=%p usecount=%d lr=%p "
		    "lrh= %s\n", __func__, sb, sb->sb_flags, so,
		    so->so_usecount, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	VERIFY(sb->sb_flags & SB_LOCK);
	sb->sb_flags &= ~SB_LOCK;

	if (sb->sb_wantlock > 0) {
		/*
		 * We may get here from sorflush(), in which case "sb" may not
		 * point to the real socket buffer.  Use the actual socket
		 * buffer address from the socket instead.
		 */
		wakeup((sb->sb_flags & SB_RECV) ? &so->so_rcv.sb_flags :
		    &so->so_snd.sb_flags);
	}

	if (!keeplocked) {	/* unlock on exit */
		lck_mtx_t *mutex_held;

		if (so->so_proto->pr_getlock != NULL)
			mutex_held = (*so->so_proto->pr_getlock)(so, 0);
		else
			mutex_held = so->so_proto->pr_domain->dom_mtx;

		lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);

		VERIFY(so->so_usecount != 0);
		so->so_usecount--;
		so->unlock_lr[so->next_unlock_lr] = lr_saved;
		so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
		lck_mtx_unlock(mutex_held);
	}
}

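/*
 * Usage sketch (illustrative only): sblock()/sbunlock() bracket code that
 * needs a stable socket buffer.  With SBL_WAIT the caller is willing to
 * sleep for the lock; without it, sblock() can return EWOULDBLOCK.
 *
 *	error = sblock(&so->so_rcv, SBL_WAIT);
 *	if (error == 0) {
 *		... operate on so->so_rcv ...
 *		sbunlock(&so->so_rcv, TRUE);	// TRUE: keep socket lock held
 *	}
 */
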
void
sorwakeup(struct socket *so)
{
	if (sb_notify(&so->so_rcv))
		sowakeup(so, &so->so_rcv);
}

void
sowwakeup(struct socket *so)
{
	if (sb_notify(&so->so_snd))
		sowakeup(so, &so->so_snd);
}

void
soevent(struct socket *so, long hint)
{
	if (so->so_flags & SOF_KNOTE)
		KNOTE(&so->so_klist, hint);

	soevupcall(so, hint);

	/* Don't post an event if this is a subflow socket */
	if ((hint & SO_FILT_HINT_IFDENIED) && !(so->so_flags & SOF_MP_SUBFLOW))
		soevent_ifdenied(so);
}

void
soevupcall(struct socket *so, u_int32_t hint)
{
	void (*so_event)(struct socket *, void *, uint32_t);

	if ((so_event = so->so_event) != NULL) {
		caddr_t so_eventarg = so->so_eventarg;

		hint &= so->so_eventmask;
		if (hint != 0) {
			socket_unlock(so, 0);
			so->so_event(so, so_eventarg, hint);
			socket_lock(so, 0);
		}
	}
}

static void
soevent_ifdenied(struct socket *so)
{
	struct kev_netpolicy_ifdenied ev_ifdenied;

	bzero(&ev_ifdenied, sizeof (ev_ifdenied));
	/*
	 * The event consumer is interested in the effective {upid,pid,uuid}
	 * info, which can differ from that of the process that most recently
	 * performed a system call on the socket, e.g. when the socket is
	 * delegated.
	 */
	if (so->so_flags & SOF_DELEGATED) {
		ev_ifdenied.ev_data.eupid = so->e_upid;
		ev_ifdenied.ev_data.epid = so->e_pid;
		uuid_copy(ev_ifdenied.ev_data.euuid, so->e_uuid);
	} else {
		ev_ifdenied.ev_data.eupid = so->last_upid;
		ev_ifdenied.ev_data.epid = so->last_pid;
		uuid_copy(ev_ifdenied.ev_data.euuid, so->last_uuid);
	}

	if (++so->so_ifdenied_notifies > 1) {
		/*
		 * Allow for at most one kernel event to be generated per
		 * socket; so_ifdenied_notifies is reset upon changes in
		 * the UUID policy.  See comments in inp_update_policy.
		 */
		if (net_io_policy_log) {
			uuid_string_t buf;

			uuid_unparse(ev_ifdenied.ev_data.euuid, buf);
			log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %d "
			    "euuid %s%s has %d redundant events suppressed\n",
			    __func__, so->last_pid,
			    (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
			    SOCK_TYPE(so), ev_ifdenied.ev_data.epid, buf,
			    ((so->so_flags & SOF_DELEGATED) ?
			    " [delegated]" : ""), so->so_ifdenied_notifies);
		}
	} else {
		if (net_io_policy_log) {
			uuid_string_t buf;

			uuid_unparse(ev_ifdenied.ev_data.euuid, buf);
			log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %d "
			    "euuid %s%s event posted\n", __func__,
			    so->last_pid, (uint64_t)VM_KERNEL_ADDRPERM(so),
			    SOCK_DOM(so), SOCK_TYPE(so),
			    ev_ifdenied.ev_data.epid, buf,
			    ((so->so_flags & SOF_DELEGATED) ?
			    " [delegated]" : ""));
		}
		netpolicy_post_msg(KEV_NETPOLICY_IFDENIED,
		    &ev_ifdenied.ev_data, sizeof (ev_ifdenied));
	}
}

/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
 */
struct sockaddr *
dup_sockaddr(struct sockaddr *sa, int canwait)
{
	struct sockaddr *sa2;

	MALLOC(sa2, struct sockaddr *, sa->sa_len, M_SONAME,
	    canwait ? M_WAITOK : M_NOWAIT);
	if (sa2)
		bcopy(sa, sa2, sa->sa_len);
	return (sa2);
}

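/*
 * Usage sketch (illustrative only): a copy obtained from dup_sockaddr() is
 * an M_SONAME allocation and must eventually be released with FREE().
 * With canwait == 0 the allocation may fail and NULL is returned.
 *
 *	struct sockaddr *sa2 = dup_sockaddr(sa, 0);
 *	if (sa2 != NULL) {
 *		... use sa2 ...
 *		FREE(sa2, M_SONAME);
 *	}
 */
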
/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
	xso->xso_len = sizeof (*xso);
	xso->xso_so = (_XSOCKET_PTR(struct socket *))VM_KERNEL_ADDRPERM(so);
	xso->so_type = so->so_type;
	xso->so_options = (short)(so->so_options & 0xffff);
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = (_XSOCKET_PTR(caddr_t))VM_KERNEL_ADDRPERM(so->so_pcb);
	if (so->so_proto) {
		xso->xso_protocol = SOCK_PROTO(so);
		xso->xso_family = SOCK_DOM(so);
	} else {
		xso->xso_protocol = xso->xso_family = 0;
	}
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_pgid;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = kauth_cred_getuid(so->so_cred);
}

void
sotoxsocket64(struct socket *so, struct xsocket64 *xso)
{
	xso->xso_len = sizeof (*xso);
	xso->xso_so = (u_int64_t)VM_KERNEL_ADDRPERM(so);
	xso->so_type = so->so_type;
	xso->so_options = (short)(so->so_options & 0xffff);
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = (u_int64_t)VM_KERNEL_ADDRPERM(so->so_pcb);
	if (so->so_proto) {
		xso->xso_protocol = SOCK_PROTO(so);
		xso->xso_family = SOCK_DOM(so);
	} else {
		xso->xso_protocol = xso->xso_family = 0;
	}
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_pgid;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = kauth_cred_getuid(so->so_cred);
}

/*
 * This does the same for sockbufs.  Note that the xsockbuf structure,
 * since it is always embedded in a socket, does not include a self
 * pointer nor a length.  We make this entry point public in case
 * some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{
	xsb->sb_cc = sb->sb_cc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = (short)
	    (sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick;
	if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0)
		xsb->sb_timeo = 1;
}

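/*
 * Usage sketch (illustrative only): a sysctl handler exporting socket state
 * would fill an xsocket via sotoxsocket() and copy it out; kernel pointers
 * are already scrambled by VM_KERNEL_ADDRPERM() above.
 *
 *	struct xsocket xso;
 *
 *	bzero(&xso, sizeof (xso));
 *	sotoxsocket(so, &xso);
 *	error = SYSCTL_OUT(req, &xso, sizeof (xso));
 */
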
/*
 * Based on the policy set by an all-knowing decision maker, throttle
 * sockets that have been marked as belonging to a "background" process.
 */
inline int
soisthrottled(struct socket *so)
{
	/*
	 * On non-embedded, we rely on implicit throttling by the
	 * application, as we're missing the system wide "decision maker"
	 */
	return (
	    (so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND));
}

inline int
soisprivilegedtraffic(struct socket *so)
{
	return ((so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS) ? 1 : 0);
}

inline int
soissrcbackground(struct socket *so)
{
	return ((so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND) ||
	    IS_SO_TC_BACKGROUND(so->so_traffic_class));
}

/*
 * Here is the definition of some of the basic objects in the kern.ipc
 * branch of the MIB.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc,
    CTLFLAG_RW|CTLFLAG_LOCKED|CTLFLAG_ANYBODY, 0, "IPC");

/* Check that the maximum socket buffer size is within a range */
static int
sysctl_sb_max SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	u_int32_t new_value;
	int changed = 0;
	int error = sysctl_io_number(req, sb_max, sizeof (u_int32_t),
	    &new_value, &changed);

	if (!error && changed) {
		if (new_value > LOW_SB_MAX && new_value <= high_sb_max) {
			sb_max = new_value;
		} else {
			error = ERANGE;
		}
	}
	return (error);
}

static int
sysctl_io_policy_throttled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int i, err;

	i = net_io_policy_throttled;

	err = sysctl_handle_int(oidp, &i, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL)
		return (err);

	if (i != net_io_policy_throttled)
		SOTHROTTLELOG(("throttle: network IO policy throttling is "
		    "now %s\n", i ? "ON" : "OFF"));

	net_io_policy_throttled = i;

	return (err);
}

SYSCTL_PROC(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &sb_max, 0, &sysctl_sb_max, "IU", "Maximum socket buffer size");

SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets,
    CTLFLAG_RD | CTLFLAG_LOCKED, &maxsockets, 0,
    "Maximum number of sockets available");

SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor,
    CTLFLAG_RW | CTLFLAG_LOCKED, &sb_efficiency, 0, "");

SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters,
    CTLFLAG_RD | CTLFLAG_LOCKED, &nmbclusters, 0, "");

SYSCTL_INT(_kern_ipc, OID_AUTO, njcl,
    CTLFLAG_RD | CTLFLAG_LOCKED, &njcl, 0, "");

SYSCTL_INT(_kern_ipc, OID_AUTO, njclbytes,
    CTLFLAG_RD | CTLFLAG_LOCKED, &njclbytes, 0, "");

SYSCTL_INT(_kern_ipc, KIPC_SOQLIMITCOMPAT, soqlimitcompat,
    CTLFLAG_RW | CTLFLAG_LOCKED, &soqlimitcompat, 1,
    "Enable socket queue limit compatibility");

SYSCTL_INT(_kern_ipc, OID_AUTO, soqlencomp, CTLFLAG_RW | CTLFLAG_LOCKED,
    &soqlencomp, 0, "Listen backlog represents only complete queue");

SYSCTL_NODE(_kern_ipc, OID_AUTO, io_policy, CTLFLAG_RW, 0, "network IO policy");

SYSCTL_PROC(_kern_ipc_io_policy, OID_AUTO, throttled,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &net_io_policy_throttled, 0,
    sysctl_io_policy_throttled, "I", "");

SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED,
    &net_io_policy_log, 0, "");

#if CONFIG_PROC_UUID_POLICY
SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, uuid, CTLFLAG_RW | CTLFLAG_LOCKED,
    &net_io_policy_uuid, 0, "");
#endif /* CONFIG_PROC_UUID_POLICY */
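
/*
 * Usage sketch (illustrative only): the kern.ipc.io_policy knobs declared
 * above can be inspected or toggled from userland with sysctl(8), e.g.:
 *
 *	sysctl kern.ipc.io_policy.log
 *	sysctl -w kern.ipc.io_policy.throttled=1
 */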