1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */
26 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
27 /*
28 * Copyright (c) 1982, 1986, 1988, 1990, 1993
29 * The Regents of the University of California. All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed by the University of
42 * California, Berkeley and its contributors.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
60 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.16 2001/06/14 20:46:06 ume Exp $
61 */
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/filedesc.h>
66 #include <sys/proc.h>
67 #include <sys/file.h>
68 #include <sys/fcntl.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/domain.h>
72 #include <sys/kernel.h>
73 #include <sys/event.h>
74 #include <sys/poll.h>
75 #include <sys/protosw.h>
76 #include <sys/socket.h>
77 #include <sys/socketvar.h>
78 #include <sys/resourcevar.h>
79 #include <sys/signalvar.h>
80 #include <sys/sysctl.h>
81 #include <sys/uio.h>
82 #include <sys/ev.h>
83 #include <sys/kdebug.h>
84 #include <net/route.h>
85 #include <netinet/in.h>
86 #include <netinet/in_pcb.h>
87 #include <kern/zalloc.h>
88 #include <machine/limits.h>
89
90 int so_cache_hw = 0;
91 int so_cache_timeouts = 0;
92 int so_cache_max_freed = 0;
93 int cached_sock_count = 0;
94 struct socket *socket_cache_head = 0;
95 struct socket *socket_cache_tail = 0;
96 u_long so_cache_time = 0;
97 int so_cache_init_done = 0;
98 struct zone *so_cache_zone;
99 extern int get_inpcb_str_size();
100 extern int get_tcp_str_size();
101
102 #include <machine/limits.h>
103
104 static void filt_sordetach(struct knote *kn);
105 static int filt_soread(struct knote *kn, long hint);
106 static void filt_sowdetach(struct knote *kn);
107 static int filt_sowrite(struct knote *kn, long hint);
108 static int filt_solisten(struct knote *kn, long hint);
109
110 static struct filterops solisten_filtops =
111 { 1, NULL, filt_sordetach, filt_solisten };
112 static struct filterops soread_filtops =
113 { 1, NULL, filt_sordetach, filt_soread };
114 static struct filterops sowrite_filtops =
115 { 1, NULL, filt_sowdetach, filt_sowrite };
116
117 int socket_debug = 0;
118 int socket_zone = M_SOCKET;
119 so_gen_t so_gencnt; /* generation count for sockets */
120
121 MALLOC_DEFINE(M_SONAME, "soname", "socket name");
122 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
123
124 #define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETSOCK, 0)
125 #define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETSOCK, 2)
126 #define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETSOCK, 1)
127 #define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETSOCK, 3)
128 #define DBG_FNC_SOSEND NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 1)
129 #define DBG_FNC_SORECEIVE NETDBG_CODE(DBG_NETSOCK, (8 << 8))
130 #define DBG_FNC_SOSHUTDOWN NETDBG_CODE(DBG_NETSOCK, (9 << 8))
131
132
133 SYSCTL_DECL(_kern_ipc);
134
135 static int somaxconn = SOMAXCONN;
136 SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW, &somaxconn,
137 0, "");
138
139 /* Should we get a maximum also ??? */
140 static int sosendmaxchain = 65536;
141 static int sosendminchain = 16384;
142 static int sorecvmincopy = 16384;
143 SYSCTL_INT(_kern_ipc, OID_AUTO, sosendminchain, CTLFLAG_RW, &sosendminchain,
144 0, "");
145 SYSCTL_INT(_kern_ipc, OID_AUTO, sorecvmincopy, CTLFLAG_RW, &sorecvmincopy,
146 0, "");
147
148 void so_cache_timer();
149 struct mbuf *m_getpackets(int, int, int);
150
151
152 /*
153 * Socket operation routines.
154 * These routines are called by the routines in
155 * sys_socket.c or from a system process, and
156 * implement the semantics of socket operations by
157 * switching out to the protocol specific routines.
158 */
159
160 #ifdef __APPLE__
161 void socketinit()
162 {
163 vm_size_t str_size;
164
165 so_cache_init_done = 1;
166
167 timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));
168 str_size = (vm_size_t)( sizeof(struct socket) + 4 +
169 get_inpcb_str_size() + 4 +
170 get_tcp_str_size());
171 so_cache_zone = zinit (str_size, 120000*str_size, 8192, "socache zone");
172 #if TEMPDEBUG
173 kprintf("cached_sock_alloc -- so_cache_zone size is %x\n", str_size);
174 #endif
175
176 }
177
178 void cached_sock_alloc(so, waitok)
179 struct socket **so;
180 int waitok;
181
182 {
183 caddr_t temp;
184 int s;
185 register u_long offset;
186
187
188 s = splnet();
189 if (cached_sock_count) {
190 cached_sock_count--;
191 *so = socket_cache_head;
192 if (*so == 0)
193 panic("cached_sock_alloc: cached sock is null");
194
195 socket_cache_head = socket_cache_head->cache_next;
196 if (socket_cache_head)
197 socket_cache_head->cache_prev = 0;
198 else
199 socket_cache_tail = 0;
200 splx(s);
201
202 temp = (*so)->so_saved_pcb;
203 bzero((caddr_t)*so, sizeof(struct socket));
204 #if TEMPDEBUG
205 kprintf("cached_sock_alloc - retreiving cached sock %x - count == %d\n", *so,
206 cached_sock_count);
207 #endif
208 (*so)->so_saved_pcb = temp;
209 }
210 else {
211 #if TEMPDEBUG
212 kprintf("Allocating cached sock %x from memory\n", *so);
213 #endif
214
215 splx(s);
216 if (waitok)
217 *so = (struct socket *) zalloc(so_cache_zone);
218 else
219 *so = (struct socket *) zalloc_noblock(so_cache_zone);
220
221 if (*so == 0)
222 return;
223
224 bzero((caddr_t)*so, sizeof(struct socket));
225
226 /*
227 * Define offsets for extra structures into our single block of
228 * memory. Align extra structures on longword boundaries.
229 */
230
231
232 offset = (u_long) *so;
233 offset += sizeof(struct socket);
234 if (offset & 0x3) {
235 offset += 4;
236 offset &= 0xfffffffc;
237 }
238 (*so)->so_saved_pcb = (caddr_t) offset;
239 offset += get_inpcb_str_size();
240 if (offset & 0x3) {
241 offset += 4;
242 offset &= 0xfffffffc;
243 }
244
245 ((struct inpcb *) (*so)->so_saved_pcb)->inp_saved_ppcb = (caddr_t) offset;
246 #if TEMPDEBUG
247 kprintf("Allocating cached socket - %x, pcb=%x tcpcb=%x\n", *so,
248 (*so)->so_saved_pcb,
249 ((struct inpcb *)(*so)->so_saved_pcb)->inp_saved_ppcb);
250 #endif
251 }
252
253 (*so)->cached_in_sock_layer = 1;
254 }
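/*
 * Illustrative note (not in the original source): the open-coded longword
 * rounding above is the usual "round up to a multiple of 4" idiom. For a
 * 32-bit offset it is equivalent to:
 *
 *	offset = (offset + 3) & ~3;		(e.g. 0x1001 -> 0x1004)
 *
 * The original adds 4 only when the low two bits are nonzero, so the two
 * forms agree for every value, aligned or not.
 */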
255
256
257 void cached_sock_free(so)
258 struct socket *so;
259 {
260 int s;
261
262
263 s = splnet();
264 if (++cached_sock_count > MAX_CACHED_SOCKETS) {
265 --cached_sock_count;
266 splx(s);
267 #if TEMPDEBUG
268 kprintf("Freeing overflowed cached socket %x\n", so);
269 #endif
270 zfree(so_cache_zone, (vm_offset_t) so);
271 }
272 else {
273 #if TEMPDEBUG
274 kprintf("Freeing socket %x into cache\n", so);
275 #endif
276 if (so_cache_hw < cached_sock_count)
277 so_cache_hw = cached_sock_count;
278
279 so->cache_next = socket_cache_head;
280 so->cache_prev = 0;
281 if (socket_cache_head)
282 socket_cache_head->cache_prev = so;
283 else
284 socket_cache_tail = so;
285
286 so->cache_timestamp = so_cache_time;
287 socket_cache_head = so;
288 splx(s);
289 }
290
291 #if TEMPDEBUG
292 kprintf("Freed cached sock %x into cache - count is %d\n", so, cached_sock_count);
293 #endif
294
295
296 }
297
298
299 void so_cache_timer()
300 {
301 register struct socket *p;
302 register int s;
303 register int n_freed = 0;
304 boolean_t funnel_state;
305
306 funnel_state = thread_funnel_set(network_flock, TRUE);
307
308 ++so_cache_time;
309
310 s = splnet();
311
312 	while ((p = socket_cache_tail))
313 {
314 if ((so_cache_time - p->cache_timestamp) < SO_CACHE_TIME_LIMIT)
315 break;
316
317 so_cache_timeouts++;
318
319 		if ((socket_cache_tail = p->cache_prev))
320 p->cache_prev->cache_next = 0;
321 if (--cached_sock_count == 0)
322 socket_cache_head = 0;
323
324 splx(s);
325
326 zfree(so_cache_zone, (vm_offset_t) p);
327
328 		s = splnet();
329 if (++n_freed >= SO_CACHE_MAX_FREE_BATCH)
330 {
331 so_cache_max_freed++;
332 break;
333 }
334 }
335 splx(s);
336
337 timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));
338
339 (void) thread_funnel_set(network_flock, FALSE);
340
341 }
342 #endif /* __APPLE__ */
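/*
 * Summary sketch (not in the original file): the cache above is a
 * doubly-linked LRU. cached_sock_free() pushes at socket_cache_head and
 * so_cache_timer() evicts from socket_cache_tail once an entry's
 * cache_timestamp is SO_CACHE_TIME_LIMIT timer passes old, rescheduling
 * itself every SO_CACHE_FLUSH_INTERVAL seconds and freeing at most
 * SO_CACHE_MAX_FREE_BATCH sockets per pass.
 */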
343
344 /*
345 * Get a socket structure from our zone, and initialize it.
346 * We don't implement `waitok' yet (see comments in uipc_domain.c).
347 * Note that it would probably be better to allocate socket
348 * and PCB at the same time, but I'm not convinced that all
349 * the protocols can be easily modified to do this.
350 */
351 struct socket *
352 soalloc(waitok, dom, type)
353 int waitok;
354 int dom;
355 int type;
356 {
357 struct socket *so;
358
359 if ((dom == PF_INET) && (type == SOCK_STREAM))
360 cached_sock_alloc(&so, waitok);
361 else
362 {
363 so = _MALLOC_ZONE(sizeof(*so), socket_zone, M_WAITOK);
364 if (so)
365 bzero(so, sizeof *so);
366 }
367 /* XXX race condition for reentrant kernel */
368
369 if (so) {
370 so->so_gencnt = ++so_gencnt;
371 so->so_zone = socket_zone;
372 }
373
374 return so;
375 }
376
377 int
378 socreate(dom, aso, type, proto)
379 int dom;
380 struct socket **aso;
381 register int type;
382 int proto;
383 {
384 struct proc *p = current_proc();
385 register struct protosw *prp;
386 register struct socket *so;
387 register int error = 0;
388 #if TCPDEBUG
389 extern int tcpconsdebug;
390 #endif
391 if (proto)
392 prp = pffindproto(dom, proto, type);
393 else
394 prp = pffindtype(dom, type);
395
396 if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
397 return (EPROTONOSUPPORT);
398 #ifndef __APPLE__
399
400 if (p->p_prison && jail_socket_unixiproute_only &&
401 prp->pr_domain->dom_family != PF_LOCAL &&
402 prp->pr_domain->dom_family != PF_INET &&
403 prp->pr_domain->dom_family != PF_ROUTE) {
404 return (EPROTONOSUPPORT);
405 }
406
407 #endif
408 if (prp->pr_type != type)
409 return (EPROTOTYPE);
410 so = soalloc(p != 0, dom, type);
411 if (so == 0)
412 return (ENOBUFS);
413
414 TAILQ_INIT(&so->so_incomp);
415 TAILQ_INIT(&so->so_comp);
416 so->so_type = type;
417
418 #ifdef __APPLE__
419 if (p != 0) {
420 if (p->p_ucred->cr_uid == 0)
421 so->so_state = SS_PRIV;
422
423 so->so_uid = p->p_ucred->cr_uid;
424 }
425 #else
426 so->so_cred = p->p_ucred;
427 crhold(so->so_cred);
428 #endif
429 so->so_proto = prp;
430 #ifdef __APPLE__
431 so->so_rcv.sb_flags |= SB_RECV; /* XXX */
432 if (prp->pr_sfilter.tqh_first)
433 error = sfilter_init(so);
434 if (error == 0)
435 #endif
436 error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
437 if (error) {
438 /*
439 * Warning:
440 * If so_pcb is not zero, the socket will be leaked,
441 		 * so the protocol attachment handler must be coded carefully
442 */
443 so->so_state |= SS_NOFDREF;
444 sofree(so);
445 return (error);
446 }
447 #ifdef __APPLE__
448 prp->pr_domain->dom_refs++;
449 so->so_rcv.sb_so = so->so_snd.sb_so = so;
450 TAILQ_INIT(&so->so_evlist);
451 #if TCPDEBUG
452 if (tcpconsdebug == 2)
453 so->so_options |= SO_DEBUG;
454 #endif
455 #endif
456
457 *aso = so;
458 return (0);
459 }
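/*
 * Usage sketch (illustrative; not part of the original file): socreate()
 * is reached from the socket(2) system call, so (dom, type, proto) is
 * exactly the userland triple:
 *
 *	int s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
 *
 * With proto == 0 the protocol is chosen by pffindtype(dom, type); a
 * nonzero proto must match via pffindproto() or the call fails with
 * EPROTONOSUPPORT.
 */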
460
461 int
462 sobind(so, nam)
463 struct socket *so;
464 struct sockaddr *nam;
465
466 {
467 struct proc *p = current_proc();
468 int error;
469 struct kextcb *kp;
470 int s = splnet();
471
472 error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
473 if (error == 0) {
474 kp = sotokextcb(so);
475 while (kp) {
476 if (kp->e_soif && kp->e_soif->sf_sobind) {
477 error = (*kp->e_soif->sf_sobind)(so, nam, kp);
478 if (error) {
479 if (error == EJUSTRETURN) {
480 error = 0;
481 break;
482 }
483 splx(s);
484 return(error);
485 }
486 }
487 kp = kp->e_next;
488 }
489 }
490 splx(s);
491 return (error);
492 }
493
494 void
495 sodealloc(so)
496 struct socket *so;
497 {
498 so->so_gencnt = ++so_gencnt;
499
500 #ifndef __APPLE__
501 if (so->so_rcv.sb_hiwat)
502 (void)chgsbsize(so->so_cred->cr_uidinfo,
503 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
504 if (so->so_snd.sb_hiwat)
505 (void)chgsbsize(so->so_cred->cr_uidinfo,
506 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
507 #ifdef INET
508 if (so->so_accf != NULL) {
509 if (so->so_accf->so_accept_filter != NULL &&
510 so->so_accf->so_accept_filter->accf_destroy != NULL) {
511 so->so_accf->so_accept_filter->accf_destroy(so);
512 }
513 if (so->so_accf->so_accept_filter_str != NULL)
514 FREE(so->so_accf->so_accept_filter_str, M_ACCF);
515 FREE(so->so_accf, M_ACCF);
516 }
517 #endif /* INET */
518 crfree(so->so_cred);
519 zfreei(so->so_zone, so);
520 #else
521 if (so->cached_in_sock_layer == 1)
522 cached_sock_free(so);
523 else
524 _FREE_ZONE(so, sizeof(*so), so->so_zone);
525 #endif /* __APPLE__ */
526 }
527
528 int
529 solisten(so, backlog)
530 register struct socket *so;
531 int backlog;
532
533 {
534 struct kextcb *kp;
535 struct proc *p = current_proc();
536 int s, error;
537
538 s = splnet();
539 error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
540 if (error) {
541 splx(s);
542 return (error);
543 }
544 if (TAILQ_EMPTY(&so->so_comp))
545 so->so_options |= SO_ACCEPTCONN;
546 if (backlog < 0 || backlog > somaxconn)
547 backlog = somaxconn;
548 so->so_qlimit = backlog;
549 kp = sotokextcb(so);
550 while (kp) {
551 if (kp->e_soif && kp->e_soif->sf_solisten) {
552 error = (*kp->e_soif->sf_solisten)(so, kp);
553 if (error) {
554 if (error == EJUSTRETURN) {
555 error = 0;
556 break;
557 }
558 splx(s);
559 return(error);
560 }
561 }
562 kp = kp->e_next;
563 }
564
565 splx(s);
566 return (0);
567 }
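/*
 * Illustrative note: the backlog is clamped to the kern.ipc.somaxconn
 * sysctl (SOMAXCONN by default), so a userland call such as
 *
 *	listen(s, -1);			(or any value > somaxconn)
 *
 * simply yields so_qlimit == somaxconn rather than an error.
 */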
568
569
570 void
571 sofree(so)
572 register struct socket *so;
573 {
574 int error;
575 struct kextcb *kp;
576 struct socket *head = so->so_head;
577
578 kp = sotokextcb(so);
579 while (kp) {
580 if (kp->e_soif && kp->e_soif->sf_sofree) {
581 error = (*kp->e_soif->sf_sofree)(so, kp);
582 if (error) {
583 selthreadclear(&so->so_snd.sb_sel);
584 selthreadclear(&so->so_rcv.sb_sel);
585 return; /* void fn */
586 }
587 }
588 kp = kp->e_next;
589 }
590
591 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
592 #ifdef __APPLE__
593 selthreadclear(&so->so_snd.sb_sel);
594 selthreadclear(&so->so_rcv.sb_sel);
595 #endif
596 return;
597 }
598 if (head != NULL) {
599 if (so->so_state & SS_INCOMP) {
600 TAILQ_REMOVE(&head->so_incomp, so, so_list);
601 head->so_incqlen--;
602 } else if (so->so_state & SS_COMP) {
603 /*
604 * We must not decommission a socket that's
605 * on the accept(2) queue. If we do, then
606 * accept(2) may hang after select(2) indicated
607 * that the listening socket was ready.
608 */
609 #ifdef __APPLE__
610 selthreadclear(&so->so_snd.sb_sel);
611 selthreadclear(&so->so_rcv.sb_sel);
612 #endif
613 return;
614 } else {
615 panic("sofree: not queued");
616 }
617 head->so_qlen--;
618 so->so_state &= ~SS_INCOMP;
619 so->so_head = NULL;
620 }
621 #ifdef __APPLE__
622 selthreadclear(&so->so_snd.sb_sel);
623 sbrelease(&so->so_snd);
624 #endif
625 sorflush(so);
626 sfilter_term(so);
627 sodealloc(so);
628 }
629
630 /*
631 * Close a socket on last file table reference removal.
632 * Initiate disconnect if connected.
633 * Free socket when disconnect complete.
634 */
635 int
636 soclose(so)
637 register struct socket *so;
638 {
639 int s = splnet(); /* conservative */
640 int error = 0;
641 struct kextcb *kp;
642
643 #ifndef __APPLE__
644 funsetown(so->so_sigio);
645 #endif
646 kp = sotokextcb(so);
647 while (kp) {
648 if (kp->e_soif && kp->e_soif->sf_soclose) {
649 error = (*kp->e_soif->sf_soclose)(so, kp);
650 if (error) {
651 splx(s);
652 return((error == EJUSTRETURN) ? 0 : error);
653 }
654 }
655 kp = kp->e_next;
656 }
657
658 if (so->so_options & SO_ACCEPTCONN) {
659 struct socket *sp, *sonext;
660
661 sp = TAILQ_FIRST(&so->so_incomp);
662 for (; sp != NULL; sp = sonext) {
663 sonext = TAILQ_NEXT(sp, so_list);
664 (void) soabort(sp);
665 }
666 for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
667 sonext = TAILQ_NEXT(sp, so_list);
668 /* Dequeue from so_comp since sofree() won't do it */
669 TAILQ_REMOVE(&so->so_comp, sp, so_list);
670 so->so_qlen--;
671 sp->so_state &= ~SS_COMP;
672 sp->so_head = NULL;
673 (void) soabort(sp);
674 }
675
676 }
677 if (so->so_pcb == 0)
678 goto discard;
679 if (so->so_state & SS_ISCONNECTED) {
680 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
681 error = sodisconnect(so);
682 if (error)
683 goto drop;
684 }
685 if (so->so_options & SO_LINGER) {
686 if ((so->so_state & SS_ISDISCONNECTING) &&
687 (so->so_state & SS_NBIO))
688 goto drop;
689 while (so->so_state & SS_ISCONNECTED) {
690 error = tsleep((caddr_t)&so->so_timeo,
691 PSOCK | PCATCH, "soclos", so->so_linger);
692 if (error)
693 break;
694 }
695 }
696 }
697 drop:
698 if (so->so_pcb) {
699 int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
700 if (error == 0)
701 error = error2;
702 }
703 discard:
704 if (so->so_pcb && so->so_state & SS_NOFDREF)
705 panic("soclose: NOFDREF");
706 so->so_state |= SS_NOFDREF;
707 #ifdef __APPLE__
708 so->so_proto->pr_domain->dom_refs--;
709 evsofree(so);
710 #endif
711 sofree(so);
712 splx(s);
713 return (error);
714 }
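/*
 * Usage sketch (illustrative) of the SO_LINGER path above: with lingering
 * enabled on a blocking socket, close(2) sleeps in the tsleep() loop until
 * the disconnect completes, an error/signal occurs, or the so_linger
 * timeout fires:
 *
 *	struct linger l = { 1, 5 };
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof l);
 *	close(s);		(may block in soclose() while draining)
 *
 * A non-blocking socket (SS_NBIO) that is already disconnecting skips the
 * wait entirely.
 */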
715
716 /*
717 * Must be called at splnet...
718 */
719 int
720 soabort(so)
721 struct socket *so;
722 {
723 int error;
724
725 error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
726 if (error) {
727 sofree(so);
728 return error;
729 }
730 return (0);
731 }
732
733 int
734 soaccept(so, nam)
735 register struct socket *so;
736 struct sockaddr **nam;
737 {
738 int s = splnet();
739 int error;
740 struct kextcb *kp;
741
742 if ((so->so_state & SS_NOFDREF) == 0)
743 panic("soaccept: !NOFDREF");
744 so->so_state &= ~SS_NOFDREF;
745 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
746 if (error == 0) {
747 kp = sotokextcb(so);
748 while (kp) {
749 if (kp->e_soif && kp->e_soif->sf_soaccept) {
750 error = (*kp->e_soif->sf_soaccept)(so, nam, kp);
751 if (error) {
752 if (error == EJUSTRETURN) {
753 error = 0;
754 break;
755 }
756 splx(s);
757 return(error);
758 }
759 }
760 kp = kp->e_next;
761 }
762 }
763
764
765 splx(s);
766 return (error);
767 }
768
769 int
770 soconnect(so, nam)
771 register struct socket *so;
772 struct sockaddr *nam;
773
774 {
775 int s;
776 int error;
777 struct proc *p = current_proc();
778 struct kextcb *kp;
779
780 if (so->so_options & SO_ACCEPTCONN)
781 return (EOPNOTSUPP);
782 s = splnet();
783 /*
784 * If protocol is connection-based, can only connect once.
785 * Otherwise, if connected, try to disconnect first.
786 * This allows user to disconnect by connecting to, e.g.,
787 * a null address.
788 */
789 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
790 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
791 (error = sodisconnect(so))))
792 error = EISCONN;
793 else {
794 /*
795 * Run connect filter before calling protocol:
796 * - non-blocking connect returns before completion;
797 * - allows filters to modify address.
798 */
799 kp = sotokextcb(so);
800 while (kp) {
801 if (kp->e_soif && kp->e_soif->sf_soconnect) {
802 error = (*kp->e_soif->sf_soconnect)(so, nam, kp);
803 if (error) {
804 if (error == EJUSTRETURN) {
805 error = 0;
806 }
807 splx(s);
808 return(error);
809 }
810 }
811 kp = kp->e_next;
812 }
813 error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
814 }
815 splx(s);
816 return (error);
817 }
818
819 int
820 soconnect2(so1, so2)
821 register struct socket *so1;
822 struct socket *so2;
823 {
824 int s = splnet();
825 int error;
826 struct kextcb *kp;
827
828 error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
829 if (error == 0) {
830 kp = sotokextcb(so1);
831 while (kp) {
832 if (kp->e_soif && kp->e_soif->sf_soconnect2) {
833 error = (*kp->e_soif->sf_soconnect2)(so1, so2, kp);
834 if (error) {
835 if (error == EJUSTRETURN) {
836 					error = 0;
837 					break;
838 }
839 splx(s);
840 return(error);
841 }
842 }
843 kp = kp->e_next;
844 }
845 }
846 splx(s);
847 return (error);
848 }
849
850 int
851 sodisconnect(so)
852 register struct socket *so;
853 {
854 int s = splnet();
855 int error;
856 struct kextcb *kp;
857
858 if ((so->so_state & SS_ISCONNECTED) == 0) {
859 error = ENOTCONN;
860 goto bad;
861 }
862 if (so->so_state & SS_ISDISCONNECTING) {
863 error = EALREADY;
864 goto bad;
865 }
866 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
867 if (error == 0) {
868 kp = sotokextcb(so);
869 while (kp) {
870 if (kp->e_soif && kp->e_soif->sf_sodisconnect) {
871 error = (*kp->e_soif->sf_sodisconnect)(so, kp);
872 if (error) {
873 if (error == EJUSTRETURN) {
874 error = 0;
875 break;
876 }
877 splx(s);
878 return(error);
879 }
880 }
881 kp = kp->e_next;
882 }
883 }
884
885 bad:
886 splx(s);
887 return (error);
888 }
889
890 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_DONTWAIT : M_WAIT)
891 /*
892 * Send on a socket.
893 * If send must go all at once and message is larger than
894 * send buffering, then hard error.
895 * Lock against other senders.
896 * If must go all at once and not enough room now, then
897 * inform user that this would block and do nothing.
898 * Otherwise, if nonblocking, send as much as possible.
899 * The data to be sent is described by "uio" if nonzero,
900 * otherwise by the mbuf chain "top" (which must be null
901 * if uio is not). Data provided in mbuf chain must be small
902 * enough to send all at once.
903 *
904 * Returns nonzero on error, timeout or signal; callers
905 * must check for short counts if EINTR/ERESTART are returned.
906 * Data and control buffers are freed on return.
907 * Experiment:
908 * MSG_HOLD: go thru most of sosend(), but just enqueue the mbuf
909 * MSG_SEND: go thru as for MSG_HOLD on current fragment, then
910 * point at the mbuf chain being constructed and go from there.
911 */
912 int
913 sosend(so, addr, uio, top, control, flags)
914 register struct socket *so;
915 struct sockaddr *addr;
916 struct uio *uio;
917 struct mbuf *top;
918 struct mbuf *control;
919 int flags;
920
921 {
922 struct mbuf **mp;
923 register struct mbuf *m, *freelist = NULL;
924 register long space, len, resid;
925 int clen = 0, error, s, dontroute, mlen, sendflags;
926 int atomic = sosendallatonce(so) || top;
927 struct proc *p = current_proc();
928 struct kextcb *kp;
929
930 if (uio)
931 resid = uio->uio_resid;
932 else
933 resid = top->m_pkthdr.len;
934
935 KERNEL_DEBUG((DBG_FNC_SOSEND | DBG_FUNC_START),
936 so,
937 resid,
938 so->so_snd.sb_cc,
939 so->so_snd.sb_lowat,
940 so->so_snd.sb_hiwat);
941
942 /*
943 * In theory resid should be unsigned.
944 * However, space must be signed, as it might be less than 0
945 * if we over-committed, and we must use a signed comparison
946 * of space and resid. On the other hand, a negative resid
947 * causes us to loop sending 0-length segments to the protocol.
948 *
949 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
950 * type sockets since that's an error.
951 */
952 	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
953 error = EINVAL;
954 goto out;
955 }
956
957 dontroute =
958 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
959 (so->so_proto->pr_flags & PR_ATOMIC);
960 if (p)
961 p->p_stats->p_ru.ru_msgsnd++;
962 if (control)
963 clen = control->m_len;
964 #define snderr(errno) { error = errno; splx(s); goto release; }
965
966 restart:
967 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
968 if (error)
969 goto out;
970 do {
971 s = splnet();
972 if (so->so_state & SS_CANTSENDMORE)
973 snderr(EPIPE);
974 if (so->so_error) {
975 error = so->so_error;
976 so->so_error = 0;
977 splx(s);
978 goto release;
979 }
980 if ((so->so_state & SS_ISCONNECTED) == 0) {
981 /*
982 			 * `sendto' and `sendmsg' are allowed on a connection-
983 * based socket if it supports implied connect.
984 * Return ENOTCONN if not connected and no address is
985 * supplied.
986 */
987 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
988 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
989 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
990 !(resid == 0 && clen != 0))
991 snderr(ENOTCONN);
992 } else if (addr == 0 && !(flags&MSG_HOLD))
993 snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
994 ENOTCONN : EDESTADDRREQ);
995 }
996 space = sbspace(&so->so_snd);
997 if (flags & MSG_OOB)
998 space += 1024;
999 if ((atomic && resid > so->so_snd.sb_hiwat) ||
1000 clen > so->so_snd.sb_hiwat)
1001 snderr(EMSGSIZE);
1002 if (space < resid + clen &&
1003 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
1004 if (so->so_state & SS_NBIO)
1005 snderr(EWOULDBLOCK);
1006 sbunlock(&so->so_snd);
1007 error = sbwait(&so->so_snd);
1008 splx(s);
1009 if (error)
1010 goto out;
1011 goto restart;
1012 }
1013 splx(s);
1014 mp = &top;
1015 space -= clen;
1016
1017 do {
1018 if (uio == NULL) {
1019 /*
1020 * Data is prepackaged in "top".
1021 */
1022 resid = 0;
1023 if (flags & MSG_EOR)
1024 top->m_flags |= M_EOR;
1025 } else {
1026 boolean_t dropped_funnel = FALSE;
1027 int chainlength;
1028 int bytes_to_copy;
1029
1030 bytes_to_copy = min(resid, space);
1031
1032 if (sosendminchain > 0) {
1033 if (bytes_to_copy >= sosendminchain) {
1034 dropped_funnel = TRUE;
1035 (void)thread_funnel_set(network_flock, FALSE);
1036 }
1037 chainlength = 0;
1038 } else
1039 chainlength = sosendmaxchain;
1040
1041 do {
1042
1043 if (bytes_to_copy >= MINCLSIZE) {
1044 /*
1045 			 * Try to maintain a local cache of mbuf clusters needed to complete this write;
1046 			 * the list is further limited to the number currently needed to fill the socket.
1047 			 * This mechanism allows a large number of mbufs/clusters to be grabbed under a single
1048 			 * mbuf lock... if we can't get any clusters, then fall back to trying for mbufs.
1049 			 * If we fail early (or miscalculate the number needed), make sure to release any clusters
1050 			 * we haven't yet consumed.
1051 */
1052 if ((m = freelist) == NULL) {
1053 int num_needed;
1054 int hdrs_needed = 0;
1055
1056 if (top == 0)
1057 hdrs_needed = 1;
1058 num_needed = bytes_to_copy / MCLBYTES;
1059
1060 if ((bytes_to_copy - (num_needed * MCLBYTES)) >= MINCLSIZE)
1061 num_needed++;
1062
1063 if ((freelist = m_getpackets(num_needed, hdrs_needed, M_WAIT)) == NULL)
1064 goto getpackets_failed;
1065 m = freelist;
1066 }
1067 freelist = m->m_next;
1068 m->m_next = NULL;
1069
1070 mlen = MCLBYTES;
1071 len = min(mlen, bytes_to_copy);
1072 } else {
1073 getpackets_failed:
1074 if (top == 0) {
1075 MGETHDR(m, M_WAIT, MT_DATA);
1076 mlen = MHLEN;
1077 m->m_pkthdr.len = 0;
1078 m->m_pkthdr.rcvif = (struct ifnet *)0;
1079 } else {
1080 MGET(m, M_WAIT, MT_DATA);
1081 mlen = MLEN;
1082 }
1083 len = min(mlen, bytes_to_copy);
1084 /*
1085 * For datagram protocols, leave room
1086 * for protocol headers in first mbuf.
1087 */
1088 if (atomic && top == 0 && len < mlen)
1089 MH_ALIGN(m, len);
1090 }
1091 chainlength += len;
1092
1093 space -= len;
1094
1095 error = uiomove(mtod(m, caddr_t), (int)len, uio);
1096
1097 resid = uio->uio_resid;
1098
1099 m->m_len = len;
1100 *mp = m;
1101 top->m_pkthdr.len += len;
1102 if (error)
1103 break;
1104 mp = &m->m_next;
1105 if (resid <= 0) {
1106 if (flags & MSG_EOR)
1107 top->m_flags |= M_EOR;
1108 break;
1109 }
1110 bytes_to_copy = min(resid, space);
1111
1112 } while (space > 0 && (chainlength < sosendmaxchain || atomic || resid < MINCLSIZE));
1113
1114 if (dropped_funnel == TRUE)
1115 (void)thread_funnel_set(network_flock, TRUE);
1116 if (error)
1117 goto release;
1118 }
1119
1120 if (flags & (MSG_HOLD|MSG_SEND))
1121 { /* Enqueue for later, go away if HOLD */
1122 register struct mbuf *mb1;
1123 if (so->so_temp && (flags & MSG_FLUSH))
1124 { m_freem(so->so_temp);
1125 so->so_temp = NULL;
1126 }
1127 if (so->so_temp)
1128 so->so_tail->m_next = top;
1129 else
1130 so->so_temp = top;
1131 mb1 = top;
1132 while (mb1->m_next)
1133 mb1 = mb1->m_next;
1134 so->so_tail = mb1;
1135 if (flags&MSG_HOLD)
1136 { top = NULL;
1137 goto release;
1138 }
1139 top = so->so_temp;
1140 }
1141 if (dontroute)
1142 so->so_options |= SO_DONTROUTE;
1143 s = splnet(); /* XXX */
1144 /* Compute flags here, for pru_send and NKEs */
1145 sendflags = (flags & MSG_OOB) ? PRUS_OOB :
1146 /*
1147 * If the user set MSG_EOF, the protocol
1148 * understands this flag and nothing left to
1149 * send then use PRU_SEND_EOF instead of PRU_SEND.
1150 */
1151 ((flags & MSG_EOF) &&
1152 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
1153 (resid <= 0)) ?
1154 PRUS_EOF :
1155 /* If there is more to send set PRUS_MORETOCOME */
1156 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0;
1157 kp = sotokextcb(so);
1158 while (kp)
1159 { if (kp->e_soif && kp->e_soif->sf_sosend) {
1160 error = (*kp->e_soif->sf_sosend)(so, &addr,
1161 &uio, &top,
1162 &control,
1163 &sendflags,
1164 kp);
1165 if (error) {
1166 splx(s);
1167 if (error == EJUSTRETURN) {
1168 sbunlock(&so->so_snd);
1169
1170 if (freelist)
1171 m_freem_list(freelist);
1172 return(0);
1173 }
1174 goto release;
1175 }
1176 }
1177 kp = kp->e_next;
1178 }
1179
1180 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1181 sendflags, top, addr, control, p);
1182 splx(s);
1183 #ifdef __APPLE__
1184 if (flags & MSG_SEND)
1185 so->so_temp = NULL;
1186 #endif
1187 if (dontroute)
1188 so->so_options &= ~SO_DONTROUTE;
1189 clen = 0;
1190 control = 0;
1191 top = 0;
1192 mp = &top;
1193 if (error)
1194 goto release;
1195 } while (resid && space > 0);
1196 } while (resid);
1197
1198 release:
1199 sbunlock(&so->so_snd);
1200 out:
1201 if (top)
1202 m_freem(top);
1203 if (control)
1204 m_freem(control);
1205 if (freelist)
1206 m_freem_list(freelist);
1207
1208 KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_END,
1209 so,
1210 resid,
1211 so->so_snd.sb_cc,
1212 space,
1213 error);
1214
1215 return (error);
1216 }
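/*
 * Usage sketch (illustrative): as the header comment warns, callers must
 * check for short counts when EINTR/ERESTART are returned. The classic
 * userland pattern over a stream socket:
 *
 *	size_t off = 0;
 *	while (off < len) {
 *		ssize_t n = send(s, buf + off, len - off, 0);
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;	(retry the remainder)
 *			break;			(hard error)
 *		}
 *		off += n;
 *	}
 */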
1217
1218 /*
1219 * Implement receive operations on a socket.
1220 * We depend on the way that records are added to the sockbuf
1221 * by sbappend*. In particular, each record (mbufs linked through m_next)
1222 * must begin with an address if the protocol so specifies,
1223 * followed by an optional mbuf or mbufs containing ancillary data,
1224 * and then zero or more mbufs of data.
1225 * In order to avoid blocking network interrupts for the entire time here,
1226 * we splx() while doing the actual copy to user space.
1227 * Although the sockbuf is locked, new data may still be appended,
1228 * and thus we must maintain consistency of the sockbuf during that time.
1229 *
1230 * The caller may receive the data as a single mbuf chain by supplying
1231 * an mbuf **mp0 for use in returning the chain. The uio is then used
1232 * only for the count in uio_resid.
1233 */
1234 int
1235 soreceive(so, psa, uio, mp0, controlp, flagsp)
1236 register struct socket *so;
1237 struct sockaddr **psa;
1238 struct uio *uio;
1239 struct mbuf **mp0;
1240 struct mbuf **controlp;
1241 int *flagsp;
1242 {
1243 register struct mbuf *m, **mp, *ml;
1244 register int flags, len, error, s, offset;
1245 struct protosw *pr = so->so_proto;
1246 struct mbuf *nextrecord;
1247 int moff, type = 0;
1248 int orig_resid = uio->uio_resid;
1249 struct kextcb *kp;
1250 volatile struct mbuf *free_list;
1251 volatile int delayed_copy_len;
1252 int can_delay;
1253 int need_event;
1254 struct proc *p = current_proc();
1255
1256
1257 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START,
1258 so,
1259 uio->uio_resid,
1260 so->so_rcv.sb_cc,
1261 so->so_rcv.sb_lowat,
1262 so->so_rcv.sb_hiwat);
1263
1264 kp = sotokextcb(so);
1265 while (kp) {
1266 if (kp->e_soif && kp->e_soif->sf_soreceive) {
1267 error = (*kp->e_soif->sf_soreceive)(so, psa, &uio,
1268 mp0, controlp,
1269 flagsp, kp);
1270 if (error) {
1271 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0);
1272 return((error == EJUSTRETURN) ? 0 : error);
1273 }
1274 }
1275 kp = kp->e_next;
1276 }
1277
1278 mp = mp0;
1279 if (psa)
1280 *psa = 0;
1281 if (controlp)
1282 *controlp = 0;
1283 if (flagsp)
1284 flags = *flagsp &~ MSG_EOR;
1285 else
1286 flags = 0;
1287 /*
1288 * When SO_WANTOOBFLAG is set we try to get out-of-band data
1289 	 * regardless of the flags argument. Here is the case where
1290 * out-of-band data is not inline.
1291 */
1292 if ((flags & MSG_OOB) ||
1293 ((so->so_options & SO_WANTOOBFLAG) != 0 &&
1294 (so->so_options & SO_OOBINLINE) == 0 &&
1295 (so->so_oobmark || (so->so_state & SS_RCVATMARK)))) {
1296 m = m_get(M_WAIT, MT_DATA);
1297 if (m == NULL) {
1298 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, ENOBUFS,0,0,0,0);
1299 return (ENOBUFS);
1300 }
1301 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
1302 if (error)
1303 goto bad;
1304 do {
1305 error = uiomove(mtod(m, caddr_t),
1306 (int) min(uio->uio_resid, m->m_len), uio);
1307 m = m_free(m);
1308 } while (uio->uio_resid && error == 0 && m);
1309 bad:
1310 if (m)
1311 m_freem(m);
1312 #ifdef __APPLE__
1313 if ((so->so_options & SO_WANTOOBFLAG) != 0) {
1314 if (error == EWOULDBLOCK || error == EINVAL) {
1315 /*
1316 * Let's try to get normal data:
1317 			 * EWOULDBLOCK: out-of-band data not received yet;
1318 * EINVAL: out-of-band data already read.
1319 */
1320 error = 0;
1321 goto nooob;
1322 } else if (error == 0 && flagsp)
1323 *flagsp |= MSG_OOB;
1324 }
1325 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0);
1326 #endif
1327 return (error);
1328 }
1329 nooob:
1330 if (mp)
1331 *mp = (struct mbuf *)0;
1332 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
1333 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
1334
1335
1336 free_list = (struct mbuf *)0;
1337 delayed_copy_len = 0;
1338 restart:
1339 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1340 if (error) {
1341 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0);
1342 return (error);
1343 }
1344 s = splnet();
1345
1346 m = so->so_rcv.sb_mb;
1347 /*
1348 * If we have less data than requested, block awaiting more
1349 * (subject to any timeout) if:
1350 * 1. the current count is less than the low water mark, or
1351 * 2. MSG_WAITALL is set, and it is possible to do the entire
1352 * receive operation at once if we block (resid <= hiwat).
1353 * 3. MSG_DONTWAIT is not set
1354 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1355 * we have to do the receive in sections, and thus risk returning
1356 * a short count if a timeout or signal occurs after we start.
1357 */
1358 if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
1359 so->so_rcv.sb_cc < uio->uio_resid) &&
1360 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
1361 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
1362 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
1363
1364 KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
1365 if (so->so_error) {
1366 if (m)
1367 goto dontblock;
1368 error = so->so_error;
1369 if ((flags & MSG_PEEK) == 0)
1370 so->so_error = 0;
1371 goto release;
1372 }
1373 if (so->so_state & SS_CANTRCVMORE) {
1374 if (m)
1375 goto dontblock;
1376 else
1377 goto release;
1378 }
1379 for (; m; m = m->m_next)
1380 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1381 m = so->so_rcv.sb_mb;
1382 goto dontblock;
1383 }
1384 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1385 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1386 error = ENOTCONN;
1387 goto release;
1388 }
1389 if (uio->uio_resid == 0)
1390 goto release;
1391 if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
1392 error = EWOULDBLOCK;
1393 goto release;
1394 }
1395 sbunlock(&so->so_rcv);
1396 if (socket_debug)
1397 printf("Waiting for socket data\n");
1398
1399 error = sbwait(&so->so_rcv);
1400 if (socket_debug)
1401 printf("SORECEIVE - sbwait returned %d\n", error);
1402 splx(s);
1403 if (error) {
1404 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0);
1405 return (error);
1406 }
1407 goto restart;
1408 }
1409 dontblock:
1410 #ifndef __APPLE__
1411 if (uio->uio_procp)
1412 uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
1413 #else /* __APPLE__ */
1414 /*
1415 * 2207985
1416 	 * This should be uio->uio_procp; however, some callers of this
1417 * function use auto variables with stack garbage, and fail to
1418 * fill out the uio structure properly.
1419 */
1420 if (p)
1421 p->p_stats->p_ru.ru_msgrcv++;
1422 #endif /* __APPLE__ */
1423 nextrecord = m->m_nextpkt;
1424 if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) {
1425 KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
1426 orig_resid = 0;
1427 if (psa) {
1428 *psa = dup_sockaddr(mtod(m, struct sockaddr *),
1429 mp0 == 0);
1430 if ((*psa == 0) && (flags & MSG_NEEDSA)) {
1431 error = EWOULDBLOCK;
1432 goto release;
1433 }
1434 }
1435 if (flags & MSG_PEEK) {
1436 m = m->m_next;
1437 } else {
1438 sbfree(&so->so_rcv, m);
1439 MFREE(m, so->so_rcv.sb_mb);
1440 m = so->so_rcv.sb_mb;
1441 }
1442 }
1443 while (m && m->m_type == MT_CONTROL && error == 0) {
1444 if (flags & MSG_PEEK) {
1445 if (controlp)
1446 *controlp = m_copy(m, 0, m->m_len);
1447 m = m->m_next;
1448 } else {
1449 sbfree(&so->so_rcv, m);
1450 if (controlp) {
1451 if (pr->pr_domain->dom_externalize &&
1452 mtod(m, struct cmsghdr *)->cmsg_type ==
1453 SCM_RIGHTS)
1454 error = (*pr->pr_domain->dom_externalize)(m);
1455 *controlp = m;
1456 so->so_rcv.sb_mb = m->m_next;
1457 m->m_next = 0;
1458 m = so->so_rcv.sb_mb;
1459 } else {
1460 MFREE(m, so->so_rcv.sb_mb);
1461 m = so->so_rcv.sb_mb;
1462 }
1463 }
1464 if (controlp) {
1465 orig_resid = 0;
1466 controlp = &(*controlp)->m_next;
1467 }
1468 }
1469 if (m) {
1470 if ((flags & MSG_PEEK) == 0)
1471 m->m_nextpkt = nextrecord;
1472 type = m->m_type;
1473 if (type == MT_OOBDATA)
1474 flags |= MSG_OOB;
1475 }
1476 moff = 0;
1477 offset = 0;
1478
1479 if (!(flags & MSG_PEEK) && uio->uio_resid > sorecvmincopy)
1480 can_delay = 1;
1481 else
1482 can_delay = 0;
1483
1484 need_event = 0;
1485
1486
1487 while (m && (uio->uio_resid - delayed_copy_len) > 0 && error == 0) {
1488 if (m->m_type == MT_OOBDATA) {
1489 if (type != MT_OOBDATA)
1490 break;
1491 } else if (type == MT_OOBDATA)
1492 break;
1493 #ifndef __APPLE__
1494 /*
1495 		 * This assertion needs rework. The trouble is AppleTalk uses many
1496 * mbuf types (NOT listed in mbuf.h!) which will trigger this panic.
1497 * For now just remove the assertion... CSM 9/98
1498 */
1499 else
1500 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
1501 ("receive 3"));
1502 #else
1503 /*
1504 		 * Make sure to always set MSG_OOB when getting
1505 * out of band data inline.
1506 */
1507 if ((so->so_options & SO_WANTOOBFLAG) != 0 &&
1508 (so->so_options & SO_OOBINLINE) != 0 &&
1509 (so->so_state & SS_RCVATMARK) != 0) {
1510 flags |= MSG_OOB;
1511 }
1512 #endif
1513 so->so_state &= ~SS_RCVATMARK;
1514 len = uio->uio_resid - delayed_copy_len;
1515 if (so->so_oobmark && len > so->so_oobmark - offset)
1516 len = so->so_oobmark - offset;
1517 if (len > m->m_len - moff)
1518 len = m->m_len - moff;
1519 /*
1520 * If mp is set, just pass back the mbufs.
1521 * Otherwise copy them out via the uio, then free.
1522 * Sockbuf must be consistent here (points to current mbuf,
1523 * it points to next record) when we drop priority;
1524 * we must note any additions to the sockbuf when we
1525 * block interrupts again.
1526 */
1527 if (mp == 0) {
1528 if (can_delay && len == m->m_len) {
1529 /*
1530 * only delay the copy if we're consuming the
1531 * mbuf and we're NOT in MSG_PEEK mode
1532 				 * and we have enough data to make it worthwhile
1533 				 * to drop and retake the funnel... can_delay
1534 				 * reflects the state of the latter two constraints;
1535 				 * moff should always be zero in these cases
1536 */
1537 delayed_copy_len += len;
1538 } else {
1539 splx(s);
1540
1541 if (delayed_copy_len) {
1542 error = sodelayed_copy(uio, &free_list, &delayed_copy_len);
1543
1544 if (error) {
1545 s = splnet();
1546 goto release;
1547 }
1548 if (m != so->so_rcv.sb_mb) {
1549 /*
1550 * can only get here if MSG_PEEK is not set
1551 * therefore, m should point at the head of the rcv queue...
1552 * if it doesn't, it means something drastically changed
1553 * while we were out from behind the funnel in sodelayed_copy...
1554 * perhaps a RST on the stream... in any event, the stream has
1555 * been interrupted... it's probably best just to return
1556 * whatever data we've moved and let the caller sort it out...
1557 */
1558 break;
1559 }
1560 }
1561 error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
1562
1563 s = splnet();
1564 if (error)
1565 goto release;
1566 }
1567 } else
1568 uio->uio_resid -= len;
1569
1570 if (len == m->m_len - moff) {
1571 if (m->m_flags & M_EOR)
1572 flags |= MSG_EOR;
1573 if (flags & MSG_PEEK) {
1574 m = m->m_next;
1575 moff = 0;
1576 } else {
1577 nextrecord = m->m_nextpkt;
1578 sbfree(&so->so_rcv, m);
1579
1580 if (mp) {
1581 *mp = m;
1582 mp = &m->m_next;
1583 so->so_rcv.sb_mb = m = m->m_next;
1584 *mp = (struct mbuf *)0;
1585 } else {
1586 m->m_nextpkt = 0;
1587 if (free_list == NULL)
1588 free_list = m;
1589 else
1590 ml->m_next = m;
1591 ml = m;
1592 so->so_rcv.sb_mb = m = m->m_next;
1593 ml->m_next = 0;
1594 }
1595 if (m)
1596 m->m_nextpkt = nextrecord;
1597 }
1598 } else {
1599 if (flags & MSG_PEEK)
1600 moff += len;
1601 else {
1602 if (mp)
1603 *mp = m_copym(m, 0, len, M_WAIT);
1604 m->m_data += len;
1605 m->m_len -= len;
1606 so->so_rcv.sb_cc -= len;
1607 }
1608 }
1609 if (so->so_oobmark) {
1610 if ((flags & MSG_PEEK) == 0) {
1611 so->so_oobmark -= len;
1612 if (so->so_oobmark == 0) {
1613 so->so_state |= SS_RCVATMARK;
1614 /*
1615 * delay posting the actual event until after
1616 * any delayed copy processing has finished
1617 */
1618 need_event = 1;
1619 break;
1620 }
1621 } else {
1622 offset += len;
1623 if (offset == so->so_oobmark)
1624 break;
1625 }
1626 }
1627 if (flags & MSG_EOR)
1628 break;
1629 /*
1630 * If the MSG_WAITALL or MSG_WAITSTREAM flag is set (for non-atomic socket),
1631 * we must not quit until "uio->uio_resid == 0" or an error
1632 * termination. If a signal/timeout occurs, return
1633 * with a short count but without error.
1634 * Keep sockbuf locked against other readers.
1635 */
1636 while (flags & (MSG_WAITALL|MSG_WAITSTREAM) && m == 0 && (uio->uio_resid - delayed_copy_len) > 0 &&
1637 !sosendallatonce(so) && !nextrecord) {
1638 if (so->so_error || so->so_state & SS_CANTRCVMORE)
1639 goto release;
1640
1641 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
1642 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1643 if (sbwait(&so->so_rcv)) {
1644 error = 0;
1645 goto release;
1646 }
1647 /*
1648 * have to wait until after we get back from the sbwait to do the copy because
1649 * we will drop the funnel if we have enough data that has been delayed... by dropping
1650 * the funnel we open up a window allowing the netisr thread to process the incoming packets
1651 * and to change the state of this socket... we're issuing the sbwait because
1652 * the socket is empty and we're expecting the netisr thread to wake us up when more
1653 * packets arrive... if we allow that processing to happen and then sbwait, we
1654 * could stall forever with packets sitting in the socket if no further packets
1655 * arrive from the remote side.
1656 *
1657 * we want to copy before we've collected all the data to satisfy this request to
1658 * allow the copy to overlap the incoming packet processing on an MP system
1659 */
1660 if (delayed_copy_len > sorecvmincopy && (delayed_copy_len > (so->so_rcv.sb_hiwat / 2))) {
1661
1662 error = sodelayed_copy(uio, &free_list, &delayed_copy_len);
1663
1664 if (error)
1665 goto release;
1666 }
1667 m = so->so_rcv.sb_mb;
1668 if (m) {
1669 nextrecord = m->m_nextpkt;
1670 }
1671 }
1672 }
1673
1674 if (m && pr->pr_flags & PR_ATOMIC) {
1675 #ifdef __APPLE__
1676 if (so->so_options & SO_DONTTRUNC)
1677 flags |= MSG_RCVMORE;
1678 else {
1679 #endif
1680 flags |= MSG_TRUNC;
1681 if ((flags & MSG_PEEK) == 0)
1682 (void) sbdroprecord(&so->so_rcv);
1683 #ifdef __APPLE__
1684 }
1685 #endif
1686 }
1687 if ((flags & MSG_PEEK) == 0) {
1688 if (m == 0)
1689 so->so_rcv.sb_mb = nextrecord;
1690 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
1691 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1692 }
1693 #ifdef __APPLE__
1694 if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0)
1695 flags |= MSG_HAVEMORE;
1696
1697 if (delayed_copy_len) {
1698 error = sodelayed_copy(uio, &free_list, &delayed_copy_len);
1699
1700 if (error)
1701 goto release;
1702 }
1703 if (free_list) {
1704 m_freem_list((struct mbuf *)free_list);
1705 free_list = (struct mbuf *)0;
1706 }
1707 if (need_event)
1708 postevent(so, 0, EV_OOB);
1709 #endif
1710 if (orig_resid == uio->uio_resid && orig_resid &&
1711 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
1712 sbunlock(&so->so_rcv);
1713 splx(s);
1714 goto restart;
1715 }
1716
1717 if (flagsp)
1718 *flagsp |= flags;
1719 release:
1720 if (delayed_copy_len) {
1721 error = sodelayed_copy(uio, &free_list, &delayed_copy_len);
1722 }
1723 if (free_list) {
1724 m_freem_list((struct mbuf *)free_list);
1725 }
1726 sbunlock(&so->so_rcv);
1727 splx(s);
1728
1729 KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END,
1730 so,
1731 uio->uio_resid,
1732 so->so_rcv.sb_cc,
1733 0,
1734 error);
1735
1736 return (error);
1737 }
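/*
 * Usage sketch (illustrative): the MSG_WAITALL handling above lets a
 * stream reader request an exact byte count, provided the request fits in
 * the receive buffer (resid <= hiwat):
 *
 *	ssize_t n = recv(s, buf, sizeof buf, MSG_WAITALL);
 *
 * Here n only falls short of sizeof buf on EOF, an error, or an
 * interrupting signal/timeout, never merely because the data arrived in
 * multiple segments.
 */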
1738
1739
1740 int sodelayed_copy(struct uio *uio, struct mbuf **free_list, int *resid)
1741 {
1742 int error = 0;
1743 boolean_t dropped_funnel = FALSE;
1744 struct mbuf *m;
1745
1746 m = *free_list;
1747
1748 if (*resid >= sorecvmincopy) {
1749 dropped_funnel = TRUE;
1750
1751 (void)thread_funnel_set(network_flock, FALSE);
1752 }
1753 while (m && error == 0) {
1754
1755 error = uiomove(mtod(m, caddr_t), (int)m->m_len, uio);
1756
1757 m = m->m_next;
1758 }
1759 m_freem_list(*free_list);
1760
1761 *free_list = (struct mbuf *)NULL;
1762 *resid = 0;
1763
1764 if (dropped_funnel == TRUE)
1765 (void)thread_funnel_set(network_flock, TRUE);
1766
1767 return (error);
1768 }
1769
1770
1771 int
1772 soshutdown(so, how)
1773 register struct socket *so;
1774 register int how;
1775 {
1776 register struct protosw *pr = so->so_proto;
1777 struct kextcb *kp;
1778 int ret;
1779
1780
1781 KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_START, 0,0,0,0,0);
1782 kp = sotokextcb(so);
1783 while (kp) {
1784 if (kp->e_soif && kp->e_soif->sf_soshutdown) {
1785 ret = (*kp->e_soif->sf_soshutdown)(so, how, kp);
1786 if (ret) {
1787 KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0,0,0,0,0);
1788 return((ret == EJUSTRETURN) ? 0 : ret);
1789 }
1790 }
1791 kp = kp->e_next;
1792 }
1793
1794 if (how != SHUT_WR) {
1795 sorflush(so);
1796 postevent(so, 0, EV_RCLOSED);
1797 }
1798 if (how != SHUT_RD) {
1799 ret = ((*pr->pr_usrreqs->pru_shutdown)(so));
1800 postevent(so, 0, EV_WCLOSED);
1801 KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0,0,0,0,0);
1802 return(ret);
1803 }
1804
1805 KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0,0,0,0,0);
1806 return (0);
1807 }
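/*
 * Illustrative mapping to shutdown(2): how == SHUT_RD flushes only the
 * receive side (sorflush), SHUT_WR only invokes the protocol's
 * pru_shutdown (e.g. sending a TCP FIN), and SHUT_RDWR does both:
 *
 *	shutdown(s, SHUT_WR);	(half-close: peer sees EOF, we can still read)
 */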
1808
1809 void
1810 sorflush(so)
1811 register struct socket *so;
1812 {
1813 register struct sockbuf *sb = &so->so_rcv;
1814 register struct protosw *pr = so->so_proto;
1815 register int s, error;
1816 struct sockbuf asb;
1817 struct kextcb *kp;
1818
1819 kp = sotokextcb(so);
1820 while (kp) {
1821 if (kp->e_soif && kp->e_soif->sf_sorflush) {
1822 if ((*kp->e_soif->sf_sorflush)(so, kp))
1823 return;
1824 }
1825 kp = kp->e_next;
1826 }
1827
1828 sb->sb_flags |= SB_NOINTR;
1829 (void) sblock(sb, M_WAIT);
1830 s = splimp();
1831 socantrcvmore(so);
1832 sbunlock(sb);
1833 #ifdef __APPLE__
1834 selthreadclear(&sb->sb_sel);
1835 #endif
1836 asb = *sb;
1837 bzero((caddr_t)sb, sizeof (*sb));
1838 if (asb.sb_flags & SB_KNOTE) {
1839 sb->sb_sel.si_note = asb.sb_sel.si_note;
1840 sb->sb_flags = SB_KNOTE;
1841 }
1842 splx(s);
1843 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
1844 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
1845
1846 sbrelease(&asb);
1847 }
1848
1849 /*
1850 * Perhaps this routine, and sooptcopyout(), below, ought to come in
1851 * an additional variant to handle the case where the option value needs
1852 * to be some kind of integer, but not a specific size.
1853 * In addition to their use here, these functions are also called by the
1854 * protocol-level pr_ctloutput() routines.
1855 */
1856 int
1857 sooptcopyin(sopt, buf, len, minlen)
1858 struct sockopt *sopt;
1859 void *buf;
1860 size_t len;
1861 size_t minlen;
1862 {
1863 size_t valsize;
1864
1865 /*
1866 * If the user gives us more than we wanted, we ignore it,
1867 * but if we don't get the minimum length the caller
1868 * wants, we return EINVAL. On success, sopt->sopt_valsize
1869 * is set to however much we actually retrieved.
1870 */
1871 if ((valsize = sopt->sopt_valsize) < minlen)
1872 return EINVAL;
1873 if (valsize > len)
1874 sopt->sopt_valsize = valsize = len;
1875
1876 if (sopt->sopt_p != 0)
1877 return (copyin(sopt->sopt_val, buf, valsize));
1878
1879 bcopy(sopt->sopt_val, buf, valsize);
1880 return 0;
1881 }
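/*
 * Usage sketch (illustrative): a protocol-level pr_ctloutput() pulls in an
 * option value through this helper just as sosetopt() does below, e.g. for
 * an int-sized option:
 *
 *	int optval;
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		return (error);
 *
 * Oversized user buffers are silently truncated to len; undersized ones
 * fail with EINVAL.
 */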
1882
1883 int
1884 sosetopt(so, sopt)
1885 struct socket *so;
1886 struct sockopt *sopt;
1887 {
1888 int error, optval;
1889 struct linger l;
1890 struct timeval tv;
1891 short val;
1892 struct kextcb *kp;
1893
1894 if (sopt->sopt_dir != SOPT_SET) {
1895 sopt->sopt_dir = SOPT_SET;
1896 }
1897
1898 kp = sotokextcb(so);
1899 while (kp) {
1900 if (kp->e_soif && kp->e_soif->sf_socontrol) {
1901 error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
1902 if (error)
1903 return((error == EJUSTRETURN) ? 0 : error);
1904 }
1905 kp = kp->e_next;
1906 }
1907
1908 error = 0;
1909 if (sopt->sopt_level != SOL_SOCKET) {
1910 if (so->so_proto && so->so_proto->pr_ctloutput)
1911 return ((*so->so_proto->pr_ctloutput)
1912 (so, sopt));
1913 error = ENOPROTOOPT;
1914 } else {
1915 switch (sopt->sopt_name) {
1916 case SO_LINGER:
1917 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
1918 if (error)
1919 goto bad;
1920
1921 so->so_linger = l.l_linger;
1922 if (l.l_onoff)
1923 so->so_options |= SO_LINGER;
1924 else
1925 so->so_options &= ~SO_LINGER;
1926 break;
1927
1928 case SO_DEBUG:
1929 case SO_KEEPALIVE:
1930 case SO_DONTROUTE:
1931 case SO_USELOOPBACK:
1932 case SO_BROADCAST:
1933 case SO_REUSEADDR:
1934 case SO_REUSEPORT:
1935 case SO_OOBINLINE:
1936 case SO_TIMESTAMP:
1937 #ifdef __APPLE__
1938 case SO_DONTTRUNC:
1939 case SO_WANTMORE:
1940 case SO_WANTOOBFLAG:
1941 #endif
1942 error = sooptcopyin(sopt, &optval, sizeof optval,
1943 sizeof optval);
1944 if (error)
1945 goto bad;
1946 if (optval)
1947 so->so_options |= sopt->sopt_name;
1948 else
1949 so->so_options &= ~sopt->sopt_name;
1950 break;
1951
1952 case SO_SNDBUF:
1953 case SO_RCVBUF:
1954 case SO_SNDLOWAT:
1955 case SO_RCVLOWAT:
1956 error = sooptcopyin(sopt, &optval, sizeof optval,
1957 sizeof optval);
1958 if (error)
1959 goto bad;
1960
1961 /*
1962 * Values < 1 make no sense for any of these
1963 * options, so disallow them.
1964 */
1965 if (optval < 1) {
1966 error = EINVAL;
1967 goto bad;
1968 }
1969
1970 switch (sopt->sopt_name) {
1971 case SO_SNDBUF:
1972 case SO_RCVBUF:
1973 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
1974 &so->so_snd : &so->so_rcv,
1975 (u_long) optval) == 0) {
1976 error = ENOBUFS;
1977 goto bad;
1978 }
1979 break;
1980
1981 /*
1982 * Make sure the low-water is never greater than
1983 * the high-water.
1984 */
1985 case SO_SNDLOWAT:
1986 so->so_snd.sb_lowat =
1987 (optval > so->so_snd.sb_hiwat) ?
1988 so->so_snd.sb_hiwat : optval;
1989 break;
1990 case SO_RCVLOWAT:
1991 so->so_rcv.sb_lowat =
1992 (optval > so->so_rcv.sb_hiwat) ?
1993 so->so_rcv.sb_hiwat : optval;
1994 break;
1995 }
1996 break;
1997
1998 case SO_SNDTIMEO:
1999 case SO_RCVTIMEO:
2000 error = sooptcopyin(sopt, &tv, sizeof tv,
2001 sizeof tv);
2002 if (error)
2003 goto bad;
2004
2005 /* assert(hz > 0); */
2006 if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
2007 tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
2008 error = EDOM;
2009 goto bad;
2010 }
2011 /* assert(tick > 0); */
2012 /* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
2013 {
2014 long tmp = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
2015 if (tmp > SHRT_MAX) {
2016 error = EDOM;
2017 goto bad;
2018 }
2019 val = tmp;
2020 }
2021
2022 switch (sopt->sopt_name) {
2023 case SO_SNDTIMEO:
2024 so->so_snd.sb_timeo = val;
2025 break;
2026 case SO_RCVTIMEO:
2027 so->so_rcv.sb_timeo = val;
2028 break;
2029 }
2030 break;
2031
2032 case SO_NKE:
2033 {
2034 struct so_nke nke;
2035 struct NFDescriptor *nf1, *nf2 = NULL;
2036
2037 error = sooptcopyin(sopt, &nke,
2038 sizeof nke, sizeof nke);
2039 if (error)
2040 goto bad;
2041
2042 error = nke_insert(so, &nke);
2043 break;
2044 }
2045
2046 case SO_NOSIGPIPE:
2047 error = sooptcopyin(sopt, &optval, sizeof optval,
2048 sizeof optval);
2049 if (error)
2050 goto bad;
2051 if (optval)
2052 so->so_flags |= SOF_NOSIGPIPE;
2053 else
2054 so->so_flags &= ~SOF_NOSIGPIPE;
2055
2056 break;
2057
2058 case SO_NOADDRERR:
2059 error = sooptcopyin(sopt, &optval, sizeof optval,
2060 sizeof optval);
2061 if (error)
2062 goto bad;
2063 if (optval)
2064 so->so_flags |= SOF_NOADDRAVAIL;
2065 else
2066 so->so_flags &= ~SOF_NOADDRAVAIL;
2067
2068 break;
2069
2070 default:
2071 error = ENOPROTOOPT;
2072 break;
2073 }
2074 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
2075 (void) ((*so->so_proto->pr_ctloutput)
2076 (so, sopt));
2077 }
2078 }
2079 bad:
2080 return (error);
2081 }
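/*
 * Worked example (illustrative) of the SO_SNDTIMEO/SO_RCVTIMEO conversion
 * above, assuming hz == 100 and tick == 10000 usec:
 *
 *	struct timeval tv = { 2, 500000 };	(2.5 seconds)
 *	val = 2 * 100 + 500000 / 10000;		(== 250 ticks)
 *
 * A result larger than SHRT_MAX will not fit in the short sb_timeo and is
 * rejected with EDOM.
 */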
2082
2083 /* Helper routine for getsockopt */
2084 int
2085 sooptcopyout(sopt, buf, len)
2086 struct sockopt *sopt;
2087 void *buf;
2088 size_t len;
2089 {
2090 int error;
2091 size_t valsize;
2092
2093 error = 0;
2094
2095 /*
2096 * Documented get behavior is that we always return a value,
2097 * possibly truncated to fit in the user's buffer.
2098 * Traditional behavior is that we always tell the user
2099 * precisely how much we copied, rather than something useful
2100 * like the total amount we had available for her.
2101 	 * Note that this interface is not idempotent; the entire answer must
2102 	 * be generated ahead of time.
2103 */
2104 valsize = min(len, sopt->sopt_valsize);
2105 sopt->sopt_valsize = valsize;
2106 if (sopt->sopt_val != 0) {
2107 if (sopt->sopt_p != 0)
2108 error = copyout(buf, sopt->sopt_val, valsize);
2109 else
2110 bcopy(buf, sopt->sopt_val, valsize);
2111 }
2112 return error;
2113 }
2114
2115 int
2116 sogetopt(so, sopt)
2117 struct socket *so;
2118 struct sockopt *sopt;
2119 {
2120 int error, optval;
2121 struct linger l;
2122 struct timeval tv;
2123 struct mbuf *m;
2124 struct kextcb *kp;
2125
2126 if (sopt->sopt_dir != SOPT_GET) {
2127 sopt->sopt_dir = SOPT_GET;
2128 }
2129
2130 kp = sotokextcb(so);
2131 while (kp) {
2132 if (kp->e_soif && kp->e_soif->sf_socontrol) {
2133 error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
2134 if (error)
2135 return((error == EJUSTRETURN) ? 0 : error);
2136 }
2137 kp = kp->e_next;
2138 }
2139
2140 error = 0;
2141 if (sopt->sopt_level != SOL_SOCKET) {
2142 if (so->so_proto && so->so_proto->pr_ctloutput) {
2143 return ((*so->so_proto->pr_ctloutput)
2144 (so, sopt));
2145 } else
2146 return (ENOPROTOOPT);
2147 } else {
2148 switch (sopt->sopt_name) {
2149 case SO_LINGER:
2150 l.l_onoff = so->so_options & SO_LINGER;
2151 l.l_linger = so->so_linger;
2152 error = sooptcopyout(sopt, &l, sizeof l);
2153 break;
2154
2155 case SO_USELOOPBACK:
2156 case SO_DONTROUTE:
2157 case SO_DEBUG:
2158 case SO_KEEPALIVE:
2159 case SO_REUSEADDR:
2160 case SO_REUSEPORT:
2161 case SO_BROADCAST:
2162 case SO_OOBINLINE:
2163 case SO_TIMESTAMP:
2164 #ifdef __APPLE__
2165 case SO_DONTTRUNC:
2166 case SO_WANTMORE:
2167 case SO_WANTOOBFLAG:
2168 #endif
2169 optval = so->so_options & sopt->sopt_name;
2170 integer:
2171 error = sooptcopyout(sopt, &optval, sizeof optval);
2172 break;
2173
2174 case SO_TYPE:
2175 optval = so->so_type;
2176 goto integer;
2177
2178 #ifdef __APPLE__
2179 case SO_NREAD:
2180 {
2181 int pkt_total;
2182 struct mbuf *m1;
2183
2184 pkt_total = 0;
2185 m1 = so->so_rcv.sb_mb;
2186 if (so->so_proto->pr_flags & PR_ATOMIC)
2187 {
2188 #if 0
2189 kprintf("SKT CC: %d\n", so->so_rcv.sb_cc);
2190 #endif
2191 while (m1) {
2192 if (m1->m_type == MT_DATA)
2193 pkt_total += m1->m_len;
2194 #if 0
2195 kprintf("CNT: %d/%d\n", m1->m_len, pkt_total);
2196 #endif
2197 m1 = m1->m_next;
2198 }
2199 optval = pkt_total;
2200 } else
2201 optval = so->so_rcv.sb_cc;
2202 #if 0
2203 kprintf("RTN: %d\n", optval);
2204 #endif
2205 goto integer;
2206 }
2207 #endif
2208 case SO_ERROR:
2209 optval = so->so_error;
2210 so->so_error = 0;
2211 goto integer;
2212
2213 case SO_SNDBUF:
2214 optval = so->so_snd.sb_hiwat;
2215 goto integer;
2216
2217 case SO_RCVBUF:
2218 optval = so->so_rcv.sb_hiwat;
2219 goto integer;
2220
2221 case SO_SNDLOWAT:
2222 optval = so->so_snd.sb_lowat;
2223 goto integer;
2224
2225 case SO_RCVLOWAT:
2226 optval = so->so_rcv.sb_lowat;
2227 goto integer;
2228
2229 case SO_SNDTIMEO:
2230 case SO_RCVTIMEO:
2231 optval = (sopt->sopt_name == SO_SNDTIMEO ?
2232 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2233
2234 tv.tv_sec = optval / hz;
2235 tv.tv_usec = (optval % hz) * tick;
2236 error = sooptcopyout(sopt, &tv, sizeof tv);
2237 break;
2238
2239 case SO_NOSIGPIPE:
2240 optval = (so->so_flags & SOF_NOSIGPIPE);
2241 goto integer;
2242
2243 case SO_NOADDRERR:
2244 optval = (so->so_flags & SOF_NOADDRAVAIL);
2245 goto integer;
2246
2247 default:
2248 error = ENOPROTOOPT;
2249 break;
2250 }
2251 return (error);
2252 }
2253 }
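
/*
 * Userland sketch (assumed code) of the Darwin-specific SO_NREAD option
 * handled above: for PR_ATOMIC (datagram) protocols it reports the data
 * bytes of the first queued record, for stream protocols the total bytes
 * buffered in the receive queue.  The helper name is hypothetical.
 */
#include <sys/socket.h>

static int
bytes_readable(int s)
{
	int n = 0;
	socklen_t len = sizeof n;

	if (getsockopt(s, SOL_SOCKET, SO_NREAD, &n, &len) < 0)
		return (-1);
	return (n);
}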
2254
2255 #ifdef __APPLE__
2256 /*
2257 * Network filter support
2258 */
2259 /* Run the list of filters, creating extension control blocks */
2260 int sfilter_init(register struct socket *so)
2261 { struct kextcb *kp, **kpp;
2262 struct protosw *prp;
2263 struct NFDescriptor *nfp;
2264
2265 prp = so->so_proto;
2266 nfp = prp->pr_sfilter.tqh_first; /* non-null */
2267 kpp = &so->so_ext;
2268 kp = NULL;
2269 while (nfp)
2270 { MALLOC(kp, struct kextcb *, sizeof(*kp),
2271 M_TEMP, M_WAITOK);
2272 if (kp == NULL)
2273 return(ENOBUFS); /* so_free will clean up */
2274 *kpp = kp;
2275 kpp = &kp->e_next;
2276 kp->e_next = NULL;
2277 kp->e_fcb = NULL;
2278 kp->e_nfd = nfp;
2279 kp->e_soif = nfp->nf_soif;
2280 kp->e_sout = nfp->nf_soutil;
2281 /*
2282 * Ignore return value for create
2283 * Everyone gets a chance at startup
2284 */
2285 if (kp->e_soif && kp->e_soif->sf_socreate)
2286 (*kp->e_soif->sf_socreate)(so, prp, kp);
2287 nfp = nfp->nf_next.tqe_next;
2288 }
2289 return(0);
2290 }
2291
2292 /*
2293 * Run the list of filters, freeing extension control blocks
2294 * Assumes the soif/soutil blocks have been handled.
2295 */
2296 int sfilter_term(struct socket *so)
2297 { struct kextcb *kp, *kp1;
2298
2299 kp = so->so_ext;
2300 while (kp)
2301 { kp1 = kp->e_next;
2302 /*
2303 * Ignore return code on termination; everyone must
2304 * get terminated.
2305 */
2306 if (kp->e_soif && kp->e_soif->sf_sofree)
2307 kp->e_soif->sf_sofree(so, kp);
2308 FREE(kp, M_TEMP);
2309 kp = kp1;
2310 }
2311 return(0);
2312 }
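
/*
 * A sketch (assumed, with signatures inferred only from the calls above)
 * of the filter hooks that sfilter_init()/sfilter_term() invoke:
 * sf_socreate runs once per socket at attach and sf_sofree once at
 * teardown; both return values are ignored.  The helper names are
 * hypothetical.
 */
static int
example_socreate(struct socket *so, struct protosw *prp, struct kextcb *kp)
{
	kp->e_fcb = NULL;	/* per-socket filter state could hang here */
	return (0);		/* return value is ignored at create */
}

static int
example_sofree(struct socket *so, struct kextcb *kp)
{
	/* release anything stored in kp->e_fcb; return code is ignored */
	return (0);
}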
2313 #endif /* __APPLE__ */
2314
2315 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
2316 int
2317 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2318 {
2319 struct mbuf *m, *m_prev;
2320 int sopt_size = sopt->sopt_valsize;
2321
2322 MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
2323 if (m == 0)
2324 return ENOBUFS;
2325 if (sopt_size > MLEN) {
2326 MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
2327 if ((m->m_flags & M_EXT) == 0) {
2328 m_free(m);
2329 return ENOBUFS;
2330 }
2331 m->m_len = min(MCLBYTES, sopt_size);
2332 } else {
2333 m->m_len = min(MLEN, sopt_size);
2334 }
2335 sopt_size -= m->m_len;
2336 *mp = m;
2337 m_prev = m;
2338
2339 while (sopt_size) {
2340 MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
2341 if (m == 0) {
2342 m_freem(*mp);
2343 return ENOBUFS;
2344 }
2345 if (sopt_size > MLEN) {
2346 MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
2347 if ((m->m_flags & M_EXT) == 0) {
2348 m_freem(*mp);
2349 return ENOBUFS;
2350 }
2351 m->m_len = min(MCLBYTES, sopt_size);
2352 } else {
2353 m->m_len = min(MLEN, sopt_size);
2354 }
2355 sopt_size -= m->m_len;
2356 m_prev->m_next = m;
2357 m_prev = m;
2358 }
2359 return 0;
2360 }
2361
2362 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
2363 int
2364 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2365 {
2366 struct mbuf *m0 = m;
2367
2368 if (sopt->sopt_val == NULL)
2369 return 0;
2370 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2371 if (sopt->sopt_p != NULL) {
2372 int error;
2373
2374 error = copyin(sopt->sopt_val, mtod(m, char *),
2375 m->m_len);
2376 if (error != 0) {
2377 m_freem(m0);
2378 return(error);
2379 }
2380 } else
2381 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2382 sopt->sopt_valsize -= m->m_len;
2383 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2384 m = m->m_next;
2385 }
2386 if (m != NULL) /* chain should have been allocated large enough at ip6_sooptmcopyin() */
2387 panic("soopt_mcopyin");
2388 return 0;
2389 }
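
/*
 * Sketch (assumed, helper name hypothetical) of the intended pairing of
 * the two compatibility routines above: build an mbuf chain sized to
 * sopt_valsize with soopt_getm(), then copy the option bytes into it with
 * soopt_mcopyin() before handing the chain to an old-style handler;
 * soopt_mcopyout() below performs the reverse trip.
 */
static int
example_sopt_to_mbuf(struct sockopt *sopt, struct mbuf **mp)
{
	int error;

	error = soopt_getm(sopt, mp);	/* allocates the chain */
	if (error != 0)
		return (error);
	return (soopt_mcopyin(sopt, *mp)); /* frees the chain on error */
}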
2390
2391 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
2392 int
2393 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2394 {
2395 struct mbuf *m0 = m;
2396 size_t valsize = 0;
2397
2398 if (sopt->sopt_val == NULL)
2399 return 0;
2400 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2401 if (sopt->sopt_p != NULL) {
2402 int error;
2403
2404 error = copyout(mtod(m, char *), sopt->sopt_val,
2405 m->m_len);
2406 if (error != 0) {
2407 m_freem(m0);
2408 return(error);
2409 }
2410 } else
2411 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2412 sopt->sopt_valsize -= m->m_len;
2413 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2414 valsize += m->m_len;
2415 m = m->m_next;
2416 }
2417 if (m != NULL) {
2418 /* a large enough soopt buffer should have been supplied from user-land */
2419 m_freem(m0);
2420 return(EINVAL);
2421 }
2422 sopt->sopt_valsize = valsize;
2423 return 0;
2424 }
2425
2426 void
2427 sohasoutofband(so)
2428 register struct socket *so;
2429 {
2430 struct proc *p;
2431 struct kextcb *kp;
2432
2433 kp = sotokextcb(so);
2434 while (kp) {
2435 if (kp->e_soif && kp->e_soif->sf_sohasoutofband) {
2436 if ((*kp->e_soif->sf_sohasoutofband)(so, kp))
2437 return;
2438 }
2439 kp = kp->e_next;
2440 }
2441 if (so->so_pgid < 0)
2442 gsignal(-so->so_pgid, SIGURG);
2443 else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
2444 psignal(p, SIGURG);
2445 selwakeup(&so->so_rcv.sb_sel);
2446 }
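
/*
 * Userland sketch (assumed code): to receive the SIGURG that
 * sohasoutofband() delivers, a process first claims ownership of the
 * socket (setting so_pgid used above), then fetches the urgent byte with
 * MSG_OOB.  The helper names are hypothetical.
 */
#include <fcntl.h>
#include <signal.h>
#include <sys/socket.h>
#include <unistd.h>

static void
on_urgent(int sig)
{
	/* out-of-band data is pending; a real handler would record this */
}

static void
arm_sigurg(int s)
{
	char c;

	signal(SIGURG, on_urgent);
	fcntl(s, F_SETOWN, getpid());	/* become the SIGURG recipient */
	recv(s, &c, 1, MSG_OOB);	/* fetch the urgent byte */
}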
2447
2448 int
2449 sopoll(struct socket *so, int events, struct ucred *cred, void * wql)
2450 {
2451 struct proc *p = current_proc();
2452 int revents = 0;
2453 int s = splnet();
2454
2455 if (events & (POLLIN | POLLRDNORM))
2456 if (soreadable(so))
2457 revents |= events & (POLLIN | POLLRDNORM);
2458
2459 if (events & (POLLOUT | POLLWRNORM))
2460 if (sowriteable(so))
2461 revents |= events & (POLLOUT | POLLWRNORM);
2462
2463 if (events & (POLLPRI | POLLRDBAND))
2464 if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
2465 revents |= events & (POLLPRI | POLLRDBAND);
2466
2467 if (revents == 0) {
2468 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
2469 /* Darwin sets the flag first, BSD calls selrecord first */
2470 so->so_rcv.sb_flags |= SB_SEL;
2471 selrecord(p, &so->so_rcv.sb_sel, wql);
2472 }
2473
2474 if (events & (POLLOUT | POLLWRNORM)) {
2475 /* Darwin sets the flag first, BSD calls selrecord first */
2476 so->so_snd.sb_flags |= SB_SEL;
2477 selrecord(p, &so->so_snd.sb_sel, wql);
2478 }
2479 }
2480
2481 splx(s);
2482 return (revents);
2483 }
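
/*
 * Userland sketch (assumed code) of the events sopoll() evaluates:
 * POLLIN/POLLRDNORM map to soreadable(), POLLOUT/POLLWRNORM to
 * sowriteable(), and POLLPRI/POLLRDBAND to pending out-of-band data.
 * The helper name is hypothetical.
 */
#include <poll.h>

static int
wait_readable(int s, int timeout_ms)
{
	struct pollfd pfd;

	pfd.fd = s;
	pfd.events = POLLIN | POLLPRI;
	pfd.revents = 0;
	return (poll(&pfd, 1, timeout_ms));	/* > 0 when ready */
}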
2484
2485
2486 int
2487 soo_kqfilter(struct file *fp, struct knote *kn, struct proc *p)
2488 {
2489 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2490 struct sockbuf *sb;
2491 int s;
2492
2493 switch (kn->kn_filter) {
2494 case EVFILT_READ:
2495 if (so->so_options & SO_ACCEPTCONN)
2496 kn->kn_fop = &solisten_filtops;
2497 else
2498 kn->kn_fop = &soread_filtops;
2499 sb = &so->so_rcv;
2500 break;
2501 case EVFILT_WRITE:
2502 kn->kn_fop = &sowrite_filtops;
2503 sb = &so->so_snd;
2504 break;
2505 default:
2506 return (1);
2507 }
2508
2509 if (sb->sb_sel.si_flags & SI_INITED)
2510 return (1);
2511
2512 s = splnet();
2513 if (KNOTE_ATTACH(&sb->sb_sel.si_note, kn))
2514 sb->sb_flags |= SB_KNOTE;
2515 splx(s);
2516 return (0);
2517 }
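
/*
 * Userland sketch (assumed code) of attaching the read filter installed
 * by soo_kqfilter(); NOTE_LOWAT overrides so_rcv.sb_lowat as the
 * readiness threshold, as filt_soread() below shows.  The helper name is
 * hypothetical.
 */
#include <sys/event.h>
#include <sys/time.h>

static int
wait_for_bytes(int kq, int s, int nbytes)
{
	struct kevent kev;

	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, nbytes, NULL);
	/* register and wait for one event in a single kevent() call */
	return (kevent(kq, &kev, 1, &kev, 1, NULL));
}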
2518
2519 static void
2520 filt_sordetach(struct knote *kn)
2521 {
2522 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2523 int s = splnet();
2524
2525 if (so->so_rcv.sb_flags & SB_KNOTE &&
2526 !(so->so_rcv.sb_sel.si_flags & SI_INITED))
2527 if (KNOTE_DETACH(&so->so_rcv.sb_sel.si_note, kn))
2528 so->so_rcv.sb_flags &= ~SB_KNOTE;
2529 splx(s);
2530 }
2531
2532 /*ARGSUSED*/
2533 static int
2534 filt_soread(struct knote *kn, long hint)
2535 {
2536 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2537
2538 kn->kn_data = so->so_rcv.sb_cc;
2539 if (so->so_state & SS_CANTRCVMORE) {
2540 kn->kn_flags |= EV_EOF;
2541 kn->kn_fflags = so->so_error;
2542 return (1);
2543 }
2544 if (so->so_error) /* temporary udp error */
2545 return (1);
2546 if (kn->kn_sfflags & NOTE_LOWAT)
2547 return (kn->kn_data >= kn->kn_sdata);
2548 return (kn->kn_data >= so->so_rcv.sb_lowat);
2549 }
2550
2551 static void
2552 filt_sowdetach(struct knote *kn)
2553 {
2554 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2555 int s = splnet();
2556
2557 if (so->so_snd.sb_flags & SB_KNOTE &&
2558 !(so->so_snd.sb_sel.si_flags & SI_INITED))
2559 if (KNOTE_DETACH(&so->so_snd.sb_sel.si_note, kn))
2560 so->so_snd.sb_flags &= ~SB_KNOTE;
2561 splx(s);
2562 }
2563
2564 /*ARGSUSED*/
2565 static int
2566 filt_sowrite(struct knote *kn, long hint)
2567 {
2568 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2569
2570 kn->kn_data = sbspace(&so->so_snd);
2571 if (so->so_state & SS_CANTSENDMORE) {
2572 kn->kn_flags |= EV_EOF;
2573 kn->kn_fflags = so->so_error;
2574 return (1);
2575 }
2576 if (so->so_error) /* temporary udp error */
2577 return (1);
2578 if (((so->so_state & SS_ISCONNECTED) == 0) &&
2579 (so->so_proto->pr_flags & PR_CONNREQUIRED))
2580 return (0);
2581 if (kn->kn_sfflags & NOTE_LOWAT)
2582 return (kn->kn_data >= kn->kn_sdata);
2583 return (kn->kn_data >= so->so_snd.sb_lowat);
2584 }
2585
2586 /*ARGSUSED*/
2587 static int
2588 filt_solisten(struct knote *kn, long hint)
2589 {
2590 struct socket *so = (struct socket *)kn->kn_fp->f_data;
2591
2592 kn->kn_data = so->so_qlen;
2593 return (! TAILQ_EMPTY(&so->so_comp));
2594 }
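
/*
 * Userland sketch (assumed code): on a listening socket the read filter
 * above reports readiness when the completed-connection queue is
 * non-empty, with kev.data holding so_qlen, so accept() will not block.
 * The helper name is hypothetical.
 */
#include <sys/event.h>
#include <sys/socket.h>

static int
accept_when_ready(int kq, int ls)
{
	struct kevent kev;

	EV_SET(&kev, ls, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, &kev, 1, NULL) <= 0)
		return (-1);
	return (accept(ls, NULL, NULL));	/* kev.data = pending conns */
}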
2595