/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)uipc_socket.c       8.6 (Berkeley) 5/2/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/ev.h>
#include <sys/kdebug.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <kern/zalloc.h>
#include <machine/limits.h>

int so_cache_hw = 0;
int so_cache_timeouts = 0;
int so_cache_max_freed = 0;
int cached_sock_count = 0;
struct socket *socket_cache_head = 0;
struct socket *socket_cache_tail = 0;
u_long so_cache_time = 0;
int so_cache_init_done = 0;
struct zone *so_cache_zone;
extern int get_inpcb_str_size();
extern int get_tcp_str_size();

int socket_debug = 0;
int socket_zone = M_SOCKET;
so_gen_t so_gencnt;    /* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define DBG_LAYER_IN_BEG    NETDBG_CODE(DBG_NETSOCK, 0)
#define DBG_LAYER_IN_END    NETDBG_CODE(DBG_NETSOCK, 2)
#define DBG_LAYER_OUT_BEG   NETDBG_CODE(DBG_NETSOCK, 1)
#define DBG_LAYER_OUT_END   NETDBG_CODE(DBG_NETSOCK, 3)
#define DBG_FNC_SOSEND      NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 1)
#define DBG_FNC_SORECEIVE   NETDBG_CODE(DBG_NETSOCK, (8 << 8))
#define DBG_FNC_SOSHUTDOWN  NETDBG_CODE(DBG_NETSOCK, (9 << 8))

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW, &somaxconn,
    0, "");

/* Should we get a maximum also ??? */
static int sosendminchain = 16384;
SYSCTL_INT(_kern_ipc, OID_AUTO, sosendminchain, CTLFLAG_RW, &sosendminchain,
    0, "");

void so_cache_timer();

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

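/*
 * Initialize the socket layer: kick off the periodic cache-flush timer
 * and create the zone used for cached blocks that pack a socket, an
 * inpcb, and a tcpcb into one allocation.
 */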
void
socketinit()
{
    vm_size_t str_size;

    so_cache_init_done = 1;

    timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));
    str_size = (vm_size_t)(sizeof(struct socket) + 4 +
        get_inpcb_str_size() + 4 + get_tcp_str_size());
    so_cache_zone = zinit(str_size, 120000 * str_size, 8192, "socache zone");
#if TEMPDEBUG
    kprintf("socketinit -- so_cache_zone size is %x\n", str_size);
#endif
}

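/*
 * Hand out a socket from the cache if one is available; otherwise
 * allocate a fresh block from so_cache_zone and carve out space for
 * the socket, its inpcb, and its tcpcb, each aligned on a longword
 * boundary.
 */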
void
cached_sock_alloc(so, waitok)
    struct socket **so;
    int waitok;
{
    caddr_t temp;
    int s;
    register u_long offset;

    s = splnet();
    if (cached_sock_count) {
        cached_sock_count--;
        *so = socket_cache_head;
        if (*so == 0)
            panic("cached_sock_alloc: cached sock is null");

        socket_cache_head = socket_cache_head->cache_next;
        if (socket_cache_head)
            socket_cache_head->cache_prev = 0;
        else
            socket_cache_tail = 0;
        splx(s);

        temp = (*so)->so_saved_pcb;
        bzero((caddr_t)*so, sizeof(struct socket));
#if TEMPDEBUG
        kprintf("cached_sock_alloc - retrieving cached sock %x - count == %d\n", *so,
            cached_sock_count);
#endif
        (*so)->so_saved_pcb = temp;
    }
    else {
#if TEMPDEBUG
        kprintf("Allocating cached sock %x from memory\n", *so);
#endif

        splx(s);
        if (waitok)
            *so = (struct socket *) zalloc(so_cache_zone);
        else
            *so = (struct socket *) zalloc_noblock(so_cache_zone);

        if (*so == 0)
            return;

        bzero((caddr_t)*so, sizeof(struct socket));

        /*
         * Define offsets for extra structures into our single block of
         * memory.  Align extra structures on longword boundaries.
         */

        offset = (u_long) *so;
        offset += sizeof(struct socket);
        if (offset & 0x3) {
            offset += 4;
            offset &= 0xfffffffc;
        }
        (*so)->so_saved_pcb = (caddr_t) offset;
        offset += get_inpcb_str_size();
        if (offset & 0x3) {
            offset += 4;
            offset &= 0xfffffffc;
        }

        ((struct inpcb *) (*so)->so_saved_pcb)->inp_saved_ppcb = (caddr_t) offset;
#if TEMPDEBUG
        kprintf("Allocating cached socket - %x, pcb=%x tcpcb=%x\n", *so,
            (*so)->so_saved_pcb,
            ((struct inpcb *)(*so)->so_saved_pcb)->inp_saved_ppcb);
#endif
    }

    (*so)->cached_in_sock_layer = 1;
}

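/*
 * Return a socket to the cache, or free it outright once the cache
 * already holds MAX_CACHED_SOCKETS entries.  Cached sockets are stamped
 * with so_cache_time so the flush timer can age them out.
 */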
void
cached_sock_free(so)
    struct socket *so;
{
    int s;

    s = splnet();
    if (++cached_sock_count > MAX_CACHED_SOCKETS) {
        --cached_sock_count;
        splx(s);
#if TEMPDEBUG
        kprintf("Freeing overflowed cached socket %x\n", so);
#endif
        zfree(so_cache_zone, (vm_offset_t) so);
    }
    else {
#if TEMPDEBUG
        kprintf("Freeing socket %x into cache\n", so);
#endif
        if (so_cache_hw < cached_sock_count)
            so_cache_hw = cached_sock_count;

        so->cache_next = socket_cache_head;
        so->cache_prev = 0;
        if (socket_cache_head)
            socket_cache_head->cache_prev = so;
        else
            socket_cache_tail = so;

        so->cache_timestamp = so_cache_time;
        socket_cache_head = so;
        splx(s);
    }

#if TEMPDEBUG
    kprintf("Freed cached sock %x into cache - count is %d\n", so, cached_sock_count);
#endif
}

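/*
 * Periodic cache flush: runs off timeout() under the network funnel,
 * freeing sockets that have sat in the cache longer than
 * SO_CACHE_TIME_LIMIT (at most SO_CACHE_MAX_FREE_BATCH per pass), then
 * re-arms itself.
 */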
void
so_cache_timer()
{
    register struct socket *p;
    register int s;
    register int n_freed = 0;
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(network_flock, TRUE);

    ++so_cache_time;

    s = splnet();

    while ((p = socket_cache_tail))
    {
        if ((so_cache_time - p->cache_timestamp) < SO_CACHE_TIME_LIMIT)
            break;

        so_cache_timeouts++;

        if ((socket_cache_tail = p->cache_prev))
            p->cache_prev->cache_next = 0;
        if (--cached_sock_count == 0)
            socket_cache_head = 0;

        splx(s);

        zfree(so_cache_zone, (vm_offset_t) p);

        s = splnet();
        if (++n_freed >= SO_CACHE_MAX_FREE_BATCH)
        {
            so_cache_max_freed++;
            break;
        }
    }
    splx(s);

    timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));

    (void) thread_funnel_set(network_flock, FALSE);
}

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(waitok, dom, type)
    int waitok;
    int dom;
    int type;
{
    struct socket *so;

    if ((dom == PF_INET) && (type == SOCK_STREAM))
        cached_sock_alloc(&so, waitok);
    else
    {
        so = _MALLOC_ZONE(sizeof(*so), socket_zone, M_WAITOK);
        if (so)
            bzero(so, sizeof *so);
    }
    /* XXX race condition for reentrant kernel */

    if (so) {
        so->so_gencnt = ++so_gencnt;
        so->so_zone = socket_zone;
    }

    return so;
}

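/*
 * Create a new socket: look up the protocol switch entry for
 * (dom, type, proto), allocate and initialize the socket, then let the
 * protocol attach itself via pru_attach().  Returns 0 with *aso set on
 * success, or an errno.
 */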
int
socreate(dom, aso, type, proto)
    int dom;
    struct socket **aso;
    register int type;
    int proto;
{
    struct proc *p = current_proc();
    register struct protosw *prp;
    struct socket *so;
    register int error = 0;

    if (proto)
        prp = pffindproto(dom, proto, type);
    else
        prp = pffindtype(dom, type);
    if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
        return (EPROTONOSUPPORT);
    if (prp->pr_type != type)
        return (EPROTOTYPE);
    so = soalloc(p != 0, dom, type);
    if (so == 0)
        return (ENOBUFS);

    TAILQ_INIT(&so->so_incomp);
    TAILQ_INIT(&so->so_comp);
    so->so_type = type;

    if (p != 0) {
        if (p->p_ucred->cr_uid == 0)
            so->so_state = SS_PRIV;

        so->so_uid = p->p_ucred->cr_uid;
    }

    so->so_proto = prp;
    so->so_rcv.sb_flags |= SB_RECV;    /* XXX */
    if (prp->pr_sfilter.tqh_first)
        error = sfilter_init(so);
    if (error == 0)
        error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);

    if (error) {
        so->so_state |= SS_NOFDREF;
        sofree(so);
        return (error);
    }
    prp->pr_domain->dom_refs++;
    so->so_rcv.sb_so = so->so_snd.sb_so = so;
    TAILQ_INIT(&so->so_evlist);
    *aso = so;
    return (0);
}

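/*
 * Bind a name to a socket: the protocol's pru_bind() does the real
 * work, after which any NKE filters with an sf_sobind hook get a
 * chance to veto or adjust the result.
 */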
int
sobind(so, nam)
    struct socket *so;
    struct sockaddr *nam;
{
    struct proc *p = current_proc();
    int error;
    struct kextcb *kp;
    int s = splnet();

    error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
    if (error == 0) {    /* ??? */
        kp = sotokextcb(so);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_sobind) {
                error = (*kp->e_soif->sf_sobind)(so, nam, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }
    splx(s);
    return (error);
}

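/*
 * Release a socket structure, returning cached-layer sockets to the
 * socket cache and zone-allocated ones to their zone.
 */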
void
sodealloc(so)
    struct socket *so;
{
    so->so_gencnt = ++so_gencnt;

    if (so->cached_in_sock_layer == 1)
        cached_sock_free(so);
    else
        _FREE_ZONE(so, sizeof(*so), so->so_zone);
}

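/*
 * Mark the socket as willing to accept connections (SO_ACCEPTCONN)
 * and clamp the backlog to somaxconn; NKE sf_solisten hooks run last.
 */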
int
solisten(so, backlog)
    register struct socket *so;
    int backlog;
{
    struct kextcb *kp;
    struct proc *p = current_proc();
    int s, error;

    s = splnet();
    error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
    if (error) {
        splx(s);
        return (error);
    }
    if (TAILQ_EMPTY(&so->so_comp))
        so->so_options |= SO_ACCEPTCONN;
    if (backlog < 0 || backlog > somaxconn)
        backlog = somaxconn;
    so->so_qlimit = backlog;
    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_solisten) {
            error = (*kp->e_soif->sf_solisten)(so, kp);
            if (error) {
                if (error == EJUSTRETURN)
                    break;
                splx(s);
                return (error);
            }
        }
        kp = kp->e_next;
    }

    splx(s);
    return (0);
}

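/*
 * Free a socket if nothing still references it: the protocol control
 * block must be gone and the socket must not be sitting on a listening
 * socket's accept queue.  NKE sf_sofree hooks run first; any hook may
 * claim the socket by returning nonzero.
 */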
void
sofree(so)
    register struct socket *so;
{
    int error;
    struct kextcb *kp;
    struct socket *head = so->so_head;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_sofree) {
            error = (*kp->e_soif->sf_sofree)(so, kp);
            if (error) {
                selthreadclear(&so->so_snd.sb_sel);
                selthreadclear(&so->so_rcv.sb_sel);
                return;    /* void fn */
            }
        }
        kp = kp->e_next;
    }

    if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
        selthreadclear(&so->so_snd.sb_sel);
        selthreadclear(&so->so_rcv.sb_sel);
        return;
    }
    if (head != NULL) {
        if (so->so_state & SS_INCOMP) {
            TAILQ_REMOVE(&head->so_incomp, so, so_list);
            head->so_incqlen--;
        } else if (so->so_state & SS_COMP) {
            /*
             * We must not decommission a socket that's
             * on the accept(2) queue.  If we do, then
             * accept(2) may hang after select(2) indicated
             * that the listening socket was ready.
             */
            selthreadclear(&so->so_snd.sb_sel);
            selthreadclear(&so->so_rcv.sb_sel);
            return;
        } else {
            panic("sofree: not queued");
        }
        head->so_qlen--;
        so->so_state &= ~(SS_INCOMP|SS_COMP);
        so->so_head = NULL;
    }

    selthreadclear(&so->so_snd.sb_sel);
    sbrelease(&so->so_snd);
    sorflush(so);
    sfilter_term(so);
    sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
    register struct socket *so;
{
    int s = splnet();    /* conservative */
    int error = 0;
    struct kextcb *kp;

#if FB31SIG
    funsetown(so->so_pgid);
#endif
    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_soclose) {
            error = (*kp->e_soif->sf_soclose)(so, kp);
            if (error) {
                splx(s);
                return ((error == EJUSTRETURN) ? 0 : error);
            }
        }
        kp = kp->e_next;
    }

    if (so->so_options & SO_ACCEPTCONN) {
        struct socket *sp, *sonext;

        sp = TAILQ_FIRST(&so->so_incomp);
        for (; sp != NULL; sp = sonext) {
            sonext = TAILQ_NEXT(sp, so_list);
            (void) soabort(sp);
        }
        for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
            sonext = TAILQ_NEXT(sp, so_list);
            /* Dequeue from so_comp since sofree() won't do it */
            TAILQ_REMOVE(&so->so_comp, sp, so_list);
            so->so_qlen--;
            sp->so_state &= ~SS_COMP;
            sp->so_head = NULL;
            (void) soabort(sp);
        }
    }
    if (so->so_pcb == 0)
        goto discard;
    if (so->so_state & SS_ISCONNECTED) {
        if ((so->so_state & SS_ISDISCONNECTING) == 0) {
            error = sodisconnect(so);
            if (error)
                goto drop;
        }
        if (so->so_options & SO_LINGER) {
            if ((so->so_state & SS_ISDISCONNECTING) &&
                (so->so_state & SS_NBIO))
                goto drop;
            while (so->so_state & SS_ISCONNECTED) {
                error = tsleep((caddr_t)&so->so_timeo,
                    PSOCK | PCATCH, "soclos", so->so_linger);
                if (error)
                    break;
            }
        }
    }
drop:
    if (so->so_pcb) {
        int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
        if (error == 0)
            error = error2;
    }
discard:
    if (so->so_pcb && so->so_state & SS_NOFDREF)
        panic("soclose: NOFDREF");
    so->so_state |= SS_NOFDREF;
    so->so_proto->pr_domain->dom_refs--;
    evsofree(so);
    sofree(so);
    splx(s);
    return (error);
}

/*
 * Must be called at splnet...
 */
int
soabort(so)
    struct socket *so;
{
    return (*so->so_proto->pr_usrreqs->pru_abort)(so);
}

int
soaccept(so, nam)
    register struct socket *so;
    struct sockaddr **nam;
{
    int s = splnet();
    int error;
    struct kextcb *kp;

    if ((so->so_state & SS_NOFDREF) == 0)
        panic("soaccept: !NOFDREF");
    so->so_state &= ~SS_NOFDREF;
    error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
    if (error == 0) {
        kp = sotokextcb(so);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_soaccept) {
                error = (*kp->e_soif->sf_soaccept)(so, nam, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }

    splx(s);
    return (error);
}

int
soconnect(so, nam)
    register struct socket *so;
    struct sockaddr *nam;
{
    int s;
    int error;
    struct proc *p = current_proc();
    struct kextcb *kp;

    if (so->so_options & SO_ACCEPTCONN)
        return (EOPNOTSUPP);
    s = splnet();
    /*
     * If protocol is connection-based, can only connect once.
     * Otherwise, if connected, try to disconnect first.
     * This allows user to disconnect by connecting to, e.g.,
     * a null address.
     */
    if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
        ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
        (error = sodisconnect(so))))
        error = EISCONN;
    else {
        error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
        if (error == 0) {
            kp = sotokextcb(so);
            while (kp) {
                if (kp->e_soif && kp->e_soif->sf_soconnect) {
                    error = (*kp->e_soif->sf_soconnect)(so, nam, kp);
                    if (error) {
                        if (error == EJUSTRETURN)
                            break;
                        splx(s);
                        return (error);
                    }
                }
                kp = kp->e_next;
            }
        }
    }

    splx(s);
    return (error);
}

int
soconnect2(so1, so2)
    register struct socket *so1;
    struct socket *so2;
{
    int s = splnet();
    int error;
    struct kextcb *kp;

    error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
    if (error == 0) {
        kp = sotokextcb(so1);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_soconnect2) {
                error = (*kp->e_soif->sf_soconnect2)(so1, so2, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }
    splx(s);
    return (error);
}

int
sodisconnect(so)
    register struct socket *so;
{
    int s = splnet();
    int error;
    struct kextcb *kp;

    if ((so->so_state & SS_ISCONNECTED) == 0) {
        error = ENOTCONN;
        goto bad;
    }
    if (so->so_state & SS_ISDISCONNECTING) {
        error = EALREADY;
        goto bad;
    }
    error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);

    if (error == 0) {
        kp = sotokextcb(so);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_sodisconnect) {
                error = (*kp->e_soif->sf_sodisconnect)(so, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }

bad:
    splx(s);
    return (error);
}

#define SBLOCKWAIT(f)   (((f) & MSG_DONTWAIT) ? M_DONTWAIT : M_WAIT)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 * Experiment:
 * MSG_HOLD: go thru most of sosend(), but just enqueue the mbuf
 * MSG_SEND: go thru as for MSG_HOLD on current fragment, then
 *  point at the mbuf chain being constructed and go from there.
 */
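/*
 * A minimal sketch of the usual calling pattern, for reference (the
 * syscall layer builds the uio before getting here; the variable names
 * below are illustrative, not from this file):
 *
 *      struct sockaddr *to;    destination, or NULL on a connected socket
 *      struct uio auio;        describes the user's iovecs
 *
 *      error = sosend(so, to, &auio, (struct mbuf *)0,
 *          (struct mbuf *)0, flags);
 *
 * Exactly one of `uio' and `top' describes the data to send.
 */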
int
sosend(so, addr, uio, top, control, flags)
    register struct socket *so;
    struct sockaddr *addr;
    struct uio *uio;
    struct mbuf *top;
    struct mbuf *control;
    int flags;
{
    struct mbuf **mp;
    register struct mbuf *m;
    register long space, len, resid;
    int clen = 0, error, s, dontroute, mlen, sendflags;
    int atomic = sosendallatonce(so) || top;
    struct proc *p = current_proc();
    struct kextcb *kp;

    if (uio)
        resid = uio->uio_resid;
    else
        resid = top->m_pkthdr.len;

    KERNEL_DEBUG((DBG_FNC_SOSEND | DBG_FUNC_START),
        so,
        resid,
        so->so_snd.sb_cc,
        so->so_snd.sb_lowat,
        so->so_snd.sb_hiwat);

    /*
     * In theory resid should be unsigned.
     * However, space must be signed, as it might be less than 0
     * if we over-committed, and we must use a signed comparison
     * of space and resid.  On the other hand, a negative resid
     * causes us to loop sending 0-length segments to the protocol.
     *
     * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
     * type sockets since that's an error.
     */
    if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
        error = EINVAL;
        goto out;
    }

    dontroute =
        (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
        (so->so_proto->pr_flags & PR_ATOMIC);
    if (p)
        p->p_stats->p_ru.ru_msgsnd++;
    if (control)
        clen = control->m_len;
#define snderr(errno)   { error = errno; splx(s); goto release; }

restart:
    error = sblock(&so->so_snd, SBLOCKWAIT(flags));
    if (error)
        goto out;
    do {
        s = splnet();
        if (so->so_state & SS_CANTSENDMORE)
            snderr(EPIPE);
        if (so->so_error) {
            error = so->so_error;
            so->so_error = 0;
            splx(s);
            goto release;
        }
        if ((so->so_state & SS_ISCONNECTED) == 0) {
            /*
             * `sendto' and `sendmsg' are allowed on a connection-
             * based socket if it supports implied connect.
             * Return ENOTCONN if not connected and no address is
             * supplied.
             */
            if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
                (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
                if ((so->so_state & SS_ISCONFIRMING) == 0 &&
                    !(resid == 0 && clen != 0))
                    snderr(ENOTCONN);
            } else if (addr == 0 && !(flags & MSG_HOLD))
                snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
                    ENOTCONN : EDESTADDRREQ);
        }
        space = sbspace(&so->so_snd);
        if (flags & MSG_OOB)
            space += 1024;
        if ((atomic && resid > so->so_snd.sb_hiwat) ||
            clen > so->so_snd.sb_hiwat)
            snderr(EMSGSIZE);
        if (space < resid + clen && uio &&
            (atomic || space < so->so_snd.sb_lowat || space < clen)) {
            if (so->so_state & SS_NBIO)
                snderr(EWOULDBLOCK);
            sbunlock(&so->so_snd);
            error = sbwait(&so->so_snd);
            splx(s);
            if (error)
                goto out;
            goto restart;
        }
        splx(s);
        mp = &top;
        space -= clen;
        do {
            if (uio == NULL) {
                /*
                 * Data is prepackaged in "top".
                 */
                resid = 0;
                if (flags & MSG_EOR)
                    top->m_flags |= M_EOR;
            } else {
                boolean_t funnel_state = TRUE;
                int chainmbufs = (sosendminchain > 0 && resid >= sosendminchain);

                if (chainmbufs)
                    funnel_state = thread_funnel_set(network_flock, FALSE);
                do {
                    KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_NONE, -1, 0, 0, 0, 0);
                    if (top == 0) {
                        MGETHDR(m, M_WAIT, MT_DATA);
                        mlen = MHLEN;
                        m->m_pkthdr.len = 0;
                        m->m_pkthdr.rcvif = (struct ifnet *)0;
                    } else {
                        MGET(m, M_WAIT, MT_DATA);
                        mlen = MLEN;
                    }
                    if (resid >= MINCLSIZE) {
                        MCLGET(m, M_WAIT);
                        if ((m->m_flags & M_EXT) == 0)
                            goto nopages;
                        mlen = MCLBYTES;
                        len = min(min(mlen, resid), space);
                    } else {
nopages:
                        len = min(min(mlen, resid), space);
                        /*
                         * For datagram protocols, leave room
                         * for protocol headers in first mbuf.
                         */
                        if (atomic && top == 0 && len < mlen)
                            MH_ALIGN(m, len);
                    }
                    KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_NONE, -1, 0, 0, 0, 0);
                    space -= len;
                    error = uiomove(mtod(m, caddr_t), (int)len, uio);
                    resid = uio->uio_resid;

                    m->m_len = len;
                    *mp = m;
                    top->m_pkthdr.len += len;
                    if (error)
                        break;
                    mp = &m->m_next;
                    if (resid <= 0) {
                        if (flags & MSG_EOR)
                            top->m_flags |= M_EOR;
                        break;
                    }
                } while (space > 0 && (chainmbufs || atomic || resid < MINCLSIZE));
                if (chainmbufs)
                    funnel_state = thread_funnel_set(network_flock, TRUE);
                if (error)
                    goto release;
            }

            if (flags & (MSG_HOLD|MSG_SEND)) {
                /* Enqueue for later, go away if HOLD */
                register struct mbuf *mb1;
                if (so->so_temp && (flags & MSG_FLUSH)) {
                    m_freem(so->so_temp);
                    so->so_temp = NULL;
                }
                if (so->so_temp)
                    so->so_tail->m_next = top;
                else
                    so->so_temp = top;
                mb1 = top;
                while (mb1->m_next)
                    mb1 = mb1->m_next;
                so->so_tail = mb1;
                if (flags & MSG_HOLD) {
                    top = NULL;
                    goto release;
                }
                top = so->so_temp;
            }
            if (dontroute)
                so->so_options |= SO_DONTROUTE;
            s = splnet();    /* XXX */
            kp = sotokextcb(so);
            /* Compute flags here, for pru_send and NKEs */
            sendflags = (flags & MSG_OOB) ? PRUS_OOB :
                /*
                 * If the user set MSG_EOF, the protocol
                 * understands this flag and nothing left to
                 * send then use PRU_SEND_EOF instead of PRU_SEND.
                 */
                ((flags & MSG_EOF) &&
                 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
                 (resid <= 0)) ?
                    PRUS_EOF :
                /* If there is more to send set PRUS_MORETOCOME */
                (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0;
            while (kp) {
                if (kp->e_soif && kp->e_soif->sf_sosend) {
                    error = (*kp->e_soif->sf_sosend)(so, &addr,
                        &uio, &top,
                        &control,
                        &sendflags,
                        kp);
                    if (error) {
                        splx(s);
                        if (error == EJUSTRETURN) {
                            sbunlock(&so->so_snd);
                            return (0);
                        }
                        goto release;
                    }
                }
                kp = kp->e_next;
            }

            error = (*so->so_proto->pr_usrreqs->pru_send)(so,
                sendflags, top, addr, control, p);
            splx(s);
            if (flags & MSG_SEND)
                so->so_temp = NULL;

            if (dontroute)
                so->so_options &= ~SO_DONTROUTE;
            clen = 0;
            control = 0;
            top = 0;
            mp = &top;
            if (error)
                goto release;
        } while (resid && space > 0);
    } while (resid);

release:
    sbunlock(&so->so_snd);
out:
    if (top)
        m_freem(top);
    if (control)
        m_freem(control);

    KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_END,
        so,
        resid,
        so->so_snd.sb_cc,
        space,
        error);

    return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
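/*
 * Layout of one sockbuf record as consumed below, for a protocol that
 * supplies addresses (PR_ADDR) and ancillary data:
 *
 *      MT_SONAME -> MT_CONTROL ... -> MT_DATA ...   (linked by m_next)
 *          |
 *      m_nextpkt
 *          |
 *          v
 *      next record
 */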
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
    register struct socket *so;
    struct sockaddr **psa;
    struct uio *uio;
    struct mbuf **mp0;
    struct mbuf **controlp;
    int *flagsp;
{
    register struct mbuf *m, **mp;
    register int flags, len, error, s, offset;
    struct protosw *pr = so->so_proto;
    struct mbuf *nextrecord;
    int moff, type = 0;
    int orig_resid = uio->uio_resid;
    struct kextcb *kp;

    KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START,
        so,
        uio->uio_resid,
        so->so_rcv.sb_cc,
        so->so_rcv.sb_lowat,
        so->so_rcv.sb_hiwat);

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_soreceive) {
            error = (*kp->e_soif->sf_soreceive)(so, psa, &uio,
                mp0, controlp,
                flagsp, kp);
            if (error)
                return ((error == EJUSTRETURN) ? 0 : error);
        }
        kp = kp->e_next;
    }

    mp = mp0;
    if (psa)
        *psa = 0;
    if (controlp)
        *controlp = 0;
    if (flagsp)
        flags = *flagsp & ~MSG_EOR;
    else
        flags = 0;
    /*
     * When SO_WANTOOBFLAG is set we try to get out-of-band data
     * regardless of the flags argument.  Here is the case where
     * out-of-band data is not inline.
     */
    if ((flags & MSG_OOB) ||
        ((so->so_options & SO_WANTOOBFLAG) != 0 &&
        (so->so_options & SO_OOBINLINE) == 0 &&
        (so->so_oobmark || (so->so_state & SS_RCVATMARK)))) {
        m = m_get(M_WAIT, MT_DATA);
        error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
        if (error)
            goto bad;
        do {
            error = uiomove(mtod(m, caddr_t),
                (int) min(uio->uio_resid, m->m_len), uio);
            m = m_free(m);
        } while (uio->uio_resid && error == 0 && m);
bad:
        if (m)
            m_freem(m);
        if ((so->so_options & SO_WANTOOBFLAG) != 0) {
            if (error == EWOULDBLOCK || error == EINVAL) {
                /*
                 * Let's try to get normal data:
                 * EWOULDBLOCK: out-of-band data not received yet;
                 * EINVAL: out-of-band data already read.
                 */
                error = 0;
                goto nooob;
            } else if (error == 0 && flagsp)
                *flagsp |= MSG_OOB;
        }
        KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error, 0, 0, 0, 0);
        return (error);
    }
nooob:
    if (mp)
        *mp = (struct mbuf *)0;
    if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
        (*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
    if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags)))) {
        KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error, 0, 0, 0, 0);
        return (error);
    }
    s = splnet();

    m = so->so_rcv.sb_mb;
    /*
     * If we have less data than requested, block awaiting more
     * (subject to any timeout) if:
     *   1. the current count is less than the low water mark, or
     *   2. MSG_WAITALL is set, and it is possible to do the entire
     *      receive operation at once if we block (resid <= hiwat), or
     *   3. MSG_DONTWAIT is not set.
     * If MSG_WAITALL is set but resid is larger than the receive buffer,
     * we have to do the receive in sections, and thus risk returning
     * a short count if a timeout or signal occurs after we start.
     */
    if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
        so->so_rcv.sb_cc < uio->uio_resid) &&
        (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
        ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
        m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
        KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
        if (so->so_error) {
            if (m)
                goto dontblock;
            error = so->so_error;
            if ((flags & MSG_PEEK) == 0)
                so->so_error = 0;
            goto release;
        }
        if (so->so_state & SS_CANTRCVMORE) {
            if (m)
                goto dontblock;
            else
                goto release;
        }
        for (; m; m = m->m_next)
            if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
                m = so->so_rcv.sb_mb;
                goto dontblock;
            }
        if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
            (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
            error = ENOTCONN;
            goto release;
        }
        if (uio->uio_resid == 0)
            goto release;
        if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
            error = EWOULDBLOCK;
            goto release;
        }
        sbunlock(&so->so_rcv);
        if (socket_debug)
            printf("Waiting for socket data\n");
        error = sbwait(&so->so_rcv);
        if (socket_debug)
            printf("SORECEIVE - sbwait returned %d\n", error);
        splx(s);
        if (error) {
            KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error, 0, 0, 0, 0);
            return (error);
        }
        goto restart;
    }
dontblock:
#ifdef notyet /* XXXX */
    if (uio->uio_procp)
        uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
#endif
    nextrecord = m->m_nextpkt;
    if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) {
        KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
        orig_resid = 0;
        if (psa)
            *psa = dup_sockaddr(mtod(m, struct sockaddr *),
                mp0 == 0);
        if (flags & MSG_PEEK) {
            m = m->m_next;
        } else {
            sbfree(&so->so_rcv, m);
            MFREE(m, so->so_rcv.sb_mb);
            m = so->so_rcv.sb_mb;
        }
    }
    while (m && m->m_type == MT_CONTROL && error == 0) {
        if (flags & MSG_PEEK) {
            if (controlp)
                *controlp = m_copy(m, 0, m->m_len);
            m = m->m_next;
        } else {
            sbfree(&so->so_rcv, m);
            if (controlp) {
                if (pr->pr_domain->dom_externalize &&
                    mtod(m, struct cmsghdr *)->cmsg_type ==
                    SCM_RIGHTS)
                    error = (*pr->pr_domain->dom_externalize)(m);
                *controlp = m;
                so->so_rcv.sb_mb = m->m_next;
                m->m_next = 0;
                m = so->so_rcv.sb_mb;
            } else {
                MFREE(m, so->so_rcv.sb_mb);
                m = so->so_rcv.sb_mb;
            }
        }
        if (controlp) {
            orig_resid = 0;
            controlp = &(*controlp)->m_next;
        }
    }
    if (m) {
        if ((flags & MSG_PEEK) == 0)
            m->m_nextpkt = nextrecord;
        type = m->m_type;
        if (type == MT_OOBDATA)
            flags |= MSG_OOB;
    }
    moff = 0;
    offset = 0;
    while (m && uio->uio_resid > 0 && error == 0) {
        if (m->m_type == MT_OOBDATA) {
            if (type != MT_OOBDATA)
                break;
        } else if (type == MT_OOBDATA)
            break;
#if 0
/*
 * This assertion needs rework.  The trouble is that AppleTalk uses many
 * mbuf types (NOT listed in mbuf.h!) which will trigger this panic.
 * For now just remove the assertion...  CSM 9/98
 */
        else
            KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
                ("receive 3"));
#endif
        /*
         * Make sure to always set the MSG_OOB flag when getting
         * out-of-band data inline.
         */
        if ((so->so_options & SO_WANTOOBFLAG) != 0 &&
            (so->so_options & SO_OOBINLINE) != 0 &&
            (so->so_state & SS_RCVATMARK) != 0) {
            flags |= MSG_OOB;
        }
        so->so_state &= ~SS_RCVATMARK;
        len = uio->uio_resid;
        if (so->so_oobmark && len > so->so_oobmark - offset)
            len = so->so_oobmark - offset;
        if (len > m->m_len - moff)
            len = m->m_len - moff;
        /*
         * If mp is set, just pass back the mbufs.
         * Otherwise copy them out via the uio, then free.
         * Sockbuf must be consistent here (points to current mbuf,
         * it points to next record) when we drop priority;
         * we must note any additions to the sockbuf when we
         * block interrupts again.
         */
        if (mp == 0) {
            splx(s);
            error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
            s = splnet();
            if (error)
                goto release;
        } else
            uio->uio_resid -= len;
        if (len == m->m_len - moff) {
            if (m->m_flags & M_EOR)
                flags |= MSG_EOR;
            if (flags & MSG_PEEK) {
                m = m->m_next;
                moff = 0;
            } else {
                nextrecord = m->m_nextpkt;
                sbfree(&so->so_rcv, m);
                if (mp) {
                    *mp = m;
                    mp = &m->m_next;
                    so->so_rcv.sb_mb = m = m->m_next;
                    *mp = (struct mbuf *)0;
                } else {
                    MFREE(m, so->so_rcv.sb_mb);
                    m = so->so_rcv.sb_mb;
                }
                if (m)
                    m->m_nextpkt = nextrecord;
            }
        } else {
            if (flags & MSG_PEEK)
                moff += len;
            else {
                if (mp)
                    *mp = m_copym(m, 0, len, M_WAIT);
                m->m_data += len;
                m->m_len -= len;
                so->so_rcv.sb_cc -= len;
            }
        }
        if (so->so_oobmark) {
            if ((flags & MSG_PEEK) == 0) {
                so->so_oobmark -= len;
                if (so->so_oobmark == 0) {
                    so->so_state |= SS_RCVATMARK;
                    postevent(so, 0, EV_OOB);
                    break;
                }
            } else {
                offset += len;
                if (offset == so->so_oobmark)
                    break;
            }
        }
        if (flags & MSG_EOR)
            break;
        /*
         * If the MSG_WAITALL flag is set (for non-atomic socket),
         * we must not quit until "uio->uio_resid == 0" or an error
         * termination.  If a signal/timeout occurs, return
         * with a short count but without error.
         * Keep sockbuf locked against other readers.
         */
        while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
            !sosendallatonce(so) && !nextrecord) {
            if (so->so_error || so->so_state & SS_CANTRCVMORE)
                break;
            error = sbwait(&so->so_rcv);
            if (error) {
                sbunlock(&so->so_rcv);
                splx(s);
                KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, 0, 0, 0, 0, 0);
                return (0);
            }
            m = so->so_rcv.sb_mb;
            if (m)
                nextrecord = m->m_nextpkt;
        }
    }

    if (m && pr->pr_flags & PR_ATOMIC) {
        if (so->so_options & SO_DONTTRUNC)
            flags |= MSG_RCVMORE;
        else {
            flags |= MSG_TRUNC;
            if ((flags & MSG_PEEK) == 0)
                (void) sbdroprecord(&so->so_rcv);
        }
    }
    if ((flags & MSG_PEEK) == 0) {
        if (m == 0)
            so->so_rcv.sb_mb = nextrecord;
        if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
            (*pr->pr_usrreqs->pru_rcvd)(so, flags);
    }
    if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0)
        flags |= MSG_HAVEMORE;
    if (orig_resid == uio->uio_resid && orig_resid &&
        (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
        sbunlock(&so->so_rcv);
        splx(s);
        goto restart;
    }

    if (flagsp)
        *flagsp |= flags;
release:
    sbunlock(&so->so_rcv);
    splx(s);

    KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END,
        so,
        uio->uio_resid,
        so->so_rcv.sb_cc,
        0,
        error);

    return (error);
}

int
soshutdown(so, how)
    register struct socket *so;
    register int how;
{
    register struct protosw *pr = so->so_proto;
    struct kextcb *kp;
    int ret;

    KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_START, 0, 0, 0, 0, 0);
    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_soshutdown) {
            ret = (*kp->e_soif->sf_soshutdown)(so, how, kp);
            if (ret)
                return ((ret == EJUSTRETURN) ? 0 : ret);
        }
        kp = kp->e_next;
    }

    how++;
    if (how & FREAD) {
        sorflush(so);
        postevent(so, 0, EV_RCLOSED);
    }
    if (how & FWRITE) {
        ret = ((*pr->pr_usrreqs->pru_shutdown)(so));
        postevent(so, 0, EV_WCLOSED);
        KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0, 0, 0, 0, 0);
        return (ret);
    }

    KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0, 0, 0, 0, 0);
    return (0);
}

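/*
 * Flush the receive buffer: mark the socket unable to receive more,
 * detach the buffer contents under splimp(), and dispose of them
 * (letting the domain reclaim any in-flight rights first).
 */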
void
sorflush(so)
    register struct socket *so;
{
    register struct sockbuf *sb = &so->so_rcv;
    register struct protosw *pr = so->so_proto;
    register int s, error;
    struct sockbuf asb;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_sorflush) {
            if ((*kp->e_soif->sf_sorflush)(so, kp))
                return;
        }
        kp = kp->e_next;
    }

    sb->sb_flags |= SB_NOINTR;
    (void) sblock(sb, M_WAIT);
    s = splimp();
    socantrcvmore(so);
    sbunlock(sb);
    selthreadclear(&sb->sb_sel);
    asb = *sb;
    bzero((caddr_t)sb, sizeof (*sb));
    splx(s);
    if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
        (*pr->pr_domain->dom_dispose)(asb.sb_mb);
    sbrelease(&asb);
}

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(sopt, buf, len, minlen)
    struct sockopt *sopt;
    void *buf;
    size_t len;
    size_t minlen;
{
    size_t valsize;

    /*
     * If the user gives us more than we wanted, we ignore it,
     * but if we don't get the minimum length the caller
     * wants, we return EINVAL.  On success, sopt->sopt_valsize
     * is set to however much we actually retrieved.
     */
    if ((valsize = sopt->sopt_valsize) < minlen)
        return EINVAL;
    if (valsize > len)
        sopt->sopt_valsize = valsize = len;

    if (sopt->sopt_p != 0)
        return (copyin(sopt->sopt_val, buf, valsize));

    bcopy(sopt->sopt_val, buf, valsize);
    return 0;
}

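/*
 * A typical use from a protocol's pr_ctloutput() routine (an
 * illustrative sketch, not code from this file):
 *
 *      int optval;
 *      int error;
 *
 *      error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *      if (error)
 *              return (error);
 *      (optval now holds the value the user passed to setsockopt())
 */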

int
sosetopt(so, sopt)
    struct socket *so;
    struct sockopt *sopt;
{
    int error, optval;
    struct linger l;
    struct timeval tv;
    short val;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_socontrol) {
            error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
            if (error)
                return ((error == EJUSTRETURN) ? 0 : error);
        }
        kp = kp->e_next;
    }

    error = 0;
    if (sopt->sopt_level != SOL_SOCKET) {
        if (so->so_proto && so->so_proto->pr_ctloutput)
            return ((*so->so_proto->pr_ctloutput)(so, sopt));
        error = ENOPROTOOPT;
    } else {
        switch (sopt->sopt_name) {
        case SO_LINGER:
            error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
            if (error)
                goto bad;

            so->so_linger = l.l_linger;
            if (l.l_onoff)
                so->so_options |= SO_LINGER;
            else
                so->so_options &= ~SO_LINGER;
            break;

        case SO_DEBUG:
        case SO_KEEPALIVE:
        case SO_DONTROUTE:
        case SO_USELOOPBACK:
        case SO_BROADCAST:
        case SO_REUSEADDR:
        case SO_REUSEPORT:
        case SO_OOBINLINE:
        case SO_TIMESTAMP:
        case SO_DONTTRUNC:
        case SO_WANTMORE:
        case SO_WANTOOBFLAG:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                goto bad;
            if (optval)
                so->so_options |= sopt->sopt_name;
            else
                so->so_options &= ~sopt->sopt_name;
            break;

        case SO_SNDBUF:
        case SO_RCVBUF:
        case SO_SNDLOWAT:
        case SO_RCVLOWAT:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                goto bad;

            /*
             * Values < 1 make no sense for any of these
             * options, so disallow them.
             */
            if (optval < 1) {
                error = EINVAL;
                goto bad;
            }

            switch (sopt->sopt_name) {
            case SO_SNDBUF:
            case SO_RCVBUF:
                if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
                    &so->so_snd : &so->so_rcv,
                    (u_long) optval) == 0) {
                    error = ENOBUFS;
                    goto bad;
                }
                break;

            /*
             * Make sure the low-water is never greater than
             * the high-water.
             */
            case SO_SNDLOWAT:
                so->so_snd.sb_lowat =
                    (optval > so->so_snd.sb_hiwat) ?
                    so->so_snd.sb_hiwat : optval;
                break;
            case SO_RCVLOWAT:
                so->so_rcv.sb_lowat =
                    (optval > so->so_rcv.sb_hiwat) ?
                    so->so_rcv.sb_hiwat : optval;
                break;
            }
            break;

        case SO_SNDTIMEO:
        case SO_RCVTIMEO:
            error = sooptcopyin(sopt, &tv, sizeof tv,
                sizeof tv);
            if (error)
                goto bad;

            if (tv.tv_sec > SHRT_MAX / hz - hz) {
                error = EDOM;
                goto bad;
            }
            val = tv.tv_sec * hz + tv.tv_usec / tick;

            switch (sopt->sopt_name) {
            case SO_SNDTIMEO:
                so->so_snd.sb_timeo = val;
                break;
            case SO_RCVTIMEO:
                so->so_rcv.sb_timeo = val;
                break;
            }
            break;

        case SO_NKE:
        {
            struct so_nke nke;
            struct NFDescriptor *nf1, *nf2 = NULL;

            error = sooptcopyin(sopt, &nke,
                sizeof nke, sizeof nke);
            if (error)
                goto bad;

            error = nke_insert(so, &nke);
            break;
        }

        default:
            error = ENOPROTOOPT;
            break;
        }
        if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
            (void) ((*so->so_proto->pr_ctloutput)(so, sopt));
        }
    }
bad:
    return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(sopt, buf, len)
    struct sockopt *sopt;
    void *buf;
    size_t len;
{
    int error;
    size_t valsize;

    error = 0;

    /*
     * Documented get behavior is that we always return a value,
     * possibly truncated to fit in the user's buffer.
     * Traditional behavior is that we always tell the user
     * precisely how much we copied, rather than something useful
     * like the total amount we had available for her.
     * Note that this interface is not idempotent; the entire answer must
     * be generated ahead of time.
     */
    valsize = min(len, sopt->sopt_valsize);
    sopt->sopt_valsize = valsize;
    if (sopt->sopt_val != 0) {
        if (sopt->sopt_p != 0)
            error = copyout(buf, sopt->sopt_val, valsize);
        else
            bcopy(buf, sopt->sopt_val, valsize);
    }
    return error;
}

int
sogetopt(so, sopt)
    struct socket *so;
    struct sockopt *sopt;
{
    int error, optval;
    struct linger l;
    struct timeval tv;
    struct mbuf *m;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_socontrol) {
            error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
            if (error)
                return ((error == EJUSTRETURN) ? 0 : error);
        }
        kp = kp->e_next;
    }

    error = 0;
    if (sopt->sopt_level != SOL_SOCKET) {
        if (so->so_proto && so->so_proto->pr_ctloutput) {
            return ((*so->so_proto->pr_ctloutput)(so, sopt));
        } else
            return (ENOPROTOOPT);
    } else {
        switch (sopt->sopt_name) {
        case SO_LINGER:
            l.l_onoff = so->so_options & SO_LINGER;
            l.l_linger = so->so_linger;
            error = sooptcopyout(sopt, &l, sizeof l);
            break;

        case SO_USELOOPBACK:
        case SO_DONTROUTE:
        case SO_DEBUG:
        case SO_KEEPALIVE:
        case SO_REUSEADDR:
        case SO_REUSEPORT:
        case SO_BROADCAST:
        case SO_OOBINLINE:
        case SO_TIMESTAMP:
        case SO_DONTTRUNC:
        case SO_WANTMORE:
        case SO_WANTOOBFLAG:
            optval = so->so_options & sopt->sopt_name;
integer:
            error = sooptcopyout(sopt, &optval, sizeof optval);
            break;

        case SO_TYPE:
            optval = so->so_type;
            goto integer;

        case SO_NREAD:
        {
            int pkt_total;
            struct mbuf *m1;

            pkt_total = 0;
            m1 = so->so_rcv.sb_mb;
            if (so->so_proto->pr_flags & PR_ATOMIC) {
#if 0
                kprintf("SKT CC: %d\n", so->so_rcv.sb_cc);
#endif
                while (m1) {
                    if (m1->m_type == MT_DATA)
                        pkt_total += m1->m_len;
#if 0
                    kprintf("CNT: %d/%d\n", m1->m_len, pkt_total);
#endif
                    m1 = m1->m_next;
                }
                optval = pkt_total;
            } else
                optval = so->so_rcv.sb_cc;
#if 0
            kprintf("RTN: %d\n", optval);
#endif
            goto integer;
        }
        case SO_ERROR:
            optval = so->so_error;
            so->so_error = 0;
            goto integer;

        case SO_SNDBUF:
            optval = so->so_snd.sb_hiwat;
            goto integer;

        case SO_RCVBUF:
            optval = so->so_rcv.sb_hiwat;
            goto integer;

        case SO_SNDLOWAT:
            optval = so->so_snd.sb_lowat;
            goto integer;

        case SO_RCVLOWAT:
            optval = so->so_rcv.sb_lowat;
            goto integer;

        case SO_SNDTIMEO:
        case SO_RCVTIMEO:
            optval = (sopt->sopt_name == SO_SNDTIMEO ?
                so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

            tv.tv_sec = optval / hz;
            tv.tv_usec = (optval % hz) * tick;
            error = sooptcopyout(sopt, &tv, sizeof tv);
            break;

        default:
            error = ENOPROTOOPT;
            break;
        }
        return (error);
    }
}

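/*
 * Urgent data has arrived: give NKE hooks first refusal, then signal
 * the owning process or process group (SIGURG) and wake any threads
 * selecting on the receive buffer.
 */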
void
sohasoutofband(so)
    register struct socket *so;
{
    struct proc *p;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_sohasoutofband) {
            if ((*kp->e_soif->sf_sohasoutofband)(so, kp))
                return;
        }
        kp = kp->e_next;
    }
    if (so->so_pgid < 0)
        gsignal(-so->so_pgid, SIGURG);
    else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
        psignal(p, SIGURG);
    selwakeup(&so->so_rcv.sb_sel);
}

/*
 * Network filter support
 */
/* Run the list of filters, creating extension control blocks */
int
sfilter_init(register struct socket *so)
{
    struct kextcb *kp, **kpp;
    struct protosw *prp;
    struct NFDescriptor *nfp;

    prp = so->so_proto;
    nfp = prp->pr_sfilter.tqh_first;    /* non-null */
    kpp = &so->so_ext;
    kp = NULL;
    while (nfp) {
        MALLOC(kp, struct kextcb *, sizeof(*kp),
            M_TEMP, M_WAITOK);
        if (kp == NULL)
            return (ENOBUFS);    /* so_free will clean up */
        *kpp = kp;
        kpp = &kp->e_next;
        kp->e_next = NULL;
        kp->e_fcb = NULL;
        kp->e_nfd = nfp;
        kp->e_soif = nfp->nf_soif;
        kp->e_sout = nfp->nf_soutil;
        /*
         * Ignore return value for create
         * Everyone gets a chance at startup
         */
        if (kp->e_soif && kp->e_soif->sf_socreate)
            (*kp->e_soif->sf_socreate)(so, prp, kp);
        nfp = nfp->nf_next.tqe_next;
    }
    return (0);
}

/*
 * Run the list of filters, freeing extension control blocks
 * Assumes the soif/soutil blocks have been handled.
 */
int
sfilter_term(struct socket *so)
{
    struct kextcb *kp, *kp1;

    kp = so->so_ext;
    while (kp) {
        kp1 = kp->e_next;
        /*
         * Ignore return code on termination; everyone must
         * get terminated.
         */
        if (kp->e_soif && kp->e_soif->sf_sofree)
            kp->e_soif->sf_sofree(so, kp);
        FREE(kp, M_TEMP);
        kp = kp1;
    }
    return (0);
}

int
sopoll(struct socket *so, int events, struct ucred *cred, void *wql)
{
    struct proc *p = current_proc();
    int revents = 0;
    int s = splnet();

    if (events & (POLLIN | POLLRDNORM))
        if (soreadable(so))
            revents |= events & (POLLIN | POLLRDNORM);

    if (events & (POLLOUT | POLLWRNORM))
        if (sowriteable(so))
            revents |= events & (POLLOUT | POLLWRNORM);

    if (events & (POLLPRI | POLLRDBAND))
        if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
            revents |= events & (POLLPRI | POLLRDBAND);

    if (revents == 0) {
        if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
            so->so_rcv.sb_flags |= SB_SEL;
            selrecord(p, &so->so_rcv.sb_sel, wql);
        }

        if (events & (POLLOUT | POLLWRNORM)) {
            so->so_snd.sb_flags |= SB_SEL;
            selrecord(p, &so->so_snd.sb_sel, wql);
        }
    }

    splx(s);
    return (revents);
}

/* #### IPv6 Integration.  Added new routines */
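/*
 * Build an mbuf chain large enough to hold sopt->sopt_valsize bytes of
 * option data, using clusters for anything bigger than a single mbuf.
 */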
int
sooptgetm(struct sockopt *sopt, struct mbuf **mp)
{
    struct mbuf *m, *m_prev;
    int sopt_size = sopt->sopt_valsize;

    MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
    if (m == 0)
        return ENOBUFS;
    if (sopt_size > MLEN) {
        MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            m_free(m);
            return ENOBUFS;
        }
        m->m_len = min(MCLBYTES, sopt_size);
    } else {
        m->m_len = min(MLEN, sopt_size);
    }
    sopt_size -= m->m_len;
    *mp = m;
    m_prev = m;

    while (sopt_size) {
        MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
        if (m == 0) {
            m_freem(*mp);
            return ENOBUFS;
        }
        if (sopt_size > MLEN) {
            MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
            if ((m->m_flags & M_EXT) == 0) {
                m_freem(*mp);
                return ENOBUFS;
            }
            m->m_len = min(MCLBYTES, sopt_size);
        } else {
            m->m_len = min(MLEN, sopt_size);
        }
        sopt_size -= m->m_len;
        m_prev->m_next = m;
        m_prev = m;
    }
    return 0;
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
sooptmcopyin(struct sockopt *sopt, struct mbuf *m)
{
    struct mbuf *m0 = m;

    if (sopt->sopt_val == NULL)
        return 0;
    while (m != NULL && sopt->sopt_valsize >= m->m_len) {
        if (sopt->sopt_p != NULL) {
            int error;

            error = copyin(sopt->sopt_val, mtod(m, char *),
                m->m_len);
            if (error != 0) {
                m_freem(m0);
                return (error);
            }
        } else
            bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
        sopt->sopt_valsize -= m->m_len;
        (caddr_t)sopt->sopt_val += m->m_len;
        m = m->m_next;
    }
    if (m != NULL)    /* should have been allocated large enough by ip6_sooptmcopyin() */
        panic("sooptmcopyin");
    return 0;
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
sooptmcopyout(struct sockopt *sopt, struct mbuf *m)
{
    struct mbuf *m0 = m;
    size_t valsize = 0;

    if (sopt->sopt_val == NULL)
        return 0;
    while (m != NULL && sopt->sopt_valsize >= m->m_len) {
        if (sopt->sopt_p != NULL) {
            int error;

            error = copyout(mtod(m, char *), sopt->sopt_val,
                m->m_len);
            if (error != 0) {
                m_freem(m0);
                return (error);
            }
        } else
            bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
        sopt->sopt_valsize -= m->m_len;
        (caddr_t)sopt->sopt_val += m->m_len;
        valsize += m->m_len;
        m = m->m_next;
    }
    if (m != NULL) {
        /* enough soopt buffer should be given from user-land */
        m_freem(m0);
        return (EINVAL);
    }
    sopt->sopt_valsize = valsize;
    return 0;
}