/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *    The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)uipc_socket.c    8.6 (Berkeley) 5/2/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/ev.h>
#include <sys/kdebug.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <kern/zalloc.h>
#include <machine/limits.h>

int so_cache_hw = 0;
int so_cache_timeouts = 0;
int so_cache_max_freed = 0;
int cached_sock_count = 0;
struct socket *socket_cache_head = 0;
struct socket *socket_cache_tail = 0;
u_long so_cache_time = 0;
int so_cache_init_done = 0;
struct zone *so_cache_zone;
extern int get_inpcb_str_size();
extern int get_tcp_str_size();

int socket_debug = 0;
int socket_zone = M_SOCKET;
so_gen_t so_gencnt;    /* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define DBG_LAYER_IN_BEG    NETDBG_CODE(DBG_NETSOCK, 0)
#define DBG_LAYER_IN_END    NETDBG_CODE(DBG_NETSOCK, 2)
#define DBG_LAYER_OUT_BEG   NETDBG_CODE(DBG_NETSOCK, 1)
#define DBG_LAYER_OUT_END   NETDBG_CODE(DBG_NETSOCK, 3)
#define DBG_FNC_SOSEND      NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 1)
#define DBG_FNC_SORECEIVE   NETDBG_CODE(DBG_NETSOCK, (8 << 8))
#define DBG_FNC_SOSHUTDOWN  NETDBG_CODE(DBG_NETSOCK, (9 << 8))


SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW, &somaxconn,
    0, "");

/* Should we get a maximum also ??? */
static int sosendminchain = 16384;
SYSCTL_INT(_kern_ipc, OID_AUTO, sosendminchain, CTLFLAG_RW, &sosendminchain,
    0, "");

void so_cache_timer();

/*
 * Socket operation routines.
 * These routines are called by the routines in sys_socket.c or from a
 * system process, and implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

void
socketinit()
{
    vm_size_t str_size;

    so_cache_init_done = 1;

    timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));
    str_size = (vm_size_t)(sizeof (struct socket) + 4 +
        get_inpcb_str_size() + 4 + get_tcp_str_size());
    so_cache_zone = zinit(str_size, 120000 * str_size, 8192, "socache zone");
#if TEMPDEBUG
    kprintf("cached_sock_alloc -- so_cache_zone size is %x\n", str_size);
#endif
}

void
cached_sock_alloc(so, waitok)
    struct socket **so;
    int waitok;
{
    caddr_t temp;
    int s;
    register u_long offset;

    s = splnet();
    if (cached_sock_count) {
        cached_sock_count--;
        *so = socket_cache_head;
        if (*so == 0)
            panic("cached_sock_alloc: cached sock is null");

        socket_cache_head = socket_cache_head->cache_next;
        if (socket_cache_head)
            socket_cache_head->cache_prev = 0;
        else
            socket_cache_tail = 0;
        splx(s);

        temp = (*so)->so_saved_pcb;
        bzero((caddr_t)*so, sizeof (struct socket));
#if TEMPDEBUG
        kprintf("cached_sock_alloc - retrieving cached sock %x - count == %d\n",
            *so, cached_sock_count);
#endif
        (*so)->so_saved_pcb = temp;
    } else {
#if TEMPDEBUG
        kprintf("Allocating cached sock %x from memory\n", *so);
#endif
        splx(s);
        if (waitok)
            *so = (struct socket *)zalloc(so_cache_zone);
        else
            *so = (struct socket *)zalloc_noblock(so_cache_zone);

        if (*so == 0)
            return;

        bzero((caddr_t)*so, sizeof (struct socket));

        /*
         * Define offsets for extra structures into our single block of
         * memory.  Align extra structures on longword boundaries.
         */
        offset = (u_long)*so;
        offset += sizeof (struct socket);
        if (offset & 0x3) {
            offset += 4;
            offset &= 0xfffffffc;
        }
        (*so)->so_saved_pcb = (caddr_t)offset;
        offset += get_inpcb_str_size();
        if (offset & 0x3) {
            offset += 4;
            offset &= 0xfffffffc;
        }

        ((struct inpcb *)(*so)->so_saved_pcb)->inp_saved_ppcb =
            (caddr_t)offset;
#if TEMPDEBUG
        kprintf("Allocating cached socket - %x, pcb=%x tcpcb=%x\n", *so,
            (*so)->so_saved_pcb,
            ((struct inpcb *)(*so)->so_saved_pcb)->inp_saved_ppcb);
#endif
    }

    (*so)->cached_in_sock_layer = 1;
}

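/*
 * For reference, a rough sketch of how the single zone block is carved
 * up above (field sizes are illustrative; the real ones come from
 * sizeof (struct socket), get_inpcb_str_size() and get_tcp_str_size()):
 *
 *    +------------------+ <- *so
 *    | struct socket    |
 *    +------------------+ <- so_saved_pcb (longword aligned)
 *    | inpcb storage    |
 *    +------------------+ <- inp_saved_ppcb (longword aligned)
 *    | tcpcb storage    |
 *    +------------------+
 *
 * Alignment is done with the `offset += 4; offset &= 0xfffffffc;' idiom,
 * applied only when (offset & 0x3) is nonzero.
 */
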
void
cached_sock_free(so)
    struct socket *so;
{
    int s;

    s = splnet();
    if (++cached_sock_count > MAX_CACHED_SOCKETS) {
        --cached_sock_count;
        splx(s);
#if TEMPDEBUG
        kprintf("Freeing overflowed cached socket %x\n", so);
#endif
        zfree(so_cache_zone, (vm_offset_t)so);
    } else {
#if TEMPDEBUG
        kprintf("Freeing socket %x into cache\n", so);
#endif
        if (so_cache_hw < cached_sock_count)
            so_cache_hw = cached_sock_count;

        so->cache_next = socket_cache_head;
        so->cache_prev = 0;
        if (socket_cache_head)
            socket_cache_head->cache_prev = so;
        else
            socket_cache_tail = so;

        so->cache_timestamp = so_cache_time;
        socket_cache_head = so;
        splx(s);
    }

#if TEMPDEBUG
    kprintf("Freed cached sock %x into cache - count is %d\n",
        so, cached_sock_count);
#endif
}

void
so_cache_timer()
{
    register struct socket *p;
    register int s;
    register int n_freed = 0;
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(network_flock, TRUE);

    ++so_cache_time;

    s = splnet();

    while (p = socket_cache_tail) {
        if ((so_cache_time - p->cache_timestamp) < SO_CACHE_TIME_LIMIT)
            break;

        so_cache_timeouts++;

        if (socket_cache_tail = p->cache_prev)
            p->cache_prev->cache_next = 0;
        if (--cached_sock_count == 0)
            socket_cache_head = 0;

        splx(s);

        zfree(so_cache_zone, (vm_offset_t)p);

        splnet();
        if (++n_freed >= SO_CACHE_MAX_FREE_BATCH) {
            so_cache_max_freed++;
            break;
        }
    }
    splx(s);

    timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));

    (void) thread_funnel_set(network_flock, FALSE);
}

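/*
 * A worked example of the aging policy above, with hz = 100 and
 * illustrative limits SO_CACHE_FLUSH_INTERVAL = 1 (second) and
 * SO_CACHE_TIME_LIMIT = 3 (timer ticks):
 *
 *    so_cache_time   tail timestamp   age   action
 *               5                3      2   keep (age < limit), stop
 *               6                3      3   zfree() tail, examine next
 *
 * Entries are always freed from the tail (oldest first), and at most
 * SO_CACHE_MAX_FREE_BATCH sockets go back to the zone per pass, so one
 * timer firing cannot spend unbounded time at splnet.
 */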

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(waitok, dom, type)
    int waitok;
    int dom;
    int type;
{
    struct socket *so;

    if ((dom == PF_INET) && (type == SOCK_STREAM))
        cached_sock_alloc(&so, waitok);
    else {
        so = _MALLOC_ZONE(sizeof (*so), socket_zone, M_WAITOK);
        if (so)
            bzero(so, sizeof (*so));
    }
    /* XXX race condition for reentrant kernel */

    if (so) {
        so->so_gencnt = ++so_gencnt;
        so->so_zone = socket_zone;
    }

    return (so);
}

int
socreate(dom, aso, type, proto)
    int dom;
    struct socket **aso;
    register int type;
    int proto;
{
    struct proc *p = current_proc();
    register struct protosw *prp;
    struct socket *so;
    register int error = 0;

    if (proto)
        prp = pffindproto(dom, proto, type);
    else
        prp = pffindtype(dom, type);
    if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
        return (EPROTONOSUPPORT);
    if (prp->pr_type != type)
        return (EPROTOTYPE);
    so = soalloc(p != 0, dom, type);
    if (so == 0)
        return (ENOBUFS);

    TAILQ_INIT(&so->so_incomp);
    TAILQ_INIT(&so->so_comp);
    so->so_type = type;

    if (p != 0) {
        if (p->p_ucred->cr_uid == 0)
            so->so_state = SS_PRIV;

        so->so_uid = p->p_ucred->cr_uid;
    }

    so->so_proto = prp;
    so->so_rcv.sb_flags |= SB_RECV;    /* XXX */
    if (prp->pr_sfilter.tqh_first)
        error = sfilter_init(so);
    if (error == 0)
        error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);

    if (error) {
        so->so_state |= SS_NOFDREF;
        sofree(so);
        return (error);
    }
    prp->pr_domain->dom_refs++;
    so->so_rcv.sb_so = so->so_snd.sb_so = so;
    TAILQ_INIT(&so->so_evlist);
    *aso = so;
    return (0);
}

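/*
 * A minimal in-kernel usage sketch for socreate() above (disabled code;
 * error handling is trimmed and the helper name is made up):
 */
#if 0
static int
example_make_udp_socket(struct socket **sop)
{
    int error;

    /* PF_INET/SOCK_DGRAM takes the _MALLOC_ZONE path in soalloc() */
    error = socreate(PF_INET, sop, SOCK_DGRAM, IPPROTO_UDP);
    if (error)
        return (error);
    /* ... sobind()/soconnect() as needed; soclose() when done ... */
    return (0);
}
#endif /* 0 */
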
int
sobind(so, nam)
    struct socket *so;
    struct sockaddr *nam;
{
    struct proc *p = current_proc();
    int error;
    struct kextcb *kp;
    int s = splnet();

    error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
    if (error == 0) {    /* ??? */
        kp = sotokextcb(so);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_sobind) {
                error = (*kp->e_soif->sf_sobind)(so, nam, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }
    splx(s);
    return (error);
}

void
sodealloc(so)
    struct socket *so;
{
    so->so_gencnt = ++so_gencnt;

    if (so->cached_in_sock_layer == 1)
        cached_sock_free(so);
    else
        _FREE_ZONE(so, sizeof (*so), so->so_zone);
}

int
solisten(so, backlog)
    register struct socket *so;
    int backlog;
{
    struct kextcb *kp;
    struct proc *p = current_proc();
    int s, error;

    s = splnet();
    error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
    if (error) {
        splx(s);
        return (error);
    }
    if (so->so_comp.tqh_first == NULL)
        so->so_options |= SO_ACCEPTCONN;
    if (backlog < 0 || backlog > somaxconn)
        backlog = somaxconn;
    so->so_qlimit = backlog;
    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_solisten) {
            error = (*kp->e_soif->sf_solisten)(so, kp);
            if (error) {
                if (error == EJUSTRETURN)
                    break;
                splx(s);
                return (error);
            }
        }
        kp = kp->e_next;
    }

    splx(s);
    return (0);
}

void
sofree(so)
    register struct socket *so;
{
    int error;
    struct kextcb *kp;
    struct socket *head = so->so_head;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_sofree) {
            error = (*kp->e_soif->sf_sofree)(so, kp);
            if (error)
                return;    /* void fn */
        }
        kp = kp->e_next;
    }

    if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
        return;
    if (head != NULL) {
        if (so->so_state & SS_INCOMP) {
            TAILQ_REMOVE(&head->so_incomp, so, so_list);
            head->so_incqlen--;
        } else if (so->so_state & SS_COMP) {
            TAILQ_REMOVE(&head->so_comp, so, so_list);
        } else {
            panic("sofree: not queued");
        }
        head->so_qlen--;
        so->so_state &= ~(SS_INCOMP|SS_COMP);
        so->so_head = NULL;
    }

    sbrelease(&so->so_snd);
    sorflush(so);
    sfilter_term(so);
    sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
    register struct socket *so;
{
    int s = splnet();    /* conservative */
    int error = 0;
    struct kextcb *kp;

#if FB31SIG
    funsetown(so->so_pgid);
#endif
    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_soclose) {
            error = (*kp->e_soif->sf_soclose)(so, kp);
            if (error) {
                splx(s);
                return ((error == EJUSTRETURN) ? 0 : error);
            }
        }
        kp = kp->e_next;
    }

    if (so->so_options & SO_ACCEPTCONN) {
        struct socket *sp, *sonext;

        for (sp = so->so_incomp.tqh_first; sp != NULL; sp = sonext) {
            sonext = sp->so_list.tqe_next;
            (void) soabort(sp);
        }
        for (sp = so->so_comp.tqh_first; sp != NULL; sp = sonext) {
            sonext = sp->so_list.tqe_next;
            (void) soabort(sp);
        }
    }
    if (so->so_pcb == 0)
        goto discard;
    if (so->so_state & SS_ISCONNECTED) {
        if ((so->so_state & SS_ISDISCONNECTING) == 0) {
            error = sodisconnect(so);
            if (error)
                goto drop;
        }
        if (so->so_options & SO_LINGER) {
            if ((so->so_state & SS_ISDISCONNECTING) &&
                (so->so_state & SS_NBIO))
                goto drop;
            while (so->so_state & SS_ISCONNECTED) {
                error = tsleep((caddr_t)&so->so_timeo,
                    PSOCK | PCATCH, "soclos", so->so_linger);
                if (error)
                    break;
            }
        }
    }
drop:
    if (so->so_pcb) {
        int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
        if (error == 0)
            error = error2;
    }
discard:
    if (so->so_state & SS_NOFDREF)
        panic("soclose: NOFDREF");
    so->so_state |= SS_NOFDREF;
    so->so_proto->pr_domain->dom_refs--;
    evsofree(so);
    sofree(so);
    splx(s);
    return (error);
}

/*
 * Must be called at splnet...
 */
int
soabort(so)
    struct socket *so;
{
    return (*so->so_proto->pr_usrreqs->pru_abort)(so);
}

int
soaccept(so, nam)
    register struct socket *so;
    struct sockaddr **nam;
{
    int s = splnet();
    int error;
    struct kextcb *kp;

    if ((so->so_state & SS_NOFDREF) == 0)
        panic("soaccept: !NOFDREF");
    so->so_state &= ~SS_NOFDREF;
    error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
    if (error == 0) {
        kp = sotokextcb(so);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_soaccept) {
                error = (*kp->e_soif->sf_soaccept)(so, nam, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }

    splx(s);
    return (error);
}

int
soconnect(so, nam)
    register struct socket *so;
    struct sockaddr *nam;
{
    int s;
    int error;
    struct proc *p = current_proc();
    struct kextcb *kp;

    if (so->so_options & SO_ACCEPTCONN)
        return (EOPNOTSUPP);
    s = splnet();
    /*
     * If protocol is connection-based, can only connect once.
     * Otherwise, if connected, try to disconnect first.
     * This allows user to disconnect by connecting to, e.g.,
     * a null address.
     */
    if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
        ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
        (error = sodisconnect(so))))
        error = EISCONN;
    else {
        error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
        if (error == 0) {
            kp = sotokextcb(so);
            while (kp) {
                if (kp->e_soif && kp->e_soif->sf_soconnect) {
                    error = (*kp->e_soif->sf_soconnect)(so, nam, kp);
                    if (error) {
                        if (error == EJUSTRETURN)
                            break;
                        splx(s);
                        return (error);
                    }
                }
                kp = kp->e_next;
            }
        }
    }

    splx(s);
    return (error);
}

int
soconnect2(so1, so2)
    register struct socket *so1;
    struct socket *so2;
{
    int s = splnet();
    int error;
    struct kextcb *kp;

    error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
    if (error == 0) {
        kp = sotokextcb(so1);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_soconnect2) {
                error = (*kp->e_soif->sf_soconnect2)(so1, so2, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }
    splx(s);
    return (error);
}

int
sodisconnect(so)
    register struct socket *so;
{
    int s = splnet();
    int error;
    struct kextcb *kp;

    if ((so->so_state & SS_ISCONNECTED) == 0) {
        error = ENOTCONN;
        goto bad;
    }
    if (so->so_state & SS_ISDISCONNECTING) {
        error = EALREADY;
        goto bad;
    }
    error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);

    if (error == 0) {
        kp = sotokextcb(so);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_sodisconnect) {
                error = (*kp->e_soif->sf_sodisconnect)(so, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }

bad:
    splx(s);
    return (error);
}

#define SBLOCKWAIT(f)    (((f) & MSG_DONTWAIT) ? M_DONTWAIT : M_WAIT)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 * Experiment:
 * MSG_HOLD: go thru most of sosend(), but just enqueue the mbuf
 * MSG_SEND: go thru as for MSG_HOLD on current fragment, then
 *    point at the mbuf chain being constructed and go from there.
 */
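/*
 * The "short count" caveat above is aimed at in-kernel callers such as
 * the sendit() syscall path: on EINTR/ERESTART some data may already
 * have been taken from the uio, and the convention is to report the
 * partial transfer as success.  A hedged sketch of that check:
 *
 *    resid_before = uio->uio_resid;
 *    error = sosend(so, addr, uio, 0, control, flags);
 *    if (error == EINTR || error == ERESTART)
 *        if (uio->uio_resid != resid_before)
 *            error = 0;    (caller reports the bytes actually sent)
 */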
int
sosend(so, addr, uio, top, control, flags)
    register struct socket *so;
    struct sockaddr *addr;
    struct uio *uio;
    struct mbuf *top;
    struct mbuf *control;
    int flags;
{
    struct mbuf **mp;
    register struct mbuf *m;
    register long space, len, resid;
    int clen = 0, error, s, dontroute, mlen, sendflags;
    int atomic = sosendallatonce(so) || top;
    struct proc *p = current_proc();
    struct kextcb *kp;

    if (uio)
        resid = uio->uio_resid;
    else
        resid = top->m_pkthdr.len;

    KERNEL_DEBUG((DBG_FNC_SOSEND | DBG_FUNC_START),
        so, resid, so->so_snd.sb_cc, so->so_snd.sb_lowat,
        so->so_snd.sb_hiwat);

    /*
     * In theory resid should be unsigned.
     * However, space must be signed, as it might be less than 0
     * if we over-committed, and we must use a signed comparison
     * of space and resid.  On the other hand, a negative resid
     * causes us to loop sending 0-length segments to the protocol.
     *
     * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
     * type sockets since that's an error.
     */
    if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
        error = EINVAL;
        goto out;
    }

    dontroute =
        (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
        (so->so_proto->pr_flags & PR_ATOMIC);
    if (p)
        p->p_stats->p_ru.ru_msgsnd++;
    if (control)
        clen = control->m_len;
#define snderr(errno)    { error = errno; splx(s); goto release; }

restart:
    error = sblock(&so->so_snd, SBLOCKWAIT(flags));
    if (error)
        goto out;
    do {
        s = splnet();
        if (so->so_state & SS_CANTSENDMORE)
            snderr(EPIPE);
        if (so->so_error) {
            error = so->so_error;
            so->so_error = 0;
            splx(s);
            goto release;
        }
        if ((so->so_state & SS_ISCONNECTED) == 0) {
            /*
             * `sendto' and `sendmsg' are allowed on a connection-
             * based socket if it supports implied connect.
             * Return ENOTCONN if not connected and no address is
             * supplied.
             */
            if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
                (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
                if ((so->so_state & SS_ISCONFIRMING) == 0 &&
                    !(resid == 0 && clen != 0))
                    snderr(ENOTCONN);
            } else if (addr == 0 && !(flags & MSG_HOLD))
                snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
                    ENOTCONN : EDESTADDRREQ);
        }
        space = sbspace(&so->so_snd);
        if (flags & MSG_OOB)
            space += 1024;
        if ((atomic && resid > so->so_snd.sb_hiwat) ||
            clen > so->so_snd.sb_hiwat)
            snderr(EMSGSIZE);
        if (space < resid + clen && uio &&
            (atomic || space < so->so_snd.sb_lowat || space < clen)) {
            if (so->so_state & SS_NBIO)
                snderr(EWOULDBLOCK);
            sbunlock(&so->so_snd);
            error = sbwait(&so->so_snd);
            splx(s);
            if (error)
                goto out;
            goto restart;
        }
        splx(s);
        mp = &top;
        space -= clen;
        do {
            if (uio == NULL) {
                /*
                 * Data is prepackaged in "top".
                 */
                resid = 0;
                if (flags & MSG_EOR)
                    top->m_flags |= M_EOR;
            } else {
                boolean_t funnel_state = TRUE;
                int chainmbufs =
                    (sosendminchain > 0 && resid >= sosendminchain);

                if (chainmbufs)
                    funnel_state =
                        thread_funnel_set(network_flock, FALSE);
                do {
                    KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_NONE,
                        -1, 0, 0, 0, 0);
                    if (top == 0) {
                        MGETHDR(m, M_WAIT, MT_DATA);
                        mlen = MHLEN;
                        m->m_pkthdr.len = 0;
                        m->m_pkthdr.rcvif = (struct ifnet *)0;
                    } else {
                        MGET(m, M_WAIT, MT_DATA);
                        mlen = MLEN;
                    }
                    if (resid >= MINCLSIZE) {
                        MCLGET(m, M_WAIT);
                        if ((m->m_flags & M_EXT) == 0)
                            goto nopages;
                        mlen = MCLBYTES;
                        len = min(min(mlen, resid), space);
                    } else {
nopages:
                        len = min(min(mlen, resid), space);
                        /*
                         * For datagram protocols, leave room
                         * for protocol headers in first mbuf.
                         */
                        if (atomic && top == 0 && len < mlen)
                            MH_ALIGN(m, len);
                    }
                    KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_NONE,
                        -1, 0, 0, 0, 0);
                    space -= len;
                    error = uiomove(mtod(m, caddr_t), (int)len, uio);
                    resid = uio->uio_resid;

                    m->m_len = len;
                    *mp = m;
                    top->m_pkthdr.len += len;
                    if (error)
                        break;
                    mp = &m->m_next;
                    if (resid <= 0) {
                        if (flags & MSG_EOR)
                            top->m_flags |= M_EOR;
                        break;
                    }
                } while (space > 0 &&
                    (chainmbufs || atomic || resid < MINCLSIZE));
                if (chainmbufs)
                    funnel_state =
                        thread_funnel_set(network_flock, TRUE);
                if (error)
                    goto release;
            }

            if (flags & (MSG_HOLD|MSG_SEND)) {
                /* Enqueue for later, go away if HOLD */
                register struct mbuf *mb1;

                if (so->so_temp && (flags & MSG_FLUSH)) {
                    m_freem(so->so_temp);
                    so->so_temp = NULL;
                }
                if (so->so_temp)
                    so->so_tail->m_next = top;
                else
                    so->so_temp = top;
                mb1 = top;
                while (mb1->m_next)
                    mb1 = mb1->m_next;
                so->so_tail = mb1;
                if (flags & MSG_HOLD) {
                    top = NULL;
                    goto release;
                }
                top = so->so_temp;
            }
            if (dontroute)
                so->so_options |= SO_DONTROUTE;
            s = splnet();    /* XXX */
            kp = sotokextcb(so);
            /* Compute flags here, for pru_send and NKEs */
            sendflags = (flags & MSG_OOB) ? PRUS_OOB :
                /*
                 * If the user set MSG_EOF, the protocol
                 * understands this flag and nothing left to
                 * send then use PRU_SEND_EOF instead of PRU_SEND.
                 */
                ((flags & MSG_EOF) &&
                 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
                 (resid <= 0)) ?
                    PRUS_EOF :
                /* If there is more to send set PRUS_MORETOCOME */
                (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0;
            while (kp) {
                if (kp->e_soif && kp->e_soif->sf_sosend) {
                    error = (*kp->e_soif->sf_sosend)(so, &addr,
                        &uio, &top, &control, &sendflags, kp);
                    if (error) {
                        splx(s);
                        if (error == EJUSTRETURN) {
                            sbunlock(&so->so_snd);
                            return (0);
                        }
                        goto release;
                    }
                }
                kp = kp->e_next;
            }

            error = (*so->so_proto->pr_usrreqs->pru_send)(so,
                sendflags, top, addr, control, p);
            splx(s);
            if (flags & MSG_SEND)
                so->so_temp = NULL;

            if (dontroute)
                so->so_options &= ~SO_DONTROUTE;
            clen = 0;
            control = 0;
            top = 0;
            mp = &top;
            if (error)
                goto release;
        } while (resid && space > 0);
    } while (resid);

release:
    sbunlock(&so->so_snd);
out:
    if (top)
        m_freem(top);
    if (control)
        m_freem(control);

    KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_END,
        so, resid, so->so_snd.sb_cc, space, error);

    return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
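/*
 * For example, a single record for a PR_ADDR protocol (UDP, say) with
 * ancillary data present would be chained roughly as:
 *
 *    sb_mb -> [MT_SONAME] -> [MT_CONTROL] -> [MT_DATA] -> [MT_DATA] ...
 *             (m_next links within the record; the next record is
 *             reached via the first mbuf's m_nextpkt)
 *
 * soreceive() consumes the pieces in that order: the optional address,
 * then any MT_CONTROL mbufs, then the data mbufs.
 */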
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
    register struct socket *so;
    struct sockaddr **psa;
    struct uio *uio;
    struct mbuf **mp0;
    struct mbuf **controlp;
    int *flagsp;
{
    register struct mbuf *m, **mp;
    register int flags, len, error, s, offset;
    struct protosw *pr = so->so_proto;
    struct mbuf *nextrecord;
    int moff, type = 0;
    int orig_resid = uio->uio_resid;
    struct kextcb *kp;

    KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START,
        so, uio->uio_resid, so->so_rcv.sb_cc, so->so_rcv.sb_lowat,
        so->so_rcv.sb_hiwat);

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_soreceive) {
            error = (*kp->e_soif->sf_soreceive)(so, psa, &uio,
                mp0, controlp, flagsp, kp);
            if (error)
                return ((error == EJUSTRETURN) ? 0 : error);
        }
        kp = kp->e_next;
    }

    mp = mp0;
    if (psa)
        *psa = 0;
    if (controlp)
        *controlp = 0;
    if (flagsp)
        flags = *flagsp &~ MSG_EOR;
    else
        flags = 0;
    /*
     * When SO_WANTOOBFLAG is set we try to get out-of-band data
     * regardless of the flags argument.  Here is the case where
     * out-of-band data is not inline.
     */
    if ((flags & MSG_OOB) ||
        ((so->so_options & SO_WANTOOBFLAG) != 0 &&
         (so->so_options & SO_OOBINLINE) == 0 &&
         (so->so_oobmark || (so->so_state & SS_RCVATMARK)))) {
        m = m_get(M_WAIT, MT_DATA);
        error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
        if (error)
            goto bad;
        do {
            error = uiomove(mtod(m, caddr_t),
                (int) min(uio->uio_resid, m->m_len), uio);
            m = m_free(m);
        } while (uio->uio_resid && error == 0 && m);
bad:
        if (m)
            m_freem(m);
        if ((so->so_options & SO_WANTOOBFLAG) != 0) {
            if (error == EWOULDBLOCK || error == EINVAL) {
                /*
                 * Let's try to get normal data:
                 * EWOULDBLOCK: out-of-band data not received yet;
                 * EINVAL: out-of-band data already read.
                 */
                error = 0;
                goto nooob;
            } else if (error == 0 && flagsp)
                *flagsp |= MSG_OOB;
        }
        KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error, 0, 0, 0, 0);
        return (error);
    }
nooob:
    if (mp)
        *mp = (struct mbuf *)0;
    if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
        (*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
    if (error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) {
        KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error, 0, 0, 0, 0);
        return (error);
    }
    s = splnet();

    m = so->so_rcv.sb_mb;
    /*
     * If we have less data than requested, block awaiting more
     * (subject to any timeout) if:
     *   1. the current count is less than the low water mark, or
     *   2. MSG_WAITALL is set, and it is possible to do the entire
     *      receive operation at once if we block (resid <= hiwat), or
     *   3. MSG_DONTWAIT is not set.
     * If MSG_WAITALL is set but resid is larger than the receive buffer,
     * we have to do the receive in sections, and thus risk returning
     * a short count if a timeout or signal occurs after we start.
     */
    if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
        so->so_rcv.sb_cc < uio->uio_resid) &&
        (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
        ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
        m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
        KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
        if (so->so_error) {
            if (m)
                goto dontblock;
            error = so->so_error;
            if ((flags & MSG_PEEK) == 0)
                so->so_error = 0;
            goto release;
        }
        if (so->so_state & SS_CANTRCVMORE) {
            if (m)
                goto dontblock;
            else
                goto release;
        }
        for (; m; m = m->m_next)
            if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
                m = so->so_rcv.sb_mb;
                goto dontblock;
            }
        if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
            (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
            error = ENOTCONN;
            goto release;
        }
        if (uio->uio_resid == 0)
            goto release;
        if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
            error = EWOULDBLOCK;
            goto release;
        }
        sbunlock(&so->so_rcv);
        if (socket_debug)
            printf("Waiting for socket data\n");
        error = sbwait(&so->so_rcv);
        if (socket_debug)
            printf("SORECEIVE - sbwait returned %d\n", error);
        splx(s);
        if (error) {
            KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END,
                error, 0, 0, 0, 0);
            return (error);
        }
        goto restart;
    }
dontblock:
#ifdef notyet /* XXXX */
    if (uio->uio_procp)
        uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
#endif
    nextrecord = m->m_nextpkt;
    if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) {
        KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
        orig_resid = 0;
        if (psa)
            *psa = dup_sockaddr(mtod(m, struct sockaddr *),
                mp0 == 0);
        if (flags & MSG_PEEK) {
            m = m->m_next;
        } else {
            sbfree(&so->so_rcv, m);
            MFREE(m, so->so_rcv.sb_mb);
            m = so->so_rcv.sb_mb;
        }
    }
    while (m && m->m_type == MT_CONTROL && error == 0) {
        if (flags & MSG_PEEK) {
            if (controlp)
                *controlp = m_copy(m, 0, m->m_len);
            m = m->m_next;
        } else {
            sbfree(&so->so_rcv, m);
            if (controlp) {
                if (pr->pr_domain->dom_externalize &&
                    mtod(m, struct cmsghdr *)->cmsg_type ==
                    SCM_RIGHTS)
                    error = (*pr->pr_domain->dom_externalize)(m);
                *controlp = m;
                so->so_rcv.sb_mb = m->m_next;
                m->m_next = 0;
                m = so->so_rcv.sb_mb;
            } else {
                MFREE(m, so->so_rcv.sb_mb);
                m = so->so_rcv.sb_mb;
            }
        }
        if (controlp) {
            orig_resid = 0;
            controlp = &(*controlp)->m_next;
        }
    }
    if (m) {
        if ((flags & MSG_PEEK) == 0)
            m->m_nextpkt = nextrecord;
        type = m->m_type;
        if (type == MT_OOBDATA)
            flags |= MSG_OOB;
    }
    moff = 0;
    offset = 0;
    while (m && uio->uio_resid > 0 && error == 0) {
        if (m->m_type == MT_OOBDATA) {
            if (type != MT_OOBDATA)
                break;
        } else if (type == MT_OOBDATA)
            break;
#if 0
        /*
         * This assertion needs rework.  The trouble is AppleTalk uses
         * many mbuf types (NOT listed in mbuf.h!) which will trigger
         * this panic.  For now just remove the assertion...  CSM 9/98
         */
        else
            KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
                ("receive 3"));
#endif
        /*
         * Make sure to always set the MSG_OOB flag when getting
         * out-of-band data inline.
         */
        if ((so->so_options & SO_WANTOOBFLAG) != 0 &&
            (so->so_options & SO_OOBINLINE) != 0 &&
            (so->so_state & SS_RCVATMARK) != 0) {
            flags |= MSG_OOB;
        }
        so->so_state &= ~SS_RCVATMARK;
        len = uio->uio_resid;
        if (so->so_oobmark && len > so->so_oobmark - offset)
            len = so->so_oobmark - offset;
        if (len > m->m_len - moff)
            len = m->m_len - moff;
        /*
         * If mp is set, just pass back the mbufs.
         * Otherwise copy them out via the uio, then free.
         * Sockbuf must be consistent here (points to current mbuf,
         * it points to next record) when we drop priority;
         * we must note any additions to the sockbuf when we
         * block interrupts again.
         */
        if (mp == 0) {
            splx(s);
            error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
            s = splnet();
            if (error)
                goto release;
        } else
            uio->uio_resid -= len;
        if (len == m->m_len - moff) {
            if (m->m_flags & M_EOR)
                flags |= MSG_EOR;
            if (flags & MSG_PEEK) {
                m = m->m_next;
                moff = 0;
            } else {
                nextrecord = m->m_nextpkt;
                sbfree(&so->so_rcv, m);
                if (mp) {
                    *mp = m;
                    mp = &m->m_next;
                    so->so_rcv.sb_mb = m = m->m_next;
                    *mp = (struct mbuf *)0;
                } else {
                    MFREE(m, so->so_rcv.sb_mb);
                    m = so->so_rcv.sb_mb;
                }
                if (m)
                    m->m_nextpkt = nextrecord;
            }
        } else {
            if (flags & MSG_PEEK)
                moff += len;
            else {
                if (mp)
                    *mp = m_copym(m, 0, len, M_WAIT);
                m->m_data += len;
                m->m_len -= len;
                so->so_rcv.sb_cc -= len;
            }
        }
        if (so->so_oobmark) {
            if ((flags & MSG_PEEK) == 0) {
                so->so_oobmark -= len;
                if (so->so_oobmark == 0) {
                    so->so_state |= SS_RCVATMARK;
                    postevent(so, 0, EV_OOB);
                    break;
                }
            } else {
                offset += len;
                if (offset == so->so_oobmark)
                    break;
            }
        }
        if (flags & MSG_EOR)
            break;
        /*
         * If the MSG_WAITALL flag is set (for non-atomic socket),
         * we must not quit until "uio->uio_resid == 0" or an error
         * termination.  If a signal/timeout occurs, return
         * with a short count but without error.
         * Keep sockbuf locked against other readers.
         */
        while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
            !sosendallatonce(so) && !nextrecord) {
            if (so->so_error || so->so_state & SS_CANTRCVMORE)
                break;
            error = sbwait(&so->so_rcv);
            if (error) {
                sbunlock(&so->so_rcv);
                splx(s);
                KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END,
                    0, 0, 0, 0, 0);
                return (0);
            }
            m = so->so_rcv.sb_mb;
            if (m)
                nextrecord = m->m_nextpkt;
        }
    }

    if (m && pr->pr_flags & PR_ATOMIC) {
        if (so->so_options & SO_DONTTRUNC)
            flags |= MSG_RCVMORE;
        else {
            flags |= MSG_TRUNC;
            if ((flags & MSG_PEEK) == 0)
                (void) sbdroprecord(&so->so_rcv);
        }
    }
    if ((flags & MSG_PEEK) == 0) {
        if (m == 0)
            so->so_rcv.sb_mb = nextrecord;
        if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
            (*pr->pr_usrreqs->pru_rcvd)(so, flags);
    }
    if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0)
        flags |= MSG_HAVEMORE;
    if (orig_resid == uio->uio_resid && orig_resid &&
        (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
        sbunlock(&so->so_rcv);
        splx(s);
        goto restart;
    }

    if (flagsp)
        *flagsp |= flags;
release:
    sbunlock(&so->so_rcv);
    splx(s);

    KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END,
        so, uio->uio_resid, so->so_rcv.sb_cc, 0, error);

    return (error);
}

int
soshutdown(so, how)
    register struct socket *so;
    register int how;
{
    register struct protosw *pr = so->so_proto;
    struct kextcb *kp;
    int ret;

    KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_START, 0, 0, 0, 0, 0);
    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_soshutdown) {
            ret = (*kp->e_soif->sf_soshutdown)(so, how, kp);
            if (ret)
                return ((ret == EJUSTRETURN) ? 0 : ret);
        }
        kp = kp->e_next;
    }

    how++;
    if (how & FREAD) {
        sorflush(so);
        postevent(so, 0, EV_RCLOSED);
    }
    if (how & FWRITE) {
        ret = ((*pr->pr_usrreqs->pru_shutdown)(so));
        postevent(so, 0, EV_WCLOSED);
        KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0, 0, 0, 0, 0);
        return (ret);
    }

    KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0, 0, 0, 0, 0);
    return (0);
}

void
sorflush(so)
    register struct socket *so;
{
    register struct sockbuf *sb = &so->so_rcv;
    register struct protosw *pr = so->so_proto;
    register int s, error;
    struct sockbuf asb;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_sorflush) {
            if ((*kp->e_soif->sf_sorflush)(so, kp))
                return;
        }
        kp = kp->e_next;
    }

    sb->sb_flags |= SB_NOINTR;
    (void) sblock(sb, M_WAIT);
    s = splimp();
    socantrcvmore(so);
    sbunlock(sb);
    asb = *sb;
    bzero((caddr_t)sb, sizeof (*sb));
    splx(s);
    if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
        (*pr->pr_domain->dom_dispose)(asb.sb_mb);
    sbrelease(&asb);
}

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(sopt, buf, len, minlen)
    struct sockopt *sopt;
    void *buf;
    size_t len;
    size_t minlen;
{
    size_t valsize;

    /*
     * If the user gives us more than we wanted, we ignore it,
     * but if we don't get the minimum length the caller
     * wants, we return EINVAL.  On success, sopt->sopt_valsize
     * is set to however much we actually retrieved.
     */
    if ((valsize = sopt->sopt_valsize) < minlen)
        return (EINVAL);
    if (valsize > len)
        sopt->sopt_valsize = valsize = len;

    if (sopt->sopt_p != 0)
        return (copyin(sopt->sopt_val, buf, valsize));

    bcopy(sopt->sopt_val, buf, valsize);
    return (0);
}

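/*
 * A hedged sketch of a protocol-level pr_ctloutput() consumer of
 * sooptcopyin() (disabled code; the handler name and option handling
 * are made up):
 */
#if 0
static int
example_ctloutput_set(struct socket *so, struct sockopt *sopt)
{
    int error, optval;

    /* Require at least, and copy at most, sizeof (int) */
    error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
    if (error)
        return (error);
    /* ... apply optval to the protocol control block ... */
    return (0);
}
#endif /* 0 */
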
int
sosetopt(so, sopt)
    struct socket *so;
    struct sockopt *sopt;
{
    int error, optval;
    struct linger l;
    struct timeval tv;
    short val;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_socontrol) {
            error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
            if (error)
                return ((error == EJUSTRETURN) ? 0 : error);
        }
        kp = kp->e_next;
    }

    error = 0;
    if (sopt->sopt_level != SOL_SOCKET) {
        if (so->so_proto && so->so_proto->pr_ctloutput)
            return ((*so->so_proto->pr_ctloutput)(so, sopt));
        error = ENOPROTOOPT;
    } else {
        switch (sopt->sopt_name) {
        case SO_LINGER:
            error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
            if (error)
                goto bad;

            so->so_linger = l.l_linger;
            if (l.l_onoff)
                so->so_options |= SO_LINGER;
            else
                so->so_options &= ~SO_LINGER;
            break;

        case SO_DEBUG:
        case SO_KEEPALIVE:
        case SO_DONTROUTE:
        case SO_USELOOPBACK:
        case SO_BROADCAST:
        case SO_REUSEADDR:
        case SO_REUSEPORT:
        case SO_OOBINLINE:
        case SO_TIMESTAMP:
        case SO_DONTTRUNC:
        case SO_WANTMORE:
        case SO_WANTOOBFLAG:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                goto bad;
            if (optval)
                so->so_options |= sopt->sopt_name;
            else
                so->so_options &= ~sopt->sopt_name;
            break;

        case SO_SNDBUF:
        case SO_RCVBUF:
        case SO_SNDLOWAT:
        case SO_RCVLOWAT:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                goto bad;

            /*
             * Values < 1 make no sense for any of these
             * options, so disallow them.
             */
            if (optval < 1) {
                error = EINVAL;
                goto bad;
            }

            switch (sopt->sopt_name) {
            case SO_SNDBUF:
            case SO_RCVBUF:
                if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
                    &so->so_snd : &so->so_rcv,
                    (u_long) optval) == 0) {
                    error = ENOBUFS;
                    goto bad;
                }
                break;

            /*
             * Make sure the low-water is never greater than
             * the high-water.
             */
            case SO_SNDLOWAT:
                so->so_snd.sb_lowat =
                    (optval > so->so_snd.sb_hiwat) ?
                    so->so_snd.sb_hiwat : optval;
                break;
            case SO_RCVLOWAT:
                so->so_rcv.sb_lowat =
                    (optval > so->so_rcv.sb_hiwat) ?
                    so->so_rcv.sb_hiwat : optval;
                break;
            }
            break;

        case SO_SNDTIMEO:
        case SO_RCVTIMEO:
            error = sooptcopyin(sopt, &tv, sizeof tv, sizeof tv);
            if (error)
                goto bad;

            if (tv.tv_sec > SHRT_MAX / hz - hz) {
                error = EDOM;
                goto bad;
            }
            val = tv.tv_sec * hz + tv.tv_usec / tick;

            switch (sopt->sopt_name) {
            case SO_SNDTIMEO:
                so->so_snd.sb_timeo = val;
                break;
            case SO_RCVTIMEO:
                so->so_rcv.sb_timeo = val;
                break;
            }
            break;

        case SO_NKE:
        {
            struct so_nke nke;
            struct NFDescriptor *nf1, *nf2 = NULL;

            error = sooptcopyin(sopt, &nke, sizeof nke, sizeof nke);
            if (error)
                goto bad;

            error = nke_insert(so, &nke);
            break;
        }

        default:
            error = ENOPROTOOPT;
            break;
        }
        if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
            (void) ((*so->so_proto->pr_ctloutput)(so, sopt));
        }
    }
bad:
    return (error);
}

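/*
 * A worked example of the SO_SNDTIMEO/SO_RCVTIMEO conversion above,
 * assuming hz = 100 (so tick = 10000 usec):
 *
 *    tv = { 1, 500000 }  =>  val = 1 * 100 + 500000 / 10000 = 150 ticks
 *
 * The EDOM guard rejects tv_sec values whose tick count would overflow
 * the short sb_timeo; SHRT_MAX / hz - hz leaves headroom for the
 * tv_usec contribution.
 */
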
/* Helper routine for getsockopt */
int
sooptcopyout(sopt, buf, len)
    struct sockopt *sopt;
    void *buf;
    size_t len;
{
    int error;
    size_t valsize;

    error = 0;

    /*
     * Documented get behavior is that we always return a value,
     * possibly truncated to fit in the user's buffer.
     * Traditional behavior is that we always tell the user
     * precisely how much we copied, rather than something useful
     * like the total amount we had available for her.
     * Note that this interface is not idempotent; the entire answer
     * must be generated ahead of time.
     */
    valsize = min(len, sopt->sopt_valsize);
    sopt->sopt_valsize = valsize;
    if (sopt->sopt_val != 0) {
        if (sopt->sopt_p != 0)
            error = copyout(buf, sopt->sopt_val, valsize);
        else
            bcopy(buf, sopt->sopt_val, valsize);
    }
    return (error);
}

int
sogetopt(so, sopt)
    struct socket *so;
    struct sockopt *sopt;
{
    int error, optval;
    struct linger l;
    struct timeval tv;
    struct mbuf *m;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_socontrol) {
            error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
            if (error)
                return ((error == EJUSTRETURN) ? 0 : error);
        }
        kp = kp->e_next;
    }

    error = 0;
    if (sopt->sopt_level != SOL_SOCKET) {
        if (so->so_proto && so->so_proto->pr_ctloutput) {
            return ((*so->so_proto->pr_ctloutput)(so, sopt));
        } else
            return (ENOPROTOOPT);
    } else {
        switch (sopt->sopt_name) {
        case SO_LINGER:
            l.l_onoff = so->so_options & SO_LINGER;
            l.l_linger = so->so_linger;
            error = sooptcopyout(sopt, &l, sizeof l);
            break;

        case SO_USELOOPBACK:
        case SO_DONTROUTE:
        case SO_DEBUG:
        case SO_KEEPALIVE:
        case SO_REUSEADDR:
        case SO_REUSEPORT:
        case SO_BROADCAST:
        case SO_OOBINLINE:
        case SO_TIMESTAMP:
        case SO_DONTTRUNC:
        case SO_WANTMORE:
        case SO_WANTOOBFLAG:
            optval = so->so_options & sopt->sopt_name;
integer:
            error = sooptcopyout(sopt, &optval, sizeof optval);
            break;

        case SO_TYPE:
            optval = so->so_type;
            goto integer;

        case SO_NREAD:
        {
            int pkt_total;
            struct mbuf *m1;

            pkt_total = 0;
            m1 = so->so_rcv.sb_mb;
            if (so->so_proto->pr_flags & PR_ATOMIC) {
#if 0
                kprintf("SKT CC: %d\n", so->so_rcv.sb_cc);
#endif
                while (m1) {
                    if (m1->m_type == MT_DATA)
                        pkt_total += m1->m_len;
#if 0
                    kprintf("CNT: %d/%d\n", m1->m_len, pkt_total);
#endif
                    m1 = m1->m_next;
                }
                optval = pkt_total;
            } else
                optval = so->so_rcv.sb_cc;
#if 0
            kprintf("RTN: %d\n", optval);
#endif
            goto integer;
        }
        case SO_ERROR:
            optval = so->so_error;
            so->so_error = 0;
            goto integer;

        case SO_SNDBUF:
            optval = so->so_snd.sb_hiwat;
            goto integer;

        case SO_RCVBUF:
            optval = so->so_rcv.sb_hiwat;
            goto integer;

        case SO_SNDLOWAT:
            optval = so->so_snd.sb_lowat;
            goto integer;

        case SO_RCVLOWAT:
            optval = so->so_rcv.sb_lowat;
            goto integer;

        case SO_SNDTIMEO:
        case SO_RCVTIMEO:
            optval = (sopt->sopt_name == SO_SNDTIMEO ?
                so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

            tv.tv_sec = optval / hz;
            tv.tv_usec = (optval % hz) * tick;
            error = sooptcopyout(sopt, &tv, sizeof tv);
            break;

        default:
            error = ENOPROTOOPT;
            break;
        }
        return (error);
    }
}

void
sohasoutofband(so)
    register struct socket *so;
{
    struct proc *p;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_sohasoutofband) {
            if ((*kp->e_soif->sf_sohasoutofband)(so, kp))
                return;
        }
        kp = kp->e_next;
    }
    if (so->so_pgid < 0)
        gsignal(-so->so_pgid, SIGURG);
    else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
        psignal(p, SIGURG);
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
    selwakeup(&so->so_rcv.sb_sel);
    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
}

/*
 * Network filter support
 */
/* Run the list of filters, creating extension control blocks */
int
sfilter_init(register struct socket *so)
{
    struct kextcb *kp, **kpp;
    struct protosw *prp;
    struct NFDescriptor *nfp;

    prp = so->so_proto;
    nfp = prp->pr_sfilter.tqh_first;    /* non-null */
    kpp = &so->so_ext;
    kp = NULL;
    while (nfp) {
        MALLOC(kp, struct kextcb *, sizeof (*kp), M_TEMP, M_WAITOK);
        if (kp == NULL)
            return (ENOBUFS);    /* so_free will clean up */
        *kpp = kp;
        kpp = &kp->e_next;
        kp->e_next = NULL;
        kp->e_fcb = NULL;
        kp->e_nfd = nfp;
        kp->e_soif = nfp->nf_soif;
        kp->e_sout = nfp->nf_soutil;
        /*
         * Ignore return value for create
         * Everyone gets a chance at startup
         */
        if (kp->e_soif && kp->e_soif->sf_socreate)
            (*kp->e_soif->sf_socreate)(so, prp, kp);
        nfp = nfp->nf_next.tqe_next;
    }
    return (0);
}


/*
 * Run the list of filters, freeing extension control blocks
 * Assumes the soif/soutil blocks have been handled.
 */
int
sfilter_term(struct socket *so)
{
    struct kextcb *kp, *kp1;

    kp = so->so_ext;
    while (kp) {
        kp1 = kp->e_next;
        /*
         * Ignore return code on termination; everyone must
         * get terminated.
         */
        if (kp->e_soif && kp->e_soif->sf_sofree)
            kp->e_soif->sf_sofree(so, kp);
        FREE(kp, M_TEMP);
        kp = kp1;
    }
    return (0);
}

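/*
 * Every socket entry point above walks the chain built by sfilter_init()
 * with the same idiom; a condensed sketch of the convention (sf_sofoo
 * stands for whichever dispatch vector entry applies):
 *
 *    for (kp = sotokextcb(so); kp; kp = kp->e_next)
 *        if (kp->e_soif && kp->e_soif->sf_sofoo)
 *            error = (*kp->e_soif->sf_sofoo)(so, ..., kp);
 *
 * A filter returning 0 passes control to the next filter; EJUSTRETURN
 * stops the walk early (usually reported to the caller as success); any
 * other error aborts the operation.
 */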

int
sopoll(struct socket *so, int events, struct ucred *cred)
{
    struct proc *p = current_proc();
    int revents = 0;
    int s = splnet();

    if (events & (POLLIN | POLLRDNORM))
        if (soreadable(so))
            revents |= events & (POLLIN | POLLRDNORM);

    if (events & (POLLOUT | POLLWRNORM))
        if (sowriteable(so))
            revents |= events & (POLLOUT | POLLWRNORM);

    if (events & (POLLPRI | POLLRDBAND))
        if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
            revents |= events & (POLLPRI | POLLRDBAND);

    if (revents == 0) {
        if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
            selrecord(p, &so->so_rcv.sb_sel);
            so->so_rcv.sb_flags |= SB_SEL;
        }

        if (events & (POLLOUT | POLLWRNORM)) {
            selrecord(p, &so->so_snd.sb_sel);
            so->so_snd.sb_flags |= SB_SEL;
        }
    }

    splx(s);
    return (revents);
}

/* #### IPv6 integration.  Added new routines */
int
sooptgetm(struct sockopt *sopt, struct mbuf **mp)
{
    struct mbuf *m, *m_prev;
    int sopt_size = sopt->sopt_valsize;

    MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
    if (m == 0)
        return (ENOBUFS);
    if (sopt_size > MLEN) {
        MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            m_free(m);
            return (ENOBUFS);
        }
        m->m_len = min(MCLBYTES, sopt_size);
    } else {
        m->m_len = min(MLEN, sopt_size);
    }
    sopt_size -= m->m_len;
    *mp = m;
    m_prev = m;

    while (sopt_size) {
        MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
        if (m == 0) {
            m_freem(*mp);
            return (ENOBUFS);
        }
        if (sopt_size > MLEN) {
            MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
            if ((m->m_flags & M_EXT) == 0) {
                m_freem(*mp);
                return (ENOBUFS);
            }
            m->m_len = min(MCLBYTES, sopt_size);
        } else {
            m->m_len = min(MLEN, sopt_size);
        }
        sopt_size -= m->m_len;
        m_prev->m_next = m;
        m_prev = m;
    }
    return (0);
}

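/*
 * The intended flow for these compatibility helpers, as used by the
 * IPv6 code, is roughly:
 *
 *    sooptgetm(sopt, &m);      size an mbuf chain to sopt_valsize
 *    sooptmcopyin(sopt, m);    copy user option data into the chain
 *    (pre-FreeBSD-3 style ctloutput code consumes or refills m)
 *    sooptmcopyout(sopt, m);   copy the chain back out to the user
 */
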
/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
sooptmcopyin(struct sockopt *sopt, struct mbuf *m)
{
    struct mbuf *m0 = m;

    if (sopt->sopt_val == NULL)
        return (0);
    while (m != NULL && sopt->sopt_valsize >= m->m_len) {
        if (sopt->sopt_p != NULL) {
            int error;

            error = copyin(sopt->sopt_val, mtod(m, char *),
                m->m_len);
            if (error != 0) {
                m_freem(m0);
                return (error);
            }
        } else
            bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
        sopt->sopt_valsize -= m->m_len;
        sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
        m = m->m_next;
    }
    /* the chain should have been allocated large enough by ip6_sooptmcopyin() */
    if (m != NULL)
        panic("sooptmcopyin");
    return (0);
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
sooptmcopyout(struct sockopt *sopt, struct mbuf *m)
{
    struct mbuf *m0 = m;
    size_t valsize = 0;

    if (sopt->sopt_val == NULL)
        return (0);
    while (m != NULL && sopt->sopt_valsize >= m->m_len) {
        if (sopt->sopt_p != NULL) {
            int error;

            error = copyout(mtod(m, char *), sopt->sopt_val,
                m->m_len);
            if (error != 0) {
                m_freem(m0);
                return (error);
            }
        } else
            bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
        sopt->sopt_valsize -= m->m_len;
        sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
        valsize += m->m_len;
        m = m->m_next;
    }
    if (m != NULL) {
        /* enough soopt buffer should be given from user-land */
        m_freem(m0);
        return (EINVAL);
    }
    sopt->sopt_valsize = valsize;
    return (0);
}