/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *     The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *       This product includes software developed by the University of
 *       California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *     @(#)uipc_socket.c	8.6 (Berkeley) 5/2/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/ev.h>
#include <sys/kdebug.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <kern/zalloc.h>
#include <machine/limits.h>

int so_cache_hw = 0;
int so_cache_timeouts = 0;
int so_cache_max_freed = 0;
int cached_sock_count = 0;
struct socket *socket_cache_head = 0;
struct socket *socket_cache_tail = 0;
u_long so_cache_time = 0;
int so_cache_init_done = 0;
struct zone *so_cache_zone;
extern int get_inpcb_str_size();
extern int get_tcp_str_size();
int socket_debug = 0;
int socket_zone = M_SOCKET;
so_gen_t so_gencnt;    /* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define DBG_LAYER_IN_BEG	NETDBG_CODE(DBG_NETSOCK, 0)
#define DBG_LAYER_IN_END	NETDBG_CODE(DBG_NETSOCK, 2)
#define DBG_LAYER_OUT_BEG	NETDBG_CODE(DBG_NETSOCK, 1)
#define DBG_LAYER_OUT_END	NETDBG_CODE(DBG_NETSOCK, 3)
#define DBG_FNC_SOSEND		NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 1)
#define DBG_FNC_SORECEIVE	NETDBG_CODE(DBG_NETSOCK, (8 << 8))
#define DBG_FNC_SOSHUTDOWN	NETDBG_CODE(DBG_NETSOCK, (9 << 8))

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "");

/* Should we get a maximum also ??? */
static int sosendminchain = 16384;
SYSCTL_INT(_kern_ipc, OID_AUTO, sosendminchain, CTLFLAG_RW,
    &sosendminchain, 0, "");

void so_cache_timer();

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

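/*
 * Illustrative sketch (not part of the original source): each socket-layer
 * entry point below does state bookkeeping and then dispatches through the
 * protocol switch, e.g.
 *
 *     error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
 *
 * with an optional walk over the socket's NKE filter chain afterwards.
 */
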
void socketinit()
{
    vm_size_t str_size;

    so_cache_init_done = 1;

    timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));
    str_size = (vm_size_t)(sizeof(struct socket) + 4 +
                           get_inpcb_str_size() + 4 +
                           get_tcp_str_size());
    so_cache_zone = zinit(str_size, 120000 * str_size, 8192, "socache zone");
#if TEMPDEBUG
    kprintf("cached_sock_alloc -- so_cache_zone size is %x\n", str_size);
#endif
}

void
cached_sock_alloc(so, waitok)
    struct socket **so;
    int waitok;
{
    caddr_t temp;
    int s;
    register u_long offset;

    s = splnet();
    if (cached_sock_count) {
        cached_sock_count--;
        *so = socket_cache_head;
        if (*so == 0)
            panic("cached_sock_alloc: cached sock is null");

        socket_cache_head = socket_cache_head->cache_next;
        if (socket_cache_head)
            socket_cache_head->cache_prev = 0;
        else
            socket_cache_tail = 0;
        splx(s);

        temp = (*so)->so_saved_pcb;
        bzero((caddr_t)*so, sizeof(struct socket));
#if TEMPDEBUG
        kprintf("cached_sock_alloc - retrieving cached sock %x - count == %d\n",
            *so, cached_sock_count);
#endif
        (*so)->so_saved_pcb = temp;
    } else {
#if TEMPDEBUG
        kprintf("Allocating cached sock %x from memory\n", *so);
#endif

        splx(s);
        if (waitok)
            *so = (struct socket *) zalloc(so_cache_zone);
        else
            *so = (struct socket *) zalloc_noblock(so_cache_zone);

        if (*so == 0)
            return;

        bzero((caddr_t)*so, sizeof(struct socket));

        /*
         * Define offsets for extra structures into our single block of
         * memory. Align extra structures on longword boundaries.
         */
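
        /*
         * Illustrative layout of the single zalloc'ed block (a sketch
         * inferred from the code below, not original commentary); the
         * two offsets are padded up to 4-byte boundaries:
         *
         *     [struct socket][pad][inpcb area][pad][tcpcb area]
         *      ^ *so              ^ so_saved_pcb   ^ inp_saved_ppcb
         */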

        offset = (u_long) *so;
        offset += sizeof(struct socket);
        if (offset & 0x3) {
            offset += 4;
            offset &= 0xfffffffc;
        }
        (*so)->so_saved_pcb = (caddr_t) offset;
        offset += get_inpcb_str_size();
        if (offset & 0x3) {
            offset += 4;
            offset &= 0xfffffffc;
        }

        ((struct inpcb *) (*so)->so_saved_pcb)->inp_saved_ppcb = (caddr_t) offset;
#if TEMPDEBUG
        kprintf("Allocating cached socket - %x, pcb=%x tcpcb=%x\n", *so,
            (*so)->so_saved_pcb,
            ((struct inpcb *)(*so)->so_saved_pcb)->inp_saved_ppcb);
#endif
    }

    (*so)->cached_in_sock_layer = 1;
}

void
cached_sock_free(so)
    struct socket *so;
{
    int s;

    s = splnet();
    if (++cached_sock_count > MAX_CACHED_SOCKETS) {
        --cached_sock_count;
        splx(s);
#if TEMPDEBUG
        kprintf("Freeing overflowed cached socket %x\n", so);
#endif
        zfree(so_cache_zone, (vm_offset_t) so);
    } else {
#if TEMPDEBUG
        kprintf("Freeing socket %x into cache\n", so);
#endif
        if (so_cache_hw < cached_sock_count)
            so_cache_hw = cached_sock_count;

        so->cache_next = socket_cache_head;
        so->cache_prev = 0;
        if (socket_cache_head)
            socket_cache_head->cache_prev = so;
        else
            socket_cache_tail = so;

        so->cache_timestamp = so_cache_time;
        socket_cache_head = so;
        splx(s);
    }

#if TEMPDEBUG
    kprintf("Freed cached sock %x into cache - count is %d\n", so, cached_sock_count);
#endif
}

void
so_cache_timer()
{
    register struct socket *p;
    register int s;
    register int n_freed = 0;
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(network_flock, TRUE);

    ++so_cache_time;

    s = splnet();

    while ((p = socket_cache_tail)) {
        if ((so_cache_time - p->cache_timestamp) < SO_CACHE_TIME_LIMIT)
            break;

        so_cache_timeouts++;

        if ((socket_cache_tail = p->cache_prev))
            p->cache_prev->cache_next = 0;
        if (--cached_sock_count == 0)
            socket_cache_head = 0;

        splx(s);

        zfree(so_cache_zone, (vm_offset_t) p);

        s = splnet();	/* save the level again; the bare splnet() discarded it */
        if (++n_freed >= SO_CACHE_MAX_FREE_BATCH) {
            so_cache_max_freed++;
            break;
        }
    }
    splx(s);

    timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));

    (void) thread_funnel_set(network_flock, FALSE);
}

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(waitok, dom, type)
    int waitok;
    int dom;
    int type;
{
    struct socket *so;

    if ((dom == PF_INET) && (type == SOCK_STREAM))
        cached_sock_alloc(&so, waitok);
    else {
        so = _MALLOC_ZONE(sizeof(*so), socket_zone, M_WAITOK);
        if (so)
            bzero(so, sizeof *so);
    }
    /* XXX race condition for reentrant kernel */

    if (so) {
        so->so_gencnt = ++so_gencnt;
        so->so_zone = socket_zone;
    }

    return so;
}

int
socreate(dom, aso, type, proto)
    int dom;
    struct socket **aso;
    register int type;
    int proto;
{
    struct proc *p = current_proc();
    register struct protosw *prp;
    struct socket *so;
    register int error = 0;

    if (proto)
        prp = pffindproto(dom, proto, type);
    else
        prp = pffindtype(dom, type);
    if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
        return (EPROTONOSUPPORT);
    if (prp->pr_type != type)
        return (EPROTOTYPE);
    so = soalloc(p != 0, dom, type);
    if (so == 0)
        return (ENOBUFS);

    TAILQ_INIT(&so->so_incomp);
    TAILQ_INIT(&so->so_comp);
    so->so_type = type;

    if (p != 0) {
        if (p->p_ucred->cr_uid == 0)
            so->so_state = SS_PRIV;

        so->so_uid = p->p_ucred->cr_uid;
    }

    so->so_proto = prp;
    so->so_rcv.sb_flags |= SB_RECV;	/* XXX */
    if (prp->pr_sfilter.tqh_first)
        error = sfilter_init(so);
    if (error == 0)
        error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);

    if (error) {
        so->so_state |= SS_NOFDREF;
        sofree(so);
        return (error);
    }
    prp->pr_domain->dom_refs++;
    so->so_rcv.sb_so = so->so_snd.sb_so = so;
    TAILQ_INIT(&so->so_evlist);
    *aso = so;
    return (0);
}

int
sobind(so, nam)
    struct socket *so;
    struct sockaddr *nam;
{
    struct proc *p = current_proc();
    int error;
    struct kextcb *kp;
    int s = splnet();

    error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
    if (error == 0) {	/* ??? */
        kp = sotokextcb(so);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_sobind) {
                error = (*kp->e_soif->sf_sobind)(so, nam, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }
    splx(s);
    return (error);
}

void
sodealloc(so)
    struct socket *so;
{
    so->so_gencnt = ++so_gencnt;

    if (so->cached_in_sock_layer == 1)
        cached_sock_free(so);
    else
        _FREE_ZONE(so, sizeof(*so), so->so_zone);
}

int
solisten(so, backlog)
    register struct socket *so;
    int backlog;
{
    struct kextcb *kp;
    struct proc *p = current_proc();
    int s, error;

    s = splnet();
    error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
    if (error) {
        splx(s);
        return (error);
    }
    if (TAILQ_EMPTY(&so->so_comp))
        so->so_options |= SO_ACCEPTCONN;
    if (backlog < 0 || backlog > somaxconn)
        backlog = somaxconn;
    so->so_qlimit = backlog;
    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_solisten) {
            error = (*kp->e_soif->sf_solisten)(so, kp);
            if (error) {
                if (error == EJUSTRETURN)
                    break;
                splx(s);
                return (error);
            }
        }
        kp = kp->e_next;
    }

    splx(s);
    return (0);
}

void
sofree(so)
    register struct socket *so;
{
    int error;
    struct kextcb *kp;
    struct socket *head = so->so_head;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_sofree) {
            error = (*kp->e_soif->sf_sofree)(so, kp);
            if (error)
                return;	/* void fn */
        }
        kp = kp->e_next;
    }

    if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
        return;
    if (head != NULL) {
        if (so->so_state & SS_INCOMP) {
            TAILQ_REMOVE(&head->so_incomp, so, so_list);
            head->so_incqlen--;
        } else if (so->so_state & SS_COMP) {
            /*
             * We must not decommission a socket that's
             * on the accept(2) queue. If we do, then
             * accept(2) may hang after select(2) indicated
             * that the listening socket was ready.
             */
            return;
        } else {
            panic("sofree: not queued");
        }
        head->so_qlen--;
        so->so_state &= ~(SS_INCOMP|SS_COMP);
        so->so_head = NULL;
    }

    sbrelease(&so->so_snd);
    sorflush(so);
    sfilter_term(so);
    sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
    register struct socket *so;
{
    int s = splnet();	/* conservative */
    int error = 0;
    struct kextcb *kp;

#if FB31SIG
    funsetown(so->so_pgid);
#endif
    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_soclose) {
            error = (*kp->e_soif->sf_soclose)(so, kp);
            if (error) {
                splx(s);
                return ((error == EJUSTRETURN) ? 0 : error);
            }
        }
        kp = kp->e_next;
    }

    if (so->so_options & SO_ACCEPTCONN) {
        struct socket *sp, *sonext;

        sp = TAILQ_FIRST(&so->so_incomp);
        for (; sp != NULL; sp = sonext) {
            sonext = TAILQ_NEXT(sp, so_list);
            (void) soabort(sp);
        }
        for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
            sonext = TAILQ_NEXT(sp, so_list);
            /* Dequeue from so_comp since sofree() won't do it */
            TAILQ_REMOVE(&so->so_comp, sp, so_list);
            so->so_qlen--;
            sp->so_state &= ~SS_COMP;
            sp->so_head = NULL;
            (void) soabort(sp);
        }
    }
    if (so->so_pcb == 0)
        goto discard;
    if (so->so_state & SS_ISCONNECTED) {
        if ((so->so_state & SS_ISDISCONNECTING) == 0) {
            error = sodisconnect(so);
            if (error)
                goto drop;
        }
        if (so->so_options & SO_LINGER) {
            if ((so->so_state & SS_ISDISCONNECTING) &&
                (so->so_state & SS_NBIO))
                goto drop;
            while (so->so_state & SS_ISCONNECTED) {
                error = tsleep((caddr_t)&so->so_timeo,
                    PSOCK | PCATCH, "soclos", so->so_linger);
                if (error)
                    break;
            }
        }
    }
drop:
    if (so->so_pcb) {
        int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
        if (error == 0)
            error = error2;
    }
discard:
    if (so->so_pcb && so->so_state & SS_NOFDREF)
        panic("soclose: NOFDREF");
    so->so_state |= SS_NOFDREF;
    so->so_proto->pr_domain->dom_refs--;
    evsofree(so);
    sofree(so);
    splx(s);
    return (error);
}

/*
 * Must be called at splnet...
 */
int
soabort(so)
    struct socket *so;
{
    return (*so->so_proto->pr_usrreqs->pru_abort)(so);
}

int
soaccept(so, nam)
    register struct socket *so;
    struct sockaddr **nam;
{
    int s = splnet();
    int error;
    struct kextcb *kp;

    if ((so->so_state & SS_NOFDREF) == 0)
        panic("soaccept: !NOFDREF");
    so->so_state &= ~SS_NOFDREF;
    error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
    if (error == 0) {
        kp = sotokextcb(so);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_soaccept) {
                error = (*kp->e_soif->sf_soaccept)(so, nam, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }

    splx(s);
    return (error);
}

int
soconnect(so, nam)
    register struct socket *so;
    struct sockaddr *nam;
{
    int s;
    int error;
    struct proc *p = current_proc();
    struct kextcb *kp;

    if (so->so_options & SO_ACCEPTCONN)
        return (EOPNOTSUPP);
    s = splnet();
    /*
     * If protocol is connection-based, can only connect once.
     * Otherwise, if connected, try to disconnect first.
     * This allows user to disconnect by connecting to, e.g.,
     * a null address.
     */
    if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
        ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
        (error = sodisconnect(so))))
        error = EISCONN;
    else {
        error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
        if (error == 0) {
            kp = sotokextcb(so);
            while (kp) {
                if (kp->e_soif && kp->e_soif->sf_soconnect) {
                    error = (*kp->e_soif->sf_soconnect)(so, nam, kp);
                    if (error) {
                        if (error == EJUSTRETURN)
                            break;
                        splx(s);
                        return (error);
                    }
                }
                kp = kp->e_next;
            }
        }
    }

    splx(s);
    return (error);
}

int
soconnect2(so1, so2)
    register struct socket *so1;
    struct socket *so2;
{
    int s = splnet();
    int error;
    struct kextcb *kp;

    error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
    if (error == 0) {
        kp = sotokextcb(so1);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_soconnect2) {
                error = (*kp->e_soif->sf_soconnect2)(so1, so2, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }
    splx(s);
    return (error);
}

int
sodisconnect(so)
    register struct socket *so;
{
    int s = splnet();
    int error;
    struct kextcb *kp;

    if ((so->so_state & SS_ISCONNECTED) == 0) {
        error = ENOTCONN;
        goto bad;
    }
    if (so->so_state & SS_ISDISCONNECTING) {
        error = EALREADY;
        goto bad;
    }
    error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);

    if (error == 0) {
        kp = sotokextcb(so);
        while (kp) {
            if (kp->e_soif && kp->e_soif->sf_sodisconnect) {
                error = (*kp->e_soif->sf_sodisconnect)(so, kp);
                if (error) {
                    if (error == EJUSTRETURN)
                        break;
                    splx(s);
                    return (error);
                }
            }
            kp = kp->e_next;
        }
    }

bad:
    splx(s);
    return (error);
}

#define SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_DONTWAIT : M_WAIT)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not). Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 * Experiment:
 * MSG_HOLD: go thru most of sosend(), but just enqueue the mbuf
 * MSG_SEND: go thru as for MSG_HOLD on current fragment, then
 * point at the mbuf chain being constructed and go from there.
 */
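/*
 * Illustrative call pattern (a sketch, not original commentary): a caller
 * supplies either a uio or a prebuilt chain, never both:
 *
 *     error = sosend(so, NULL, uio, NULL, NULL, 0);    // uio-described data
 *     error = sosend(so, NULL, NULL, top, NULL, 0);    // prebuilt mbuf chain
 *
 * With "top" supplied, atomic semantics apply ("atomic" below is
 * sosendallatonce(so) || top) and the chain must fit the send buffer
 * in one shot.
 */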
int
sosend(so, addr, uio, top, control, flags)
    register struct socket *so;
    struct sockaddr *addr;
    struct uio *uio;
    struct mbuf *top;
    struct mbuf *control;
    int flags;
{
    struct mbuf **mp;
    register struct mbuf *m;
    register long space, len, resid;
    int clen = 0, error, s, dontroute, mlen, sendflags;
    int atomic = sosendallatonce(so) || top;
    struct proc *p = current_proc();
    struct kextcb *kp;

    if (uio)
        resid = uio->uio_resid;
    else
        resid = top->m_pkthdr.len;

    KERNEL_DEBUG((DBG_FNC_SOSEND | DBG_FUNC_START),
        so,
        resid,
        so->so_snd.sb_cc,
        so->so_snd.sb_lowat,
        so->so_snd.sb_hiwat);

    /*
     * In theory resid should be unsigned.
     * However, space must be signed, as it might be less than 0
     * if we over-committed, and we must use a signed comparison
     * of space and resid. On the other hand, a negative resid
     * causes us to loop sending 0-length segments to the protocol.
     *
     * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
     * type sockets since that's an error.
     */
    if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
        error = EINVAL;
        goto out;
    }

    dontroute =
        (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
        (so->so_proto->pr_flags & PR_ATOMIC);
    if (p)
        p->p_stats->p_ru.ru_msgsnd++;
    if (control)
        clen = control->m_len;
#define snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
    error = sblock(&so->so_snd, SBLOCKWAIT(flags));
    if (error)
        goto out;
    do {
        s = splnet();
        if (so->so_state & SS_CANTSENDMORE)
            snderr(EPIPE);
        if (so->so_error) {
            error = so->so_error;
            so->so_error = 0;
            splx(s);
            goto release;
        }
        if ((so->so_state & SS_ISCONNECTED) == 0) {
            /*
             * `sendto' and `sendmsg' are allowed on a connection-
             * based socket if it supports implied connect.
             * Return ENOTCONN if not connected and no address is
             * supplied.
             */
            if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
                (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
                if ((so->so_state & SS_ISCONFIRMING) == 0 &&
                    !(resid == 0 && clen != 0))
                    snderr(ENOTCONN);
            } else if (addr == 0 && !(flags & MSG_HOLD))
                snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
                    ENOTCONN : EDESTADDRREQ);
        }
        space = sbspace(&so->so_snd);
        if (flags & MSG_OOB)
            space += 1024;
        if ((atomic && resid > so->so_snd.sb_hiwat) ||
            clen > so->so_snd.sb_hiwat)
            snderr(EMSGSIZE);
        if (space < resid + clen && uio &&
            (atomic || space < so->so_snd.sb_lowat || space < clen)) {
            if (so->so_state & SS_NBIO)
                snderr(EWOULDBLOCK);
            sbunlock(&so->so_snd);
            error = sbwait(&so->so_snd);
            splx(s);
            if (error)
                goto out;
            goto restart;
        }
        splx(s);
        mp = &top;
        space -= clen;
        do {
            if (uio == NULL) {
                /*
                 * Data is prepackaged in "top".
                 */
                resid = 0;
                if (flags & MSG_EOR)
                    top->m_flags |= M_EOR;
            } else {
                boolean_t funnel_state = TRUE;
                int chainmbufs = (sosendminchain > 0 && resid >= sosendminchain);

                if (chainmbufs)
                    funnel_state = thread_funnel_set(network_flock, FALSE);
                do {
                    KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_NONE, -1, 0, 0, 0, 0);
                    if (top == 0) {
                        MGETHDR(m, M_WAIT, MT_DATA);
                        mlen = MHLEN;
                        m->m_pkthdr.len = 0;
                        m->m_pkthdr.rcvif = (struct ifnet *)0;
                    } else {
                        MGET(m, M_WAIT, MT_DATA);
                        mlen = MLEN;
                    }
                    if (resid >= MINCLSIZE) {
                        MCLGET(m, M_WAIT);
                        if ((m->m_flags & M_EXT) == 0)
                            goto nopages;
                        mlen = MCLBYTES;
                        len = min(min(mlen, resid), space);
                    } else {
nopages:
                        len = min(min(mlen, resid), space);
                        /*
                         * For datagram protocols, leave room
                         * for protocol headers in first mbuf.
                         */
                        if (atomic && top == 0 && len < mlen)
                            MH_ALIGN(m, len);
                    }
                    KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_NONE, -1, 0, 0, 0, 0);
                    space -= len;
                    error = uiomove(mtod(m, caddr_t), (int)len, uio);
                    resid = uio->uio_resid;

                    m->m_len = len;
                    *mp = m;
                    top->m_pkthdr.len += len;
                    if (error)
                        break;
                    mp = &m->m_next;
                    if (resid <= 0) {
                        if (flags & MSG_EOR)
                            top->m_flags |= M_EOR;
                        break;
                    }
                } while (space > 0 && (chainmbufs || atomic || resid < MINCLSIZE));
                if (chainmbufs)
                    funnel_state = thread_funnel_set(network_flock, TRUE);
                if (error)
                    goto release;
            }

            if (flags & (MSG_HOLD|MSG_SEND)) {
                /* Enqueue for later, go away if HOLD */
                register struct mbuf *mb1;

                if (so->so_temp && (flags & MSG_FLUSH)) {
                    m_freem(so->so_temp);
                    so->so_temp = NULL;
                }
                if (so->so_temp)
                    so->so_tail->m_next = top;
                else
                    so->so_temp = top;
                mb1 = top;
                while (mb1->m_next)
                    mb1 = mb1->m_next;
                so->so_tail = mb1;
                if (flags & MSG_HOLD) {
                    top = NULL;
                    goto release;
                }
                top = so->so_temp;
            }
            if (dontroute)
                so->so_options |= SO_DONTROUTE;
            s = splnet();	/* XXX */
            kp = sotokextcb(so);
            /* Compute flags here, for pru_send and NKEs */
            sendflags = (flags & MSG_OOB) ? PRUS_OOB :
                /*
                 * If the user set MSG_EOF, the protocol
                 * understands this flag and nothing left to
                 * send then use PRU_SEND_EOF instead of PRU_SEND.
                 */
                ((flags & MSG_EOF) &&
                 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
                 (resid <= 0)) ?
                    PRUS_EOF :
                /* If there is more to send set PRUS_MORETOCOME */
                (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0;
            while (kp) {
                if (kp->e_soif && kp->e_soif->sf_sosend) {
                    error = (*kp->e_soif->sf_sosend)(so, &addr,
                        &uio, &top, &control, &sendflags, kp);
                    if (error) {
                        splx(s);
                        if (error == EJUSTRETURN) {
                            sbunlock(&so->so_snd);
                            return (0);
                        }
                        goto release;
                    }
                }
                kp = kp->e_next;
            }

            error = (*so->so_proto->pr_usrreqs->pru_send)(so,
                sendflags, top, addr, control, p);
            splx(s);
            if (flags & MSG_SEND)
                so->so_temp = NULL;

            if (dontroute)
                so->so_options &= ~SO_DONTROUTE;
            clen = 0;
            control = 0;
            top = 0;
            mp = &top;
            if (error)
                goto release;
        } while (resid && space > 0);
    } while (resid);

release:
    sbunlock(&so->so_snd);
out:
    if (top)
        m_freem(top);
    if (control)
        m_freem(control);

    KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_END,
        so,
        resid,
        so->so_snd.sb_cc,
        space,
        error);

    return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*. In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain. The uio is then used
 * only for the count in uio_resid.
 */
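/*
 * Illustrative record layout (a sketch based on the description above,
 * not original commentary):
 *
 *     record:  [MT_SONAME] -> [MT_CONTROL ...] -> [MT_DATA ...]   (m_next)
 *                  |
 *                  m_nextpkt ---> next record
 *
 * The address mbuf is present only for PR_ADDR protocols, and the
 * control mbufs are optional.
 */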
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
    register struct socket *so;
    struct sockaddr **psa;
    struct uio *uio;
    struct mbuf **mp0;
    struct mbuf **controlp;
    int *flagsp;
{
    register struct mbuf *m, **mp;
    register int flags, len, error, s, offset;
    struct protosw *pr = so->so_proto;
    struct mbuf *nextrecord;
    int moff, type = 0;
    int orig_resid = uio->uio_resid;
    struct kextcb *kp;

    KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START,
        so,
        uio->uio_resid,
        so->so_rcv.sb_cc,
        so->so_rcv.sb_lowat,
        so->so_rcv.sb_hiwat);

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_soreceive) {
            error = (*kp->e_soif->sf_soreceive)(so, psa, &uio,
                mp0, controlp, flagsp, kp);
            if (error)
                return ((error == EJUSTRETURN) ? 0 : error);
        }
        kp = kp->e_next;
    }

    mp = mp0;
    if (psa)
        *psa = 0;
    if (controlp)
        *controlp = 0;
    if (flagsp)
        flags = *flagsp & ~MSG_EOR;
    else
        flags = 0;
    /*
     * When SO_WANTOOBFLAG is set we try to get out-of-band data
     * regardless of the flags argument. Here is the case where
     * out-of-band data is not inline.
     */
    if ((flags & MSG_OOB) ||
        ((so->so_options & SO_WANTOOBFLAG) != 0 &&
        (so->so_options & SO_OOBINLINE) == 0 &&
        (so->so_oobmark || (so->so_state & SS_RCVATMARK)))) {
        m = m_get(M_WAIT, MT_DATA);
        error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
        if (error)
            goto bad;
        do {
            error = uiomove(mtod(m, caddr_t),
                (int) min(uio->uio_resid, m->m_len), uio);
            m = m_free(m);
        } while (uio->uio_resid && error == 0 && m);
bad:
        if (m)
            m_freem(m);
        if ((so->so_options & SO_WANTOOBFLAG) != 0) {
            if (error == EWOULDBLOCK || error == EINVAL) {
                /*
                 * Let's try to get normal data:
                 * EWOULDBLOCK: out-of-band data not received yet;
                 * EINVAL: out-of-band data already read.
                 */
                error = 0;
                goto nooob;
            } else if (error == 0 && flagsp)
                *flagsp |= MSG_OOB;
        }
        KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error, 0, 0, 0, 0);
        return (error);
    }
nooob:
    if (mp)
        *mp = (struct mbuf *)0;
    if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
        (*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
    if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags)))) {
        KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error, 0, 0, 0, 0);
        return (error);
    }
    s = splnet();

    m = so->so_rcv.sb_mb;
    /*
     * If we have less data than requested, block awaiting more
     * (subject to any timeout) if:
     *   1. the current count is less than the low water mark, or
     *   2. MSG_WAITALL is set, and it is possible to do the entire
     *      receive operation at once if we block (resid <= hiwat), and
     *   3. MSG_DONTWAIT is not set.
     * If MSG_WAITALL is set but resid is larger than the receive buffer,
     * we have to do the receive in sections, and thus risk returning
     * a short count if a timeout or signal occurs after we start.
     */
    if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
        so->so_rcv.sb_cc < uio->uio_resid) &&
        (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
        ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
        m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
        KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
        if (so->so_error) {
            if (m)
                goto dontblock;
            error = so->so_error;
            if ((flags & MSG_PEEK) == 0)
                so->so_error = 0;
            goto release;
        }
        if (so->so_state & SS_CANTRCVMORE) {
            if (m)
                goto dontblock;
            else
                goto release;
        }
        for (; m; m = m->m_next)
            if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
                m = so->so_rcv.sb_mb;
                goto dontblock;
            }
        if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
            (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
            error = ENOTCONN;
            goto release;
        }
        if (uio->uio_resid == 0)
            goto release;
        if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
            error = EWOULDBLOCK;
            goto release;
        }
        sbunlock(&so->so_rcv);
        if (socket_debug)
            printf("Waiting for socket data\n");
        error = sbwait(&so->so_rcv);
        if (socket_debug)
            printf("SORECEIVE - sbwait returned %d\n", error);
        splx(s);
        if (error) {
            KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error, 0, 0, 0, 0);
            return (error);
        }
        goto restart;
    }
dontblock:
#ifdef notyet /* XXXX */
    if (uio->uio_procp)
        uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
#endif
    nextrecord = m->m_nextpkt;
    if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) {
        KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
        orig_resid = 0;
        if (psa)
            *psa = dup_sockaddr(mtod(m, struct sockaddr *),
                mp0 == 0);
        if (flags & MSG_PEEK) {
            m = m->m_next;
        } else {
            sbfree(&so->so_rcv, m);
            MFREE(m, so->so_rcv.sb_mb);
            m = so->so_rcv.sb_mb;
        }
    }
    while (m && m->m_type == MT_CONTROL && error == 0) {
        if (flags & MSG_PEEK) {
            if (controlp)
                *controlp = m_copy(m, 0, m->m_len);
            m = m->m_next;
        } else {
            sbfree(&so->so_rcv, m);
            if (controlp) {
                if (pr->pr_domain->dom_externalize &&
                    mtod(m, struct cmsghdr *)->cmsg_type ==
                    SCM_RIGHTS)
                    error = (*pr->pr_domain->dom_externalize)(m);
                *controlp = m;
                so->so_rcv.sb_mb = m->m_next;
                m->m_next = 0;
                m = so->so_rcv.sb_mb;
            } else {
                MFREE(m, so->so_rcv.sb_mb);
                m = so->so_rcv.sb_mb;
            }
        }
        if (controlp) {
            orig_resid = 0;
            controlp = &(*controlp)->m_next;
        }
    }
    if (m) {
        if ((flags & MSG_PEEK) == 0)
            m->m_nextpkt = nextrecord;
        type = m->m_type;
        if (type == MT_OOBDATA)
            flags |= MSG_OOB;
    }
    moff = 0;
    offset = 0;
    while (m && uio->uio_resid > 0 && error == 0) {
        if (m->m_type == MT_OOBDATA) {
            if (type != MT_OOBDATA)
                break;
        } else if (type == MT_OOBDATA)
            break;
#if 0
        /*
         * This assertion needs rework. The trouble is that AppleTalk
         * uses many mbuf types (NOT listed in mbuf.h!) which will
         * trigger this panic.
         * For now just remove the assertion... CSM 9/98
         */
        else
            KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
                ("receive 3"));
#endif
        /*
         * Make sure to always set the MSG_OOB flag when getting
         * out of band data inline.
         */
        if ((so->so_options & SO_WANTOOBFLAG) != 0 &&
            (so->so_options & SO_OOBINLINE) != 0 &&
            (so->so_state & SS_RCVATMARK) != 0) {
            flags |= MSG_OOB;
        }
        so->so_state &= ~SS_RCVATMARK;
        len = uio->uio_resid;
        if (so->so_oobmark && len > so->so_oobmark - offset)
            len = so->so_oobmark - offset;
        if (len > m->m_len - moff)
            len = m->m_len - moff;
        /*
         * If mp is set, just pass back the mbufs.
         * Otherwise copy them out via the uio, then free.
         * Sockbuf must be consistent here (points to current mbuf,
         * it points to next record) when we drop priority;
         * we must note any additions to the sockbuf when we
         * block interrupts again.
         */
        if (mp == 0) {
            splx(s);
            error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
            s = splnet();
            if (error)
                goto release;
        } else
            uio->uio_resid -= len;
        if (len == m->m_len - moff) {
            if (m->m_flags & M_EOR)
                flags |= MSG_EOR;
            if (flags & MSG_PEEK) {
                m = m->m_next;
                moff = 0;
            } else {
                nextrecord = m->m_nextpkt;
                sbfree(&so->so_rcv, m);
                if (mp) {
                    *mp = m;
                    mp = &m->m_next;
                    so->so_rcv.sb_mb = m = m->m_next;
                    *mp = (struct mbuf *)0;
                } else {
                    MFREE(m, so->so_rcv.sb_mb);
                    m = so->so_rcv.sb_mb;
                }
                if (m)
                    m->m_nextpkt = nextrecord;
            }
        } else {
            if (flags & MSG_PEEK)
                moff += len;
            else {
                if (mp)
                    *mp = m_copym(m, 0, len, M_WAIT);
                m->m_data += len;
                m->m_len -= len;
                so->so_rcv.sb_cc -= len;
            }
        }
        if (so->so_oobmark) {
            if ((flags & MSG_PEEK) == 0) {
                so->so_oobmark -= len;
                if (so->so_oobmark == 0) {
                    so->so_state |= SS_RCVATMARK;
                    postevent(so, 0, EV_OOB);
                    break;
                }
            } else {
                offset += len;
                if (offset == so->so_oobmark)
                    break;
            }
        }
        if (flags & MSG_EOR)
            break;
        /*
         * If the MSG_WAITALL flag is set (for non-atomic socket),
         * we must not quit until "uio->uio_resid == 0" or an error
         * termination. If a signal/timeout occurs, return
         * with a short count but without error.
         * Keep sockbuf locked against other readers.
         */
        while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
            !sosendallatonce(so) && !nextrecord) {
            if (so->so_error || so->so_state & SS_CANTRCVMORE)
                break;
            error = sbwait(&so->so_rcv);
            if (error) {
                sbunlock(&so->so_rcv);
                splx(s);
                KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, 0, 0, 0, 0, 0);
                return (0);
            }
            m = so->so_rcv.sb_mb;
            if (m)
                nextrecord = m->m_nextpkt;
        }
    }

    if (m && pr->pr_flags & PR_ATOMIC) {
        if (so->so_options & SO_DONTTRUNC)
            flags |= MSG_RCVMORE;
        else {
            flags |= MSG_TRUNC;
            if ((flags & MSG_PEEK) == 0)
                (void) sbdroprecord(&so->so_rcv);
        }
    }
    if ((flags & MSG_PEEK) == 0) {
        if (m == 0)
            so->so_rcv.sb_mb = nextrecord;
        if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
            (*pr->pr_usrreqs->pru_rcvd)(so, flags);
    }
    if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0)
        flags |= MSG_HAVEMORE;
    if (orig_resid == uio->uio_resid && orig_resid &&
        (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
        sbunlock(&so->so_rcv);
        splx(s);
        goto restart;
    }

    if (flagsp)
        *flagsp |= flags;
release:
    sbunlock(&so->so_rcv);
    splx(s);

    KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END,
        so,
        uio->uio_resid,
        so->so_rcv.sb_cc,
        0,
        error);

    return (error);
}

int
soshutdown(so, how)
    register struct socket *so;
    register int how;
{
    register struct protosw *pr = so->so_proto;
    struct kextcb *kp;
    int ret;

    KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_START, 0, 0, 0, 0, 0);
    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_soshutdown) {
            ret = (*kp->e_soif->sf_soshutdown)(so, how, kp);
            if (ret)
                return ((ret == EJUSTRETURN) ? 0 : ret);
        }
        kp = kp->e_next;
    }

    how++;
    if (how & FREAD) {
        sorflush(so);
        postevent(so, 0, EV_RCLOSED);
    }
    if (how & FWRITE) {
        ret = ((*pr->pr_usrreqs->pru_shutdown)(so));
        postevent(so, 0, EV_WCLOSED);
        KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0, 0, 0, 0, 0);
        return (ret);
    }

    KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0, 0, 0, 0, 0);
    return (0);
}

void
sorflush(so)
    register struct socket *so;
{
    register struct sockbuf *sb = &so->so_rcv;
    register struct protosw *pr = so->so_proto;
    register int s, error;
    struct sockbuf asb;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_sorflush) {
            if ((*kp->e_soif->sf_sorflush)(so, kp))
                return;
        }
        kp = kp->e_next;
    }

    sb->sb_flags |= SB_NOINTR;
    (void) sblock(sb, M_WAIT);
    s = splimp();
    socantrcvmore(so);
    sbunlock(sb);
    asb = *sb;
    bzero((caddr_t)sb, sizeof (*sb));
    splx(s);
    if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
        (*pr->pr_domain->dom_dispose)(asb.sb_mb);
    sbrelease(&asb);
}

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
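/*
 * Illustrative use from a pr_ctloutput()-style handler (a sketch, not
 * original commentary); "optval" here is just a local int:
 *
 *     int optval;
 *     error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *     if (error == 0)
 *             ... apply optval ...
 *
 * Passing len == minlen reads exactly sizeof optval bytes, rejecting a
 * smaller user buffer (EINVAL) and ignoring any excess.
 */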
int
sooptcopyin(sopt, buf, len, minlen)
    struct sockopt *sopt;
    void *buf;
    size_t len;
    size_t minlen;
{
    size_t valsize;

    /*
     * If the user gives us more than we wanted, we ignore it,
     * but if we don't get the minimum length the caller
     * wants, we return EINVAL. On success, sopt->sopt_valsize
     * is set to however much we actually retrieved.
     */
    if ((valsize = sopt->sopt_valsize) < minlen)
        return EINVAL;
    if (valsize > len)
        sopt->sopt_valsize = valsize = len;

    if (sopt->sopt_p != 0)
        return (copyin(sopt->sopt_val, buf, valsize));

    bcopy(sopt->sopt_val, buf, valsize);
    return 0;
}

int
sosetopt(so, sopt)
    struct socket *so;
    struct sockopt *sopt;
{
    int error, optval;
    struct linger l;
    struct timeval tv;
    short val;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_socontrol) {
            error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
            if (error)
                return ((error == EJUSTRETURN) ? 0 : error);
        }
        kp = kp->e_next;
    }

    error = 0;
    if (sopt->sopt_level != SOL_SOCKET) {
        if (so->so_proto && so->so_proto->pr_ctloutput)
            return ((*so->so_proto->pr_ctloutput)(so, sopt));
        error = ENOPROTOOPT;
    } else {
        switch (sopt->sopt_name) {
        case SO_LINGER:
            error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
            if (error)
                goto bad;

            so->so_linger = l.l_linger;
            if (l.l_onoff)
                so->so_options |= SO_LINGER;
            else
                so->so_options &= ~SO_LINGER;
            break;

        case SO_DEBUG:
        case SO_KEEPALIVE:
        case SO_DONTROUTE:
        case SO_USELOOPBACK:
        case SO_BROADCAST:
        case SO_REUSEADDR:
        case SO_REUSEPORT:
        case SO_OOBINLINE:
        case SO_TIMESTAMP:
        case SO_DONTTRUNC:
        case SO_WANTMORE:
        case SO_WANTOOBFLAG:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                goto bad;
            if (optval)
                so->so_options |= sopt->sopt_name;
            else
                so->so_options &= ~sopt->sopt_name;
            break;

        case SO_SNDBUF:
        case SO_RCVBUF:
        case SO_SNDLOWAT:
        case SO_RCVLOWAT:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                goto bad;

            /*
             * Values < 1 make no sense for any of these
             * options, so disallow them.
             */
            if (optval < 1) {
                error = EINVAL;
                goto bad;
            }

            switch (sopt->sopt_name) {
            case SO_SNDBUF:
            case SO_RCVBUF:
                if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
                    &so->so_snd : &so->so_rcv,
                    (u_long) optval) == 0) {
                    error = ENOBUFS;
                    goto bad;
                }
                break;

            /*
             * Make sure the low-water is never greater than
             * the high-water.
             */
            case SO_SNDLOWAT:
                so->so_snd.sb_lowat =
                    (optval > so->so_snd.sb_hiwat) ?
                    so->so_snd.sb_hiwat : optval;
                break;
            case SO_RCVLOWAT:
                so->so_rcv.sb_lowat =
                    (optval > so->so_rcv.sb_hiwat) ?
                    so->so_rcv.sb_hiwat : optval;
                break;
            }
            break;

        case SO_SNDTIMEO:
        case SO_RCVTIMEO:
            error = sooptcopyin(sopt, &tv, sizeof tv,
                sizeof tv);
            if (error)
                goto bad;

            if (tv.tv_sec > SHRT_MAX / hz - hz) {
                error = EDOM;
                goto bad;
            }
            /*
             * Convert to clock ticks; e.g. with hz = 100 and
             * tick = 10000 us, a 1.5 s timeout becomes 150 ticks.
             */
            val = tv.tv_sec * hz + tv.tv_usec / tick;

            switch (sopt->sopt_name) {
            case SO_SNDTIMEO:
                so->so_snd.sb_timeo = val;
                break;
            case SO_RCVTIMEO:
                so->so_rcv.sb_timeo = val;
                break;
            }
            break;

        case SO_NKE:
        {
            struct so_nke nke;
            struct NFDescriptor *nf1, *nf2 = NULL;

            error = sooptcopyin(sopt, &nke,
                sizeof nke, sizeof nke);
            if (error)
                goto bad;

            error = nke_insert(so, &nke);
            break;
        }

        default:
            error = ENOPROTOOPT;
            break;
        }
        if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
            (void) ((*so->so_proto->pr_ctloutput)(so, sopt));
        }
    }
bad:
    return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(sopt, buf, len)
    struct sockopt *sopt;
    void *buf;
    size_t len;
{
    int error;
    size_t valsize;

    error = 0;

    /*
     * Documented get behavior is that we always return a value,
     * possibly truncated to fit in the user's buffer.
     * Traditional behavior is that we always tell the user
     * precisely how much we copied, rather than something useful
     * like the total amount we had available for her.
     * Note that this interface is not idempotent; the entire answer
     * must be generated ahead of time.
     */
    valsize = min(len, sopt->sopt_valsize);
    sopt->sopt_valsize = valsize;
    if (sopt->sopt_val != 0) {
        if (sopt->sopt_p != 0)
            error = copyout(buf, sopt->sopt_val, valsize);
        else
            bcopy(buf, sopt->sopt_val, valsize);
    }
    return error;
}

int
sogetopt(so, sopt)
    struct socket *so;
    struct sockopt *sopt;
{
    int error, optval;
    struct linger l;
    struct timeval tv;
    struct mbuf *m;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_socontrol) {
            error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
            if (error)
                return ((error == EJUSTRETURN) ? 0 : error);
        }
        kp = kp->e_next;
    }

    error = 0;
    if (sopt->sopt_level != SOL_SOCKET) {
        if (so->so_proto && so->so_proto->pr_ctloutput) {
            return ((*so->so_proto->pr_ctloutput)(so, sopt));
        } else
            return (ENOPROTOOPT);
    } else {
        switch (sopt->sopt_name) {
        case SO_LINGER:
            l.l_onoff = so->so_options & SO_LINGER;
            l.l_linger = so->so_linger;
            error = sooptcopyout(sopt, &l, sizeof l);
            break;

        case SO_USELOOPBACK:
        case SO_DONTROUTE:
        case SO_DEBUG:
        case SO_KEEPALIVE:
        case SO_REUSEADDR:
        case SO_REUSEPORT:
        case SO_BROADCAST:
        case SO_OOBINLINE:
        case SO_TIMESTAMP:
        case SO_DONTTRUNC:
        case SO_WANTMORE:
        case SO_WANTOOBFLAG:
            optval = so->so_options & sopt->sopt_name;
integer:
            error = sooptcopyout(sopt, &optval, sizeof optval);
            break;

        case SO_TYPE:
            optval = so->so_type;
            goto integer;

        case SO_NREAD:
        {
            int pkt_total;
            struct mbuf *m1;

            pkt_total = 0;
            m1 = so->so_rcv.sb_mb;
            if (so->so_proto->pr_flags & PR_ATOMIC) {
#if 0
                kprintf("SKT CC: %d\n", so->so_rcv.sb_cc);
#endif
                while (m1) {
                    if (m1->m_type == MT_DATA)
                        pkt_total += m1->m_len;
#if 0
                    kprintf("CNT: %d/%d\n", m1->m_len, pkt_total);
#endif
                    m1 = m1->m_next;
                }
                optval = pkt_total;
            } else
                optval = so->so_rcv.sb_cc;
#if 0
            kprintf("RTN: %d\n", optval);
#endif
            goto integer;
        }
        case SO_ERROR:
            optval = so->so_error;
            so->so_error = 0;
            goto integer;

        case SO_SNDBUF:
            optval = so->so_snd.sb_hiwat;
            goto integer;

        case SO_RCVBUF:
            optval = so->so_rcv.sb_hiwat;
            goto integer;

        case SO_SNDLOWAT:
            optval = so->so_snd.sb_lowat;
            goto integer;

        case SO_RCVLOWAT:
            optval = so->so_rcv.sb_lowat;
            goto integer;

        case SO_SNDTIMEO:
        case SO_RCVTIMEO:
            optval = (sopt->sopt_name == SO_SNDTIMEO ?
                so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

            tv.tv_sec = optval / hz;
            tv.tv_usec = (optval % hz) * tick;
            error = sooptcopyout(sopt, &tv, sizeof tv);
            break;

        default:
            error = ENOPROTOOPT;
            break;
        }
        return (error);
    }
}

void
sohasoutofband(so)
    register struct socket *so;
{
    struct proc *p;
    struct kextcb *kp;

    kp = sotokextcb(so);
    while (kp) {
        if (kp->e_soif && kp->e_soif->sf_sohasoutofband) {
            if ((*kp->e_soif->sf_sohasoutofband)(so, kp))
                return;
        }
        kp = kp->e_next;
    }
    if (so->so_pgid < 0)
        gsignal(-so->so_pgid, SIGURG);
    else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
        psignal(p, SIGURG);
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
    selwakeup(&so->so_rcv.sb_sel);
    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
}

/*
 * Network filter support
 */
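/*
 * Illustrative view of the per-socket filter chain (a sketch inferred
 * from sfilter_init() below, not original commentary): so->so_ext heads
 * a singly linked list of kextcb blocks, one per NFDescriptor on the
 * protocol's pr_sfilter list:
 *
 *     so->so_ext -> kextcb{e_soif, e_sout, e_fcb} -> kextcb -> NULL
 *
 * Each dispatch site above walks this list via kp->e_next.
 */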
/* Run the list of filters, creating extension control blocks */
int
sfilter_init(register struct socket *so)
{
    struct kextcb *kp, **kpp;
    struct protosw *prp;
    struct NFDescriptor *nfp;

    prp = so->so_proto;
    nfp = prp->pr_sfilter.tqh_first;	/* non-null */
    kpp = &so->so_ext;
    kp = NULL;
    while (nfp) {
        MALLOC(kp, struct kextcb *, sizeof(*kp),
            M_TEMP, M_WAITOK);
        if (kp == NULL)
            return (ENOBUFS);	/* so_free will clean up */
        *kpp = kp;
        kpp = &kp->e_next;
        kp->e_next = NULL;
        kp->e_fcb = NULL;
        kp->e_nfd = nfp;
        kp->e_soif = nfp->nf_soif;
        kp->e_sout = nfp->nf_soutil;
        /*
         * Ignore return value for create
         * Everyone gets a chance at startup
         */
        if (kp->e_soif && kp->e_soif->sf_socreate)
            (*kp->e_soif->sf_socreate)(so, prp, kp);
        nfp = nfp->nf_next.tqe_next;
    }
    return (0);
}

/*
 * Run the list of filters, freeing extension control blocks
 * Assumes the soif/soutil blocks have been handled.
 */
int
sfilter_term(struct socket *so)
{
    struct kextcb *kp, *kp1;

    kp = so->so_ext;
    while (kp) {
        kp1 = kp->e_next;
        /*
         * Ignore return code on termination; everyone must
         * get terminated.
         */
        if (kp->e_soif && kp->e_soif->sf_sofree)
            kp->e_soif->sf_sofree(so, kp);
        FREE(kp, M_TEMP);
        kp = kp1;
    }
    return (0);
}

int
sopoll(struct socket *so, int events, struct ucred *cred)
{
    struct proc *p = current_proc();
    int revents = 0;
    int s = splnet();

    if (events & (POLLIN | POLLRDNORM))
        if (soreadable(so))
            revents |= events & (POLLIN | POLLRDNORM);

    if (events & (POLLOUT | POLLWRNORM))
        if (sowriteable(so))
            revents |= events & (POLLOUT | POLLWRNORM);

    if (events & (POLLPRI | POLLRDBAND))
        if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
            revents |= events & (POLLPRI | POLLRDBAND);

    if (revents == 0) {
        if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
            selrecord(p, &so->so_rcv.sb_sel);
            so->so_rcv.sb_sel.si_flags |= SI_SBSEL;
        }

        if (events & (POLLOUT | POLLWRNORM)) {
            selrecord(p, &so->so_snd.sb_sel);
            so->so_snd.sb_sel.si_flags |= SI_SBSEL;
        }
    }

    splx(s);
    return (revents);
}

/* #### IPv6 Integration. Added new routines */
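/*
 * Illustrative flow for these helpers (a sketch, not original
 * commentary): a caller builds an mbuf chain sized to sopt_valsize with
 * sooptgetm(), fills it from the option buffer with sooptmcopyin(), lets
 * the protocol operate on the chain, and copies results back out with
 * sooptmcopyout().
 */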
int
sooptgetm(struct sockopt *sopt, struct mbuf **mp)
{
    struct mbuf *m, *m_prev;
    int sopt_size = sopt->sopt_valsize;

    MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
    if (m == 0)
        return ENOBUFS;
    if (sopt_size > MLEN) {
        MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
            m_free(m);
            return ENOBUFS;
        }
        m->m_len = min(MCLBYTES, sopt_size);
    } else {
        m->m_len = min(MLEN, sopt_size);
    }
    sopt_size -= m->m_len;
    *mp = m;
    m_prev = m;

    while (sopt_size) {
        MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
        if (m == 0) {
            m_freem(*mp);
            return ENOBUFS;
        }
        if (sopt_size > MLEN) {
            MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
            if ((m->m_flags & M_EXT) == 0) {
                m_freem(*mp);
                return ENOBUFS;
            }
            m->m_len = min(MCLBYTES, sopt_size);
        } else {
            m->m_len = min(MLEN, sopt_size);
        }
        sopt_size -= m->m_len;
        m_prev->m_next = m;
        m_prev = m;
    }
    return 0;
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
sooptmcopyin(struct sockopt *sopt, struct mbuf *m)
{
    struct mbuf *m0 = m;

    if (sopt->sopt_val == NULL)
        return 0;
    while (m != NULL && sopt->sopt_valsize >= m->m_len) {
        if (sopt->sopt_p != NULL) {
            int error;

            error = copyin(sopt->sopt_val, mtod(m, char *),
                m->m_len);
            if (error != 0) {
                m_freem(m0);
                return (error);
            }
        } else
            bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
        sopt->sopt_valsize -= m->m_len;
        sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
        m = m->m_next;
    }
    if (m != NULL)	/* should have been allocated large enough at ip6_sooptmcopyin() */
        panic("sooptmcopyin");
    return 0;
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
sooptmcopyout(struct sockopt *sopt, struct mbuf *m)
{
    struct mbuf *m0 = m;
    size_t valsize = 0;

    if (sopt->sopt_val == NULL)
        return 0;
    while (m != NULL && sopt->sopt_valsize >= m->m_len) {
        if (sopt->sopt_p != NULL) {
            int error;

            error = copyout(mtod(m, char *), sopt->sopt_val,
                m->m_len);
            if (error != 0) {
                m_freem(m0);
                return (error);
            }
        } else
            bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
        sopt->sopt_valsize -= m->m_len;
        sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
        valsize += m->m_len;
        m = m->m_next;
    }
    if (m != NULL) {
        /* enough soopt buffer should be given from user-land */
        m_freem(m0);
        return (EINVAL);
    }
    sopt->sopt_valsize = valsize;
    return 0;
}