/* bsd/kern/uipc_socket.c (xnu-344.21.73) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.16 2001/06/14 20:46:06 ume Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/ev.h>
#include <sys/kdebug.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <kern/zalloc.h>
#include <machine/limits.h>

int so_cache_hw = 0;
int so_cache_timeouts = 0;
int so_cache_max_freed = 0;
int cached_sock_count = 0;
struct socket *socket_cache_head = 0;
struct socket *socket_cache_tail = 0;
u_long so_cache_time = 0;
int so_cache_init_done = 0;
struct zone *so_cache_zone;
extern int get_inpcb_str_size();
extern int get_tcp_str_size();

int socket_debug = 0;
int socket_zone = M_SOCKET;
so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define DBG_LAYER_IN_BEG	NETDBG_CODE(DBG_NETSOCK, 0)
#define DBG_LAYER_IN_END	NETDBG_CODE(DBG_NETSOCK, 2)
#define DBG_LAYER_OUT_BEG	NETDBG_CODE(DBG_NETSOCK, 1)
#define DBG_LAYER_OUT_END	NETDBG_CODE(DBG_NETSOCK, 3)
#define DBG_FNC_SOSEND		NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 1)
#define DBG_FNC_SORECEIVE	NETDBG_CODE(DBG_NETSOCK, (8 << 8))
#define DBG_FNC_SOSHUTDOWN	NETDBG_CODE(DBG_NETSOCK, (9 << 8))


SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW, &somaxconn,
	   0, "");

/* Should we get a maximum also ??? */
static int sosendmaxchain = 65536;
static int sosendminchain = 16384;
SYSCTL_INT(_kern_ipc, OID_AUTO, sosendminchain, CTLFLAG_RW, &sosendminchain,
	   0, "");

void so_cache_timer();
struct mbuf *m_getpackets(int, int, int);

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

#ifdef __APPLE__
void socketinit()
{
	vm_size_t str_size;

	so_cache_init_done = 1;

	timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));
	str_size = (vm_size_t)(sizeof(struct socket) + 4 +
			       get_inpcb_str_size() + 4 +
			       get_tcp_str_size());
	so_cache_zone = zinit(str_size, 120000 * str_size, 8192, "socache zone");
#if TEMPDEBUG
	kprintf("cached_sock_alloc -- so_cache_zone size is %x\n", str_size);
#endif
}
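
/*
 * Layout of one so_cache_zone element, as sized in socketinit() above
 * (sketch):
 *
 *	[ struct socket | pad | inpcb | pad | tcp pcb ]
 *
 * The two "+ 4" pads leave room for the longword alignment that
 * cached_sock_alloc() applies below when it carves the saved PCBs out
 * of the single block.
 */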

void cached_sock_alloc(so, waitok)
struct socket **so;
int waitok;
{
	caddr_t temp;
	int s;
	register u_long offset;

	s = splnet();
	if (cached_sock_count) {
		cached_sock_count--;
		*so = socket_cache_head;
		if (*so == 0)
			panic("cached_sock_alloc: cached sock is null");

		socket_cache_head = socket_cache_head->cache_next;
		if (socket_cache_head)
			socket_cache_head->cache_prev = 0;
		else
			socket_cache_tail = 0;
		splx(s);

		temp = (*so)->so_saved_pcb;
		bzero((caddr_t)*so, sizeof(struct socket));
#if TEMPDEBUG
		kprintf("cached_sock_alloc - retrieving cached sock %x - count == %d\n", *so,
			cached_sock_count);
#endif
		(*so)->so_saved_pcb = temp;
	}
	else {
#if TEMPDEBUG
		kprintf("Allocating cached sock %x from memory\n", *so);
#endif

		splx(s);
		if (waitok)
			*so = (struct socket *) zalloc(so_cache_zone);
		else
			*so = (struct socket *) zalloc_noblock(so_cache_zone);

		if (*so == 0)
			return;

		bzero((caddr_t)*so, sizeof(struct socket));

		/*
		 * Define offsets for extra structures into our single block of
		 * memory.  Align extra structures on longword boundaries.
		 */

		offset = (u_long) *so;
		offset += sizeof(struct socket);
		if (offset & 0x3) {
			offset += 4;
			offset &= 0xfffffffc;
		}
		(*so)->so_saved_pcb = (caddr_t) offset;
		offset += get_inpcb_str_size();
		if (offset & 0x3) {
			offset += 4;
			offset &= 0xfffffffc;
		}

		((struct inpcb *) (*so)->so_saved_pcb)->inp_saved_ppcb = (caddr_t) offset;
#if TEMPDEBUG
		kprintf("Allocating cached socket - %x, pcb=%x tcpcb=%x\n", *so,
			(*so)->so_saved_pcb,
			((struct inpcb *)(*so)->so_saved_pcb)->inp_saved_ppcb);
#endif
	}

	(*so)->cached_in_sock_layer = 1;
}


void cached_sock_free(so)
struct socket *so;
{
	int s;

	s = splnet();
	if (++cached_sock_count > MAX_CACHED_SOCKETS) {
		--cached_sock_count;
		splx(s);
#if TEMPDEBUG
		kprintf("Freeing overflowed cached socket %x\n", so);
#endif
		zfree(so_cache_zone, (vm_offset_t) so);
	}
	else {
#if TEMPDEBUG
		kprintf("Freeing socket %x into cache\n", so);
#endif
		if (so_cache_hw < cached_sock_count)
			so_cache_hw = cached_sock_count;

		so->cache_next = socket_cache_head;
		so->cache_prev = 0;
		if (socket_cache_head)
			socket_cache_head->cache_prev = so;
		else
			socket_cache_tail = so;

		so->cache_timestamp = so_cache_time;
		socket_cache_head = so;
		splx(s);
	}

#if TEMPDEBUG
	kprintf("Freed cached sock %x into cache - count is %d\n", so, cached_sock_count);
#endif
}

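/*
 * so_cache_timer() fires every SO_CACHE_FLUSH_INTERVAL ticks.  It ages
 * the cache from the tail (oldest entries first) and frees any socket
 * that has sat unused for SO_CACHE_TIME_LIMIT or longer, releasing at
 * most SO_CACHE_MAX_FREE_BATCH entries per pass so a single timeout
 * never does an unbounded amount of work.
 */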
void so_cache_timer()
{
	register struct socket *p;
	register int s;
	register int n_freed = 0;
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(network_flock, TRUE);

	++so_cache_time;

	s = splnet();

	while (p = socket_cache_tail) {
		if ((so_cache_time - p->cache_timestamp) < SO_CACHE_TIME_LIMIT)
			break;

		so_cache_timeouts++;

		if (socket_cache_tail = p->cache_prev)
			p->cache_prev->cache_next = 0;
		if (--cached_sock_count == 0)
			socket_cache_head = 0;

		splx(s);

		zfree(so_cache_zone, (vm_offset_t) p);

		s = splnet();
		if (++n_freed >= SO_CACHE_MAX_FREE_BATCH) {
			so_cache_max_freed++;
			break;
		}
	}
	splx(s);

	timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz));

	(void) thread_funnel_set(network_flock, FALSE);
}
#endif /* __APPLE__ */

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(waitok, dom, type)
	int waitok;
	int dom;
	int type;
{
	struct socket *so;

	if ((dom == PF_INET) && (type == SOCK_STREAM))
		cached_sock_alloc(&so, waitok);
	else {
		so = _MALLOC_ZONE(sizeof(*so), socket_zone, M_WAITOK);
		if (so)
			bzero(so, sizeof *so);
	}
	/* XXX race condition for reentrant kernel */

	if (so) {
		so->so_gencnt = ++so_gencnt;
		so->so_zone = socket_zone;
	}

	return so;
}

int
socreate(dom, aso, type, proto)
	int dom;
	struct socket **aso;
	register int type;
	int proto;
{
	struct proc *p = current_proc();
	register struct protosw *prp;
	register struct socket *so;
	register int error = 0;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);
#ifndef __APPLE__
	if (p->p_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}
#endif
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0, dom, type);
	if (so == 0)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;

#ifdef __APPLE__
	if (p != 0) {
		if (p->p_ucred->cr_uid == 0)
			so->so_state = SS_PRIV;

		so->so_uid = p->p_ucred->cr_uid;
	}
#else
	so->so_cred = p->p_ucred;
	crhold(so->so_cred);
#endif
	so->so_proto = prp;
#ifdef __APPLE__
	so->so_rcv.sb_flags |= SB_RECV;	/* XXX */
	if (prp->pr_sfilter.tqh_first)
		error = sfilter_init(so);
	if (error == 0)
#endif
		error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
#ifdef __APPLE__
	prp->pr_domain->dom_refs++;
	so->so_rcv.sb_so = so->so_snd.sb_so = so;
	TAILQ_INIT(&so->so_evlist);
#endif
	*aso = so;
	return (0);
}

int
sobind(so, nam)
	struct socket *so;
	struct sockaddr *nam;
{
	struct proc *p = current_proc();
	int error;
	struct kextcb *kp;
	int s = splnet();

	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
	if (error == 0) {
		kp = sotokextcb(so);
		while (kp) {
			if (kp->e_soif && kp->e_soif->sf_sobind) {
				error = (*kp->e_soif->sf_sobind)(so, nam, kp);
				if (error) {
					if (error == EJUSTRETURN) {
						error = 0;
						break;
					}
					splx(s);
					return(error);
				}
			}
			kp = kp->e_next;
		}
	}
	splx(s);
	return (error);
}

void
sodealloc(so)
	struct socket *so;
{
	so->so_gencnt = ++so_gencnt;

#ifndef __APPLE__
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	if (so->so_accf != NULL) {
		if (so->so_accf->so_accept_filter != NULL &&
		    so->so_accf->so_accept_filter->accf_destroy != NULL) {
			so->so_accf->so_accept_filter->accf_destroy(so);
		}
		if (so->so_accf->so_accept_filter_str != NULL)
			FREE(so->so_accf->so_accept_filter_str, M_ACCF);
		FREE(so->so_accf, M_ACCF);
	}
#endif /* INET */
	crfree(so->so_cred);
	zfreei(so->so_zone, so);
#else
	if (so->cached_in_sock_layer == 1)
		cached_sock_free(so);
	else
		_FREE_ZONE(so, sizeof(*so), so->so_zone);
#endif /* __APPLE__ */
}

int
solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	struct kextcb *kp;
	struct proc *p = current_proc();
	int s, error;

	s = splnet();
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
	if (error) {
		splx(s);
		return (error);
	}
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	kp = sotokextcb(so);
	while (kp) {
		if (kp->e_soif && kp->e_soif->sf_solisten) {
			error = (*kp->e_soif->sf_solisten)(so, kp);
			if (error) {
				if (error == EJUSTRETURN) {
					error = 0;
					break;
				}
				splx(s);
				return(error);
			}
		}
		kp = kp->e_next;
	}

	splx(s);
	return (0);
}


void
sofree(so)
	register struct socket *so;
{
	int error;
	struct kextcb *kp;
	struct socket *head = so->so_head;

	kp = sotokextcb(so);
	while (kp) {
		if (kp->e_soif && kp->e_soif->sf_sofree) {
			error = (*kp->e_soif->sf_sofree)(so, kp);
			if (error) {
				selthreadclear(&so->so_snd.sb_sel);
				selthreadclear(&so->so_rcv.sb_sel);
				return;	/* void fn */
			}
		}
		kp = kp->e_next;
	}

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
#ifdef __APPLE__
		selthreadclear(&so->so_snd.sb_sel);
		selthreadclear(&so->so_rcv.sb_sel);
#endif
		return;
	}
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
#ifdef __APPLE__
			selthreadclear(&so->so_snd.sb_sel);
			selthreadclear(&so->so_rcv.sb_sel);
#endif
			return;
		} else {
			panic("sofree: not queued");
		}
		head->so_qlen--;
		so->so_state &= ~SS_INCOMP;
		so->so_head = NULL;
	}
#ifdef __APPLE__
	selthreadclear(&so->so_snd.sb_sel);
	sbrelease(&so->so_snd);
#endif
	sorflush(so);
	sfilter_term(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;
	struct kextcb *kp;

#ifndef __APPLE__
	funsetown(so->so_sigio);
#endif
	kp = sotokextcb(so);
	while (kp) {
		if (kp->e_soif && kp->e_soif->sf_soclose) {
			error = (*kp->e_soif->sf_soclose)(so, kp);
			if (error) {
				splx(s);
				return((error == EJUSTRETURN) ? 0 : error);
			}
		}
		kp = kp->e_next;
	}

	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;

		sp = TAILQ_FIRST(&so->so_incomp);
		for (; sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			(void) soabort(sp);
		}
		for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_state &= ~SS_COMP;
			sp->so_head = NULL;
			(void) soabort(sp);
		}
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_pcb && so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
#ifdef __APPLE__
	so->so_proto->pr_domain->dom_refs--;
	evsofree(so);
#endif
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
int
soabort(so)
	struct socket *so;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
	if (error) {
		sofree(so);
		return error;
	}
	return (0);
}

int
soaccept(so, nam)
	register struct socket *so;
	struct sockaddr **nam;
{
	int s = splnet();
	int error;
	struct kextcb *kp;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	if (error == 0) {
		kp = sotokextcb(so);
		while (kp) {
			if (kp->e_soif && kp->e_soif->sf_soaccept) {
				error = (*kp->e_soif->sf_soaccept)(so, nam, kp);
				if (error) {
					if (error == EJUSTRETURN) {
						error = 0;
						break;
					}
					splx(s);
					return(error);
				}
			}
			kp = kp->e_next;
		}
	}

	splx(s);
	return (error);
}

int
soconnect(so, nam)
	register struct socket *so;
	struct sockaddr *nam;
{
	int s;
	int error;
	struct proc *p = current_proc();
	struct kextcb *kp;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else {
		/*
		 * Run connect filter before calling protocol:
		 *  - non-blocking connect returns before completion;
		 *  - allows filters to modify address.
		 */
		kp = sotokextcb(so);
		while (kp) {
			if (kp->e_soif && kp->e_soif->sf_soconnect) {
				error = (*kp->e_soif->sf_soconnect)(so, nam, kp);
				if (error) {
					if (error == EJUSTRETURN) {
						error = 0;
					}
					splx(s);
					return(error);
				}
			}
			kp = kp->e_next;
		}
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
	}
	splx(s);
	return (error);
}

int
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;
	struct kextcb *kp;

	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
	if (error == 0) {
		kp = sotokextcb(so1);
		while (kp) {
			if (kp->e_soif && kp->e_soif->sf_soconnect2) {
				error = (*kp->e_soif->sf_soconnect2)(so1, so2, kp);
				if (error) {
					if (error == EJUSTRETURN) {
						error = 0;
						break;
					}
					splx(s);
					return(error);
				}
			}
			kp = kp->e_next;
		}
	}
	splx(s);
	return (error);
}

int
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;
	struct kextcb *kp;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	if (error == 0) {
		kp = sotokextcb(so);
		while (kp) {
			if (kp->e_soif && kp->e_soif->sf_sodisconnect) {
				error = (*kp->e_soif->sf_sodisconnect)(so, kp);
				if (error) {
					if (error == EJUSTRETURN) {
						error = 0;
						break;
					}
					splx(s);
					return(error);
				}
			}
			kp = kp->e_next;
		}
	}

bad:
	splx(s);
	return (error);
}

#define SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_DONTWAIT : M_WAIT)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 * Experiment:
 * MSG_HOLD: go thru most of sosend(), but just enqueue the mbuf
 * MSG_SEND: go thru as for MSG_HOLD on current fragment, then
 *	point at the mbuf chain being constructed and go from there.
 */
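/*
 * Callers normally reach this routine through the protocol switch
 * rather than directly; a sketch of the dispatch (error handling
 * elided, and assuming pru_sosend mirrors sosend's argument list):
 *
 *	(*so->so_proto->pr_usrreqs->pru_sosend)(so, addr, uio, 0, 0, flags);
 *
 * which resolves to sosend() for most protocols.
 */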
int
sosend(so, addr, uio, top, control, flags)
	register struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
{
	struct mbuf **mp;
	register struct mbuf *m, *freelist = NULL;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen, sendflags;
	int atomic = sosendallatonce(so) || top;
	struct proc *p = current_proc();
	struct kextcb *kp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;

	KERNEL_DEBUG((DBG_FNC_SOSEND | DBG_FUNC_START),
		     so,
		     resid,
		     so->so_snd.sb_cc,
		     so->so_snd.sb_lowat,
		     so->so_snd.sb_hiwat);

	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (p)
		p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0 && !(flags&MSG_HOLD))
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;

		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				boolean_t dropped_funnel = FALSE;
				int chainlength;
				int bytes_to_copy;

				bytes_to_copy = min(resid, space);

				if (sosendminchain > 0) {
					if (bytes_to_copy >= sosendminchain) {
						dropped_funnel = TRUE;
						(void)thread_funnel_set(network_flock, FALSE);
					}
					chainlength = 0;
				} else
					chainlength = sosendmaxchain;

				do {
					if (bytes_to_copy >= MINCLSIZE) {
						/*
						 * Try to maintain a local cache of mbuf clusters needed
						 * to complete this write.  The list is further limited to
						 * the number currently needed to fill the socket.  This
						 * mechanism allows a large number of mbufs/clusters to be
						 * grabbed under a single mbuf lock... if we can't get any
						 * clusters, then fall back to trying for mbufs.  If we
						 * fail early (or miscalculate the number needed) make
						 * sure to release any clusters we haven't yet consumed.
						 */
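						/*
						 * Worked example (assuming MCLBYTES == 2048): a 5000
						 * byte write computes num_needed = 2 below; the 904
						 * byte remainder is still >= MINCLSIZE, so a third
						 * cluster is requested from m_getpackets() up front.
						 */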
						if ((m = freelist) == NULL) {
							int num_needed;
							int hdrs_needed = 0;

							if (top == 0)
								hdrs_needed = 1;
							num_needed = bytes_to_copy / MCLBYTES;

							if ((bytes_to_copy - (num_needed * MCLBYTES)) >= MINCLSIZE)
								num_needed++;

							if ((freelist = m_getpackets(num_needed, hdrs_needed, M_WAIT)) == NULL)
								goto getpackets_failed;
							m = freelist;
						}
						freelist = m->m_next;
						m->m_next = NULL;

						mlen = MCLBYTES;
						len = min(mlen, bytes_to_copy);
					} else {
getpackets_failed:
						if (top == 0) {
							MGETHDR(m, M_WAIT, MT_DATA);
							mlen = MHLEN;
							m->m_pkthdr.len = 0;
							m->m_pkthdr.rcvif = (struct ifnet *)0;
						} else {
							MGET(m, M_WAIT, MT_DATA);
							mlen = MLEN;
						}
						len = min(mlen, bytes_to_copy);
						/*
						 * For datagram protocols, leave room
						 * for protocol headers in first mbuf.
						 */
						if (atomic && top == 0 && len < mlen)
							MH_ALIGN(m, len);
					}
					chainlength += len;

					space -= len;

					error = uiomove(mtod(m, caddr_t), (int)len, uio);

					resid = uio->uio_resid;

					m->m_len = len;
					*mp = m;
					top->m_pkthdr.len += len;
					if (error)
						break;
					mp = &m->m_next;
					if (resid <= 0) {
						if (flags & MSG_EOR)
							top->m_flags |= M_EOR;
						break;
					}
					bytes_to_copy = min(resid, space);

				} while (space > 0 && (chainlength < sosendmaxchain || atomic || resid < MINCLSIZE));

				if (dropped_funnel == TRUE)
					(void)thread_funnel_set(network_flock, TRUE);
				if (error)
					goto release;
			}

			if (flags & (MSG_HOLD|MSG_SEND)) {
				/* Enqueue for later, go away if HOLD */
				register struct mbuf *mb1;
				if (so->so_temp && (flags & MSG_FLUSH)) {
					m_freem(so->so_temp);
					so->so_temp = NULL;
				}
				if (so->so_temp)
					so->so_tail->m_next = top;
				else
					so->so_temp = top;
				mb1 = top;
				while (mb1->m_next)
					mb1 = mb1->m_next;
				so->so_tail = mb1;
				if (flags & MSG_HOLD) {
					top = NULL;
					goto release;
				}
				top = so->so_temp;
			}
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splnet();				/* XXX */
			/* Compute flags here, for pru_send and NKEs */
			sendflags = (flags & MSG_OOB) ? PRUS_OOB :
			    /*
			     * If the user set MSG_EOF, the protocol
			     * understands this flag and nothing left to
			     * send then use PRU_SEND_EOF instead of PRU_SEND.
			     */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
			    PRUS_EOF :
			    /* If there is more to send set PRUS_MORETOCOME */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0;
			kp = sotokextcb(so);
			while (kp) {
				if (kp->e_soif && kp->e_soif->sf_sosend) {
					error = (*kp->e_soif->sf_sosend)(so, &addr,
									 &uio, &top,
									 &control,
									 &sendflags,
									 kp);
					if (error) {
						splx(s);
						if (error == EJUSTRETURN) {
							sbunlock(&so->so_snd);

							if (freelist)
								m_freem_list(freelist);
							return(0);
						}
						goto release;
					}
				}
				kp = kp->e_next;
			}

			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    sendflags, top, addr, control, p);
			splx(s);
#ifdef __APPLE__
			if (flags & MSG_SEND)
				so->so_temp = NULL;
#endif
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = 0;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	if (freelist)
		m_freem_list(freelist);

	KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_END,
		     so,
		     resid,
		     so->so_snd.sb_cc,
		     space,
		     error);

	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
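/*
 * A record in the receive buffer therefore looks like (sketch):
 *
 *	MT_SONAME -> MT_CONTROL ... -> MT_DATA -> MT_DATA ...   (via m_next)
 *	     |
 *	 m_nextpkt
 *	     |
 *	next record
 *
 * where the MT_SONAME mbuf is present only for PR_ADDR protocols and
 * the MT_CONTROL mbufs are optional.
 */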
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct sockaddr **psa;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	register struct mbuf *m, **mp;
	register struct mbuf *free_list, *ml;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;
	struct kextcb *kp;

	KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START,
		     so,
		     uio->uio_resid,
		     so->so_rcv.sb_cc,
		     so->so_rcv.sb_lowat,
		     so->so_rcv.sb_hiwat);

	kp = sotokextcb(so);
	while (kp) {
		if (kp->e_soif && kp->e_soif->sf_soreceive) {
			error = (*kp->e_soif->sf_soreceive)(so, psa, &uio,
							    mp0, controlp,
							    flagsp, kp);
			if (error)
				return((error == EJUSTRETURN) ? 0 : error);
		}
		kp = kp->e_next;
	}

	mp = mp0;
	if (psa)
		*psa = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	/*
	 * When SO_WANTOOBFLAG is set we try to get out-of-band data
	 * regardless of the flags argument.  Here is the case where
	 * out-of-band data is not inline.
	 */
	if ((flags & MSG_OOB) ||
	    ((so->so_options & SO_WANTOOBFLAG) != 0 &&
	     (so->so_options & SO_OOBINLINE) == 0 &&
	     (so->so_oobmark || (so->so_state & SS_RCVATMARK)))) {
		m = m_get(M_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
#ifdef __APPLE__
		if ((so->so_options & SO_WANTOOBFLAG) != 0) {
			if (error == EWOULDBLOCK || error == EINVAL) {
				/*
				 * Let's try to get normal data:
				 *  EWOULDBLOCK: out-of-band data not received yet;
				 *  EINVAL: out-of-band data already read.
				 */
				error = 0;
				goto nooob;
			} else if (error == 0 && flagsp)
				*flagsp |= MSG_OOB;
		}
		KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0);
#endif
		return (error);
	}
nooob:
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error) {
		KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0);
		return (error);
	}
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		if (socket_debug)
			printf("Waiting for socket data\n");
		error = sbwait(&so->so_rcv);
		if (socket_debug)
			printf("SORECEIVE - sbwait returned %d\n", error);
		splx(s);
		if (error) {
			KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0);
			return (error);
		}
		goto restart;
	}
dontblock:
#ifndef __APPLE__
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
#endif
	nextrecord = m->m_nextpkt;
	if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *),
					    mp0 == 0);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;

	free_list = m;
	ml = (struct mbuf *)0;

	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifndef __APPLE__
		/*
		 * This assertion needs rework.  The trouble is AppleTalk
		 * uses many mbuf types (NOT listed in mbuf.h!) which will
		 * trigger this panic.  For now just remove the assertion...
		 * CSM 9/98
		 */
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
#else
		/*
		 * Make sure to always set MSG_OOB event when getting
		 * out of band data inline.
		 */
		if ((so->so_options & SO_WANTOOBFLAG) != 0 &&
		    (so->so_options & SO_OOBINLINE) != 0 &&
		    (so->so_state & SS_RCVATMARK) != 0) {
			flags |= MSG_OOB;
		}
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					m->m_nextpkt = 0;
					if (ml != 0)
						ml->m_next = m;
					ml = m;
					so->so_rcv.sb_mb = m = m->m_next;
					ml->m_next = 0;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					postevent(so, 0, EV_OOB);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;

			if (ml) {
				m_freem_list(free_list);
			}
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, 0,0,0,0,0);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m) {
				nextrecord = m->m_nextpkt;
				free_list = m;
			}
			ml = (struct mbuf *)0;
		}
	}
	if (ml) {
		m_freem_list(free_list);
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
#ifdef __APPLE__
		if (so->so_options & SO_DONTTRUNC)
			flags |= MSG_RCVMORE;
		else {
#endif
			flags |= MSG_TRUNC;
			if ((flags & MSG_PEEK) == 0)
				(void) sbdroprecord(&so->so_rcv);
#ifdef __APPLE__
		}
#endif
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
	}
#ifdef __APPLE__
	if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0)
		flags |= MSG_HAVEMORE;
#endif
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);

	KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END,
		     so,
		     uio->uio_resid,
		     so->so_rcv.sb_cc,
		     0,
		     error);

	return (error);
}

int
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;
	struct kextcb *kp;
	int ret;

	KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_START, 0,0,0,0,0);
	kp = sotokextcb(so);
	while (kp) {
		if (kp->e_soif && kp->e_soif->sf_soshutdown) {
			ret = (*kp->e_soif->sf_soshutdown)(so, how, kp);
			if (ret)
				return((ret == EJUSTRETURN) ? 0 : ret);
		}
		kp = kp->e_next;
	}

	if (how != SHUT_WR) {
		sorflush(so);
		postevent(so, 0, EV_RCLOSED);
	}
	if (how != SHUT_RD) {
		ret = ((*pr->pr_usrreqs->pru_shutdown)(so));
		postevent(so, 0, EV_WCLOSED);
		KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0,0,0,0,0);
		return(ret);
	}

	KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0,0,0,0,0);
	return (0);
}

void
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s, error;
	struct sockbuf asb;
	struct kextcb *kp;

	kp = sotokextcb(so);
	while (kp) {
		if (kp->e_soif && kp->e_soif->sf_sorflush) {
			if ((*kp->e_soif->sf_sorflush)(so, kp))
				return;
		}
		kp = kp->e_next;
	}

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAIT);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
#ifdef __APPLE__
	selthreadclear(&sb->sb_sel);
#endif
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
#ifndef __APPLE__
	if (asb.sb_flags & SB_KNOTE) {
		sb->sb_sel.si_note = asb.sb_sel.si_note;
		sb->sb_flags = SB_KNOTE;
	}
#endif
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);

	sbrelease(&asb);
}

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;
	void *buf;
	size_t len;
	size_t minlen;
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_p != 0)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
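/*
 * Typical use, as in the sosetopt() cases below (sketch):
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		goto bad;
 */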

int
sosetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	short val;
	struct kextcb *kp;

	if (sopt->sopt_dir != SOPT_SET) {
		sopt->sopt_dir = SOPT_SET;
	}

	kp = sotokextcb(so);
	while (kp) {
		if (kp->e_soif && kp->e_soif->sf_socontrol) {
			error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
			if (error)
				return((error == EJUSTRETURN) ? 0 : error);
		}
		kp = kp->e_next;
	}

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)(so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
#ifdef __APPLE__
		case SO_DONTTRUNC:
		case SO_WANTMORE:
		case SO_WANTOOBFLAG:
#endif
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
					      &so->so_snd : &so->so_rcv,
					      (u_long) optval) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			{
				long tmp = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
				if (tmp > SHRT_MAX) {
					error = EDOM;
					goto bad;
				}
				val = tmp;
			}
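			/*
			 * E.g. with hz == 100 (so tick == 10000 us per tick),
			 * a 2.5 second timeout becomes 2 * 100 + 500000 / 10000
			 * == 250 ticks; anything over SHRT_MAX ticks (about
			 * 327 seconds at that hz) is rejected with EDOM above.
			 */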

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;

		case SO_NKE:
		{
			struct so_nke nke;
			struct NFDescriptor *nf1, *nf2 = NULL;

			error = sooptcopyin(sopt, &nke,
					    sizeof nke, sizeof nke);
			if (error)
				goto bad;

			error = nke_insert(so, &nke);
			break;
		}

		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_flags |= SOF_NOSIGPIPE;
			else
				so->so_flags &= ~SOF_NOSIGPIPE;
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)(so, sopt));
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(sopt, buf, len)
	struct sockopt *sopt;
	void *buf;
	size_t len;
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_p != 0)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;
}
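/*
 * Typical use, as in the sogetopt() "integer" cases below (sketch):
 *
 *	optval = so->so_type;
 *	error = sooptcopyout(sopt, &optval, sizeof optval);
 */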

int
sogetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	struct mbuf *m;
	struct kextcb *kp;

	if (sopt->sopt_dir != SOPT_GET) {
		sopt->sopt_dir = SOPT_GET;
	}

	kp = sotokextcb(so);
	while (kp) {
		if (kp->e_soif && kp->e_soif->sf_socontrol) {
			error = (*kp->e_soif->sf_socontrol)(so, sopt, kp);
			if (error)
				return((error == EJUSTRETURN) ? 0 : error);
		}
		kp = kp->e_next;
	}

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
#ifdef __APPLE__
		case SO_DONTTRUNC:
		case SO_WANTMORE:
		case SO_WANTOOBFLAG:
#endif
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

#ifdef __APPLE__
		case SO_NREAD:
		{
			int pkt_total;
			struct mbuf *m1;

			pkt_total = 0;
			m1 = so->so_rcv.sb_mb;
			if (so->so_proto->pr_flags & PR_ATOMIC) {
#if 0
				kprintf("SKT CC: %d\n", so->so_rcv.sb_cc);
#endif
				while (m1) {
					if (m1->m_type == MT_DATA)
						pkt_total += m1->m_len;
#if 0
					kprintf("CNT: %d/%d\n", m1->m_len, pkt_total);
#endif
					m1 = m1->m_next;
				}
				optval = pkt_total;
			} else
				optval = so->so_rcv.sb_cc;
#if 0
			kprintf("RTN: %d\n", optval);
#endif
			goto integer;
		}
#endif
		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_NOSIGPIPE:
			optval = (so->so_flags & SOF_NOSIGPIPE);
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

#ifdef __APPLE__
/*
 * Network filter support
 */
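/*
 * Each socket carries a chain of struct kextcb hanging off so_ext, one
 * per installed filter (NFDescriptor).  The socket entry points above
 * walk this chain via sotokextcb() and let any filter short-circuit a
 * call by returning EJUSTRETURN.
 */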
/* Run the list of filters, creating extension control blocks */
int
sfilter_init(register struct socket *so)
{
	struct kextcb *kp, **kpp;
	struct protosw *prp;
	struct NFDescriptor *nfp;

	prp = so->so_proto;
	nfp = prp->pr_sfilter.tqh_first;	/* non-null */
	kpp = &so->so_ext;
	kp = NULL;
	while (nfp) {
		MALLOC(kp, struct kextcb *, sizeof(*kp),
		       M_TEMP, M_WAITOK);
		if (kp == NULL)
			return(ENOBUFS);	/* so_free will clean up */
		*kpp = kp;
		kpp = &kp->e_next;
		kp->e_next = NULL;
		kp->e_fcb = NULL;
		kp->e_nfd = nfp;
		kp->e_soif = nfp->nf_soif;
		kp->e_sout = nfp->nf_soutil;
		/*
		 * Ignore return value for create
		 * Everyone gets a chance at startup
		 */
		if (kp->e_soif && kp->e_soif->sf_socreate)
			(*kp->e_soif->sf_socreate)(so, prp, kp);
		nfp = nfp->nf_next.tqe_next;
	}
	return(0);
}

/*
 * Run the list of filters, freeing extension control blocks
 * Assumes the soif/soutil blocks have been handled.
 */
int
sfilter_term(struct socket *so)
{
	struct kextcb *kp, *kp1;

	kp = so->so_ext;
	while (kp) {
		kp1 = kp->e_next;
		/*
		 * Ignore return code on termination; everyone must
		 * get terminated.
		 */
		if (kp->e_soif && kp->e_soif->sf_sofree)
			kp->e_soif->sf_sofree(so, kp);
		FREE(kp, M_TEMP);
		kp = kp1;
	}
	return(0);
}
#endif /* __APPLE__ */

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
	if (m == 0)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == 0) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return 0;
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_p != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
				       m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL)	/* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("soopt_mcopyin");
	return 0;
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_p != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
					m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should be given from user-land */
		m_freem(m0);
		return(EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;
	struct kextcb *kp;

	kp = sotokextcb(so);
	while (kp) {
		if (kp->e_soif && kp->e_soif->sf_sohasoutofband) {
			if ((*kp->e_soif->sf_sohasoutofband)(so, kp))
				return;
		}
		kp = kp->e_next;
	}
	if (so->so_pgid < 0)
		gsignal(-so->so_pgid, SIGURG);
	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
		psignal(p, SIGURG);
	selwakeup(&so->so_rcv.sb_sel);
}

int
sopoll(struct socket *so, int events, struct ucred *cred, void *wql)
{
	struct proc *p = current_proc();
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			/* Darwin sets the flag first, BSD calls selrecord first */
			so->so_rcv.sb_flags |= SB_SEL;
			selrecord(p, &so->so_rcv.sb_sel, wql);
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			/* Darwin sets the flag first, BSD calls selrecord first */
			so->so_snd.sb_flags |= SB_SEL;
			selrecord(p, &so->so_snd.sb_sel, wql);
		}
	}

	splx(s);
	return (revents);
}