1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1984, 1985, 1986, 1987, 1993
24 * The Regents of the University of California. All rights reserved.
25 *
26 * Redistribution and use in source and binary forms, with or without
27 * modification, are permitted provided that the following conditions
28 * are met:
29 * 1. Redistributions of source code must retain the above copyright
30 * notice, this list of conditions and the following disclaimer.
31 * 2. Redistributions in binary form must reproduce the above copyright
32 * notice, this list of conditions and the following disclaimer in the
33 * documentation and/or other materials provided with the distribution.
34 * 3. All advertising materials mentioning features or use of this software
35 * must display the following acknowledgement:
36 * This product includes software developed by the University of
37 * California, Berkeley and its contributors.
38 * 4. Neither the name of the University nor the names of its contributors
39 * may be used to endorse or promote products derived from this software
40 * without specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 *
54 * @(#)spp_usrreq.c 8.1 (Berkeley) 6/10/93
55 */
56
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/malloc.h>
60 #include <sys/mbuf.h>
61 #include <sys/protosw.h>
62 #include <sys/socket.h>
63 #include <sys/socketvar.h>
64 #include <sys/errno.h>
65 #include <sys/ev.h>
66
67 #include <net/if.h>
68 #include <net/route.h>
69 #include <netinet/tcp_fsm.h>
70
71 #include <netns/ns.h>
72 #include <netns/ns_pcb.h>
73 #include <netns/idp.h>
74 #include <netns/idp_var.h>
75 #include <netns/ns_error.h>
76 #include <netns/sp.h>
77 #include <netns/spidp.h>
78 #include <netns/spp_timer.h>
79 #include <netns/spp_var.h>
80 #include <netns/spp_debug.h>
81
82 /*
83 * SP protocol implementation.
84 */
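/*
 * Initialize the SPP layer.  All this does today is seed the initial
 * send sequence number (spp_iss); as the comment below admits, it
 * should really be derived from the time-of-day clock (TODR) rather
 * than a constant.
 */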
85 spp_init()
86 {
87
88 spp_iss = 1; /* WRONG !! should fish it out of TODR */
89 }
90 struct spidp spp_savesi;
91 int traceallspps = 0;
92 extern int sppconsdebug;
93 int spp_hardnosed;
94 int spp_use_delack = 0;
95 u_short spp_newchecks[50];
96
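/*
 * SPP input routine: given an mbuf chain holding a complete spidp
 * packet and the ns pcb it was matched to, handle passive opens on
 * listening sockets and the connection-establishment states, then
 * strip the idp header and hand the packet to spp_reass() for
 * acknowledgment processing and in-order delivery, generating any
 * acks or error replies that are called for.
 */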
97 /*ARGSUSED*/
98 spp_input(m, nsp)
99 register struct mbuf *m;
100 register struct nspcb *nsp;
101 {
102 register struct sppcb *cb;
103 register struct spidp *si = mtod(m, struct spidp *);
104 register struct socket *so;
105 short ostate;
106 int dropsocket = 0;
107
108
109 sppstat.spps_rcvtotal++;
110 if (nsp == 0) {
111 panic("No nspcb in spp_input\n");
112 return;
113 }
114
115 cb = nstosppcb(nsp);
116 if (cb == 0) goto bad;
117
118 if (m->m_len < sizeof(*si)) {
119 if ((m = m_pullup(m, sizeof(*si))) == 0) {
120 sppstat.spps_rcvshort++;
121 return;
122 }
123 si = mtod(m, struct spidp *);
124 }
125 si->si_seq = ntohs(si->si_seq);
126 si->si_ack = ntohs(si->si_ack);
127 si->si_alo = ntohs(si->si_alo);
128
129 so = nsp->nsp_socket;
130 if (so->so_options & SO_DEBUG || traceallspps) {
131 ostate = cb->s_state;
132 spp_savesi = *si;
133 }
134 if (so->so_options & SO_ACCEPTCONN) {
135 struct sppcb *ocb = cb;
136
137 so = sonewconn(so, 0);
138 if (so == 0) {
139 goto drop;
140 }
141 /*
142 * This is ugly, but ....
143 *
144 * Mark socket as temporary until we're
145 * committed to keeping it. The code at
146  * ``drop'' and ``dropwithreset'' checks the
147 * flag dropsocket to see if the temporary
148 * socket created here should be discarded.
149 * We mark the socket as discardable until
150 * we're committed to it below in TCPS_LISTEN.
151 */
152 dropsocket++;
153 nsp = (struct nspcb *)so->so_pcb;
154 nsp->nsp_laddr = si->si_dna;
155 cb = nstosppcb(nsp);
156 cb->s_mtu = ocb->s_mtu; /* preserve sockopts */
157 cb->s_flags = ocb->s_flags; /* preserve sockopts */
158 cb->s_flags2 = ocb->s_flags2; /* preserve sockopts */
159 cb->s_state = TCPS_LISTEN;
160 }
161
162 /*
163 * Packet received on connection.
164 * reset idle time and keep-alive timer;
165 */
166 cb->s_idle = 0;
167 cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
168
169 switch (cb->s_state) {
170
171 case TCPS_LISTEN:{
172 struct mbuf *am;
173 register struct sockaddr_ns *sns;
174 struct ns_addr laddr;
175
176 /*
177  * If somebody here was carrying on a conversation
178 * and went away, and his pen pal thinks he can
179 * still talk, we get the misdirected packet.
180 */
181 if (spp_hardnosed && (si->si_did != 0 || si->si_seq != 0)) {
182 spp_istat.gonawy++;
183 goto dropwithreset;
184 }
185 am = m_get(M_DONTWAIT, MT_SONAME);
186 if (am == NULL)
187 goto drop;
188 am->m_len = sizeof (struct sockaddr_ns);
189 sns = mtod(am, struct sockaddr_ns *);
190 sns->sns_len = sizeof(*sns);
191 sns->sns_family = AF_NS;
192 sns->sns_addr = si->si_sna;
193 laddr = nsp->nsp_laddr;
194 if (ns_nullhost(laddr))
195 nsp->nsp_laddr = si->si_dna;
196 if (ns_pcbconnect(nsp, am)) {
197 nsp->nsp_laddr = laddr;
198 (void) m_free(am);
199 spp_istat.noconn++;
200 goto drop;
201 }
202 (void) m_free(am);
203 spp_template(cb);
204 dropsocket = 0; /* committed to socket */
205 cb->s_did = si->si_sid;
206 cb->s_rack = si->si_ack;
207 cb->s_ralo = si->si_alo;
208 #define THREEWAYSHAKE
209 #ifdef THREEWAYSHAKE
210 cb->s_state = TCPS_SYN_RECEIVED;
211 cb->s_force = 1 + SPPT_KEEP;
212 sppstat.spps_accepts++;
213 cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
214 }
215 break;
216 /*
217 * This state means that we have heard a response
218  * to our acceptance of their connection.
219 * It is probably logically unnecessary in this
220 * implementation.
221 */
222 case TCPS_SYN_RECEIVED: {
223 if (si->si_did!=cb->s_sid) {
224 spp_istat.wrncon++;
225 goto drop;
226 }
227 #endif
228 nsp->nsp_fport = si->si_sport;
229 cb->s_timer[SPPT_REXMT] = 0;
230 cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
231 soisconnected(so);
232 cb->s_state = TCPS_ESTABLISHED;
233 sppstat.spps_accepts++;
234 }
235 break;
236
237 /*
238 * This state means that we have gotten a response
239 * to our attempt to establish a connection.
240 * We fill in the data from the other side,
241 * telling us which port to respond to, instead of the well-
242 * known one we might have sent to in the first place.
243 * We also require that this is a response to our
244 * connection id.
245 */
246 case TCPS_SYN_SENT:
247 if (si->si_did!=cb->s_sid) {
248 spp_istat.notme++;
249 goto drop;
250 }
251 sppstat.spps_connects++;
252 cb->s_did = si->si_sid;
253 cb->s_rack = si->si_ack;
254 cb->s_ralo = si->si_alo;
255 cb->s_dport = nsp->nsp_fport = si->si_sport;
256 cb->s_timer[SPPT_REXMT] = 0;
257 cb->s_flags |= SF_ACKNOW;
258 soisconnected(so);
259 cb->s_state = TCPS_ESTABLISHED;
260 /* Use roundtrip time of connection request for initial rtt */
261 if (cb->s_rtt) {
262 cb->s_srtt = cb->s_rtt << 3;
263 cb->s_rttvar = cb->s_rtt << 1;
264 SPPT_RANGESET(cb->s_rxtcur,
265 ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
266 SPPTV_MIN, SPPTV_REXMTMAX);
267 cb->s_rtt = 0;
268 }
269 }
270 if (so->so_options & SO_DEBUG || traceallspps)
271 spp_trace(SA_INPUT, (u_char)ostate, cb, &spp_savesi, 0);
272
273 m->m_len -= sizeof (struct idp);
274 m->m_pkthdr.len -= sizeof (struct idp);
275 m->m_data += sizeof (struct idp);
276
277 if (spp_reass(cb, si)) {
278 (void) m_freem(m);
279 }
280 if (cb->s_force || (cb->s_flags & (SF_ACKNOW|SF_WIN|SF_RXT)))
281 (void) spp_output(cb, (struct mbuf *)0);
282 cb->s_flags &= ~(SF_WIN|SF_RXT);
283 return;
284
285 dropwithreset:
286 if (dropsocket)
287 (void) soabort(so);
288 si->si_seq = ntohs(si->si_seq);
289 si->si_ack = ntohs(si->si_ack);
290 si->si_alo = ntohs(si->si_alo);
291 ns_error(dtom(si), NS_ERR_NOSOCK, 0);
292 if (cb->s_nspcb->nsp_socket->so_options & SO_DEBUG || traceallspps)
293 spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
294 return;
295
296 drop:
297 bad:
298 if (cb == 0 || cb->s_nspcb->nsp_socket->so_options & SO_DEBUG ||
299 traceallspps)
300 spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0);
301 m_freem(m);
302 }
303
304 int spprexmtthresh = 3;
305
306 /*
307 * This is structurally similar to the tcp reassembly routine
308 * but its function is somewhat different: It merely queues
309 * packets up, and suppresses duplicates.
310 */
311 spp_reass(cb, si)
312 register struct sppcb *cb;
313 register struct spidp *si;
314 {
315 register struct spidp_q *q;
316 register struct mbuf *m;
317 register struct socket *so = cb->s_nspcb->nsp_socket;
318 char packetp = cb->s_flags & SF_HI;
319 int incr;
320 char wakeup = 0;
321
322 if (si == SI(0))
323 goto present;
324 /*
325 * Update our news from them.
326 */
327 if (si->si_cc & SP_SA)
328 cb->s_flags |= (spp_use_delack ? SF_DELACK : SF_ACKNOW);
329 if (SSEQ_GT(si->si_alo, cb->s_ralo))
330 cb->s_flags |= SF_WIN;
331 if (SSEQ_LEQ(si->si_ack, cb->s_rack)) {
332 if ((si->si_cc & SP_SP) && cb->s_rack != (cb->s_smax + 1)) {
333 sppstat.spps_rcvdupack++;
334 /*
335 * If this is a completely duplicate ack
336 * and other conditions hold, we assume
337 * a packet has been dropped and retransmit
338 * it exactly as in tcp_input().
339 */
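/*
 * Concretely: once spprexmtthresh (3) duplicate acks arrive with
 * unchanged ack and allocation fields, s_snxt is pulled back to the
 * acked point, the congestion window is dropped to one packet (CUNIT)
 * for the retransmission, the retransmit timer is restarted, and the
 * old send point is restored; if the old window was at least four
 * packets it is then reopened to half its previous size.
 */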
340 if (si->si_ack != cb->s_rack ||
341 si->si_alo != cb->s_ralo)
342 cb->s_dupacks = 0;
343 else if (++cb->s_dupacks == spprexmtthresh) {
344 u_short onxt = cb->s_snxt;
345 int cwnd = cb->s_cwnd;
346
347 cb->s_snxt = si->si_ack;
348 cb->s_cwnd = CUNIT;
349 cb->s_force = 1 + SPPT_REXMT;
350 (void) spp_output(cb, (struct mbuf *)0);
351 cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
352 cb->s_rtt = 0;
353 if (cwnd >= 4 * CUNIT)
354 cb->s_cwnd = cwnd / 2;
355 if (SSEQ_GT(onxt, cb->s_snxt))
356 cb->s_snxt = onxt;
357 return (1);
358 }
359 } else
360 cb->s_dupacks = 0;
361 goto update_window;
362 }
363 cb->s_dupacks = 0;
364 /*
365  * If our correspondent acknowledges data we haven't sent,
366  * TCP would drop the packet after acking. We'll be a little
367  * more permissive.
368 */
369 if (SSEQ_GT(si->si_ack, (cb->s_smax + 1))) {
370 sppstat.spps_rcvacktoomuch++;
371 si->si_ack = cb->s_smax + 1;
372 }
373 sppstat.spps_rcvackpack++;
374 /*
375 * If transmit timer is running and timed sequence
376 * number was acked, update smoothed round trip time.
377 * See discussion of algorithm in tcp_input.c
378 */
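/*
 * As in TCP, s_srtt is kept scaled by 8 and s_rttvar by 4, so the
 * shift-only updates below fold 1/8 of the error into srtt and 1/4
 * of the deviation into rttvar.  The retransmit value computed as
 * ((s_srtt >> 2) + s_rttvar) >> 1 then works out to
 * (smoothed rtt + 2 * rttvar) in slow-timeout ticks.
 */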
379 if (cb->s_rtt && SSEQ_GT(si->si_ack, cb->s_rtseq)) {
380 sppstat.spps_rttupdated++;
381 if (cb->s_srtt != 0) {
382 register short delta;
383 delta = cb->s_rtt - (cb->s_srtt >> 3);
384 if ((cb->s_srtt += delta) <= 0)
385 cb->s_srtt = 1;
386 if (delta < 0)
387 delta = -delta;
388 delta -= (cb->s_rttvar >> 2);
389 if ((cb->s_rttvar += delta) <= 0)
390 cb->s_rttvar = 1;
391 } else {
392 /*
393 * No rtt measurement yet
394 */
395 cb->s_srtt = cb->s_rtt << 3;
396 cb->s_rttvar = cb->s_rtt << 1;
397 }
398 cb->s_rtt = 0;
399 cb->s_rxtshift = 0;
400 SPPT_RANGESET(cb->s_rxtcur,
401 ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
402 SPPTV_MIN, SPPTV_REXMTMAX);
403 }
404 /*
405 * If all outstanding data is acked, stop retransmit
406 * timer and remember to restart (more output or persist).
407 * If there is more data to be acked, restart retransmit
408 * timer, using current (possibly backed-off) value;
409 */
410 if (si->si_ack == cb->s_smax + 1) {
411 cb->s_timer[SPPT_REXMT] = 0;
412 cb->s_flags |= SF_RXT;
413 } else if (cb->s_timer[SPPT_PERSIST] == 0)
414 cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
415 /*
416 * When new data is acked, open the congestion window.
417 * If the window gives us less than ssthresh packets
418 * in flight, open exponentially (maxseg at a time).
419 * Otherwise open linearly (maxseg^2 / cwnd at a time).
420 */
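/*
 * CUNIT represents one packet's worth of window here.  At or below
 * ssthresh every ack grows s_cwnd by a full CUNIT (roughly doubling
 * per round trip); above it the increment is CUNIT*CUNIT/cwnd, so
 * e.g. a window of four packets gains only CUNIT/4 per ack, about
 * one packet per round trip.
 */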
421 incr = CUNIT;
422 if (cb->s_cwnd > cb->s_ssthresh)
423 incr = max(incr * incr / cb->s_cwnd, 1);
424 cb->s_cwnd = min(cb->s_cwnd + incr, cb->s_cwmx);
425 /*
426 * Trim Acked data from output queue.
427 */
428 while ((m = so->so_snd.sb_mb) != NULL) {
429 if (SSEQ_LT((mtod(m, struct spidp *))->si_seq, si->si_ack))
430 sbdroprecord(&so->so_snd);
431 else
432 break;
433 }
434 sowwakeup(so);
435 cb->s_rack = si->si_ack;
436 update_window:
437 if (SSEQ_LT(cb->s_snxt, cb->s_rack))
438 cb->s_snxt = cb->s_rack;
439 if (SSEQ_LT(cb->s_swl1, si->si_seq) || cb->s_swl1 == si->si_seq &&
440 (SSEQ_LT(cb->s_swl2, si->si_ack) ||
441 cb->s_swl2 == si->si_ack && SSEQ_LT(cb->s_ralo, si->si_alo))) {
442 /* keep track of pure window updates */
443 if ((si->si_cc & SP_SP) && cb->s_swl2 == si->si_ack
444 && SSEQ_LT(cb->s_ralo, si->si_alo)) {
445 sppstat.spps_rcvwinupd++;
446 sppstat.spps_rcvdupack--;
447 }
448 cb->s_ralo = si->si_alo;
449 cb->s_swl1 = si->si_seq;
450 cb->s_swl2 = si->si_ack;
451 cb->s_swnd = (1 + si->si_alo - si->si_ack);
452 if (cb->s_swnd > cb->s_smxw)
453 cb->s_smxw = cb->s_swnd;
454 cb->s_flags |= SF_WIN;
455 }
456 /*
457 * If this packet number is higher than that which
458  * we have allocated, refuse it, unless urgent
459 */
460 if (SSEQ_GT(si->si_seq, cb->s_alo)) {
461 if (si->si_cc & SP_SP) {
462 sppstat.spps_rcvwinprobe++;
463 return (1);
464 } else
465 sppstat.spps_rcvpackafterwin++;
466 if (si->si_cc & SP_OB) {
467 if (SSEQ_GT(si->si_seq, cb->s_alo + 60)) {
468 ns_error(dtom(si), NS_ERR_FULLUP, 0);
469 return (0);
470 } /* else queue this packet; */
471 } else {
472 /*register struct socket *so = cb->s_nspcb->nsp_socket;
473 if (so->so_state && SS_NOFDREF) {
474 ns_error(dtom(si), NS_ERR_NOSOCK, 0);
475 (void)spp_close(cb);
476 } else
477 would crash system*/
478 spp_istat.notyet++;
479 ns_error(dtom(si), NS_ERR_FULLUP, 0);
480 return (0);
481 }
482 }
483 /*
484 * If this is a system packet, we don't need to
485 * queue it up, and won't update acknowledge #
486 */
487 if (si->si_cc & SP_SP) {
488 return (1);
489 }
490 /*
491 * We have already seen this packet, so drop.
492 */
493 if (SSEQ_LT(si->si_seq, cb->s_ack)) {
494 spp_istat.bdreas++;
495 sppstat.spps_rcvduppack++;
496 if (si->si_seq == cb->s_ack - 1)
497 spp_istat.lstdup++;
498 return (1);
499 }
500 /*
501 * Loop through all packets queued up to insert in
502 * appropriate sequence.
503 */
504 for (q = cb->s_q.si_next; q!=&cb->s_q; q = q->si_next) {
505 if (si->si_seq == SI(q)->si_seq) {
506 sppstat.spps_rcvduppack++;
507 return (1);
508 }
509 if (SSEQ_LT(si->si_seq, SI(q)->si_seq)) {
510 sppstat.spps_rcvoopack++;
511 break;
512 }
513 }
514 insque(si, q->si_prev);
515 /*
516 * If this packet is urgent, inform process
517 */
518 if (si->si_cc & SP_OB) {
519 cb->s_iobc = ((char *)si)[1 + sizeof(*si)];
520 sohasoutofband(so);
521 cb->s_oobflags |= SF_IOOB;
522 }
523 present:
524 #define SPINC sizeof(struct sphdr)
525 /*
526 * Loop through all packets queued up to update acknowledge
527 * number, and present all acknowledged data to user;
528 * If in packet interface mode, show packet headers.
529 */
530 for (q = cb->s_q.si_next; q!=&cb->s_q; q = q->si_next) {
531 if (SI(q)->si_seq == cb->s_ack) {
532 cb->s_ack++;
533 m = dtom(q);
534 if (SI(q)->si_cc & SP_OB) {
535 cb->s_oobflags &= ~SF_IOOB;
536 if (so->so_rcv.sb_cc)
537 so->so_oobmark = so->so_rcv.sb_cc;
538 else
539 so->so_state |= SS_RCVATMARK;
540 postevent(so, 0, EV_OOB);
541 }
542 q = q->si_prev;
543 remque(q->si_next);
544 wakeup = 1;
545 sppstat.spps_rcvpack++;
546 #ifdef SF_NEWCALL
547 if (cb->s_flags2 & SF_NEWCALL) {
548 struct sphdr *sp = mtod(m, struct sphdr *);
549 u_char dt = sp->sp_dt;
550 spp_newchecks[4]++;
551 if (dt != cb->s_rhdr.sp_dt) {
552 struct mbuf *mm =
553 m_getclr(M_DONTWAIT, MT_CONTROL);
554 spp_newchecks[0]++;
555 if (mm != NULL) {
556 u_short *s =
557 mtod(mm, u_short *);
558 cb->s_rhdr.sp_dt = dt;
559 mm->m_len = 5; /*XXX*/
560 s[0] = 5;
561 s[1] = 1;
562 *(u_char *)(&s[2]) = dt;
563 sbappend(&so->so_rcv, mm);
564 }
565 }
566 if (sp->sp_cc & SP_OB) {
567 MCHTYPE(m, MT_OOBDATA);
568 spp_newchecks[1]++;
569 so->so_oobmark = 0;
570 so->so_state &= ~SS_RCVATMARK;
571 }
572 if (packetp == 0) {
573 m->m_data += SPINC;
574 m->m_len -= SPINC;
575 m->m_pkthdr.len -= SPINC;
576 }
577 if ((sp->sp_cc & SP_EM) || packetp) {
578 sbappendrecord(&so->so_rcv, m);
579 spp_newchecks[9]++;
580 } else
581 sbappend(&so->so_rcv, m);
582 } else
583 #endif
584 if (packetp) {
585 sbappendrecord(&so->so_rcv, m);
586 } else {
587 cb->s_rhdr = *mtod(m, struct sphdr *);
588 m->m_data += SPINC;
589 m->m_len -= SPINC;
590 m->m_pkthdr.len -= SPINC;
591 sbappend(&so->so_rcv, m);
592 }
593 } else
594 break;
595 }
596 if (wakeup) sorwakeup(so);
597 return (0);
598 }
599
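/*
 * Handle control input (error notifications) for SPP.  Interface and
 * host failures abort every connection to the affected address via
 * ns_pcbnotify(); decoded NS error packets either quench the sender
 * (NS_ERR_FULLUP), drop the offending connection (NS_ERR_TOO_BIG,
 * NS_ERR_NOSOCK), or abort it, with nsctlerrmap translating the
 * command into an errno for the socket.
 */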
600 spp_ctlinput(cmd, arg)
601 int cmd;
602 caddr_t arg;
603 {
604 struct ns_addr *na;
605 extern u_char nsctlerrmap[];
606 extern spp_abort(), spp_quench();
607 extern struct nspcb *idp_drop();
608 struct ns_errp *errp;
609 struct nspcb *nsp;
610 struct sockaddr_ns *sns;
611 int type;
612
613 if (cmd < 0 || cmd >= PRC_NCMDS)	/* cmd indexes nsctlerrmap below */
614 return;
615 type = NS_ERR_UNREACH_HOST;
616
617 switch (cmd) {
618
619 case PRC_ROUTEDEAD:
620 return;
621
622 case PRC_IFDOWN:
623 case PRC_HOSTDEAD:
624 case PRC_HOSTUNREACH:
625 sns = (struct sockaddr_ns *)arg;
626 if (sns->sns_family != AF_NS)
627 return;
628 na = &sns->sns_addr;
629 break;
630
631 default:
632 errp = (struct ns_errp *)arg;
633 na = &errp->ns_err_idp.idp_dna;
634 type = errp->ns_err_num;
635 type = ntohs((u_short)type);
636 }
637 switch (type) {
638
639 case NS_ERR_UNREACH_HOST:
640 ns_pcbnotify(na, (int)nsctlerrmap[cmd], spp_abort, (long) 0);
641 break;
642
643 case NS_ERR_TOO_BIG:
644 case NS_ERR_NOSOCK:
645 nsp = ns_pcblookup(na, errp->ns_err_idp.idp_sna.x_port,
646 NS_WILDCARD);
647 if (nsp) {
648 if(nsp->nsp_pcb)
649 (void) spp_drop((struct sppcb *)nsp->nsp_pcb,
650 (int)nsctlerrmap[cmd]);
651 else
652 (void) idp_drop(nsp, (int)nsctlerrmap[cmd]);
653 }
654 break;
655
656 case NS_ERR_FULLUP:
657 ns_pcbnotify(na, 0, spp_quench, (long) 0);
658 }
659 }
660 /*
661 * When a source quench is received, close congestion window
662 * to one packet. We will gradually open it again as we proceed.
663 */
664 spp_quench(nsp)
665 struct nspcb *nsp;
666 {
667 struct sppcb *cb = nstosppcb(nsp);
668
669 if (cb)
670 cb->s_cwnd = CUNIT;
671 }
672
673 #ifdef notdef
674 int
675 spp_fixmtu(nsp)
676 register struct nspcb *nsp;
677 {
678 register struct sppcb *cb = (struct sppcb *)(nsp->nsp_pcb);
679 register struct mbuf *m;
680 register struct spidp *si;
681 struct ns_errp *ep;
682 struct sockbuf *sb;
683 int badseq, len;
684 struct mbuf *firstbad, *m0;
685
686 if (cb) {
687 /*
688 * The notification that we have sent
689 * too much is bad news -- we will
690  * have to go through the packets queued up so far,
691  * splitting ones which are too big and
692  * reassigning sequence numbers and checksums.
693  * We should then retransmit all packets from
694  * one above the offending packet to the last one
695  * we had sent (or our allocation),
696  * then the offending one so that any queued
697 * data at our destination will be discarded.
698 */
699 ep = (struct ns_errp *)nsp->nsp_notify_param;
700 sb = &nsp->nsp_socket->so_snd;
701 cb->s_mtu = ep->ns_err_param;
702 badseq = SI(&ep->ns_err_idp)->si_seq;
703 for (m = sb->sb_mb; m; m = m->m_act) {
704 si = mtod(m, struct spidp *);
705 if (si->si_seq == badseq)
706 break;
707 }
708 if (m == 0) return;
709 firstbad = m;
710 /*for (;;) {*/
711 /* calculate length */
712 for (m0 = m, len = 0; m ; m = m->m_next)
713 len += m->m_len;
714 if (len > cb->s_mtu) {
715 }
716 /* FINISH THIS
717 } */
718 }
719 }
720 #endif
721
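/*
 * SPP output routine.  When m0 is supplied it is user data: it is
 * split to the connection mtu if necessary, padded to an even length,
 * given a spidp header and appended to the send queue.  The rest of
 * the routine decides whether anything should go out now (data within
 * the send window, a forced probe, an ack or a window update), copies
 * or builds the packet, fills in the sequence, ack and allocation
 * fields, checksums it and passes it to ns_output(), maintaining the
 * retransmit and persist timers along the way.
 */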
722 spp_output(cb, m0)
723 register struct sppcb *cb;
724 struct mbuf *m0;
725 {
726 struct socket *so = cb->s_nspcb->nsp_socket;
727 register struct mbuf *m;
728 register struct spidp *si = (struct spidp *) 0;
729 register struct sockbuf *sb = &so->so_snd;
730 int len = 0, win, rcv_win;
731 short span, off, recordp = 0;
732 u_short alo;
733 int error = 0, sendalot;
734 #ifdef notdef
735 int idle;
736 #endif
737 struct mbuf *mprev;
738 extern int idpcksum;
739
740 if (m0) {
741 int mtu = cb->s_mtu;
742 int datalen;
743 /*
744 * Make sure that packet isn't too big.
745 */
746 for (m = m0; m ; m = m->m_next) {
747 mprev = m;
748 len += m->m_len;
749 if (m->m_flags & M_EOR)
750 recordp = 1;
751 }
752 datalen = (cb->s_flags & SF_HO) ?
753 len - sizeof (struct sphdr) : len;
754 if (datalen > mtu) {
755 if (cb->s_flags & SF_PI) {
756 m_freem(m0);
757 return (EMSGSIZE);
758 } else {
759 int oldEM = cb->s_cc & SP_EM;
760
761 cb->s_cc &= ~SP_EM;
762 while (len > mtu) {
763 /*
764 * Here we are only being called
765 * from usrreq(), so it is OK to
766 * block.
767 */
768 m = m_copym(m0, 0, mtu, M_WAIT);
769 if (cb->s_flags & SF_NEWCALL) {
770 struct mbuf *mm = m;
771 spp_newchecks[7]++;
772 while (mm) {
773 mm->m_flags &= ~M_EOR;
774 mm = mm->m_next;
775 }
776 }
777 error = spp_output(cb, m);
778 if (error) {
779 cb->s_cc |= oldEM;
780 m_freem(m0);
781 return(error);
782 }
783 m_adj(m0, mtu);
784 len -= mtu;
785 }
786 cb->s_cc |= oldEM;
787 }
788 }
789 /*
790 * Force length even, by adding a "garbage byte" if
791 * necessary.
792 */
793 if (len & 1) {
794 m = mprev;
795 if (M_TRAILINGSPACE(m) >= 1)
796 m->m_len++;
797 else {
798 struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);
799
800 if (m1 == 0) {
801 m_freem(m0);
802 return (ENOBUFS);
803 }
804 m1->m_len = 1;
805 *(mtod(m1, u_char *)) = 0;
806 m->m_next = m1;
807 }
808 }
809 m = m_gethdr(M_DONTWAIT, MT_HEADER);
810 if (m == 0) {
811 m_freem(m0);
812 return (ENOBUFS);
813 }
814 /*
815 * Fill in mbuf with extended SP header
816 * and addresses and length put into network format.
817 */
818 MH_ALIGN(m, sizeof (struct spidp));
819 m->m_len = sizeof (struct spidp);
820 m->m_next = m0;
821 si = mtod(m, struct spidp *);
822 si->si_i = *cb->s_idp;
823 si->si_s = cb->s_shdr;
824 if ((cb->s_flags & SF_PI) && (cb->s_flags & SF_HO)) {
825 register struct sphdr *sh;
826 if (m0->m_len < sizeof (*sh)) {
827 if((m0 = m_pullup(m0, sizeof(*sh))) == NULL) {
828 (void) m_free(m);
829 m_freem(m0);
830 return (EINVAL);
831 }
832 m->m_next = m0;
833 }
834 sh = mtod(m0, struct sphdr *);
835 si->si_dt = sh->sp_dt;
836 si->si_cc |= sh->sp_cc & SP_EM;
837 m0->m_len -= sizeof (*sh);
838 m0->m_data += sizeof (*sh);
839 len -= sizeof (*sh);
840 }
841 len += sizeof(*si);
842 if ((cb->s_flags2 & SF_NEWCALL) && recordp) {
843 si->si_cc |= SP_EM;
844 spp_newchecks[8]++;
845 }
846 if (cb->s_oobflags & SF_SOOB) {
847 /*
848 * Per jqj@cornell:
849 * make sure OB packets convey exactly 1 byte.
850 * If the packet is 1 byte or larger, we
851  * have already guaranteed there to be at least
852 * one garbage byte for the checksum, and
853 * extra bytes shouldn't hurt!
854 */
855 if (len > sizeof(*si)) {
856 si->si_cc |= SP_OB;
857 len = (1 + sizeof(*si));
858 }
859 }
860 si->si_len = htons((u_short)len);
861 m->m_pkthdr.len = ((len - 1) | 1) + 1;
862 /*
863 * queue stuff up for output
864 */
865 sbappendrecord(sb, m);
866 cb->s_seq++;
867 }
868 #ifdef notdef
869 idle = (cb->s_smax == (cb->s_rack - 1));
870 #endif
871 again:
872 sendalot = 0;
873 off = cb->s_snxt - cb->s_rack;
874 win = min(cb->s_swnd, (cb->s_cwnd/CUNIT));
875
876 /*
877 * If in persist timeout with window of 0, send a probe.
878 * Otherwise, if window is small but nonzero
879 * and timer expired, send what we can and go into
880 * transmit state.
881 */
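/*
 * s_force is zero during normal output and (1 + timer index) when
 * output is being forced, e.g. by spp_timers(); the test below
 * recognizes a persist-timer probe.
 */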
882 if (cb->s_force == 1 + SPPT_PERSIST) {
883 if (win != 0) {
884 cb->s_timer[SPPT_PERSIST] = 0;
885 cb->s_rxtshift = 0;
886 }
887 }
888 span = cb->s_seq - cb->s_rack;
889 len = min(span, win) - off;
890
891 if (len < 0) {
892 /*
893 * Window shrank after we went into it.
894 * If window shrank to 0, cancel pending
895  * retransmission and pull s_snxt back
896  * to (closed) window. We will enter persist
897  * state below. If the window didn't close completely,
898 * just wait for an ACK.
899 */
900 len = 0;
901 if (win == 0) {
902 cb->s_timer[SPPT_REXMT] = 0;
903 cb->s_snxt = cb->s_rack;
904 }
905 }
906 if (len > 1)
907 sendalot = 1;
908 rcv_win = sbspace(&so->so_rcv);
909
910 /*
911 * Send if we owe peer an ACK.
912 */
913 if (cb->s_oobflags & SF_SOOB) {
914 /*
915 * must transmit this out of band packet
916 */
917 cb->s_oobflags &= ~ SF_SOOB;
918 sendalot = 1;
919 sppstat.spps_sndurg++;
920 goto found;
921 }
922 if (cb->s_flags & SF_ACKNOW)
923 goto send;
924 if (cb->s_state < TCPS_ESTABLISHED)
925 goto send;
926 /*
927 * Silly window can't happen in spp.
928 * Code from tcp deleted.
929 */
930 if (len)
931 goto send;
932 /*
933 * Compare available window to amount of window
934 * known to peer (as advertised window less
935  * next expected input). If the difference is at least two
936  * packets or at least 35% of the maximum possible window,
937  * then we want to send a window update to peer.
938 */
939 if (rcv_win > 0) {
940 u_short delta = 1 + cb->s_alo - cb->s_ack;
941 int adv = rcv_win - (delta * cb->s_mtu);
942
943 if ((so->so_rcv.sb_cc == 0 && adv >= (2 * cb->s_mtu)) ||
944 (100 * adv / so->so_rcv.sb_hiwat >= 35)) {
945 sppstat.spps_sndwinup++;
946 cb->s_flags |= SF_ACKNOW;
947 goto send;
948 }
949
950 }
951 /*
952 * Many comments from tcp_output.c are appropriate here
953 * including . . .
954 * If send window is too small, there is data to transmit, and no
955 * retransmit or persist is pending, then go to persist state.
956 * If nothing happens soon, send when timer expires:
957 * if window is nonzero, transmit what we can,
958 * otherwise send a probe.
959 */
960 if (so->so_snd.sb_cc && cb->s_timer[SPPT_REXMT] == 0 &&
961 cb->s_timer[SPPT_PERSIST] == 0) {
962 cb->s_rxtshift = 0;
963 spp_setpersist(cb);
964 }
965 /*
966 * No reason to send a packet, just return.
967 */
968 cb->s_outx = 1;
969 return (0);
970
971 send:
972 /*
973 * Find requested packet.
974 */
975 si = 0;
976 if (len > 0) {
977 cb->s_want = cb->s_snxt;
978 for (m = sb->sb_mb; m; m = m->m_act) {
979 si = mtod(m, struct spidp *);
980 if (SSEQ_LEQ(cb->s_snxt, si->si_seq))
981 break;
982 }
983 found:
984 if (si) {
985 if (si->si_seq == cb->s_snxt)
986 cb->s_snxt++;
987 else
988 sppstat.spps_sndvoid++, si = 0;
989 }
990 }
991 /*
992 * update window
993 */
994 if (rcv_win < 0)
995 rcv_win = 0;
996 alo = cb->s_ack - 1 + (rcv_win / ((short)cb->s_mtu));
997 if (SSEQ_LT(alo, cb->s_alo))
998 alo = cb->s_alo;
999
1000 if (si) {
1001 /*
1002 * must make a copy of this packet for
1003 * idp_output to monkey with
1004 */
1005 m = m_copy(dtom(si), 0, (int)M_COPYALL);
1006 if (m == NULL) {
1007 return (ENOBUFS);
1008 }
1009 si = mtod(m, struct spidp *);
1010 if (SSEQ_LT(si->si_seq, cb->s_smax))
1011 sppstat.spps_sndrexmitpack++;
1012 else
1013 sppstat.spps_sndpack++;
1014 } else if (cb->s_force || cb->s_flags & SF_ACKNOW) {
1015 /*
1016 * Must send an acknowledgement or a probe
1017 */
1018 if (cb->s_force)
1019 sppstat.spps_sndprobe++;
1020 if (cb->s_flags & SF_ACKNOW)
1021 sppstat.spps_sndacks++;
1022 m = m_gethdr(M_DONTWAIT, MT_HEADER);
1023 if (m == 0)
1024 return (ENOBUFS);
1025 /*
1026 * Fill in mbuf with extended SP header
1027 * and addresses and length put into network format.
1028 */
1029 MH_ALIGN(m, sizeof (struct spidp));
1030 m->m_len = sizeof (*si);
1031 m->m_pkthdr.len = sizeof (*si);
1032 si = mtod(m, struct spidp *);
1033 si->si_i = *cb->s_idp;
1034 si->si_s = cb->s_shdr;
1035 si->si_seq = cb->s_smax + 1;
1036 si->si_len = htons(sizeof (*si));
1037 si->si_cc |= SP_SP;
1038 } else {
1039 cb->s_outx = 3;
1040 if (so->so_options & SO_DEBUG || traceallspps)
1041 spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
1042 return (0);
1043 }
1044 /*
1045 * Stuff checksum and output datagram.
1046 */
1047 if ((si->si_cc & SP_SP) == 0) {
1048 if (cb->s_force != (1 + SPPT_PERSIST) ||
1049 cb->s_timer[SPPT_PERSIST] == 0) {
1050 /*
1051 * If this is a new packet and we are not currently
1052 * timing anything, time this one.
1053 */
1054 if (SSEQ_LT(cb->s_smax, si->si_seq)) {
1055 cb->s_smax = si->si_seq;
1056 if (cb->s_rtt == 0) {
1057 sppstat.spps_segstimed++;
1058 cb->s_rtseq = si->si_seq;
1059 cb->s_rtt = 1;
1060 }
1061 }
1062 /*
1063 * Set rexmt timer if not currently set,
1064 * Initial value for retransmit timer is smoothed
1065 * round-trip time + 2 * round-trip time variance.
1066 * Initialize shift counter which is used for backoff
1067 * of retransmit time.
1068 */
1069 if (cb->s_timer[SPPT_REXMT] == 0 &&
1070 cb->s_snxt != cb->s_rack) {
1071 cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
1072 if (cb->s_timer[SPPT_PERSIST]) {
1073 cb->s_timer[SPPT_PERSIST] = 0;
1074 cb->s_rxtshift = 0;
1075 }
1076 }
1077 } else if (SSEQ_LT(cb->s_smax, si->si_seq)) {
1078 cb->s_smax = si->si_seq;
1079 }
1080 } else if (cb->s_state < TCPS_ESTABLISHED) {
1081 if (cb->s_rtt == 0)
1082 cb->s_rtt = 1; /* Time initial handshake */
1083 if (cb->s_timer[SPPT_REXMT] == 0)
1084 cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
1085 }
1086 {
1087 /*
1088 * Do not request acks when we ack their data packets or
1089 * when we do a gratuitous window update.
1090 */
1091 if (((si->si_cc & SP_SP) == 0) || cb->s_force)
1092 si->si_cc |= SP_SA;
1093 si->si_seq = htons(si->si_seq);
1094 si->si_alo = htons(alo);
1095 si->si_ack = htons(cb->s_ack);
1096
1097 if (idpcksum) {
1098 si->si_sum = 0;
1099 len = ntohs(si->si_len);
1100 if (len & 1)
1101 len++;
1102 si->si_sum = ns_cksum(m, len);
1103 } else
1104 si->si_sum = 0xffff;
1105
1106 cb->s_outx = 4;
1107 if (so->so_options & SO_DEBUG || traceallspps)
1108 spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
1109
1110 if (so->so_options & SO_DONTROUTE)
1111 error = ns_output(m, (struct route *)0, NS_ROUTETOIF);
1112 else
1113 error = ns_output(m, &cb->s_nspcb->nsp_route, 0);
1114 }
1115 if (error) {
1116 return (error);
1117 }
1118 sppstat.spps_sndtotal++;
1119 /*
1120 * Data sent (as far as we can tell).
1121 * If this advertises a larger window than any other segment,
1122  * then remember the size of the advertised window.
1123 * Any pending ACK has now been sent.
1124 */
1125 cb->s_force = 0;
1126 cb->s_flags &= ~(SF_ACKNOW|SF_DELACK);
1127 if (SSEQ_GT(alo, cb->s_alo))
1128 cb->s_alo = alo;
1129 if (sendalot)
1130 goto again;
1131 cb->s_outx = 5;
1132 return (0);
1133 }
1134
1135 int spp_do_persist_panics = 0;
1136
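/*
 * Start (or back off) the persist timer: the interval is the current
 * round-trip estimate scaled by spp_backoff[s_rxtshift] and clamped
 * to [SPPTV_PERSMIN, SPPTV_PERSMAX].
 */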
1137 spp_setpersist(cb)
1138 register struct sppcb *cb;
1139 {
1140 register t = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
1141 extern int spp_backoff[];
1142
1143 if (cb->s_timer[SPPT_REXMT] && spp_do_persist_panics)
1144 panic("spp_output REXMT");
1145 /*
1146  * Start/restart persistence timer.
1147 */
1148 SPPT_RANGESET(cb->s_timer[SPPT_PERSIST],
1149 t*spp_backoff[cb->s_rxtshift],
1150 SPPTV_PERSMIN, SPPTV_PERSMAX);
1151 if (cb->s_rxtshift < SPP_MAXRXTSHIFT)
1152 cb->s_rxtshift++;
1153 }
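/*
 * Get and set SPP-level socket options: header-delivery flags
 * (SO_HEADERS_ON_INPUT/OUTPUT), the connection mtu, the default and
 * last-received sp headers, and SO_NEWCALL.  Requests at any other
 * level are handed down to idp_ctloutput().
 */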
1154 /*ARGSUSED*/
1155 spp_ctloutput(req, so, level, name, value)
1156 int req;
1157 struct socket *so;
1158 int name;
1159 struct mbuf **value;
1160 {
1161 register struct mbuf *m;
1162 struct nspcb *nsp = sotonspcb(so);
1163 register struct sppcb *cb;
1164 int mask, error = 0;
1165
1166 if (level != NSPROTO_SPP) {
1167 /* This will have to be changed when we do more general
1168 stacking of protocols */
1169 return (idp_ctloutput(req, so, level, name, value));
1170 }
1171 if (nsp == NULL) {
1172 error = EINVAL;
1173 goto release;
1174 } else
1175 cb = nstosppcb(nsp);
1176
1177 switch (req) {
1178
1179 case PRCO_GETOPT:
1180 if (value == NULL)
1181 return (EINVAL);
1182 m = m_get(M_DONTWAIT, MT_DATA);
1183 if (m == NULL)
1184 return (ENOBUFS);
1185 switch (name) {
1186
1187 case SO_HEADERS_ON_INPUT:
1188 mask = SF_HI;
1189 goto get_flags;
1190
1191 case SO_HEADERS_ON_OUTPUT:
1192 mask = SF_HO;
1193 get_flags:
1194 m->m_len = sizeof(short);
1195 *mtod(m, short *) = cb->s_flags & mask;
1196 break;
1197
1198 case SO_MTU:
1199 m->m_len = sizeof(u_short);
1200 *mtod(m, short *) = cb->s_mtu;
1201 break;
1202
1203 case SO_LAST_HEADER:
1204 m->m_len = sizeof(struct sphdr);
1205 *mtod(m, struct sphdr *) = cb->s_rhdr;
1206 break;
1207
1208 case SO_DEFAULT_HEADERS:
1209 m->m_len = sizeof(struct spidp);
1210 *mtod(m, struct sphdr *) = cb->s_shdr;
1211 break;
1212
1213 default:
1214 error = EINVAL;
1215 }
1216 *value = m;
1217 break;
1218
1219 case PRCO_SETOPT:
1220 if (value == 0 || *value == 0) {
1221 error = EINVAL;
1222 break;
1223 }
1224 switch (name) {
1225 int *ok;
1226
1227 case SO_HEADERS_ON_INPUT:
1228 mask = SF_HI;
1229 goto set_head;
1230
1231 case SO_HEADERS_ON_OUTPUT:
1232 mask = SF_HO;
1233 set_head:
1234 if (cb->s_flags & SF_PI) {
1235 ok = mtod(*value, int *);
1236 if (*ok)
1237 cb->s_flags |= mask;
1238 else
1239 cb->s_flags &= ~mask;
1240 } else error = EINVAL;
1241 break;
1242
1243 case SO_MTU:
1244 cb->s_mtu = *(mtod(*value, u_short *));
1245 break;
1246
1247 #ifdef SF_NEWCALL
1248 case SO_NEWCALL:
1249 ok = mtod(*value, int *);
1250 if (*ok) {
1251 cb->s_flags2 |= SF_NEWCALL;
1252 spp_newchecks[5]++;
1253 } else {
1254 cb->s_flags2 &= ~SF_NEWCALL;
1255 spp_newchecks[6]++;
1256 }
1257 break;
1258 #endif
1259
1260 case SO_DEFAULT_HEADERS:
1261 {
1262 register struct sphdr *sp
1263 = mtod(*value, struct sphdr *);
1264 cb->s_dt = sp->sp_dt;
1265 cb->s_cc = sp->sp_cc & SP_EM;
1266 }
1267 break;
1268
1269 default:
1270 error = EINVAL;
1271 }
1272 m_freem(*value);
1273 break;
1274 }
1275 release:
1276 return (error);
1277 }
1278
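/*
 * Process a user request (PRU_*) on an SPP socket: attach and detach
 * of the protocol control block, bind, listen, connect, accept,
 * shutdown, normal and out-of-band transfer, address queries, and the
 * timer upcalls issued by spp_slowtimo().
 */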
1279 /*ARGSUSED*/
1280 spp_usrreq(so, req, m, nam, controlp)
1281 struct socket *so;
1282 int req;
1283 struct mbuf *m, *nam, *controlp;
1284 {
1285 struct nspcb *nsp = sotonspcb(so);
1286 register struct sppcb *cb = 0;	/* null until attached */
1287 int s = splnet();
1288 int error = 0, ostate;
1289 struct mbuf *mm;
1290 register struct sockbuf *sb;
1291
1292 if (req == PRU_CONTROL)
1293 return (ns_control(so, (int)m, (caddr_t)nam,
1294 (struct ifnet *)controlp));
1295 if (nsp == NULL) {
1296 if (req != PRU_ATTACH) {
1297 error = EINVAL;
1298 goto release;
1299 }
1300 } else
1301 cb = nstosppcb(nsp);
1302
1303 ostate = cb ? cb->s_state : 0;
1304
1305 switch (req) {
1306
1307 case PRU_ATTACH:
1308 if (nsp != NULL) {
1309 error = EISCONN;
1310 break;
1311 }
1312 error = ns_pcballoc(so, &nspcb);
1313 if (error)
1314 break;
1315 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
1316 error = soreserve(so, (u_long) 3072, (u_long) 3072);
1317 if (error)
1318 break;
1319 }
1320 nsp = sotonspcb(so);
1321
1322 mm = m_getclr(M_DONTWAIT, MT_PCB);
1323 sb = &so->so_snd;
1324
1325 if (mm == NULL) {
1326 error = ENOBUFS;
1327 break;
1328 }
1329 cb = mtod(mm, struct sppcb *);
1330 mm = m_getclr(M_DONTWAIT, MT_HEADER);
1331 if (mm == NULL) {
1332 (void) m_free(dtom(cb));	/* free the sppcb mbuf allocated above */
1333 error = ENOBUFS;
1334 break;
1335 }
1336 cb->s_idp = mtod(mm, struct idp *);
1337 cb->s_state = TCPS_LISTEN;
1338 cb->s_smax = -1;
1339 cb->s_swl1 = -1;
1340 cb->s_q.si_next = cb->s_q.si_prev = &cb->s_q;
1341 cb->s_nspcb = nsp;
1342 cb->s_mtu = 576 - sizeof (struct spidp);
1343 cb->s_cwnd = sbspace(sb) * CUNIT / cb->s_mtu;
1344 cb->s_ssthresh = cb->s_cwnd;
1345 cb->s_cwmx = sbspace(sb) * CUNIT /
1346 (2 * sizeof (struct spidp));
1347 /* Above is recomputed when connecting to account
1348 for changed buffering or mtu's */
1349 cb->s_rtt = SPPTV_SRTTBASE;
1350 cb->s_rttvar = SPPTV_SRTTDFLT << 2;
1351 SPPT_RANGESET(cb->s_rxtcur,
1352 ((SPPTV_SRTTBASE >> 2) + (SPPTV_SRTTDFLT << 2)) >> 1,
1353 SPPTV_MIN, SPPTV_REXMTMAX);
1354 nsp->nsp_pcb = (caddr_t) cb;
1355 break;
1356
1357 case PRU_DETACH:
1358 if (nsp == NULL) {
1359 error = ENOTCONN;
1360 break;
1361 }
1362 if (cb->s_state > TCPS_LISTEN)
1363 cb = spp_disconnect(cb);
1364 else
1365 cb = spp_close(cb);
1366 break;
1367
1368 case PRU_BIND:
1369 error = ns_pcbbind(nsp, nam);
1370 break;
1371
1372 case PRU_LISTEN:
1373 if (nsp->nsp_lport == 0)
1374 error = ns_pcbbind(nsp, (struct mbuf *)0);
1375 if (error == 0)
1376 cb->s_state = TCPS_LISTEN;
1377 break;
1378
1379 /*
1380 * Initiate connection to peer.
1381 * Enter SYN_SENT state, and mark socket as connecting.
1382 * Start keep-alive timer, setup prototype header,
1383 * Send initial system packet requesting connection.
1384 */
1385 case PRU_CONNECT:
1386 if (nsp->nsp_lport == 0) {
1387 error = ns_pcbbind(nsp, (struct mbuf *)0);
1388 if (error)
1389 break;
1390 }
1391 error = ns_pcbconnect(nsp, nam);
1392 if (error)
1393 break;
1394 soisconnecting(so);
1395 sppstat.spps_connattempt++;
1396 cb->s_state = TCPS_SYN_SENT;
1397 cb->s_did = 0;
1398 spp_template(cb);
1399 cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
1400 cb->s_force = 1 + SPPT_KEEP;
1401 /*
1402 * Other party is required to respond to
1403 * the port I send from, but he is not
1404 * required to answer from where I am sending to,
1405 * so allow wildcarding.
1406  * The original port I am sending to is still saved in
1407 * cb->s_dport.
1408 */
1409 nsp->nsp_fport = 0;
1410 error = spp_output(cb, (struct mbuf *) 0);
1411 break;
1412
1413 case PRU_CONNECT2:
1414 error = EOPNOTSUPP;
1415 break;
1416
1417 /*
1418 * We may decide later to implement connection closing
1419 * handshaking at the spp level optionally.
1420  * Here is the hook to do it:
1421 */
1422 case PRU_DISCONNECT:
1423 cb = spp_disconnect(cb);
1424 break;
1425
1426 /*
1427 * Accept a connection. Essentially all the work is
1428 * done at higher levels; just return the address
1429  * of the peer, storing through nam.
1430 */
1431 case PRU_ACCEPT: {
1432 struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *);
1433
1434 nam->m_len = sizeof (struct sockaddr_ns);
1435 sns->sns_family = AF_NS;
1436 sns->sns_addr = nsp->nsp_faddr;
1437 break;
1438 }
1439
1440 case PRU_SHUTDOWN:
1441 socantsendmore(so);
1442 cb = spp_usrclosed(cb);
1443 if (cb)
1444 error = spp_output(cb, (struct mbuf *) 0);
1445 break;
1446
1447 /*
1448 * After a receive, possibly send acknowledgment
1449 * updating allocation.
1450 */
1451 case PRU_RCVD:
1452 cb->s_flags |= SF_RVD;
1453 (void) spp_output(cb, (struct mbuf *) 0);
1454 cb->s_flags &= ~SF_RVD;
1455 break;
1456
1457 case PRU_ABORT:
1458 (void) spp_drop(cb, ECONNABORTED);
1459 break;
1460
1461 case PRU_SENSE:
1462 case PRU_CONTROL:
1463 m = NULL;
1464 error = EOPNOTSUPP;
1465 break;
1466
1467 case PRU_RCVOOB:
1468 if ((cb->s_oobflags & SF_IOOB) || so->so_oobmark ||
1469 (so->so_state & SS_RCVATMARK)) {
1470 m->m_len = 1;
1471 *mtod(m, caddr_t) = cb->s_iobc;
1472 break;
1473 }
1474 error = EINVAL;
1475 break;
1476
1477 case PRU_SENDOOB:
1478 if (sbspace(&so->so_snd) < -512) {
1479 error = ENOBUFS;
1480 break;
1481 }
1482 cb->s_oobflags |= SF_SOOB;
1483 /* fall into */
1484 case PRU_SEND:
1485 if (controlp) {
1486 u_short *p = mtod(controlp, u_short *);
1487 spp_newchecks[2]++;
1488 if ((p[0] == 5) && p[1] == 1) { /* XXXX, for testing */
1489 cb->s_shdr.sp_dt = *(u_char *)(&p[2]);
1490 spp_newchecks[3]++;
1491 }
1492 m_freem(controlp);
1493 }
1494 controlp = NULL;
1495 error = spp_output(cb, m);
1496 m = NULL;
1497 break;
1498
1499 case PRU_SOCKADDR:
1500 ns_setsockaddr(nsp, nam);
1501 break;
1502
1503 case PRU_PEERADDR:
1504 ns_setpeeraddr(nsp, nam);
1505 break;
1506
1507 case PRU_SLOWTIMO:
1508 cb = spp_timers(cb, (int)nam);
1509 req |= ((int)nam) << 8;
1510 break;
1511
1512 case PRU_FASTTIMO:
1513 case PRU_PROTORCV:
1514 case PRU_PROTOSEND:
1515 error = EOPNOTSUPP;
1516 break;
1517
1518 default:
1519 panic("spp_usrreq");
1520 }
1521 if (cb && (so->so_options & SO_DEBUG || traceallspps))
1522 spp_trace(SA_USER, (u_char)ostate, cb, (struct spidp *)0, req);
1523 release:
1524 if (controlp != NULL)
1525 m_freem(controlp);
1526 if (m != NULL)
1527 m_freem(m);
1528 splx(s);
1529 return (error);
1530 }
1531
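/*
 * User-request entry for the "spp" packet-interface protocol: same as
 * spp_usrreq() except that a freshly attached socket gets header
 * delivery and the packet interface (SF_HI|SF_HO|SF_PI) turned on.
 */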
1532 spp_usrreq_sp(so, req, m, nam, controlp)
1533 struct socket *so;
1534 int req;
1535 struct mbuf *m, *nam, *controlp;
1536 {
1537 int error = spp_usrreq(so, req, m, nam, controlp);
1538
1539 if (req == PRU_ATTACH && error == 0) {
1540 struct nspcb *nsp = sotonspcb(so);
1541 ((struct sppcb *)nsp->nsp_pcb)->s_flags |=
1542 (SF_HI | SF_HO | SF_PI);
1543 }
1544 return (error);
1545 }
1546
1547 /*
1548 * Create template to be used to send spp packets on a connection.
1549 * Called after host entry created, fills
1550 * in a skeletal spp header (choosing connection id),
1551 * minimizing the amount of work necessary when the connection is used.
1552 */
1553 spp_template(cb)
1554 register struct sppcb *cb;
1555 {
1556 register struct nspcb *nsp = cb->s_nspcb;
1557 register struct idp *idp = cb->s_idp;
1558 register struct sockbuf *sb = &(nsp->nsp_socket->so_snd);
1559
1560 idp->idp_pt = NSPROTO_SPP;
1561 idp->idp_sna = nsp->nsp_laddr;
1562 idp->idp_dna = nsp->nsp_faddr;
1563 cb->s_sid = htons(spp_iss);
1564 spp_iss += SPP_ISSINCR/2;
1565 cb->s_alo = 1;
1566 cb->s_cwnd = (sbspace(sb) * CUNIT) / cb->s_mtu;
1567 cb->s_ssthresh = cb->s_cwnd; /* Try to expand fast to full complement
1568 of large packets */
1569 cb->s_cwmx = (sbspace(sb) * CUNIT) / (2 * sizeof(struct spidp));
1570 cb->s_cwmx = max(cb->s_cwmx, cb->s_cwnd);
1571 /* But allow for lots of little packets as well */
1572 }
1573
1574 /*
1575  * Close an SPP control block:
1576 * discard spp control block itself
1577 * discard ns protocol control block
1578 * wake up any sleepers
1579 */
1580 struct sppcb *
1581 spp_close(cb)
1582 register struct sppcb *cb;
1583 {
1584 register struct spidp_q *s;
1585 struct nspcb *nsp = cb->s_nspcb;
1586 struct socket *so = nsp->nsp_socket;
1587 register struct mbuf *m;
1588
1589 s = cb->s_q.si_next;
1590 while (s != &(cb->s_q)) {
1591 s = s->si_next;
1592 m = dtom(s->si_prev);
1593 remque(s->si_prev);
1594 m_freem(m);
1595 }
1596 (void) m_free(dtom(cb->s_idp));
1597 (void) m_free(dtom(cb));
1598 nsp->nsp_pcb = 0;
1599 soisdisconnected(so);
1600 ns_pcbdetach(nsp);
1601 sppstat.spps_closed++;
1602 return ((struct sppcb *)0);
1603 }
1604 /*
1605 * Someday we may do level 3 handshaking
1606  * to close a connection or send a Xerox-style error.
1607 * For now, just close.
1608 */
1609 struct sppcb *
1610 spp_usrclosed(cb)
1611 register struct sppcb *cb;
1612 {
1613 return (spp_close(cb));
1614 }
1615 struct sppcb *
1616 spp_disconnect(cb)
1617 register struct sppcb *cb;
1618 {
1619 return (spp_close(cb));
1620 }
1621 /*
1622 * Drop connection, reporting
1623 * the specified error.
1624 */
1625 struct sppcb *
1626 spp_drop(cb, errno)
1627 register struct sppcb *cb;
1628 int errno;
1629 {
1630 struct socket *so = cb->s_nspcb->nsp_socket;
1631
1632 /*
1633  * Someday, in the Xerox world
1634 * we will generate error protocol packets
1635 * announcing that the socket has gone away.
1636 */
1637 if (TCPS_HAVERCVDSYN(cb->s_state)) {
1638 sppstat.spps_drops++;
1639 cb->s_state = TCPS_CLOSED;
1640 /*(void) tcp_output(cb);*/
1641 } else
1642 sppstat.spps_conndrops++;
1643 so->so_error = errno;
1644 return (spp_close(cb));
1645 }
1646
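/*
 * Abort a connection (used as the ns_pcbnotify() callback above):
 * just close the control block, discarding anything queued.
 */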
1647 spp_abort(nsp)
1648 struct nspcb *nsp;
1649 {
1650
1651 (void) spp_close((struct sppcb *)nsp->nsp_pcb);
1652 }
1653
1654 int spp_backoff[SPP_MAXRXTSHIFT+1] =
1655 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
1656 /*
1657 * Fast timeout routine for processing delayed acks
1658 */
1659 spp_fasttimo()
1660 {
1661 register struct nspcb *nsp;
1662 register struct sppcb *cb;
1663 int s = splnet();
1664
1665 nsp = nspcb.nsp_next;
1666 if (nsp)
1667 for (; nsp != &nspcb; nsp = nsp->nsp_next)
1668 if ((cb = (struct sppcb *)nsp->nsp_pcb) &&
1669 (cb->s_flags & SF_DELACK)) {
1670 cb->s_flags &= ~SF_DELACK;
1671 cb->s_flags |= SF_ACKNOW;
1672 sppstat.spps_delack++;
1673 (void) spp_output(cb, (struct mbuf *) 0);
1674 }
1675 splx(s);
1676 }
1677
1678 /*
1679 * spp protocol timeout routine called every 500 ms.
1680 * Updates the timers in all active pcb's and
1681 * causes finite state machine actions if timers expire.
1682 */
1683 spp_slowtimo()
1684 {
1685 register struct nspcb *ip, *ipnxt;
1686 register struct sppcb *cb;
1687 int s = splnet();
1688 register int i;
1689
1690 /*
1691  * Search through all spp control blocks and update active timers.
1692 */
1693 ip = nspcb.nsp_next;
1694 if (ip == 0) {
1695 splx(s);
1696 return;
1697 }
1698 while (ip != &nspcb) {
1699 cb = nstosppcb(ip);
1700 ipnxt = ip->nsp_next;
1701 if (cb == 0)
1702 goto tpgone;
1703 for (i = 0; i < SPPT_NTIMERS; i++) {
1704 if (cb->s_timer[i] && --cb->s_timer[i] == 0) {
1705 (void) spp_usrreq(cb->s_nspcb->nsp_socket,
1706 PRU_SLOWTIMO, (struct mbuf *)0,
1707 (struct mbuf *)i,
1708 (struct mbuf *)0);
1709 if (ipnxt->nsp_prev != ip)
1710 goto tpgone;
1711 }
1712 }
1713 cb->s_idle++;
1714 if (cb->s_rtt)
1715 cb->s_rtt++;
1716 tpgone:
1717 ip = ipnxt;
1718 }
1719 spp_iss += SPP_ISSINCR/PR_SLOWHZ; /* increment iss */
1720 splx(s);
1721 }
1722 /*
1723 * SPP timer processing.
1724 */
1725 struct sppcb *
1726 spp_timers(cb, timer)
1727 register struct sppcb *cb;
1728 int timer;
1729 {
1730 long rexmt;
1731 int win;
1732
1733 cb->s_force = 1 + timer;
1734 switch (timer) {
1735
1736 /*
1737 * 2 MSL timeout in shutdown went off. TCP deletes connection
1738 * control block.
1739 */
1740 case SPPT_2MSL:
1741 printf("spp: SPPT_2MSL went off for no reason\n");
1742 cb->s_timer[timer] = 0;
1743 break;
1744
1745 /*
1746 * Retransmission timer went off. Message has not
1747 * been acked within retransmit interval. Back off
1748 * to a longer retransmit interval and retransmit one packet.
1749 */
1750 case SPPT_REXMT:
1751 if (++cb->s_rxtshift > SPP_MAXRXTSHIFT) {
1752 cb->s_rxtshift = SPP_MAXRXTSHIFT;
1753 sppstat.spps_timeoutdrop++;
1754 cb = spp_drop(cb, ETIMEDOUT);
1755 break;
1756 }
1757 sppstat.spps_rexmttimeo++;
1758 rexmt = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
1759 rexmt *= spp_backoff[cb->s_rxtshift];
1760 SPPT_RANGESET(cb->s_rxtcur, rexmt, SPPTV_MIN, SPPTV_REXMTMAX);
1761 cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
1762 /*
1763 * If we have backed off fairly far, our srtt
1764 * estimate is probably bogus. Clobber it
1765 * so we'll take the next rtt measurement as our srtt;
1766 * move the current srtt into rttvar to keep the current
1767 * retransmit times until then.
1768 */
1769 if (cb->s_rxtshift > SPP_MAXRXTSHIFT / 4 ) {
1770 cb->s_rttvar += (cb->s_srtt >> 2);
1771 cb->s_srtt = 0;
1772 }
1773 cb->s_snxt = cb->s_rack;
1774 /*
1775 * If timing a packet, stop the timer.
1776 */
1777 cb->s_rtt = 0;
1778 /*
1779 * See very long discussion in tcp_timer.c about congestion
1780  * window and ssthresh
1781 */
1782 win = min(cb->s_swnd, (cb->s_cwnd/CUNIT)) / 2;
1783 if (win < 2)
1784 win = 2;
1785 cb->s_cwnd = CUNIT;
1786 cb->s_ssthresh = win * CUNIT;
1787 (void) spp_output(cb, (struct mbuf *) 0);
1788 break;
1789
1790 /*
1791  * Persistence timer into zero window.
1792 * Force a probe to be sent.
1793 */
1794 case SPPT_PERSIST:
1795 sppstat.spps_persisttimeo++;
1796 spp_setpersist(cb);
1797 (void) spp_output(cb, (struct mbuf *) 0);
1798 break;
1799
1800 /*
1801 * Keep-alive timer went off; send something
1802 * or drop connection if idle for too long.
1803 */
1804 case SPPT_KEEP:
1805 sppstat.spps_keeptimeo++;
1806 if (cb->s_state < TCPS_ESTABLISHED)
1807 goto dropit;
1808 if (cb->s_nspcb->nsp_socket->so_options & SO_KEEPALIVE) {
1809 if (cb->s_idle >= SPPTV_MAXIDLE)
1810 goto dropit;
1811 sppstat.spps_keepprobe++;
1812 (void) spp_output(cb, (struct mbuf *) 0);
1813 } else
1814 cb->s_idle = 0;
1815 cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
1816 break;
1817 dropit:
1818 sppstat.spps_keepdrops++;
1819 cb = spp_drop(cb, ETIMEDOUT);
1820 break;
1821 }
1822 return (cb);
1823 }
1824 #ifndef lint
1825 int SppcbSize = sizeof (struct sppcb);
1826 int NspcbSize = sizeof (struct nspcb);
1827 #endif /* lint */