blob: 4e8da4e0c70542816384ded36cd0255e1d4ac21a [file] [log] [blame]
/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#ifdef __FreeBSD__
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 310590 2016-12-26 11:06:41Z tuexen $");
36#endif
37
38#include <netinet/sctp_os.h>
39#include <netinet/sctp_var.h>
40#include <netinet/sctp_sysctl.h>
41#include <netinet/sctp_pcb.h>
42#include <netinet/sctp_header.h>
43#include <netinet/sctputil.h>
44#include <netinet/sctp_output.h>
45#include <netinet/sctp_input.h>
46#include <netinet/sctp_auth.h>
47#include <netinet/sctp_indata.h>
48#include <netinet/sctp_asconf.h>
49#include <netinet/sctp_bsd_addr.h>
50#include <netinet/sctp_timer.h>
51#include <netinet/sctp_crc32.h>
52#if defined(INET) || defined(INET6)
53#if !defined(__Userspace_os_Windows)
54#include <netinet/udp.h>
55#endif
56#endif
57#if defined(__FreeBSD__)
58#include <sys/smp.h>
59#endif
60#if defined(__Userspace__)
61#include <user_socketvar.h>
62#endif
63
64#if defined(__APPLE__)
65#define APPLE_FILE_NO 2
66#endif
67
68
69static void
70sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
71{
72 struct sctp_nets *net;
73
74 /* This now not only stops all cookie timers
75 * it also stops any INIT timers as well. This
76 * will make sure that the timers are stopped in
77 * all collision cases.
78 */
79 SCTP_TCB_LOCK_ASSERT(stcb);
80 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
81 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
82 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
83 stcb->sctp_ep,
84 stcb,
85 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
86 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
87 sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
88 stcb->sctp_ep,
89 stcb,
90 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
91 }
92 }
93}
94
/* INIT handler */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset,
                 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
                 struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
                 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock,
#if defined(__FreeBSD__)
                 uint8_t mflowtype, uint32_t mflowid,
#endif
                 uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;

	/*
	 * Validate an incoming INIT chunk and answer it: an ABORT on a
	 * malformed chunk, unacceptable parameters, or no listening
	 * socket; a SHUTDOWN-ACK when we are already in
	 * SHUTDOWN-ACK-SENT; otherwise an INIT-ACK.  When the abort path
	 * is taken and stcb is non-NULL, *abort_no_unlock is set to 1 to
	 * tell the caller the TCB has already been torn down/unlocked by
	 * sctp_abort_association().
	 */
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
		(void *)stcb);
	if (stcb == NULL) {
		/* No association yet: hold the endpoint read lock while
		 * we inspect inp; released at outnow. */
		SCTP_INP_RLOCK(inp);
	}
	/* validate length */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	init = &cp->init;
	/* RFC 4960: the Initiate Tag must not be zero. */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* The advertised receiver window must meet our local minimum. */
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* RFC 4960: MIS (max inbound streams) of zero is a protocol error. */
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* RFC 4960: OS (outbound streams) of zero is a protocol error. */
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* Check the AUTH-related parameters carried inside the INIT. */
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	                                   offset + ntohs(cp->ch.chunk_length))) {
		/* auth parameter(s) error... send abort */
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		                             "Problem with AUTH parameters");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* We are only accepting if we have a socket with positive so_qlimit.*/
	if ((stcb == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	     (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	     (inp->sctp_socket == NULL) ||
	     (inp->sctp_socket->so_qlimit == 0))) {
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed.
		 * the lookup will always find the existing assoc so stcb
		 * would not be NULL. It may be questionable to do this
		 * since we COULD just send back the INIT-ACK and hope that
		 * the app did accept()'s by the time the COOKIE was sent. But
		 * there is a price to pay for COOKIE generation and I don't
		 * want to pay it on the chance that the app will actually do
		 * some accepts(). The App just looses and should NOT be in
		 * this state :-)
		 */
		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
			/* Not black-holing: tell the sender there is no listener. */
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			                             "No listener");
			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
#if defined(__FreeBSD__)
			                mflowtype, mflowid, inp->fibnum,
#endif
			                vrf_id, port);
		}
		goto outnow;
	}
	if ((stcb != NULL) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Already tearing down: re-send the SHUTDOWN-ACK instead. */
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
		sctp_send_shutdown_ack(stcb, NULL);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
	} else {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
		sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
		                       src, dst, sh, cp,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, port,
		                       ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
	}
 outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}
239
/*
 * NOTE: sctp_process_init() further below is the routine that consumes a
 * peer "INIT/INIT-ACK" chunk and returns a value < 0 on error.  The next
 * function, sctp_is_there_unsent_data(), is an unrelated helper.
 */
243
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	int unsent_data;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	/* This function returns if any stream has true unsent data on it.
	 * Note that as it looks through it will clean up any places that
	 * have old data that has been sent but left at top of stream queue.
	 *
	 * Returns the number of streams found with unsent data before the
	 * scan stopped (0 or 1, since it breaks at the first hit).  Runs
	 * under the TCB send lock.
	 */
	asoc = &stcb->asoc;
	unsent_data = 0;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/*sa_ignore FREED_MEMORY*/
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/* We are doing differed cleanup. Last
				 * time through when we took all the data
				 * the sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					/* Diagnostic: a fully-sent message missing its
					 * end marker; log the inconsistent flags. */
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					            sp->sender_all_done,
					            sp->length,
					            sp->msg_is_complete,
					            sp->put_last_out);
				}
				/* Drop the stale queue entry and release its resources. */
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp, 1);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
				/* Anything left behind the cleaned entry is unsent data. */
				if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
					unsent_data++;
				}
			} else {
				unsent_data++;
			}
			if (unsent_data > 0) {
				/* One hit is enough to answer the question. */
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}
312
/*
 * Consume the parameters of a peer INIT (or INIT-ACK, which shares the
 * same leading layout) chunk: record the peer's vtag/rwnd/initial TSN,
 * trim our outbound streams down to what the peer can take in, and
 * (re)allocate the inbound stream array.  Returns 0 on success, -1 when
 * the inbound stream array cannot be allocated.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}

		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		/* The peer accepts fewer inbound streams than we opened:
		 * abandon everything queued on the excess streams. */
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First purge already-built chunks destined to dropped streams. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.sid >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
				}
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					/* Tell the ULP its datagram will never be sent. */
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					                0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/*sa_ignore FREED_MEMORY*/
			}
		}
		/* Then purge not-yet-chunked pending messages on those streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					                stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/*sa_ignore FREED_MEMORY*/
				}
				outs->state = SCTP_STREAM_CLOSED;
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->streamoutcnt = asoc->pre_open_streams;
	if (asoc->strmout) {
		for (i = 0; i < asoc->streamoutcnt; i++) {
			asoc->strmout[i].state = SCTP_STREAM_OPEN;
		}
	}
	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		for (i = 0; i < asoc->streamincnt; i++) {
			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound stream count = min(peer's OS, our configured maximum). */
	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
		asoc->streamincnt = ntohs(init->num_outbound_streams);
	} else {
		asoc->streamincnt = asoc->max_inbound_streams;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	            sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].sid = i;
		/* 0xffffffff == "nothing delivered yet" sentinel for last mid. */
		asoc->strmin[i].last_mid_delivered = 0xffffffff;
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
		asoc->strmin[i].pd_api_started = 0;
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
460
/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 *
 * Verifies the INIT-ACK parameters, folds them into the association
 * (via sctp_process_init), loads the peer's addresses, stops the INIT
 * timer, updates the RTO from the handshake round trip, and queues a
 * COOKIE-ECHO.  Sets *abort_no_unlock when an abort path has already
 * destroyed/unlocked the TCB.
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
                      struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
                      struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
                      struct sctp_nets *net, int *abort_no_unlock,
#if defined(__FreeBSD__)
                      uint8_t mflowtype, uint32_t mflowid,
#endif
                      uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;

	op_err = sctp_arethere_unrecognized_parameters(m,
	                                               (offset + sizeof(struct sctp_init_chunk)),
	                                               &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t)nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m,
	                                            (offset + sizeof(struct sctp_init_chunk)), initack_limit,
	                                            src, dst, NULL, stcb->asoc.port))) {
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		                             "Problem with address parameters");
		SCTPDBG(SCTP_DEBUG_INPUT1,
		        "Load addresses from INIT causes an abort %d\n",
		        retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		                       src, dst, sh, op_err,
#if defined(__FreeBSD__)
		                       mflowtype, mflowid,
#endif
		                       vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->asconf_supported == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}

	/* Pick the HMAC algorithm both sides support. */
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	                                                stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		               stcb->asoc.overall_error_count,
		               0,
		               SCTP_FROM_SCTP_INPUT,
		               __LINE__);
	}
	/* A valid INIT-ACK proves the peer is alive: reset error counters. */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assue that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	                asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
	                              SCTP_RTT_FROM_NON_DATA);
#if defined(__Userspace__)
	if (stcb->sctp_ep->recv_callback) {
		if (stcb->sctp_socket) {
			uint32_t inqueue_bytes, sb_free_now;
			struct sctp_inpcb *inp;

			inp = stcb->sctp_ep;
			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);

			/* check if the amount free in the send socket buffer crossed the threshold */
			if (inp->send_callback &&
			    (((inp->send_sb_threshold > 0) &&
			      (sb_free_now >= inp->send_sb_threshold) &&
			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
			     (inp->send_sb_threshold == 0))) {
				/* Drop the TCB lock around the user callback; the
				 * refcount keeps the TCB alive meanwhile. */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				inp->send_callback(stcb->sctp_socket, sb_free_now);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
	}
#endif
	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			uint16_t len;

			len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
			/* We abort with an error of missing mandatory param */
			op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
			if (op_err != NULL) {
				struct sctp_error_missing_param *cause;

				SCTP_BUF_LEN(op_err) = len;
				cause = mtod(op_err, struct sctp_error_missing_param *);
				/* Subtract the reserved param */
				cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
				cause->cause.length = htons(len);
				cause->num_missing_params = htonl(1);
				cause->type[0] = htons(SCTP_STATE_COOKIE);
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			                       src, dst, sh, op_err,
#if defined(__FreeBSD__)
			                       mflowtype, mflowid,
#endif
			                       vrf_id, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}

	return (0);
}
620
621static void
622sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
623 struct sctp_tcb *stcb, struct sctp_nets *net)
624{
625 union sctp_sockstore store;
626 struct sctp_nets *r_net, *f_net;
627 struct timeval tv;
628 int req_prim = 0;
629 uint16_t old_error_counter;
630
631 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
632 /* Invalid length */
633 return;
634 }
635
636 memset(&store, 0, sizeof(store));
637 switch (cp->heartbeat.hb_info.addr_family) {
638#ifdef INET
639 case AF_INET:
640 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
641 store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
642#ifdef HAVE_SIN_LEN
643 store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
644#endif
645 store.sin.sin_port = stcb->rport;
646 memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
647 sizeof(store.sin.sin_addr));
648 } else {
649 return;
650 }
651 break;
652#endif
653#ifdef INET6
654 case AF_INET6:
655 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
656 store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
657#ifdef HAVE_SIN6_LEN
658 store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
659#endif
660 store.sin6.sin6_port = stcb->rport;
661 memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
662 } else {
663 return;
664 }
665 break;
666#endif
667#if defined(__Userspace__)
668 case AF_CONN:
669 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) {
670 store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family;
671#ifdef HAVE_SCONN_LEN
672 store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len;
673#endif
674 store.sconn.sconn_port = stcb->rport;
675 memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *));
676 } else {
677 return;
678 }
679 break;
680#endif
681 default:
682 return;
683 }
684 r_net = sctp_findnet(stcb, &store.sa);
685 if (r_net == NULL) {
686 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
687 return;
688 }
689 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
690 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
691 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
692 /*
693 * If the its a HB and it's random value is correct when can
694 * confirm the destination.
695 */
696 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
697 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
698 stcb->asoc.primary_destination = r_net;
699 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
700 f_net = TAILQ_FIRST(&stcb->asoc.nets);
701 if (f_net != r_net) {
702 /* first one on the list is NOT the primary
703 * sctp_cmpaddr() is much more efficient if
704 * the primary is the first on the list, make it
705 * so.
706 */
707 TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
708 TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
709 }
710 req_prim = 1;
711 }
712 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
713 stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
714 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
715 r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
716 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
717 }
718 old_error_counter = r_net->error_count;
719 r_net->error_count = 0;
720 r_net->hb_responded = 1;
721 tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
722 tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
723 /* Now lets do a RTO with this */
724 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy,
725 SCTP_RTT_FROM_NON_DATA);
726 if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
727 r_net->dest_state |= SCTP_ADDR_REACHABLE;
728 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
729 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
730 }
731 if (r_net->dest_state & SCTP_ADDR_PF) {
732 r_net->dest_state &= ~SCTP_ADDR_PF;
733 stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
734 }
735 if (old_error_counter > 0) {
736 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
737 stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
738 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
739 }
740 if (r_net == stcb->asoc.primary_destination) {
741 if (stcb->asoc.alternate) {
742 /* release the alternate, primary is good */
743 sctp_free_remote_addr(stcb->asoc.alternate);
744 stcb->asoc.alternate = NULL;
745 }
746 }
747 /* Mobility adaptation */
748 if (req_prim) {
749 if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
750 SCTP_MOBILITY_BASE) ||
751 sctp_is_mobility_feature_on(stcb->sctp_ep,
752 SCTP_MOBILITY_FASTHANDOFF)) &&
753 sctp_is_mobility_feature_on(stcb->sctp_ep,
754 SCTP_MOBILITY_PRIM_DELETED)) {
755
756 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
757 stcb->sctp_ep, stcb, NULL,
758 SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
759 if (sctp_is_mobility_feature_on(stcb->sctp_ep,
760 SCTP_MOBILITY_FASTHANDOFF)) {
761 sctp_assoc_immediate_retrans(stcb,
762 stcb->asoc.primary_destination);
763 }
764 if (sctp_is_mobility_feature_on(stcb->sctp_ep,
765 SCTP_MOBILITY_BASE)) {
766 sctp_move_chunks_from_net(stcb,
767 stcb->asoc.deleted_primary);
768 }
769 sctp_delete_prim_timer(stcb->sctp_ep, stcb,
770 stcb->asoc.deleted_primary);
771 }
772 }
773}
774
775static int
776sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
777{
778 /* return 0 means we want you to proceed with the abort
779 * non-zero means no abort processing
780 */
781 struct sctpasochead *head;
782
783 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
784 /* generate a new vtag and send init */
785 LIST_REMOVE(stcb, sctp_asocs);
786 stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
787 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
788 /* put it in the bucket in the vtag hash of assoc's for the system */
789 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
790 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
791 return (1);
792 }
793 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
794 /* treat like a case where the cookie expired i.e.:
795 * - dump current cookie.
796 * - generate a new vtag.
797 * - resend init.
798 */
799 /* generate a new vtag and send init */
800 LIST_REMOVE(stcb, sctp_asocs);
801 stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
802 stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
803 sctp_stop_all_cookie_timers(stcb);
804 sctp_toss_old_cookies(stcb, &stcb->asoc);
805 stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
806 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
807 /* put it in the bucket in the vtag hash of assoc's for the system */
808 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
809 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
810 return (1);
811 }
812 return (0);
813}
814
815static int
816sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
817 struct sctp_nets *net)
818{
819 /* return 0 means we want you to proceed with the abort
820 * non-zero means no abort processing
821 */
822 if (stcb->asoc.auth_supported == 0) {
823 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
824 return (0);
825 }
826 sctp_asconf_send_nat_state_update(stcb, net);
827 return (1);
828}
829
830
/*
 * Process an inbound ABORT chunk: recognize the two NAT-related error
 * causes (which may rescue the association instead of killing it),
 * otherwise notify the ULP and free the TCB.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *abort,
                  struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	uint16_t len;
	uint16_t error;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(abort->ch.chunk_length);
	if (len > sizeof (struct sctp_chunkhdr)) {
		/* Need to check the cause codes for our
		 * two magic nat aborts which don't kill the assoc
		 * necessarily.
		 */
		struct sctp_gen_error_cause *cause;

		/* First error cause sits right after the chunk header. */
		cause = (struct sctp_gen_error_cause *)(abort + 1);
		error = ntohs(cause->code);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
				abort->ch.chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				/* Association was rescued; do not tear it down. */
				return;
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
				abort->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				/* Association was rescued; do not tear it down. */
				return;
			}
		}
	} else {
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
	                SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* Lock-order dance: take the socket lock before freeing the
	 * assoc, holding a refcount while the TCB lock is dropped. */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}
901
902static void
903sctp_start_net_timers(struct sctp_tcb *stcb)
904{
905 uint32_t cnt_hb_sent;
906 struct sctp_nets *net;
907
908 cnt_hb_sent = 0;
909 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
910 /* For each network start:
911 * 1) A pmtu timer.
912 * 2) A HB timer
913 * 3) If the dest in unconfirmed send
914 * a hb as well if under max_hb_burst have
915 * been sent.
916 */
917 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
918 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
919 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
920 (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
921 sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
922 cnt_hb_sent++;
923 }
924 }
925 if (cnt_hb_sent) {
926 sctp_chunk_output(stcb->sctp_ep, stcb,
927 SCTP_OUTPUT_FROM_COOKIE_ACK,
928 SCTP_SO_NOT_LOCKED);
929 }
930}
931
932
/*
 * Handle a received SHUTDOWN chunk.  Processes the cumulative-TSN ack it
 * carries, terminates any partial-delivery (PD-API) record in progress,
 * moves the association toward SHUTDOWN-RECEIVED / SHUTDOWN-ACK-SENT, and
 * sends a SHUTDOWN-ACK once all outstanding data has been delivered.
 * Sets *abort_flag (via sctp_update_acked) if the ack processing aborts
 * the association.
 * NOTE(review): assumes the TCB lock is held on entry -- confirm with
 * callers; on __APPLE__/SCTP_SO_LOCK_TESTING builds the lock is dropped
 * and re-taken around the socket-lock acquisition below.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;
	int old_state;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	/* A SHUTDOWN is meaningless before the association is established. */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	}
	/* Remember the state before ack processing; used at the bottom. */
	old_state = SCTP_GET_STATE(asoc);
	sctp_update_acked(stcb, cp, abort_flag);
	if (*abort_flag) {
		return;
	}
	if (asoc->control_pdapi) {
		/* With a normal shutdown
		 * we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->control_pdapi->on_strm_q) {
			struct sctp_stream_in *strm;

			/* Unhook the partial record from its stream queue. */
			strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
			if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
			} else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
#ifdef INVARIANTS
			} else {
				panic("Unknown state on ctrl:%p on_strm_q:%d",
				    asoc->control_pdapi,
				    asoc->control_pdapi->on_strm_q);
#endif
			}
		}
		/* Mark the record complete-but-aborted so the reader returns. */
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: the socket lock must be taken without
		 * holding the TCB lock, so bump the refcnt, drop/re-take,
		 * then re-check that the assoc survived the window.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		if (stcb->sctp_socket) {
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/* notify upper layer that peer has initiated a shutdown */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
		    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			sctp_send_shutdown_ack(stcb, net);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, net);
		} else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
			/* Peer retransmitted SHUTDOWN; resend our SHUTDOWN-ACK. */
			sctp_send_shutdown_ack(stcb, net);
		}
	}
}
1061
1062static void
1063sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
1064 struct sctp_tcb *stcb,
1065 struct sctp_nets *net)
1066{
1067 struct sctp_association *asoc;
1068#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1069 struct socket *so;
1070
1071 so = SCTP_INP_SO(stcb->sctp_ep);
1072#endif
1073 SCTPDBG(SCTP_DEBUG_INPUT2,
1074 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
1075 if (stcb == NULL)
1076 return;
1077
1078 asoc = &stcb->asoc;
1079 /* process according to association state */
1080 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
1081 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
1082 /* unexpected SHUTDOWN-ACK... do OOTB handling... */
1083 sctp_send_shutdown_complete(stcb, net, 1);
1084 SCTP_TCB_UNLOCK(stcb);
1085 return;
1086 }
1087 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
1088 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
1089 /* unexpected SHUTDOWN-ACK... so ignore... */
1090 SCTP_TCB_UNLOCK(stcb);
1091 return;
1092 }
1093 if (asoc->control_pdapi) {
1094 /* With a normal shutdown
1095 * we assume the end of last record.
1096 */
1097 SCTP_INP_READ_LOCK(stcb->sctp_ep);
1098 asoc->control_pdapi->end_added = 1;
1099 asoc->control_pdapi->pdapi_aborted = 1;
1100 asoc->control_pdapi = NULL;
1101 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1102#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1103 atomic_add_int(&stcb->asoc.refcnt, 1);
1104 SCTP_TCB_UNLOCK(stcb);
1105 SCTP_SOCKET_LOCK(so, 1);
1106 SCTP_TCB_LOCK(stcb);
1107 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1108 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1109 /* assoc was freed while we were unlocked */
1110 SCTP_SOCKET_UNLOCK(so, 1);
1111 return;
1112 }
1113#endif
1114 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1115#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1116 SCTP_SOCKET_UNLOCK(so, 1);
1117#endif
1118 }
1119#ifdef INVARIANTS
1120 if (!TAILQ_EMPTY(&asoc->send_queue) ||
1121 !TAILQ_EMPTY(&asoc->sent_queue) ||
1122 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
1123 panic("Queues are not empty when handling SHUTDOWN-ACK");
1124 }
1125#endif
1126 /* stop the timer */
1127 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
1128 SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
1129 /* send SHUTDOWN-COMPLETE */
1130 sctp_send_shutdown_complete(stcb, net, 0);
1131 /* notify upper layer protocol */
1132 if (stcb->sctp_socket) {
1133 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1134 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1135 stcb->sctp_socket->so_snd.sb_cc = 0;
1136 }
1137 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
1138 }
1139 SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
1140 /* free the TCB but first save off the ep */
1141#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1142 atomic_add_int(&stcb->asoc.refcnt, 1);
1143 SCTP_TCB_UNLOCK(stcb);
1144 SCTP_SOCKET_LOCK(so, 1);
1145 SCTP_TCB_LOCK(stcb);
1146 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1147#endif
1148 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
1149 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1150#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1151 SCTP_SOCKET_UNLOCK(so, 1);
1152#endif
1153}
1154
1155/*
1156 * Skip past the param header and then we will find the chunk that caused the
1157 * problem. There are two possibilities ASCONF or FWD-TSN other than that and
1158 * our peer must be broken.
1159 */
1160static void
1161sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
1162 struct sctp_nets *net)
1163{
1164 struct sctp_chunkhdr *chk;
1165
1166 chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
1167 switch (chk->chunk_type) {
1168 case SCTP_ASCONF_ACK:
1169 case SCTP_ASCONF:
1170 sctp_asconf_cleanup(stcb, net);
1171 break;
1172 case SCTP_IFORWARD_CUM_TSN:
1173 case SCTP_FORWARD_CUM_TSN:
1174 stcb->asoc.prsctp_supported = 0;
1175 break;
1176 default:
1177 SCTPDBG(SCTP_DEBUG_INPUT2,
1178 "Peer does not support chunk type %d(%x)??\n",
1179 chk->chunk_type, (uint32_t) chk->chunk_type);
1180 break;
1181 }
1182}
1183
1184/*
1185 * Skip past the param header and then we will find the param that caused the
1186 * problem. There are a number of param's in a ASCONF OR the prsctp param
1187 * these will turn of specific features.
1188 * XXX: Is this the right thing to do?
1189 */
1190static void
1191sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
1192{
1193 struct sctp_paramhdr *pbad;
1194
1195 pbad = phdr + 1;
1196 switch (ntohs(pbad->param_type)) {
1197 /* pr-sctp draft */
1198 case SCTP_PRSCTP_SUPPORTED:
1199 stcb->asoc.prsctp_supported = 0;
1200 break;
1201 case SCTP_SUPPORTED_CHUNK_EXT:
1202 break;
1203 /* draft-ietf-tsvwg-addip-sctp */
1204 case SCTP_HAS_NAT_SUPPORT:
1205 stcb->asoc.peer_supports_nat = 0;
1206 break;
1207 case SCTP_ADD_IP_ADDRESS:
1208 case SCTP_DEL_IP_ADDRESS:
1209 case SCTP_SET_PRIM_ADDR:
1210 stcb->asoc.asconf_supported = 0;
1211 break;
1212 case SCTP_SUCCESS_REPORT:
1213 case SCTP_ERROR_CAUSE_IND:
1214 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
1215 SCTPDBG(SCTP_DEBUG_INPUT2,
1216 "Turning off ASCONF to this strange peer\n");
1217 stcb->asoc.asconf_supported = 0;
1218 break;
1219 default:
1220 SCTPDBG(SCTP_DEBUG_INPUT2,
1221 "Peer does not support param type %d(%x)??\n",
1222 pbad->param_type, (uint32_t) pbad->param_type);
1223 break;
1224 }
1225}
1226
/*
 * Process an incoming ERROR (operation error) chunk: iterate over every
 * error cause it carries, react to the ones we understand, and finally
 * notify the ULP with the first cause code seen.  Returns 0 normally;
 * returns -1 only when the association was freed here (too many stale
 * cookies).
 * NOTE(review): assumes the TCB lock is held on entry -- confirm with
 * callers.
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int chklen;
	struct sctp_paramhdr *phdr;
	uint16_t error, error_type;
	uint16_t error_len;
	struct sctp_association *asoc;
	int adjust;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
	/* 'error' latches the FIRST cause code for the ULP notification. */
	error = 0;
	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
		/* Process an Error Cause */
		error_type = ntohs(phdr->param_type);
		error_len = ntohs(phdr->param_length);
		/*
		 * A cause longer than what remains in the chunk, or of zero
		 * length, ends the walk; this also catches a negative
		 * chklen (error_len promotes to int, so the > holds).
		 */
		if ((error_len > chklen) || (error_len == 0)) {
			/* invalid param length for this param */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
			    chklen, error_len);
			return (0);
		}
		if (error == 0) {
			/* report the first error cause */
			error = error_type;
		}
		switch (error_type) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
			    error_type);
			break;
		case SCTP_CAUSE_NAT_COLLIDING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    ch->chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_NAT_MISSING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    ch->chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
				int *p;

				p = (int *)((caddr_t)phdr + sizeof(*phdr));
				/* Save the time doubled */
				asoc->cookie_preserve_req = ntohl(*p) << 1;
				asoc->stale_cookie_count++;
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					/* Give up: abort the assoc and free it. */
					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
					/* now free the asoc */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(stcb->sctp_ep);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					return (-1);
				}
				/* blast back to INIT state */
				sctp_toss_old_cookies(stcb, &stcb->asoc);
				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
				asoc->state |= SCTP_STATE_COOKIE_WAIT;
				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't receive a IPv6 packet. so we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			sctp_process_unrecog_chunk(stcb, phdr, net);
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			sctp_process_unrecog_param(stcb, phdr);
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
			    error_type);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
			 */
			break;
		default:
			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
			    error_type);
			break;
		}
		/* Advance to the next cause (causes are 32-bit padded). */
		adjust = SCTP_SIZE32(error_len);
		chklen -= adjust;
		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
	}
	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, error, ch, SCTP_SO_NOT_LOCKED);
	return (0);
}
1380
/*
 * Handle a received INIT-ACK chunk.  Validates the chunk length and the
 * mandatory INIT-ACK fields (non-zero initiate tag, a_rwnd at least
 * SCTP_MIN_RWND, non-zero stream counts), aborting the association on any
 * violation.  In COOKIE-WAIT state it processes the parameters, confirms
 * the primary destination, and moves to COOKIE-ECHOED; in any other state
 * the chunk is discarded.  Returns 0 on success, -1 on failure; sets
 * *abort_no_unlock when the abort path has already released the TCB lock.
 */
static int
sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
#if defined(__FreeBSD__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id)
{
	struct sctp_init_ack *init_ack;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_init_ack: handling INIT-ACK\n");

	if (stcb == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init_ack: TCB is null\n");
		return (-1);
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	init_ack = &cp->init;
	/* validate parameters */
	if (init_ack->initiate_tag == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* Advertised receiver window must be at least the minimum. */
	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_inbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (stcb->asoc.state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
		    net, abort_no_unlock,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}
1535
/*
 * Forward declaration; the definition appears later in this file.
 * Presumably sets up a brand-new association (TCB) from a validated
 * COOKIE-ECHO when no existing association matches -- see the definition
 * below for the authoritative contract.
 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
#if defined(__FreeBSD__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port);
1547
1548
1549/*
1550 * handle a state cookie for an existing association m: input packet mbuf
1551 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
1552 * "split" mbuf and the cookie signature does not exist offset: offset into
1553 * mbuf to the cookie-echo chunk
1554 */
1555static struct sctp_tcb *
1556sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
1557 struct sockaddr *src, struct sockaddr *dst,
1558 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1559 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
1560 struct sockaddr *init_src, int *notification,
1561 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1562#if defined(__FreeBSD__)
1563 uint8_t mflowtype, uint32_t mflowid,
1564#endif
1565 uint32_t vrf_id, uint16_t port)
1566{
1567 struct sctp_association *asoc;
1568 struct sctp_init_chunk *init_cp, init_buf;
1569 struct sctp_init_ack_chunk *initack_cp, initack_buf;
1570 struct sctp_nets *net;
1571 struct mbuf *op_err;
1572 int init_offset, initack_offset, i;
1573 int retval;
1574 int spec_flag = 0;
1575 uint32_t how_indx;
1576#if defined(SCTP_DETAILED_STR_STATS)
1577 int j;
1578#endif
1579
1580 net = *netp;
1581 /* I know that the TCB is non-NULL from the caller */
1582 asoc = &stcb->asoc;
1583 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1584 if (asoc->cookie_how[how_indx] == 0)
1585 break;
1586 }
1587 if (how_indx < sizeof(asoc->cookie_how)) {
1588 asoc->cookie_how[how_indx] = 1;
1589 }
1590 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1591 /* SHUTDOWN came in after sending INIT-ACK */
1592 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1593 op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
1594 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
1595#if defined(__FreeBSD__)
1596 mflowtype, mflowid, inp->fibnum,
1597#endif
1598 vrf_id, net->port);
1599 if (how_indx < sizeof(asoc->cookie_how))
1600 asoc->cookie_how[how_indx] = 2;
1601 return (NULL);
1602 }
1603 /*
1604 * find and validate the INIT chunk in the cookie (peer's info) the
1605 * INIT should start after the cookie-echo header struct (chunk
1606 * header, state cookie header struct)
1607 */
1608 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1609
1610 init_cp = (struct sctp_init_chunk *)
1611 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1612 (uint8_t *) & init_buf);
1613 if (init_cp == NULL) {
1614 /* could not pull a INIT chunk in cookie */
1615 return (NULL);
1616 }
1617 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1618 return (NULL);
1619 }
1620 /*
1621 * find and validate the INIT-ACK chunk in the cookie (my info) the
1622 * INIT-ACK follows the INIT chunk
1623 */
1624 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
1625 initack_cp = (struct sctp_init_ack_chunk *)
1626 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1627 (uint8_t *) & initack_buf);
1628 if (initack_cp == NULL) {
1629 /* could not pull INIT-ACK chunk in cookie */
1630 return (NULL);
1631 }
1632 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1633 return (NULL);
1634 }
1635 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1636 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1637 /*
1638 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1639 * to get into the OPEN state
1640 */
1641 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1642 /*-
1643 * Opps, this means that we somehow generated two vtag's
1644 * the same. I.e. we did:
1645 * Us Peer
1646 * <---INIT(tag=a)------
1647 * ----INIT-ACK(tag=t)-->
1648 * ----INIT(tag=t)------> *1
1649 * <---INIT-ACK(tag=a)---
1650 * <----CE(tag=t)------------- *2
1651 *
1652 * At point *1 we should be generating a different
1653 * tag t'. Which means we would throw away the CE and send
1654 * ours instead. Basically this is case C (throw away side).
1655 */
1656 if (how_indx < sizeof(asoc->cookie_how))
1657 asoc->cookie_how[how_indx] = 17;
1658 return (NULL);
1659
1660 }
1661 switch (SCTP_GET_STATE(asoc)) {
1662 case SCTP_STATE_COOKIE_WAIT:
1663 case SCTP_STATE_COOKIE_ECHOED:
1664 /*
1665 * INIT was sent but got a COOKIE_ECHO with the
1666 * correct tags... just accept it...but we must
1667 * process the init so that we can make sure we
1668 * have the right seq no's.
1669 */
1670 /* First we must process the INIT !! */
1671 retval = sctp_process_init(init_cp, stcb);
1672 if (retval < 0) {
1673 if (how_indx < sizeof(asoc->cookie_how))
1674 asoc->cookie_how[how_indx] = 3;
1675 return (NULL);
1676 }
1677 /* we have already processed the INIT so no problem */
1678 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
1679 stcb, net,
1680 SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1681 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
1682 stcb, net,
1683 SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1684 /* update current state */
1685 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1686 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1687 else
1688 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1689
1690 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1691 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1692 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1693 stcb->sctp_ep, stcb, asoc->primary_destination);
1694 }
1695 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1696 sctp_stop_all_cookie_timers(stcb);
1697 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1698 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1699 (inp->sctp_socket->so_qlimit == 0)
1700 ) {
1701#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1702 struct socket *so;
1703#endif
1704 /*
1705 * Here is where collision would go if we
1706 * did a connect() and instead got a
1707 * init/init-ack/cookie done before the
1708 * init-ack came back..
1709 */
1710 stcb->sctp_ep->sctp_flags |=
1711 SCTP_PCB_FLAGS_CONNECTED;
1712#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1713 so = SCTP_INP_SO(stcb->sctp_ep);
1714 atomic_add_int(&stcb->asoc.refcnt, 1);
1715 SCTP_TCB_UNLOCK(stcb);
1716 SCTP_SOCKET_LOCK(so, 1);
1717 SCTP_TCB_LOCK(stcb);
1718 atomic_add_int(&stcb->asoc.refcnt, -1);
1719 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1720 SCTP_SOCKET_UNLOCK(so, 1);
1721 return (NULL);
1722 }
1723#endif
1724 soisconnected(stcb->sctp_socket);
1725#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1726 SCTP_SOCKET_UNLOCK(so, 1);
1727#endif
1728 }
1729 /* notify upper layer */
1730 *notification = SCTP_NOTIFY_ASSOC_UP;
1731 /*
1732 * since we did not send a HB make sure we
1733 * don't double things
1734 */
1735 net->hb_responded = 1;
1736 net->RTO = sctp_calculate_rto(stcb, asoc, net,
1737 &cookie->time_entered,
1738 sctp_align_unsafe_makecopy,
1739 SCTP_RTT_FROM_NON_DATA);
1740
1741 if (stcb->asoc.sctp_autoclose_ticks &&
1742 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1743 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1744 inp, stcb, NULL);
1745 }
1746 break;
1747 default:
1748 /*
1749 * we're in the OPEN state (or beyond), so
1750 * peer must have simply lost the COOKIE-ACK
1751 */
1752 break;
1753 } /* end switch */
1754 sctp_stop_all_cookie_timers(stcb);
1755 /*
1756 * We ignore the return code here.. not sure if we should
1757 * somehow abort.. but we do have an existing asoc. This
1758 * really should not fail.
1759 */
1760 if (sctp_load_addresses_from_init(stcb, m,
1761 init_offset + sizeof(struct sctp_init_chunk),
1762 initack_offset, src, dst, init_src, stcb->asoc.port)) {
1763 if (how_indx < sizeof(asoc->cookie_how))
1764 asoc->cookie_how[how_indx] = 4;
1765 return (NULL);
1766 }
1767 /* respond with a COOKIE-ACK */
1768 sctp_toss_old_cookies(stcb, asoc);
1769 sctp_send_cookie_ack(stcb);
1770 if (how_indx < sizeof(asoc->cookie_how))
1771 asoc->cookie_how[how_indx] = 5;
1772 return (stcb);
1773 }
1774
1775 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1776 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1777 cookie->tie_tag_my_vtag == 0 &&
1778 cookie->tie_tag_peer_vtag == 0) {
1779 /*
1780 * case C in Section 5.2.4 Table 2: XMOO silently discard
1781 */
1782 if (how_indx < sizeof(asoc->cookie_how))
1783 asoc->cookie_how[how_indx] = 6;
1784 return (NULL);
1785 }
1786 /* If nat support, and the below and stcb is established,
1787 * send back a ABORT(colliding state) if we are established.
1788 */
1789 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
1790 (asoc->peer_supports_nat) &&
1791 ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1792 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1793 (asoc->peer_vtag == 0)))) {
1794 /* Special case - Peer's support nat. We may have
1795 * two init's that we gave out the same tag on since
1796 * one was not established.. i.e. we get INIT from host-1
1797 * behind the nat and we respond tag-a, we get a INIT from
1798 * host-2 behind the nat and we get tag-a again. Then we
1799 * bring up host-1 (or 2's) assoc, Then comes the cookie
1800 * from hsot-2 (or 1). Now we have colliding state. We must
1801 * send an abort here with colliding state indication.
1802 */
1803 op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
1804 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
1805#if defined(__FreeBSD__)
1806 mflowtype, mflowid, inp->fibnum,
1807#endif
1808 vrf_id, port);
1809 return (NULL);
1810 }
1811 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1812 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
1813 (asoc->peer_vtag == 0))) {
1814 /*
1815 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1816 * should be ok, re-accept peer info
1817 */
1818 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1819 /* Extension of case C.
1820 * If we hit this, then the random number
1821 * generator returned the same vtag when we
1822 * first sent our INIT-ACK and when we later sent
1823 * our INIT. The side with the seq numbers that are
1824 * different will be the one that normnally would
1825 * have hit case C. This in effect "extends" our vtags
1826 * in this collision case to be 64 bits. The same collision
1827 * could occur aka you get both vtag and seq number the
1828 * same twice in a row.. but is much less likely. If it
1829 * did happen then we would proceed through and bring
1830 * up the assoc.. we may end up with the wrong stream
1831 * setup however.. which would be bad.. but there is
1832 * no way to tell.. until we send on a stream that does
1833 * not exist :-)
1834 */
1835 if (how_indx < sizeof(asoc->cookie_how))
1836 asoc->cookie_how[how_indx] = 7;
1837
1838 return (NULL);
1839 }
1840 if (how_indx < sizeof(asoc->cookie_how))
1841 asoc->cookie_how[how_indx] = 8;
1842 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
1843 SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1844 sctp_stop_all_cookie_timers(stcb);
1845 /*
1846 * since we did not send a HB make sure we don't double
1847 * things
1848 */
1849 net->hb_responded = 1;
1850 if (stcb->asoc.sctp_autoclose_ticks &&
1851 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1852 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1853 NULL);
1854 }
1855 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1856 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1857
1858 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1859 /* Ok the peer probably discarded our
1860 * data (if we echoed a cookie+data). So anything
1861 * on the sent_queue should be marked for
1862 * retransmit, we may not get something to
1863 * kick us so it COULD still take a timeout
1864 * to move these.. but it can't hurt to mark them.
1865 */
1866 struct sctp_tmit_chunk *chk;
1867 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1868 if (chk->sent < SCTP_DATAGRAM_RESEND) {
1869 chk->sent = SCTP_DATAGRAM_RESEND;
1870 sctp_flight_size_decrease(chk);
1871 sctp_total_flight_decrease(stcb, chk);
1872 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1873 spec_flag++;
1874 }
1875 }
1876
1877 }
1878 /* process the INIT info (peer's info) */
1879 retval = sctp_process_init(init_cp, stcb);
1880 if (retval < 0) {
1881 if (how_indx < sizeof(asoc->cookie_how))
1882 asoc->cookie_how[how_indx] = 9;
1883 return (NULL);
1884 }
1885 if (sctp_load_addresses_from_init(stcb, m,
1886 init_offset + sizeof(struct sctp_init_chunk),
1887 initack_offset, src, dst, init_src, stcb->asoc.port)) {
1888 if (how_indx < sizeof(asoc->cookie_how))
1889 asoc->cookie_how[how_indx] = 10;
1890 return (NULL);
1891 }
1892 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1893 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1894 *notification = SCTP_NOTIFY_ASSOC_UP;
1895
1896 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1897 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1898 (inp->sctp_socket->so_qlimit == 0)) {
1899#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1900 struct socket *so;
1901#endif
1902 stcb->sctp_ep->sctp_flags |=
1903 SCTP_PCB_FLAGS_CONNECTED;
1904#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1905 so = SCTP_INP_SO(stcb->sctp_ep);
1906 atomic_add_int(&stcb->asoc.refcnt, 1);
1907 SCTP_TCB_UNLOCK(stcb);
1908 SCTP_SOCKET_LOCK(so, 1);
1909 SCTP_TCB_LOCK(stcb);
1910 atomic_add_int(&stcb->asoc.refcnt, -1);
1911 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1912 SCTP_SOCKET_UNLOCK(so, 1);
1913 return (NULL);
1914 }
1915#endif
1916 soisconnected(stcb->sctp_socket);
1917#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1918 SCTP_SOCKET_UNLOCK(so, 1);
1919#endif
1920 }
1921 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1922 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1923 else
1924 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1925 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1926 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1927 SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1928 } else {
1929 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1930 }
1931 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
1932 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1933 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1934 stcb->sctp_ep, stcb, asoc->primary_destination);
1935 }
1936 sctp_stop_all_cookie_timers(stcb);
1937 sctp_toss_old_cookies(stcb, asoc);
1938 sctp_send_cookie_ack(stcb);
1939 if (spec_flag) {
1940 /* only if we have retrans set do we do this. What
1941 * this call does is get only the COOKIE-ACK out
1942 * and then when we return the normal call to
1943 * sctp_chunk_output will get the retrans out
1944 * behind this.
1945 */
1946 sctp_chunk_output(inp,stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
1947 }
1948 if (how_indx < sizeof(asoc->cookie_how))
1949 asoc->cookie_how[how_indx] = 11;
1950
1951 return (stcb);
1952 }
1953 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1954 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1955 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1956 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1957 cookie->tie_tag_peer_vtag != 0) {
1958 struct sctpasochead *head;
1959#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1960 struct socket *so;
1961#endif
1962
1963 if (asoc->peer_supports_nat) {
1964 /* This is a gross gross hack.
1965 * Just call the cookie_new code since we
1966 * are allowing a duplicate association.
1967 * I hope this works...
1968 */
1969 return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
1970 sh, cookie, cookie_len,
1971 inp, netp, init_src,notification,
1972 auth_skipped, auth_offset, auth_len,
1973#if defined(__FreeBSD__)
1974 mflowtype, mflowid,
1975#endif
1976 vrf_id, port));
1977 }
1978 /*
1979 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1980 */
1981 /* temp code */
1982 if (how_indx < sizeof(asoc->cookie_how))
1983 asoc->cookie_how[how_indx] = 12;
1984 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
1985 SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1986 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
1987 SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1988
1989 /* notify upper layer */
1990 *notification = SCTP_NOTIFY_ASSOC_RESTART;
1991 atomic_add_int(&stcb->asoc.refcnt, 1);
1992 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1993 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1994 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1995 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1996 }
1997 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1998 SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1999 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
2000 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
2001 }
2002 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2003 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2004 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2005 stcb->sctp_ep, stcb, asoc->primary_destination);
2006
2007 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
2008 /* move to OPEN state, if not in SHUTDOWN_SENT */
2009 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
2010 }
2011 asoc->pre_open_streams =
2012 ntohs(initack_cp->init.num_outbound_streams);
2013 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
2014 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
2015 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
2016
2017 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
2018
2019 asoc->str_reset_seq_in = asoc->init_seq_number;
2020
2021 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
2022 if (asoc->mapping_array) {
2023 memset(asoc->mapping_array, 0,
2024 asoc->mapping_array_size);
2025 }
2026 if (asoc->nr_mapping_array) {
2027 memset(asoc->nr_mapping_array, 0,
2028 asoc->mapping_array_size);
2029 }
2030 SCTP_TCB_UNLOCK(stcb);
2031#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2032 so = SCTP_INP_SO(stcb->sctp_ep);
2033 SCTP_SOCKET_LOCK(so, 1);
2034#endif
2035 SCTP_INP_INFO_WLOCK();
2036 SCTP_INP_WLOCK(stcb->sctp_ep);
2037 SCTP_TCB_LOCK(stcb);
2038 atomic_add_int(&stcb->asoc.refcnt, -1);
2039 /* send up all the data */
2040 SCTP_TCB_SEND_LOCK(stcb);
2041
2042 sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED);
2043 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2044 stcb->asoc.strmout[i].chunks_on_queues = 0;
2045#if defined(SCTP_DETAILED_STR_STATS)
2046 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
2047 asoc->strmout[i].abandoned_sent[j] = 0;
2048 asoc->strmout[i].abandoned_unsent[j] = 0;
2049 }
2050#else
2051 asoc->strmout[i].abandoned_sent[0] = 0;
2052 asoc->strmout[i].abandoned_unsent[0] = 0;
2053#endif
2054 stcb->asoc.strmout[i].sid = i;
2055 stcb->asoc.strmout[i].next_mid_ordered = 0;
2056 stcb->asoc.strmout[i].next_mid_unordered = 0;
2057 stcb->asoc.strmout[i].last_msg_incomplete = 0;
2058 }
2059 /* process the INIT-ACK info (my info) */
2060 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
2061 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
2062
2063 /* pull from vtag hash */
2064 LIST_REMOVE(stcb, sctp_asocs);
2065 /* re-insert to new vtag position */
2066 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
2067 SCTP_BASE_INFO(hashasocmark))];
2068 /*
2069 * put it in the bucket in the vtag hash of assoc's for the
2070 * system
2071 */
2072 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
2073
2074 SCTP_TCB_SEND_UNLOCK(stcb);
2075 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2076 SCTP_INP_INFO_WUNLOCK();
2077#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2078 SCTP_SOCKET_UNLOCK(so, 1);
2079#endif
2080 asoc->total_flight = 0;
2081 asoc->total_flight_count = 0;
2082 /* process the INIT info (peer's info) */
2083 retval = sctp_process_init(init_cp, stcb);
2084 if (retval < 0) {
2085 if (how_indx < sizeof(asoc->cookie_how))
2086 asoc->cookie_how[how_indx] = 13;
2087
2088 return (NULL);
2089 }
2090 /*
2091 * since we did not send a HB make sure we don't double
2092 * things
2093 */
2094 net->hb_responded = 1;
2095
2096 if (sctp_load_addresses_from_init(stcb, m,
2097 init_offset + sizeof(struct sctp_init_chunk),
2098 initack_offset, src, dst, init_src, stcb->asoc.port)) {
2099 if (how_indx < sizeof(asoc->cookie_how))
2100 asoc->cookie_how[how_indx] = 14;
2101
2102 return (NULL);
2103 }
2104 /* respond with a COOKIE-ACK */
2105 sctp_stop_all_cookie_timers(stcb);
2106 sctp_toss_old_cookies(stcb, asoc);
2107 sctp_send_cookie_ack(stcb);
2108 if (how_indx < sizeof(asoc->cookie_how))
2109 asoc->cookie_how[how_indx] = 15;
2110
2111 return (stcb);
2112 }
2113 if (how_indx < sizeof(asoc->cookie_how))
2114 asoc->cookie_how[how_indx] = 16;
2115 /* all other cases... */
2116 return (NULL);
2117}
2118
2119
/*
 * Handle a state cookie for a new association.
 *   m:      input packet mbuf chain -- assumes a pullup on the
 *           IP/SCTP/COOKIE-ECHO chunk.  Note: this is a "split" mbuf
 *           and the cookie signature does not exist.
 *   offset: offset into the mbuf to the cookie-echo chunk.
 *   length: length of the cookie chunk.
 *   to:     where the INIT was from.
 * Returns a new TCB, or NULL on failure.
 */
/*
 * Expand a validated state cookie into a brand-new association (TCB):
 * pull and sanity-check the INIT/INIT-ACK embedded in the cookie,
 * allocate the TCB, seed sequence numbers from the INIT-ACK, load the
 * peer's addresses, verify any skipped AUTH chunk, move the association
 * to OPEN and send a COOKIE-ACK.  Returns the new TCB, or NULL on any
 * failure (every error path frees the half-built TCB first).
 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
#if defined(__FreeBSD__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	union sctp_sockstore store;
	struct sctp_association *asoc;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	/* scratch buffer for pulling a (possibly split) AUTH chunk */
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info); the
	 * INIT should start right after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull an INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
			"process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	/* chunk_length is padded to a 4-byte boundary on the wire */
	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info); the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate it
	 */

	/*
	 * Here we do a trick: we pass NULL for the proc/thread argument.  We
	 * do this since in effect we only use the p argument when the socket
	 * is unbound and we must do an implicit bind.  Since we are getting
	 * a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    ntohs(initack_cp->init.num_outbound_streams),
	    port,
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	    (struct thread *)NULL
#elif defined(__Windows__)
	    (PKTHREAD)NULL
#else
	    (struct proc *)NULL
#endif
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? abort the association attempt */
		SCTPDBG(SCTP_DEBUG_INPUT1,
			"process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets entry for the peer's source address */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
	asoc->scope.site_scope = cookie->site_scope;
	asoc->scope.local_scope = cookie->local_scope;
	asoc->scope.loopback_scope = cookie->loopback_scope;

#if defined(__Userspace__)
	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal) ||
	    (asoc->scope.conn_addr_legal != cookie->conn_addr_legal)) {
#else
	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
#endif
		struct mbuf *op_err;

		/*
		 * Houston we have a problem.  The EP changed while the
		 * cookie was in flight.  Only recourse is to abort the
		 * association.
		 */
		/* hold a reference so the TCB survives the free sequence */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__)
		    mflowtype, mflowid,
#endif
		    vrf_id, port);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* drop the TCB lock before taking the socket lock (lock order) */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	/* seed all outbound sequence state from our INIT-ACK initial TSN */
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/*
	 * process the INIT info (peer's info); only done when the caller
	 * asked for the nets back (netp != NULL)
	 */
	if (netp)
		retval = sctp_process_init(init_cp, stcb);
	else
		retval = 0;
	if (retval < 0) {
		/* peer's INIT was unusable: tear the new TCB back down */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses advertised in the embedded INIT */
	if (sctp_load_addresses_from_init(stcb, m,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
	    src, dst, init_src, port)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
				"COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
			                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				 stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing.  else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	switch (cookie->laddr_type) {
#ifdef INET
	case SCTP_IPV4_ADDRESS:
		/* source addr is IPv4 */
		memset(&store.sin, 0, sizeof(struct sockaddr_in));
		store.sin.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
		store.sin.sin_len = sizeof(struct sockaddr_in);
#endif
		store.sin.sin_addr.s_addr = cookie->laddress[0];
		break;
#endif
#ifdef INET6
	case SCTP_IPV6_ADDRESS:
		/* source addr is IPv6 */
		memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
		store.sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
		store.sin6.sin6_len = sizeof(struct sockaddr_in6);
#endif
		store.sin6.sin6_scope_id = cookie->scope_id;
		memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
		break;
#endif
#if defined(__Userspace__)
	case SCTP_CONN_ADDRESS:
		/* source addr is conn */
		memset(&store.sconn, 0, sizeof(struct sockaddr_conn));
		store.sconn.sconn_family = AF_CONN;
#ifdef HAVE_SCONN_LEN
		store.sconn.sconn_len = sizeof(struct sockaddr_conn);
#endif
		memcpy(&store.sconn.sconn_addr, cookie->laddress, sizeof(void *));
		break;
#endif
	default:
		/* unknown local address type in cookie: free the TCB and bail */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect(); how it got a
		 * cookie that is NEW is a bit of a mystery.  It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived.  But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* refcnt held while the TCB lock is dropped for the socket lock */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* the socket was closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
		   (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one.  Since it is
		 * the listening guy.  The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp != NULL) && (*netp != NULL)) {
		/* calculate the RTT from the cookie's timestamp */
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
						  &cookie->time_entered, sctp_align_unsafe_makecopy,
						  SCTP_RTT_FROM_NON_DATA);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    &store.sa, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}
2508
/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.,
 * we NEED to make sure we are not already using the vtag.  If we are, we
 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG (no middle box bit!):
 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
	SCTP_BASE_INFO(hashasocmark))];
 LIST_FOREACH(stcb, head, sctp_asocs) {
	if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) {
	       -- SEND ABORT - TRY AGAIN --
	}
 }
*/
2521
/*
 * Handles a COOKIE-ECHO message.
 *   stcb: set to a newly created TCB, or left as the existing
 *         (non-NULL) TCB.
 */
2526static struct mbuf *
2527sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
2528 struct sockaddr *src, struct sockaddr *dst,
2529 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
2530 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
2531 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
2532 struct sctp_tcb **locked_tcb,
2533#if defined(__FreeBSD__)
2534 uint8_t mflowtype, uint32_t mflowid,
2535#endif
2536 uint32_t vrf_id, uint16_t port)
2537{
2538 struct sctp_state_cookie *cookie;
2539 struct sctp_tcb *l_stcb = *stcb;
2540 struct sctp_inpcb *l_inp;
2541 struct sockaddr *to;
2542 struct sctp_pcb *ep;
2543 struct mbuf *m_sig;
2544 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
2545 uint8_t *sig;
2546 uint8_t cookie_ok = 0;
2547 unsigned int sig_offset, cookie_offset;
2548 unsigned int cookie_len;
2549 struct timeval now;
2550 struct timeval time_expires;
2551 int notification = 0;
2552 struct sctp_nets *netl;
2553 int had_a_existing_tcb = 0;
2554 int send_int_conf = 0;
2555#ifdef INET
2556 struct sockaddr_in sin;
2557#endif
2558#ifdef INET6
2559 struct sockaddr_in6 sin6;
2560#endif
2561#if defined(__Userspace__)
2562 struct sockaddr_conn sconn;
2563#endif
2564
2565 SCTPDBG(SCTP_DEBUG_INPUT2,
2566 "sctp_handle_cookie: handling COOKIE-ECHO\n");
2567
2568 if (inp_p == NULL) {
2569 return (NULL);
2570 }
2571 cookie = &cp->cookie;
2572 cookie_offset = offset + sizeof(struct sctp_chunkhdr);
2573 cookie_len = ntohs(cp->ch.chunk_length);
2574
2575 if ((cookie->peerport != sh->src_port) ||
2576 (cookie->myport != sh->dest_port) ||
2577 (cookie->my_vtag != sh->v_tag)) {
2578 /*
2579 * invalid ports or bad tag. Note that we always leave the
2580 * v_tag in the header in network order and when we stored
2581 * it in the my_vtag slot we also left it in network order.
2582 * This maintains the match even though it may be in the
2583 * opposite byte order of the machine :->
2584 */
2585 return (NULL);
2586 }
2587 if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
2588 sizeof(struct sctp_init_chunk) +
2589 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
2590 /* cookie too small */
2591 return (NULL);
2592 }
2593 /*
2594 * split off the signature into its own mbuf (since it should not be
2595 * calculated in the sctp_hmac_m() call).
2596 */
2597 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
2598 m_sig = m_split(m, sig_offset, M_NOWAIT);
2599 if (m_sig == NULL) {
2600 /* out of memory or ?? */
2601 return (NULL);
2602 }
2603#ifdef SCTP_MBUF_LOGGING
2604 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2605 sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT);
2606 }
2607#endif
2608
2609 /*
2610 * compute the signature/digest for the cookie
2611 */
2612 ep = &(*inp_p)->sctp_ep;
2613 l_inp = *inp_p;
2614 if (l_stcb) {
2615 SCTP_TCB_UNLOCK(l_stcb);
2616 }
2617 SCTP_INP_RLOCK(l_inp);
2618 if (l_stcb) {
2619 SCTP_TCB_LOCK(l_stcb);
2620 }
2621 /* which cookie is it? */
2622 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
2623 (ep->current_secret_number != ep->last_secret_number)) {
2624 /* it's the old cookie */
2625 (void)sctp_hmac_m(SCTP_HMAC,
2626 (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2627 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2628 } else {
2629 /* it's the current cookie */
2630 (void)sctp_hmac_m(SCTP_HMAC,
2631 (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
2632 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2633 }
2634 /* get the signature */
2635 SCTP_INP_RUNLOCK(l_inp);
2636 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
2637 if (sig == NULL) {
2638 /* couldn't find signature */
2639 sctp_m_freem(m_sig);
2640 return (NULL);
2641 }
2642 /* compare the received digest with the computed digest */
2643 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
2644 /* try the old cookie? */
2645 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
2646 (ep->current_secret_number != ep->last_secret_number)) {
2647 /* compute digest with old */
2648 (void)sctp_hmac_m(SCTP_HMAC,
2649 (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
2650 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
2651 /* compare */
2652 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
2653 cookie_ok = 1;
2654 }
2655 } else {
2656 cookie_ok = 1;
2657 }
2658
2659 /*
2660 * Now before we continue we must reconstruct our mbuf so that
2661 * normal processing of any other chunks will work.
2662 */
2663 {
2664 struct mbuf *m_at;
2665
2666 m_at = m;
2667 while (SCTP_BUF_NEXT(m_at) != NULL) {
2668 m_at = SCTP_BUF_NEXT(m_at);
2669 }
2670 SCTP_BUF_NEXT(m_at) = m_sig;
2671 }
2672
2673 if (cookie_ok == 0) {
2674 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
2675 SCTPDBG(SCTP_DEBUG_INPUT2,
2676 "offset = %u, cookie_offset = %u, sig_offset = %u\n",
2677 (uint32_t) offset, cookie_offset, sig_offset);
2678 return (NULL);
2679 }
2680
2681 /*
2682 * check the cookie timestamps to be sure it's not stale
2683 */
2684 (void)SCTP_GETTIME_TIMEVAL(&now);
2685 /* Expire time is in Ticks, so we convert to seconds */
2686 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
2687 time_expires.tv_usec = cookie->time_entered.tv_usec;
2688 /* TODO sctp_constants.h needs alternative time macros when
2689 * _KERNEL is undefined.
2690 */
2691#ifndef __FreeBSD__
2692 if (timercmp(&now, &time_expires, >))
2693#else
2694 if (timevalcmp(&now, &time_expires, >))
2695#endif
2696 {
2697 /* cookie is stale! */
2698 struct mbuf *op_err;
2699 struct sctp_error_stale_cookie *cause;
2700 uint32_t tim;
2701 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie),
2702 0, M_NOWAIT, 1, MT_DATA);
2703 if (op_err == NULL) {
2704 /* FOOBAR */
2705 return (NULL);
2706 }
2707 /* Set the len */
2708 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie);
2709 cause = mtod(op_err, struct sctp_error_stale_cookie *);
2710 cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE);
2711 cause->cause.length = htons((sizeof(struct sctp_paramhdr) +
2712 (sizeof(uint32_t))));
2713 /* seconds to usec */
2714 tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
2715 /* add in usec */
2716 if (tim == 0)
2717 tim = now.tv_usec - cookie->time_entered.tv_usec;
2718 cause->stale_time = htonl(tim);
2719 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
2720#if defined(__FreeBSD__)
2721 mflowtype, mflowid, l_inp->fibnum,
2722#endif
2723 vrf_id, port);
2724 return (NULL);
2725 }
2726 /*
2727 * Now we must see with the lookup address if we have an existing
2728 * asoc. This will only happen if we were in the COOKIE-WAIT state
2729 * and a INIT collided with us and somewhere the peer sent the
2730 * cookie on another address besides the single address our assoc
2731 * had for him. In this case we will have one of the tie-tags set at
2732 * least AND the address field in the cookie can be used to look it
2733 * up.
2734 */
2735 to = NULL;
2736 switch (cookie->addr_type) {
2737#ifdef INET6
2738 case SCTP_IPV6_ADDRESS:
2739 memset(&sin6, 0, sizeof(sin6));
2740 sin6.sin6_family = AF_INET6;
2741#ifdef HAVE_SIN6_LEN
2742 sin6.sin6_len = sizeof(sin6);
2743#endif
2744 sin6.sin6_port = sh->src_port;
2745 sin6.sin6_scope_id = cookie->scope_id;
2746 memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
2747 sizeof(sin6.sin6_addr.s6_addr));
2748 to = (struct sockaddr *)&sin6;
2749 break;
2750#endif
2751#ifdef INET
2752 case SCTP_IPV4_ADDRESS:
2753 memset(&sin, 0, sizeof(sin));
2754 sin.sin_family = AF_INET;
2755#ifdef HAVE_SIN_LEN
2756 sin.sin_len = sizeof(sin);
2757#endif
2758 sin.sin_port = sh->src_port;
2759 sin.sin_addr.s_addr = cookie->address[0];
2760 to = (struct sockaddr *)&sin;
2761 break;
2762#endif
2763#if defined(__Userspace__)
2764 case SCTP_CONN_ADDRESS:
2765 memset(&sconn, 0, sizeof(struct sockaddr_conn));
2766 sconn.sconn_family = AF_CONN;
2767#ifdef HAVE_SCONN_LEN
2768 sconn.sconn_len = sizeof(struct sockaddr_conn);
2769#endif
2770 sconn.sconn_port = sh->src_port;
2771 memcpy(&sconn.sconn_addr, cookie->address, sizeof(void *));
2772 to = (struct sockaddr *)&sconn;
2773 break;
2774#endif
2775 default:
2776 /* This should not happen */
2777 return (NULL);
2778 }
2779 if (*stcb == NULL) {
2780 /* Yep, lets check */
2781 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
2782 if (*stcb == NULL) {
2783 /*
2784 * We should have only got back the same inp. If we
2785 * got back a different ep we have a problem. The
2786 * original findep got back l_inp and now
2787 */
2788 if (l_inp != *inp_p) {
2789 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
2790 }
2791 } else {
2792 if (*locked_tcb == NULL) {
2793 /* In this case we found the assoc only
2794 * after we locked the create lock. This means
2795 * we are in a colliding case and we must make
2796 * sure that we unlock the tcb if its one of the
2797 * cases where we throw away the incoming packets.
2798 */
2799 *locked_tcb = *stcb;
2800
2801 /* We must also increment the inp ref count
2802 * since the ref_count flags was set when we
2803 * did not find the TCB, now we found it which
2804 * reduces the refcount.. we must raise it back
2805 * out to balance it all :-)
2806 */
2807 SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2808 if ((*stcb)->sctp_ep != l_inp) {
2809 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2810 (void *)(*stcb)->sctp_ep, (void *)l_inp);
2811 }
2812 }
2813 }
2814 }
2815
2816 cookie_len -= SCTP_SIGNATURE_SIZE;
2817 if (*stcb == NULL) {
2818 /* this is the "normal" case... get a new TCB */
2819 *stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
2820 cookie, cookie_len, *inp_p,
2821 netp, to, &notification,
2822 auth_skipped, auth_offset, auth_len,
2823#if defined(__FreeBSD__)
2824 mflowtype, mflowid,
2825#endif
2826 vrf_id, port);
2827 } else {
2828 /* this is abnormal... cookie-echo on existing TCB */
2829 had_a_existing_tcb = 1;
2830 *stcb = sctp_process_cookie_existing(m, iphlen, offset,
2831 src, dst, sh,
2832 cookie, cookie_len, *inp_p, *stcb, netp, to,
2833 &notification, auth_skipped, auth_offset, auth_len,
2834#if defined(__FreeBSD__)
2835 mflowtype, mflowid,
2836#endif
2837 vrf_id, port);
2838 }
2839
2840 if (*stcb == NULL) {
2841 /* still no TCB... must be bad cookie-echo */
2842 return (NULL);
2843 }
2844#if defined(__FreeBSD__)
2845 if (*netp != NULL) {
2846 (*netp)->flowtype = mflowtype;
2847 (*netp)->flowid = mflowid;
2848 }
2849#endif
2850 /*
2851 * Ok, we built an association so confirm the address we sent the
2852 * INIT-ACK to.
2853 */
2854 netl = sctp_findnet(*stcb, to);
2855 /*
2856 * This code should in theory NOT run but
2857 */
2858 if (netl == NULL) {
2859 /* TSNH! Huh, why do I need to add this address here? */
2860 if (sctp_add_remote_addr(*stcb, to, NULL, port,
2861 SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
2862 return (NULL);
2863 }
2864 netl = sctp_findnet(*stcb, to);
2865 }
2866 if (netl) {
2867 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2868 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2869 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2870 netl);
2871 send_int_conf = 1;
2872 }
2873 }
2874 sctp_start_net_timers(*stcb);
2875 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2876 if (!had_a_existing_tcb ||
2877 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2878 /*
2879 * If we have a NEW cookie or the connect never
2880 * reached the connected state during collision we
2881 * must do the TCP accept thing.
2882 */
2883 struct socket *so, *oso;
2884 struct sctp_inpcb *inp;
2885
2886 if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2887 /*
2888 * For a restart we will keep the same
2889 * socket, no need to do anything. I THINK!!
2890 */
2891 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
2892 if (send_int_conf) {
2893 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2894 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
2895 }
2896 return (m);
2897 }
2898 oso = (*inp_p)->sctp_socket;
2899#if (defined(__FreeBSD__) && __FreeBSD_version < 700000)
2900 /*
2901 * We do this to keep the sockets side happy during
2902 * the sonewcon ONLY.
2903 */
2904 NET_LOCK_GIANT();
2905#endif
2906 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2907 SCTP_TCB_UNLOCK((*stcb));
2908#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
2909 CURVNET_SET(oso->so_vnet);
2910#endif
2911#if defined(__APPLE__)
2912 SCTP_SOCKET_LOCK(oso, 1);
2913#endif
2914 so = sonewconn(oso, 0
2915#if defined(__APPLE__)
2916 ,NULL
2917#endif
2918#ifdef __Panda__
2919 ,NULL , (*inp_p)->def_vrf_id
2920#endif
2921 );
2922#if (defined(__FreeBSD__) && __FreeBSD_version < 700000)
2923 NET_UNLOCK_GIANT();
2924#endif
2925#if defined(__APPLE__)
2926 SCTP_SOCKET_UNLOCK(oso, 1);
2927#endif
2928#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
2929 CURVNET_RESTORE();
2930#endif
2931 SCTP_TCB_LOCK((*stcb));
2932 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2933
2934 if (so == NULL) {
2935 struct mbuf *op_err;
2936#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2937 struct socket *pcb_so;
2938#endif
2939 /* Too many sockets */
2940 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2941 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
2942 sctp_abort_association(*inp_p, NULL, m, iphlen,
2943 src, dst, sh, op_err,
2944#if defined(__FreeBSD__)
2945 mflowtype, mflowid,
2946#endif
2947 vrf_id, port);
2948#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2949 pcb_so = SCTP_INP_SO(*inp_p);
2950 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2951 SCTP_TCB_UNLOCK((*stcb));
2952 SCTP_SOCKET_LOCK(pcb_so, 1);
2953 SCTP_TCB_LOCK((*stcb));
2954 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2955#endif
2956 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC,
2957 SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2958#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2959 SCTP_SOCKET_UNLOCK(pcb_so, 1);
2960#endif
2961 return (NULL);
2962 }
2963 inp = (struct sctp_inpcb *)so->so_pcb;
2964 SCTP_INP_INCR_REF(inp);
2965 /*
2966 * We add the unbound flag here so that
2967 * if we get an soabort() before we get the
2968 * move_pcb done, we will properly cleanup.
2969 */
2970 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2971 SCTP_PCB_FLAGS_CONNECTED |
2972 SCTP_PCB_FLAGS_IN_TCPPOOL |
2973 SCTP_PCB_FLAGS_UNBOUND |
2974 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2975 SCTP_PCB_FLAGS_DONT_WAKE);
2976 inp->sctp_features = (*inp_p)->sctp_features;
2977 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
2978 inp->sctp_socket = so;
2979 inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2980 inp->max_cwnd = (*inp_p)->max_cwnd;
2981 inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
2982 inp->ecn_supported = (*inp_p)->ecn_supported;
2983 inp->prsctp_supported = (*inp_p)->prsctp_supported;
2984 inp->auth_supported = (*inp_p)->auth_supported;
2985 inp->asconf_supported = (*inp_p)->asconf_supported;
2986 inp->reconfig_supported = (*inp_p)->reconfig_supported;
2987 inp->nrsack_supported = (*inp_p)->nrsack_supported;
2988 inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
2989 inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2990 inp->sctp_context = (*inp_p)->sctp_context;
2991 inp->local_strreset_support = (*inp_p)->local_strreset_support;
2992 inp->fibnum = (*inp_p)->fibnum;
2993 inp->inp_starting_point_for_iterator = NULL;
2994#if defined(__Userspace__)
2995 inp->ulp_info = (*inp_p)->ulp_info;
2996 inp->recv_callback = (*inp_p)->recv_callback;
2997 inp->send_callback = (*inp_p)->send_callback;
2998 inp->send_sb_threshold = (*inp_p)->send_sb_threshold;
2999#endif
3000 /*
3001 * copy in the authentication parameters from the
3002 * original endpoint
3003 */
3004 if (inp->sctp_ep.local_hmacs)
3005 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
3006 inp->sctp_ep.local_hmacs =
3007 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
3008 if (inp->sctp_ep.local_auth_chunks)
3009 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
3010 inp->sctp_ep.local_auth_chunks =
3011 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
3012
3013 /*
3014 * Now we must move it from one hash table to
3015 * another and get the tcb in the right place.
3016 */
3017
3018 /* This is where the one-2-one socket is put into
3019 * the accept state waiting for the accept!
3020 */
3021 if (*stcb) {
3022 (*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
3023 }
3024 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
3025
3026 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
3027 SCTP_TCB_UNLOCK((*stcb));
3028
3029#if defined(__FreeBSD__)
3030 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
3031 0);
3032#else
3033 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
3034#endif
3035 SCTP_TCB_LOCK((*stcb));
3036 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
3037
3038
3039 /* now we must check to see if we were aborted while
3040 * the move was going on and the lock/unlock happened.
3041 */
3042 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3043 /* yep it was, we leave the
3044 * assoc attached to the socket since
3045 * the sctp_inpcb_free() call will send
3046 * an abort for us.
3047 */
3048 SCTP_INP_DECR_REF(inp);
3049 return (NULL);
3050 }
3051 SCTP_INP_DECR_REF(inp);
3052 /* Switch over to the new guy */
3053 *inp_p = inp;
3054 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3055 if (send_int_conf) {
3056 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
3057 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
3058 }
3059
3060 /* Pull it from the incomplete queue and wake the guy */
3061#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3062 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
3063 SCTP_TCB_UNLOCK((*stcb));
3064 SCTP_SOCKET_LOCK(so, 1);
3065#endif
3066 soisconnected(so);
3067#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3068 SCTP_TCB_LOCK((*stcb));
3069 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
3070 SCTP_SOCKET_UNLOCK(so, 1);
3071#endif
3072 return (m);
3073 }
3074 }
3075 if (notification) {
3076 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
3077 }
3078 if (send_int_conf) {
3079 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
3080 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
3081 }
3082 return (m);
3083}
3084
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Handle an incoming COOKIE-ACK chunk: stop all cookie timers and,
	 * if we are in COOKIE-ECHOED, move the association to OPEN, update
	 * the RTO, notify the ULP, and (re)start the per-net timers.
	 *
	 * cp must not be used, others call this without a c-ack :-)
	 */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if ((stcb == NULL) || (net == NULL)) {
		return;
	}

	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		sctp_start_net_timers(stcb);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			/* a shutdown was requested while still handshaking */
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					 stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			/* no retransmit happened, so the RTT sample is clean */
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
						      &asoc->time_entered, sctp_align_safe_nocopy,
						      SCTP_RTT_FROM_NON_DATA);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Socket lock must be taken before the TCB lock;
			 * hold a refcount so the assoc cannot go away while
			 * the TCB is briefly unlocked.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
				soisconnected(stcb->sctp_socket);
			}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* We don't need to do the asconf thing,
			 * nor hb or autoclose if the socket is closed.
			 */
			goto closed_socket;
		}

		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
				 stcb, net);


		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
					 stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.asconf_supported == 1) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
					 stcb->sctp_ep, stcb,
					 stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
					 SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
closed_socket:
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}
3195
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	/*
	 * Handle an incoming ECN-Echo (ECNE) chunk: locate the destination
	 * the echoed TSN was sent to, invoke the pluggable congestion
	 * control to reduce cwnd (at most once per RTT / window), and
	 * always answer with a CWR chunk.
	 */
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;	/* local copy when converting old-format ECNE */
	uint8_t override_bit;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	/* Accept only the two known ECNE formats (new and pre-RFC old). */
	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* Its the old format */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		/* old format carries no packet count; assume one CE mark */
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
	/*
	 * Remember the highest queued TSN: the next cwnd reduction is not
	 * allowed until the peer's reports pass this point (once per RTT).
	 */
	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
	if (lchk == NULL) {
		window_data_tsn = stcb->asoc.sending_seq - 1;
	} else {
		window_data_tsn = lchk->rec.data.tsn;
	}

	/* Find where it was sent to if possible. */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.tsn == tsn) {
			net = lchk->whoTo;
			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
			break;
		}
		if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
			/* sent_queue is TSN-ordered; the TSN is not queued */
			break;
		}
	}
	if (net == NULL) {
		/*
		 * What to do. A previous send of a
		 * CWR was possibly lost. See how old it is, we
		 * may have it marked on the actual net.
		 */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (tsn == net->last_cwr_tsn) {
				/* Found him, send it off */
				break;
			}
		}
		if (net == NULL) {
			/*
			 * If we reach here, we need to send a special
			 * CWR that says hey, we did this a long time
			 * ago and you lost the response.
			 */
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				/* TSNH */
				return;
			}
			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
		} else {
			override_bit = 0;
		}
	} else {
		override_bit = 0;
	}
	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
	    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
		/* JRS - Use the congestion control given in the pluggable CC module */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the window_data_tsn
		 */
		net->cwr_window_tsn = window_data_tsn;
		net->ecn_ce_pkt_cnt += pkt_cnt;
		net->lost_cnt = pkt_cnt;
		net->last_cwr_tsn = tsn;
	} else {
		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
		    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window update how
			 * many marks/packets lost we have had.
			 */
			int cnt = 1;
			if (pkt_cnt > net->lost_cnt) {
				/* Should be the case */
				cnt = (pkt_cnt - net->lost_cnt);
				net->ecn_ce_pkt_cnt += cnt;
			}
			net->lost_cnt = pkt_cnt;
			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we are in-window
			 * yet of the initial CE the peer saw.
			 */
			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
		}
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer. Note we set the override when we
	 * could not find the TSN on the chunk or the destination network.
	 */
	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}
3314
3315static void
3316sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
3317{
3318 /*
3319 * Here we get a CWR from the peer. We must look in the outqueue and
3320 * make sure that we have a covered ECNE in the control chunk part.
3321 * If so remove it.
3322 */
3323 struct sctp_tmit_chunk *chk;
3324 struct sctp_ecne_chunk *ecne;
3325 int override;
3326 uint32_t cwr_tsn;
3327
3328 cwr_tsn = ntohl(cp->tsn);
3329 override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
3330 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
3331 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
3332 continue;
3333 }
3334 if ((override == 0) && (chk->whoTo != net)) {
3335 /* Must be from the right src unless override is set */
3336 continue;
3337 }
3338 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
3339 if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
3340 /* this covers this ECNE, we can remove it */
3341 stcb->asoc.ecn_echo_cnt_onq--;
3342 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
3343 sctp_next);
3344 sctp_m_freem(chk->data);
3345 chk->data = NULL;
3346 stcb->asoc.ctrl_queue_cnt--;
3347 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3348 if (override == 0) {
3349 break;
3350 }
3351 }
3352 }
3353}
3354
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Handle an incoming SHUTDOWN-COMPLETE chunk: valid only in
	 * SHUTDOWN-ACK-SENT state. Notifies the ULP, stops the
	 * SHUTDOWN-ACK timer, and frees the whole association (TCB).
	 * NOTE: on the ignore path this function unlocks the TCB itself;
	 * on the free path sctp_free_assoc() consumes the lock.
	 */
	struct sctp_association *asoc;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
			"sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
#ifdef INVARIANTS
	/* by this state all queues must have drained */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
		panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
	}
#endif
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
			SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
		"sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Socket lock must be acquired before the TCB lock; take a
	 * refcount so the assoc survives the brief unlock window.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
			      SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}
3411
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	/*
	 * Process one reported chunk from a PACKET-DROPPED chunk (a router
	 * or peer told us a packet carrying this chunk was dropped).
	 * Depending on the dropped chunk's type we mark data for fast
	 * retransmit or immediately resend the control chunk.
	 *
	 * Returns 0 on success, -1 when the echoed data bytes do not match
	 * what we actually sent (report is bogus/corrupt).
	 *
	 * flg carries SCTP_BADCRC / SCTP_FROM_MIDDLE_BOX from the report.
	 */
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/* first pass: rely on the TSN ordering of sent_queue */
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.tsn == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.tsn == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				/* Only trust reports from a middle box or with bad CRC. */
				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					return (0);
				}
				/* don't retransmit into a closed receive window */
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the echoed payload bytes against what we
				 * actually sent; a mismatch means a bogus report.
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t) +
						   sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					     iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					if (tp1->whoTo->rto_needed == 0) {
						tp1->whoTo->rto_needed = 1;
					}
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
						stcb, tp1->whoTo,
						SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
						 stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
						       tp1->whoTo->flight_size,
						       tp1->book_size,
						       (uint32_t)(uintptr_t)stcb,
						       tp1->rec.data.tsn);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/* audit code */
				/*
				 * Cross-check the cached retransmit count
				 * against the queues; runs even when no
				 * chunk was marked above.
				 */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
					      sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
						    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					/* self-heal the counter when auditing is off */
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			/* find the queued ASCONF and mark it for resend */
			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
				      sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				/* don't count this drop against the send limit */
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
					stcb, net,
					SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/* Only retransmit if we KNOW we wont destroy the tcb */
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			/* find the queued COOKIE-ECHO and mark it for resend */
			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
				      sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_IFORWARD_CUM_TSN:
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
3649
3650void
3651sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3652{
3653 uint32_t i;
3654 uint16_t temp;
3655
3656 /*
3657 * We set things to 0xffffffff since this is the last delivered sequence
3658 * and we will be sending in 0 after the reset.
3659 */
3660
3661 if (number_entries) {
3662 for (i = 0; i < number_entries; i++) {
3663 temp = ntohs(list[i]);
3664 if (temp >= stcb->asoc.streamincnt) {
3665 continue;
3666 }
3667 stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
3668 }
3669 } else {
3670 list = NULL;
3671 for (i = 0; i < stcb->asoc.streamincnt; i++) {
3672 stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
3673 }
3674 }
3675 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3676}
3677
3678static void
3679sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3680{
3681 uint32_t i;
3682 uint16_t temp;
3683
3684 if (number_entries > 0) {
3685 for (i = 0; i < number_entries; i++) {
3686 temp = ntohs(list[i]);
3687 if (temp >= stcb->asoc.streamoutcnt) {
3688 /* no such stream */
3689 continue;
3690 }
3691 stcb->asoc.strmout[temp].next_mid_ordered = 0;
3692 stcb->asoc.strmout[temp].next_mid_unordered = 0;
3693 }
3694 } else {
3695 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3696 stcb->asoc.strmout[i].next_mid_ordered = 0;
3697 stcb->asoc.strmout[i].next_mid_unordered = 0;
3698 }
3699 }
3700 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
3701}
3702
3703static void
3704sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
3705{
3706 uint32_t i;
3707 uint16_t temp;
3708
3709 if (number_entries > 0) {
3710 for (i = 0; i < number_entries; i++) {
3711 temp = ntohs(list[i]);
3712 if (temp >= stcb->asoc.streamoutcnt) {
3713 /* no such stream */
3714 continue;
3715 }
3716 stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
3717 }
3718 } else {
3719 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3720 stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
3721 }
3722 }
3723}
3724
3725
struct sctp_stream_reset_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk *bchk)
{
	/*
	 * Locate, inside the cached outstanding stream-reset chunk
	 * (asoc->str_reset), the reset-request parameter whose request_seq
	 * matches seq. A reset chunk can carry at most two request
	 * parameters; both are checked. Returns NULL when there is no
	 * outstanding request or no parameter matches. If bchk is non-NULL
	 * it receives a pointer to the containing chunk.
	 */
	struct sctp_association *asoc;
	struct sctp_chunkhdr *ch;
	struct sctp_stream_reset_request *r;
	struct sctp_tmit_chunk *chk;
	int len, clen;

	asoc = &stcb->asoc;
	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* nothing queued: there can be no outstanding request */
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	if (stcb->asoc.str_reset == NULL) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	ch = mtod(chk->data, struct sctp_chunkhdr *);
	/* first request parameter starts right after the chunk header */
	r = (struct sctp_stream_reset_request *)(ch + 1);
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}
3770
3771static void
3772sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
3773{
3774 struct sctp_association *asoc;
3775 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
3776
3777 if (stcb->asoc.str_reset == NULL) {
3778 return;
3779 }
3780 asoc = &stcb->asoc;
3781
3782 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
3783 chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
3784 TAILQ_REMOVE(&asoc->control_send_queue,
3785 chk,
3786 sctp_next);
3787 if (chk->data) {
3788 sctp_m_freem(chk->data);
3789 chk->data = NULL;
3790 }
3791 asoc->ctrl_queue_cnt--;
3792 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
3793 /*sa_ignore NO_NULL_CHK*/
3794 stcb->asoc.str_reset = NULL;
3795}
3796
3797
/*
 * Process a stream-reset RESPONSE from the peer answering one of our
 * outstanding requests.  'seq' is the response sequence number, 'action'
 * the peer's result code, and 'respin' the raw response parameter
 * (NULL for an implicit ack generated by the caller).  Returns 1 only
 * when handling an embedded FORWARD-TSN aborted the association,
 * 0 otherwise.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_request *req_param;
	struct sctp_stream_reset_out_request *req_out_param;
	struct sctp_stream_reset_in_request *req_in_param;
	uint32_t number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	/* Only the next expected response sequence number is acceptable. */
	if (seq == stcb->asoc.str_reset_seq_out) {
		req_param = sctp_find_stream_reset(stcb, seq, &chk);
		if (req_param != NULL) {
			stcb->asoc.str_reset_seq_out++;
			/* Dispatch on which kind of request this answers. */
			type = ntohs(req_param->ph.param_type);
			lparm_len = ntohs(req_param->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				int no_clear = 0;

				req_out_param = (struct sctp_stream_reset_out_request *)req_param;
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
					/* Set it up so we don't stop retransmitting */
					asoc->stream_reset_outstanding++;
					stcb->asoc.str_reset_seq_out--;
					asoc->stream_reset_out_is_outstanding = 1;
					no_clear = 1;
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
				if (no_clear == 0) {
					/* not still in progress: drop the pending marks */
					sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				req_in_param = (struct sctp_stream_reset_in_request *)req_param;
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
				/* Ok we now may have more streams */
				int num_stream;

				num_stream = stcb->asoc.strm_pending_add_size;
				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
					/* TSNH */
					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
				}
				stcb->asoc.strm_pending_add_size = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* Put the new streams into effect */
					int i;
					for ( i = asoc->streamoutcnt; i< (asoc->streamoutcnt + num_stream); i++) {
						asoc->strmout[i].state = SCTP_STREAM_OPEN;
					}
					asoc->streamoutcnt += num_stream;
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;
				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
					return (0);
				}
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/*
					 * NOTE(review): fwdtsn.ch.chunk_flags is left
					 * uninitialized here, while the sibling handler
					 * sctp_handle_str_reset_request_tsn() zeroes it —
					 * confirm sctp_handle_forward_tsn() never reads it.
					 */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					/* Adopt the peer's next TSN and restart the maps from it. */
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}

					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					/* reset every stream in both directions */
					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_DENIED);
				} else {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_FAILED);
				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	if (asoc->stream_reset_outstanding == 0) {
		/* nothing pending any more: maybe kick off a queued out-reset */
		sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
	}
	return (0);
}
3959
/*
 * Handle an incoming SSN/IN-reset request: the peer asks us to reset
 * (some of) our outgoing streams.  The result code is appended to the
 * response chunk 'chk'; duplicate sequence numbers get the cached
 * result echoed back.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* expected sequence number: remember previous result, compute new one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Can't do it, since they exceeded our buffer size */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			if (number_entries) {
				/* first pass: validate all stream ids (and convert to host order) */
				for (i = 0; i < number_entries; i++) {
					temp = ntohs(req->list_of_streams[i]);
					if (temp >= stcb->asoc.streamoutcnt) {
						asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
						goto bad_boy;
					}
					/* note: rewrites the list in place to host byte order */
					req->list_of_streams[i] = temp;
				}
				/* second pass: mark the listed open streams as pending reset */
				for (i = 0; i < number_entries; i++) {
					if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
						stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
					}
				}
			} else {
				/* Its all */
				for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
				}
			}
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		}
	bad_boy:
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: echo the last result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* two back: echo the result before that */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
}
4025
/*
 * Handle an incoming SSN/TSN (association) reset request.  Returns 1 if
 * processing the synthetic FORWARD-TSN aborted the association,
 * 0 otherwise.  The TSN result is appended to the response chunk 'chk'.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Flush everything up to the new cumulative point via a
			 * locally built FORWARD-TSN. */
			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
			fwdtsn.ch.chunk_flags = 0;
			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
			if (abort_flag) {
				return (1);
			}
			/* Jump the inbound TSN space forward and clear both maps. */
			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
			atomic_add_int(&asoc->sending_seq, 1);
			/* save off historical data for retrans */
			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
			asoc->last_sending_seq[0] = asoc->sending_seq;
			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
		}
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmission: replay the last result */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* two back: replay the result before that */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	return (0);
}
4090
/*
 * Handle an incoming OUT-reset request: the peer is resetting (some of)
 * its outgoing — i.e. our incoming — streams.  If the cumulative TSN has
 * reached the peer's chosen point we reset immediately; otherwise the
 * request is queued on asoc->resetHead until that TSN arrives.  The
 * result code is appended to the response chunk 'chk'.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* request was truncated on receive: deny it */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
				return;
			}
			liste->seq = seq;
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}
4167
/*
 * Handle a request from the peer to add more of its outgoing streams —
 * i.e. to grow our inbound stream array (strmin/streamincnt).  If the new
 * total fits within our max-inbound limit we reallocate the array,
 * migrate the queued data, and initialize the new slots.  The result
 * code is appended to the response chunk 'chk'.
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams.
	 * If its within our max-streams we will
	 * allow it.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		num_stream += stcb->asoc.streamincnt;
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		           (num_stream > 0xffff)) {
			/* We must reject it they ask for to many */
	denied:
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* allocation failed: restore the old array and deny */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
				/* now anything on those queues? */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
				}
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
				stcb->asoc.strmin[i].pd_api_started = 0;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}
4261
4262static void
4263sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
4264 struct sctp_stream_reset_add_strm *str_add)
4265{
4266 /*
4267 * Peer is requesting to add more streams.
4268 * If its within our max-streams we will
4269 * allow it.
4270 */
4271 uint16_t num_stream;
4272 uint32_t seq;
4273 struct sctp_association *asoc = &stcb->asoc;
4274
4275 /* Get the number. */
4276 seq = ntohl(str_add->request_seq);
4277 num_stream = ntohs(str_add->number_of_streams);
4278 /* Now what would be the new total? */
4279 if (asoc->str_reset_seq_in == seq) {
4280 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
4281 if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
4282 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4283 } else if (stcb->asoc.stream_reset_outstanding) {
4284 /* We must reject it we have something pending */
4285 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
4286 } else {
4287 /* Ok, we can do that :-) */
4288 int mychk;
4289 mychk = stcb->asoc.streamoutcnt;
4290 mychk += num_stream;
4291 if (mychk < 0x10000) {
4292 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
4293 if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
4294 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4295 }
4296 } else {
4297 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
4298 }
4299 }
4300 sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
4301 asoc->str_reset_seq_in++;
4302 } else if ((asoc->str_reset_seq_in - 1) == seq) {
4303 /*
4304 * one seq back, just echo back last action since my
4305 * response was lost.
4306 */
4307 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
4308 } else if ((asoc->str_reset_seq_in - 2) == seq) {
4309 /*
4310 * two seq back, just echo back last action since my
4311 * response was lost.
4312 */
4313 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
4314 } else {
4315 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
4316 }
4317}
4318
#if !defined(__Panda__)
#ifdef __GNUC__
__attribute__ ((noinline))
#endif
#endif
/*
 * Walk all parameters of a received RE-CONFIG (stream reset) chunk,
 * dispatch each request/response to its handler, and build one response
 * chunk collecting the results.  Returns 1 if the association was
 * aborted during processing (caller must stop), 0 otherwise.
 */
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_chunkhdr *ch_req)
{
	uint16_t remaining_length, param_len, ptype;
	struct sctp_paramhdr pstore;	/* scratch for peeking a param header */
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];	/* scratch for a full param */
	uint32_t seq = 0;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* common cleanup path: free the chunk (and data if any) */
	strres_nochunk:
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = NULL;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	while (remaining_length >= sizeof(struct sctp_paramhdr)) {
		/* peek at just the parameter header first */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		param_len = ntohs(ph->param_length);
		if ((param_len > remaining_length) ||
		    (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
			/* bad parameter length */
			break;
		}
		/* now pull as much of the parameter as fits into cstore */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
		    (uint8_t *)&cstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > sizeof(cstore)) {
			/* parameter larger than our buffer: handlers must deny */
			trunc = 1;
		} else {
			trunc = 0;
		}
		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
				break;
			}
			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				/* the embedded response_seq implicitly acks our request */
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			/* peer adds its out streams == grows our inbound side */
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			/* peer asks us to add to OUR outbound side */
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* association aborted while processing */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			if (param_len < sizeof(struct sctp_stream_reset_response)) {
				break;
			}
			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* unknown parameter type: stop parsing */
			break;
		}
		offset += SCTP_SIZE32(param_len);
		if (remaining_length >= SCTP_SIZE32(param_len)) {
			remaining_length -= SCTP_SIZE32(param_len);
		} else {
			remaining_length = 0;
		}
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
4494
/*
 * Handle a router's or endpoint's report of a packet loss. There are two ways
 * to handle this: either we get the whole packet and must dissect it
 * ourselves (possibly with truncation and/or corruption), or it is a summary
 * from a middle box that did the dissecting for us.
 */
4501static void
4502sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
4503 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
4504{
4505 uint32_t bottle_bw, on_queue;
4506 uint16_t trunc_len;
4507 unsigned int chlen;
4508 unsigned int at;
4509 struct sctp_chunk_desc desc;
4510 struct sctp_chunkhdr *ch;
4511
4512 chlen = ntohs(cp->ch.chunk_length);
4513 chlen -= sizeof(struct sctp_pktdrop_chunk);
4514 /* XXX possible chlen underflow */
4515 if (chlen == 0) {
4516 ch = NULL;
4517 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
4518 SCTP_STAT_INCR(sctps_pdrpbwrpt);
4519 } else {
4520 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
4521 chlen -= sizeof(struct sctphdr);
4522 /* XXX possible chlen underflow */
4523 memset(&desc, 0, sizeof(desc));
4524 }
4525 trunc_len = (uint16_t) ntohs(cp->trunc_len);
4526 if (trunc_len > limit) {
4527 trunc_len = limit;
4528 }
4529
4530 /* now the chunks themselves */
4531 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
4532 desc.chunk_type = ch->chunk_type;
4533 /* get amount we need to move */
4534 at = ntohs(ch->chunk_length);
4535 if (at < sizeof(struct sctp_chunkhdr)) {
4536 /* corrupt chunk, maybe at the end? */
4537 SCTP_STAT_INCR(sctps_pdrpcrupt);
4538 break;
4539 }
4540 if (trunc_len == 0) {
4541 /* we are supposed to have all of it */
4542 if (at > chlen) {
4543 /* corrupt skip it */
4544 SCTP_STAT_INCR(sctps_pdrpcrupt);
4545 break;
4546 }
4547 } else {
4548 /* is there enough of it left ? */
4549 if (desc.chunk_type == SCTP_DATA) {
4550 if (chlen < (sizeof(struct sctp_data_chunk) +
4551 sizeof(desc.data_bytes))) {
4552 break;
4553 }
4554 } else {
4555 if (chlen < sizeof(struct sctp_chunkhdr)) {
4556 break;
4557 }
4558 }
4559 }
4560 if (desc.chunk_type == SCTP_DATA) {
4561 /* can we get out the tsn? */
4562 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4563 SCTP_STAT_INCR(sctps_pdrpmbda);
4564
4565 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
4566 /* yep */
4567 struct sctp_data_chunk *dcp;
4568 uint8_t *ddp;
4569 unsigned int iii;
4570
4571 dcp = (struct sctp_data_chunk *)ch;
4572 ddp = (uint8_t *) (dcp + 1);
4573 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
4574 desc.data_bytes[iii] = ddp[iii];
4575 }
4576 desc.tsn_ifany = dcp->dp.tsn;
4577 } else {
4578 /* nope we are done. */
4579 SCTP_STAT_INCR(sctps_pdrpnedat);
4580 break;
4581 }
4582 } else {
4583 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
4584 SCTP_STAT_INCR(sctps_pdrpmbct);
4585 }
4586
4587 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
4588 SCTP_STAT_INCR(sctps_pdrppdbrk);
4589 break;
4590 }
4591 if (SCTP_SIZE32(at) > chlen) {
4592 break;
4593 }
4594 chlen -= SCTP_SIZE32(at);
4595 if (chlen < sizeof(struct sctp_chunkhdr)) {
4596 /* done, none left */
4597 break;
4598 }
4599 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
4600 }
4601 /* Now update any rwnd --- possibly */
4602 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
4603 /* From a peer, we get a rwnd report */
4604 uint32_t a_rwnd;
4605
4606 SCTP_STAT_INCR(sctps_pdrpfehos);
4607
4608 bottle_bw = ntohl(cp->bottle_bw);
4609 on_queue = ntohl(cp->current_onq);
4610 if (bottle_bw && on_queue) {
4611 /* a rwnd report is in here */
4612 if (bottle_bw > on_queue)
4613 a_rwnd = bottle_bw - on_queue;
4614 else
4615 a_rwnd = 0;
4616
4617 if (a_rwnd == 0)
4618 stcb->asoc.peers_rwnd = 0;
4619 else {
4620 if (a_rwnd > stcb->asoc.total_flight) {
4621 stcb->asoc.peers_rwnd =
4622 a_rwnd - stcb->asoc.total_flight;
4623 } else {
4624 stcb->asoc.peers_rwnd = 0;
4625 }
4626 if (stcb->asoc.peers_rwnd <
4627 stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4628 /* SWS sender side engages */
4629 stcb->asoc.peers_rwnd = 0;
4630 }
4631 }
4632 }
4633 } else {
4634 SCTP_STAT_INCR(sctps_pdrpfmbox);
4635 }
4636
4637 /* now middle boxes in sat networks get a cwnd bump */
4638 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
4639 (stcb->asoc.sat_t3_loss_recovery == 0) &&
4640 (stcb->asoc.sat_network)) {
4641 /*
4642 * This is debatable but for sat networks it makes sense
4643 * Note if a T3 timer has went off, we will prohibit any
4644 * changes to cwnd until we exit the t3 loss recovery.
4645 */
4646 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
4647 net, cp, &bottle_bw, &on_queue);
4648 }
4649}
4650
4651/*
4652 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
4653 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
4654 * offset: offset into the mbuf chain to first chunkhdr - length: is the
4655 * length of the complete packet outputs: - length: modified to remaining
4656 * length after control processing - netp: modified to new sctp_nets after
4657 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
4658 * bad packet,...) otherwise return the tcb for this packet
4659 */
4660#if !defined(__Panda__)
4661#ifdef __GNUC__
4662__attribute__ ((noinline))
4663#endif
4664#endif
4665static struct sctp_tcb *
4666sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
4667 struct sockaddr *src, struct sockaddr *dst,
4668 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
4669 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
4670#if defined(__FreeBSD__)
4671 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4672#endif
4673 uint32_t vrf_id, uint16_t port)
4674{
4675 struct sctp_association *asoc;
4676 struct mbuf *op_err;
4677 char msg[SCTP_DIAG_INFO_LEN];
4678 uint32_t vtag_in;
4679 int num_chunks = 0; /* number of control chunks processed */
4680 uint32_t chk_length;
4681 int ret;
4682 int abort_no_unlock = 0;
4683 int ecne_seen = 0;
4684 /*
4685 * How big should this be, and should it be alloc'd? Lets try the
4686 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4687 * until we get into jumbo grams and such..
4688 */
4689 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
4690 struct sctp_tcb *locked_tcb = stcb;
4691 int got_auth = 0;
4692 uint32_t auth_offset = 0, auth_len = 0;
4693 int auth_skipped = 0;
4694 int asconf_cnt = 0;
4695#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4696 struct socket *so;
4697#endif
4698
4699 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
4700 iphlen, *offset, length, (void *)stcb);
4701
4702 /* validate chunk header length... */
4703 if (ntohs(ch->chunk_length) < sizeof(*ch)) {
4704 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
4705 ntohs(ch->chunk_length));
4706 if (locked_tcb) {
4707 SCTP_TCB_UNLOCK(locked_tcb);
4708 }
4709 return (NULL);
4710 }
4711 /*
4712 * validate the verification tag
4713 */
4714 vtag_in = ntohl(sh->v_tag);
4715
4716 if (locked_tcb) {
4717 SCTP_TCB_LOCK_ASSERT(locked_tcb);
4718 }
4719 if (ch->chunk_type == SCTP_INITIATION) {
4720 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
4721 ntohs(ch->chunk_length), vtag_in);
4722 if (vtag_in != 0) {
4723 /* protocol error- silently discard... */
4724 SCTP_STAT_INCR(sctps_badvtag);
4725 if (locked_tcb) {
4726 SCTP_TCB_UNLOCK(locked_tcb);
4727 }
4728 return (NULL);
4729 }
4730 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
4731 /*
4732 * If there is no stcb, skip the AUTH chunk and process
4733 * later after a stcb is found (to validate the lookup was
4734 * valid.
4735 */
4736 if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
4737 (stcb == NULL) &&
4738 (inp->auth_supported == 1)) {
4739 /* save this chunk for later processing */
4740 auth_skipped = 1;
4741 auth_offset = *offset;
4742 auth_len = ntohs(ch->chunk_length);
4743
4744 /* (temporarily) move past this chunk */
4745 *offset += SCTP_SIZE32(auth_len);
4746 if (*offset >= length) {
4747 /* no more data left in the mbuf chain */
4748 *offset = length;
4749 if (locked_tcb) {
4750 SCTP_TCB_UNLOCK(locked_tcb);
4751 }
4752 return (NULL);
4753 }
4754 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4755 sizeof(struct sctp_chunkhdr), chunk_buf);
4756 }
4757 if (ch == NULL) {
4758 /* Help */
4759 *offset = length;
4760 if (locked_tcb) {
4761 SCTP_TCB_UNLOCK(locked_tcb);
4762 }
4763 return (NULL);
4764 }
4765 if (ch->chunk_type == SCTP_COOKIE_ECHO) {
4766 goto process_control_chunks;
4767 }
4768 /*
4769 * first check if it's an ASCONF with an unknown src addr we
4770 * need to look inside to find the association
4771 */
4772 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
4773 struct sctp_chunkhdr *asconf_ch = ch;
4774 uint32_t asconf_offset = 0, asconf_len = 0;
4775
4776 /* inp's refcount may be reduced */
4777 SCTP_INP_INCR_REF(inp);
4778
4779 asconf_offset = *offset;
4780 do {
4781 asconf_len = ntohs(asconf_ch->chunk_length);
4782 if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
4783 break;
4784 stcb = sctp_findassociation_ep_asconf(m,
4785 *offset,
4786 dst,
4787 sh, &inp, netp, vrf_id);
4788 if (stcb != NULL)
4789 break;
4790 asconf_offset += SCTP_SIZE32(asconf_len);
4791 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
4792 sizeof(struct sctp_chunkhdr), chunk_buf);
4793 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
4794 if (stcb == NULL) {
4795 /*
4796 * reduce inp's refcount if not reduced in
4797 * sctp_findassociation_ep_asconf().
4798 */
4799 SCTP_INP_DECR_REF(inp);
4800 } else {
4801 locked_tcb = stcb;
4802 }
4803
4804 /* now go back and verify any auth chunk to be sure */
4805 if (auth_skipped && (stcb != NULL)) {
4806 struct sctp_auth_chunk *auth;
4807
4808 auth = (struct sctp_auth_chunk *)
4809 sctp_m_getptr(m, auth_offset,
4810 auth_len, chunk_buf);
4811 got_auth = 1;
4812 auth_skipped = 0;
4813 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
4814 auth_offset)) {
4815 /* auth HMAC failed so dump it */
4816 *offset = length;
4817 if (locked_tcb) {
4818 SCTP_TCB_UNLOCK(locked_tcb);
4819 }
4820 return (NULL);
4821 } else {
4822 /* remaining chunks are HMAC checked */
4823 stcb->asoc.authenticated = 1;
4824 }
4825 }
4826 }
4827 if (stcb == NULL) {
4828 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4829 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4830 msg);
4831 /* no association, so it's out of the blue... */
4832 sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
4833#if defined(__FreeBSD__)
4834 mflowtype, mflowid, inp->fibnum,
4835#endif
4836 vrf_id, port);
4837 *offset = length;
4838 if (locked_tcb) {
4839 SCTP_TCB_UNLOCK(locked_tcb);
4840 }
4841 return (NULL);
4842 }
4843 asoc = &stcb->asoc;
4844 /* ABORT and SHUTDOWN can use either v_tag... */
4845 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
4846 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
4847 (ch->chunk_type == SCTP_PACKET_DROPPED)) {
4848 /* Take the T-bit always into account. */
4849 if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
4850 (vtag_in == asoc->my_vtag)) ||
4851 (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
4852 (vtag_in == asoc->peer_vtag))) {
4853 /* this is valid */
4854 } else {
4855 /* drop this packet... */
4856 SCTP_STAT_INCR(sctps_badvtag);
4857 if (locked_tcb) {
4858 SCTP_TCB_UNLOCK(locked_tcb);
4859 }
4860 return (NULL);
4861 }
4862 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4863 if (vtag_in != asoc->my_vtag) {
4864 /*
4865 * this could be a stale SHUTDOWN-ACK or the
4866 * peer never got the SHUTDOWN-COMPLETE and
4867 * is still hung; we have started a new asoc
4868 * but it won't complete until the shutdown
4869 * is completed
4870 */
4871 if (locked_tcb) {
4872 SCTP_TCB_UNLOCK(locked_tcb);
4873 }
4874 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
4875 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
4876 msg);
4877 sctp_handle_ootb(m, iphlen, *offset, src, dst,
4878 sh, inp, op_err,
4879#if defined(__FreeBSD__)
4880 mflowtype, mflowid, fibnum,
4881#endif
4882 vrf_id, port);
4883 return (NULL);
4884 }
4885 } else {
4886 /* for all other chunks, vtag must match */
4887 if (vtag_in != asoc->my_vtag) {
4888 /* invalid vtag... */
4889 SCTPDBG(SCTP_DEBUG_INPUT3,
4890 "invalid vtag: %xh, expect %xh\n",
4891 vtag_in, asoc->my_vtag);
4892 SCTP_STAT_INCR(sctps_badvtag);
4893 if (locked_tcb) {
4894 SCTP_TCB_UNLOCK(locked_tcb);
4895 }
4896 *offset = length;
4897 return (NULL);
4898 }
4899 }
4900 } /* end if !SCTP_COOKIE_ECHO */
4901 /*
4902 * process all control chunks...
4903 */
4904 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
4905 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
4906 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
4907 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
4908 /* implied cookie-ack.. we must have lost the ack */
4909 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4910 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4911 stcb->asoc.overall_error_count,
4912 0,
4913 SCTP_FROM_SCTP_INPUT,
4914 __LINE__);
4915 }
4916 stcb->asoc.overall_error_count = 0;
4917 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
4918 *netp);
4919 }
4920
4921 process_control_chunks:
4922 while (IS_SCTP_CONTROL(ch)) {
4923 /* validate chunk length */
4924 chk_length = ntohs(ch->chunk_length);
4925 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
4926 ch->chunk_type, chk_length);
4927 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
4928 if (chk_length < sizeof(*ch) ||
4929 (*offset + (int)chk_length) > length) {
4930 *offset = length;
4931 if (locked_tcb) {
4932 SCTP_TCB_UNLOCK(locked_tcb);
4933 }
4934 return (NULL);
4935 }
4936 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
4937 /*
4938 * INIT-ACK only gets the init ack "header" portion only
4939 * because we don't have to process the peer's COOKIE. All
4940 * others get a complete chunk.
4941 */
4942 if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
4943 (ch->chunk_type == SCTP_INITIATION)) {
4944 /* get an init-ack chunk */
4945 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4946 sizeof(struct sctp_init_ack_chunk), chunk_buf);
4947 if (ch == NULL) {
4948 *offset = length;
4949 if (locked_tcb) {
4950 SCTP_TCB_UNLOCK(locked_tcb);
4951 }
4952 return (NULL);
4953 }
4954 } else {
4955 /* For cookies and all other chunks. */
4956 if (chk_length > sizeof(chunk_buf)) {
4957 /*
4958 * use just the size of the chunk buffer
4959 * so the front part of our chunks fit in
4960 * contiguous space up to the chunk buffer
4961 * size (508 bytes).
4962 * For chunks that need to get more than that
4963 * they must use the sctp_m_getptr() function
4964 * or other means (e.g. know how to parse mbuf
4965 * chains). Cookies do this already.
4966 */
4967 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4968 (sizeof(chunk_buf) - 4),
4969 chunk_buf);
4970 if (ch == NULL) {
4971 *offset = length;
4972 if (locked_tcb) {
4973 SCTP_TCB_UNLOCK(locked_tcb);
4974 }
4975 return (NULL);
4976 }
4977 } else {
4978 /* We can fit it all */
4979 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4980 chk_length, chunk_buf);
4981 if (ch == NULL) {
4982 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
4983 *offset = length;
4984 if (locked_tcb) {
4985 SCTP_TCB_UNLOCK(locked_tcb);
4986 }
4987 return (NULL);
4988 }
4989 }
4990 }
4991 num_chunks++;
4992 /* Save off the last place we got a control from */
4993 if (stcb != NULL) {
4994 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
4995 /*
4996 * allow last_control to be NULL if
4997 * ASCONF... ASCONF processing will find the
4998 * right net later
4999 */
5000 if ((netp != NULL) && (*netp != NULL))
5001 stcb->asoc.last_control_chunk_from = *netp;
5002 }
5003 }
5004#ifdef SCTP_AUDITING_ENABLED
5005 sctp_audit_log(0xB0, ch->chunk_type);
5006#endif
5007
5008 /* check to see if this chunk required auth, but isn't */
5009 if ((stcb != NULL) &&
5010 (stcb->asoc.auth_supported == 1) &&
5011 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
5012 !stcb->asoc.authenticated) {
5013 /* "silently" ignore */
5014 SCTP_STAT_INCR(sctps_recvauthmissing);
5015 goto next_chunk;
5016 }
5017 switch (ch->chunk_type) {
5018 case SCTP_INITIATION:
5019 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
5020 /* The INIT chunk must be the only chunk. */
5021 if ((num_chunks > 1) ||
5022 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5023 /* RFC 4960 requires that no ABORT is sent */
5024 *offset = length;
5025 if (locked_tcb) {
5026 SCTP_TCB_UNLOCK(locked_tcb);
5027 }
5028 return (NULL);
5029 }
5030 /* Honor our resource limit. */
5031 if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
5032 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5033 sctp_abort_association(inp, stcb, m, iphlen,
5034 src, dst, sh, op_err,
5035#if defined(__FreeBSD__)
5036 mflowtype, mflowid,
5037#endif
5038 vrf_id, port);
5039 *offset = length;
5040 return (NULL);
5041 }
5042 sctp_handle_init(m, iphlen, *offset, src, dst, sh,
5043 (struct sctp_init_chunk *)ch, inp,
5044 stcb, *netp, &abort_no_unlock,
5045#if defined(__FreeBSD__)
5046 mflowtype, mflowid,
5047#endif
5048 vrf_id, port);
5049 *offset = length;
5050 if ((!abort_no_unlock) && (locked_tcb)) {
5051 SCTP_TCB_UNLOCK(locked_tcb);
5052 }
5053 return (NULL);
5054 break;
5055 case SCTP_PAD_CHUNK:
5056 break;
5057 case SCTP_INITIATION_ACK:
5058 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
5059 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5060 /* We are not interested anymore */
5061 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5062 ;
5063 } else {
5064 if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
5065 /* Very unlikely */
5066 SCTP_TCB_UNLOCK(locked_tcb);
5067 }
5068 *offset = length;
5069 if (stcb) {
5070#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5071 so = SCTP_INP_SO(inp);
5072 atomic_add_int(&stcb->asoc.refcnt, 1);
5073 SCTP_TCB_UNLOCK(stcb);
5074 SCTP_SOCKET_LOCK(so, 1);
5075 SCTP_TCB_LOCK(stcb);
5076 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5077#endif
5078 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5079 SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
5080#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5081 SCTP_SOCKET_UNLOCK(so, 1);
5082#endif
5083 }
5084 return (NULL);
5085 }
5086 }
5087 /* The INIT-ACK chunk must be the only chunk. */
5088 if ((num_chunks > 1) ||
5089 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5090 *offset = length;
5091 if (locked_tcb) {
5092 SCTP_TCB_UNLOCK(locked_tcb);
5093 }
5094 return (NULL);
5095 }
5096 if ((netp) && (*netp)) {
5097 ret = sctp_handle_init_ack(m, iphlen, *offset,
5098 src, dst, sh,
5099 (struct sctp_init_ack_chunk *)ch,
5100 stcb, *netp,
5101 &abort_no_unlock,
5102#if defined(__FreeBSD__)
5103 mflowtype, mflowid,
5104#endif
5105 vrf_id);
5106 } else {
5107 ret = -1;
5108 }
5109 *offset = length;
5110 if (abort_no_unlock) {
5111 return (NULL);
5112 }
5113 /*
5114 * Special case, I must call the output routine to
5115 * get the cookie echoed
5116 */
5117 if ((stcb != NULL) && (ret == 0)) {
5118 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
5119 }
5120 if (locked_tcb) {
5121 SCTP_TCB_UNLOCK(locked_tcb);
5122 }
5123 return (NULL);
5124 break;
5125 case SCTP_SELECTIVE_ACK:
5126 {
5127 struct sctp_sack_chunk *sack;
5128 int abort_now = 0;
5129 uint32_t a_rwnd, cum_ack;
5130 uint16_t num_seg, num_dup;
5131 uint8_t flags;
5132 int offset_seg, offset_dup;
5133
5134 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
5135 SCTP_STAT_INCR(sctps_recvsacks);
5136 if (stcb == NULL) {
5137 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
5138 break;
5139 }
5140 if (chk_length < sizeof(struct sctp_sack_chunk)) {
5141 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
5142 break;
5143 }
5144 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
5145 /*-
5146 * If we have sent a shutdown-ack, we will pay no
5147 * attention to a sack sent in to us since
5148 * we don't care anymore.
5149 */
5150 break;
5151 }
5152 sack = (struct sctp_sack_chunk *)ch;
5153 flags = ch->chunk_flags;
5154 cum_ack = ntohl(sack->sack.cum_tsn_ack);
5155 num_seg = ntohs(sack->sack.num_gap_ack_blks);
5156 num_dup = ntohs(sack->sack.num_dup_tsns);
5157 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
5158 if (sizeof(struct sctp_sack_chunk) +
5159 num_seg * sizeof(struct sctp_gap_ack_block) +
5160 num_dup * sizeof(uint32_t) != chk_length) {
5161 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
5162 break;
5163 }
5164 offset_seg = *offset + sizeof(struct sctp_sack_chunk);
5165 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
5166 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
5167 cum_ack, num_seg, a_rwnd);
5168 stcb->asoc.seen_a_sack_this_pkt = 1;
5169 if ((stcb->asoc.pr_sctp_cnt == 0) &&
5170 (num_seg == 0) &&
5171 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
5172 (stcb->asoc.saw_sack_with_frags == 0) &&
5173 (stcb->asoc.saw_sack_with_nr_frags == 0) &&
5174 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
5175 ) {
5176 /* We have a SIMPLE sack having no prior segments and
5177 * data on sent queue to be acked.. Use the faster
5178 * path sack processing. We also allow window update
5179 * sacks with no missing segments to go this way too.
5180 */
5181 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen);
5182 } else {
5183 if (netp && *netp)
5184 sctp_handle_sack(m, offset_seg, offset_dup, stcb,
5185 num_seg, 0, num_dup, &abort_now, flags,
5186 cum_ack, a_rwnd, ecne_seen);
5187 }
5188 if (abort_now) {
5189 /* ABORT signal from sack processing */
5190 *offset = length;
5191 return (NULL);
5192 }
5193 if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
5194 TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
5195 (stcb->asoc.stream_queue_cnt == 0)) {
5196 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
5197 }
5198 }
5199 break;
5200 /* EY - nr_sack: If the received chunk is an nr_sack chunk */
5201 case SCTP_NR_SELECTIVE_ACK:
5202 {
5203 struct sctp_nr_sack_chunk *nr_sack;
5204 int abort_now = 0;
5205 uint32_t a_rwnd, cum_ack;
5206 uint16_t num_seg, num_nr_seg, num_dup;
5207 uint8_t flags;
5208 int offset_seg, offset_dup;
5209
5210 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
5211 SCTP_STAT_INCR(sctps_recvsacks);
5212 if (stcb == NULL) {
5213 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
5214 break;
5215 }
5216 if (stcb->asoc.nrsack_supported == 0) {
5217 goto unknown_chunk;
5218 }
5219 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
5220 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
5221 break;
5222 }
5223 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
5224 /*-
5225 * If we have sent a shutdown-ack, we will pay no
5226 * attention to a sack sent in to us since
5227 * we don't care anymore.
5228 */
5229 break;
5230 }
5231 nr_sack = (struct sctp_nr_sack_chunk *)ch;
5232 flags = ch->chunk_flags;
5233 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
5234 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
5235 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
5236 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
5237 a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
5238 if (sizeof(struct sctp_nr_sack_chunk) +
5239 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
5240 num_dup * sizeof(uint32_t) != chk_length) {
5241 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
5242 break;
5243 }
5244 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
5245 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
5246 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
5247 cum_ack, num_seg, a_rwnd);
5248 stcb->asoc.seen_a_sack_this_pkt = 1;
5249 if ((stcb->asoc.pr_sctp_cnt == 0) &&
5250 (num_seg == 0) && (num_nr_seg == 0) &&
5251 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
5252 (stcb->asoc.saw_sack_with_frags == 0) &&
5253 (stcb->asoc.saw_sack_with_nr_frags == 0) &&
5254 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
5255 /*
5256 * We have a SIMPLE sack having no
5257 * prior segments and data on sent
5258 * queue to be acked. Use the
5259 * faster path sack processing. We
5260 * also allow window update sacks
5261 * with no missing segments to go
5262 * this way too.
5263 */
5264 sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
5265 &abort_now, ecne_seen);
5266 } else {
5267 if (netp && *netp)
5268 sctp_handle_sack(m, offset_seg, offset_dup, stcb,
5269 num_seg, num_nr_seg, num_dup, &abort_now, flags,
5270 cum_ack, a_rwnd, ecne_seen);
5271 }
5272 if (abort_now) {
5273 /* ABORT signal from sack processing */
5274 *offset = length;
5275 return (NULL);
5276 }
5277 if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
5278 TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
5279 (stcb->asoc.stream_queue_cnt == 0)) {
5280 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
5281 }
5282 }
5283 break;
5284
5285 case SCTP_HEARTBEAT_REQUEST:
5286 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
5287 if ((stcb) && netp && *netp) {
5288 SCTP_STAT_INCR(sctps_recvheartbeat);
5289 sctp_send_heartbeat_ack(stcb, m, *offset,
5290 chk_length, *netp);
5291
5292 /* He's alive so give him credit */
5293 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5294 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5295 stcb->asoc.overall_error_count,
5296 0,
5297 SCTP_FROM_SCTP_INPUT,
5298 __LINE__);
5299 }
5300 stcb->asoc.overall_error_count = 0;
5301 }
5302 break;
5303 case SCTP_HEARTBEAT_ACK:
5304 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
5305 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
5306 /* Its not ours */
5307 *offset = length;
5308 if (locked_tcb) {
5309 SCTP_TCB_UNLOCK(locked_tcb);
5310 }
5311 return (NULL);
5312 }
5313 /* He's alive so give him credit */
5314 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5315 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5316 stcb->asoc.overall_error_count,
5317 0,
5318 SCTP_FROM_SCTP_INPUT,
5319 __LINE__);
5320 }
5321 stcb->asoc.overall_error_count = 0;
5322 SCTP_STAT_INCR(sctps_recvheartbeatack);
5323 if (netp && *netp)
5324 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
5325 stcb, *netp);
5326 break;
5327 case SCTP_ABORT_ASSOCIATION:
5328 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
5329 (void *)stcb);
5330 if ((stcb) && netp && *netp)
5331 sctp_handle_abort((struct sctp_abort_chunk *)ch,
5332 stcb, *netp);
5333 *offset = length;
5334 return (NULL);
5335 break;
5336 case SCTP_SHUTDOWN:
5337 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
5338 (void *)stcb);
5339 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
5340 *offset = length;
5341 if (locked_tcb) {
5342 SCTP_TCB_UNLOCK(locked_tcb);
5343 }
5344 return (NULL);
5345 }
5346 if (netp && *netp) {
5347 int abort_flag = 0;
5348
5349 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
5350 stcb, *netp, &abort_flag);
5351 if (abort_flag) {
5352 *offset = length;
5353 return (NULL);
5354 }
5355 }
5356 break;
5357 case SCTP_SHUTDOWN_ACK:
5358 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", (void *)stcb);
5359 if ((stcb) && (netp) && (*netp))
5360 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
5361 *offset = length;
5362 return (NULL);
5363 break;
5364
5365 case SCTP_OPERATION_ERROR:
5366 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
5367 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
5368 *offset = length;
5369 return (NULL);
5370 }
5371 break;
5372 case SCTP_COOKIE_ECHO:
5373 SCTPDBG(SCTP_DEBUG_INPUT3,
5374 "SCTP_COOKIE-ECHO, stcb %p\n", (void *)stcb);
5375 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5376 ;
5377 } else {
5378 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5379 /* We are not interested anymore */
5380 abend:
5381 if (stcb) {
5382 SCTP_TCB_UNLOCK(stcb);
5383 }
5384 *offset = length;
5385 return (NULL);
5386 }
5387 }
5388 /*
5389 * First are we accepting? We do this again here
5390 * since it is possible that a previous endpoint WAS
5391 * listening responded to a INIT-ACK and then
5392 * closed. We opened and bound.. and are now no
5393 * longer listening.
5394 */
5395
5396 if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
5397 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
5398 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
5399 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
5400 sctp_abort_association(inp, stcb, m, iphlen,
5401 src, dst, sh, op_err,
5402#if defined(__FreeBSD__)
5403 mflowtype, mflowid,
5404#endif
5405 vrf_id, port);
5406 }
5407 *offset = length;
5408 return (NULL);
5409 } else {
5410 struct mbuf *ret_buf;
5411 struct sctp_inpcb *linp;
5412 if (stcb) {
5413 linp = NULL;
5414 } else {
5415 linp = inp;
5416 }
5417
5418 if (linp) {
5419 SCTP_ASOC_CREATE_LOCK(linp);
5420 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5421 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5422 SCTP_ASOC_CREATE_UNLOCK(linp);
5423 goto abend;
5424 }
5425 }
5426
5427 if (netp) {
5428 ret_buf =
5429 sctp_handle_cookie_echo(m, iphlen,
5430 *offset,
5431 src, dst,
5432 sh,
5433 (struct sctp_cookie_echo_chunk *)ch,
5434 &inp, &stcb, netp,
5435 auth_skipped,
5436 auth_offset,
5437 auth_len,
5438 &locked_tcb,
5439#if defined(__FreeBSD__)
5440 mflowtype,
5441 mflowid,
5442#endif
5443 vrf_id,
5444 port);
5445 } else {
5446 ret_buf = NULL;
5447 }
5448 if (linp) {
5449 SCTP_ASOC_CREATE_UNLOCK(linp);
5450 }
5451 if (ret_buf == NULL) {
5452 if (locked_tcb) {
5453 SCTP_TCB_UNLOCK(locked_tcb);
5454 }
5455 SCTPDBG(SCTP_DEBUG_INPUT3,
5456 "GAK, null buffer\n");
5457 *offset = length;
5458 return (NULL);
5459 }
5460 /* if AUTH skipped, see if it verified... */
5461 if (auth_skipped) {
5462 got_auth = 1;
5463 auth_skipped = 0;
5464 }
5465 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
5466 /*
5467 * Restart the timer if we have
5468 * pending data
5469 */
5470 struct sctp_tmit_chunk *chk;
5471
5472 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
5473 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
5474 }
5475 }
5476 break;
5477 case SCTP_COOKIE_ACK:
5478 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", (void *)stcb);
5479 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
5480 if (locked_tcb) {
5481 SCTP_TCB_UNLOCK(locked_tcb);
5482 }
5483 return (NULL);
5484 }
5485 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5486 /* We are not interested anymore */
5487 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
5488 ;
5489 } else if (stcb) {
5490#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5491 so = SCTP_INP_SO(inp);
5492 atomic_add_int(&stcb->asoc.refcnt, 1);
5493 SCTP_TCB_UNLOCK(stcb);
5494 SCTP_SOCKET_LOCK(so, 1);
5495 SCTP_TCB_LOCK(stcb);
5496 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5497#endif
5498 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5499 SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
5500#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5501 SCTP_SOCKET_UNLOCK(so, 1);
5502#endif
5503 *offset = length;
5504 return (NULL);
5505 }
5506 }
5507 /* He's alive so give him credit */
5508 if ((stcb) && netp && *netp) {
5509 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5510 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5511 stcb->asoc.overall_error_count,
5512 0,
5513 SCTP_FROM_SCTP_INPUT,
5514 __LINE__);
5515 }
5516 stcb->asoc.overall_error_count = 0;
5517 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch,stcb, *netp);
5518 }
5519 break;
5520 case SCTP_ECN_ECHO:
5521 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
5522 /* He's alive so give him credit */
5523 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
5524 /* Its not ours */
5525 if (locked_tcb) {
5526 SCTP_TCB_UNLOCK(locked_tcb);
5527 }
5528 *offset = length;
5529 return (NULL);
5530 }
5531 if (stcb) {
5532 if (stcb->asoc.ecn_supported == 0) {
5533 goto unknown_chunk;
5534 }
5535 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5536 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5537 stcb->asoc.overall_error_count,
5538 0,
5539 SCTP_FROM_SCTP_INPUT,
5540 __LINE__);
5541 }
5542 stcb->asoc.overall_error_count = 0;
5543 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
5544 stcb);
5545 ecne_seen = 1;
5546 }
5547 break;
5548 case SCTP_ECN_CWR:
5549 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
5550 /* He's alive so give him credit */
5551 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
5552 /* Its not ours */
5553 if (locked_tcb) {
5554 SCTP_TCB_UNLOCK(locked_tcb);
5555 }
5556 *offset = length;
5557 return (NULL);
5558 }
5559 if (stcb) {
5560 if (stcb->asoc.ecn_supported == 0) {
5561 goto unknown_chunk;
5562 }
5563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5564 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5565 stcb->asoc.overall_error_count,
5566 0,
5567 SCTP_FROM_SCTP_INPUT,
5568 __LINE__);
5569 }
5570 stcb->asoc.overall_error_count = 0;
5571 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
5572 }
5573 break;
5574 case SCTP_SHUTDOWN_COMPLETE:
5575 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", (void *)stcb);
5576 /* must be first and only chunk */
5577 if ((num_chunks > 1) ||
5578 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
5579 *offset = length;
5580 if (locked_tcb) {
5581 SCTP_TCB_UNLOCK(locked_tcb);
5582 }
5583 return (NULL);
5584 }
5585 if ((stcb) && netp && *netp) {
5586 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
5587 stcb, *netp);
5588 }
5589 *offset = length;
5590 return (NULL);
5591 break;
5592 case SCTP_ASCONF:
5593 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
5594 /* He's alive so give him credit */
5595 if (stcb) {
5596 if (stcb->asoc.asconf_supported == 0) {
5597 goto unknown_chunk;
5598 }
5599 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5600 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5601 stcb->asoc.overall_error_count,
5602 0,
5603 SCTP_FROM_SCTP_INPUT,
5604 __LINE__);
5605 }
5606 stcb->asoc.overall_error_count = 0;
5607 sctp_handle_asconf(m, *offset, src,
5608 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
5609 asconf_cnt++;
5610 }
5611 break;
5612 case SCTP_ASCONF_ACK:
5613 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
5614 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
5615 /* Its not ours */
5616 if (locked_tcb) {
5617 SCTP_TCB_UNLOCK(locked_tcb);
5618 }
5619 *offset = length;
5620 return (NULL);
5621 }
5622 if ((stcb) && netp && *netp) {
5623 if (stcb->asoc.asconf_supported == 0) {
5624 goto unknown_chunk;
5625 }
5626 /* He's alive so give him credit */
5627 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5628 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5629 stcb->asoc.overall_error_count,
5630 0,
5631 SCTP_FROM_SCTP_INPUT,
5632 __LINE__);
5633 }
5634 stcb->asoc.overall_error_count = 0;
5635 sctp_handle_asconf_ack(m, *offset,
5636 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
5637 if (abort_no_unlock)
5638 return (NULL);
5639 }
5640 break;
5641 case SCTP_FORWARD_CUM_TSN:
5642 case SCTP_IFORWARD_CUM_TSN:
5643 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
5644 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
5645 /* Its not ours */
5646 if (locked_tcb) {
5647 SCTP_TCB_UNLOCK(locked_tcb);
5648 }
5649 *offset = length;
5650 return (NULL);
5651 }
5652
5653 /* He's alive so give him credit */
5654 if (stcb) {
5655 int abort_flag = 0;
5656
5657 if (stcb->asoc.prsctp_supported == 0) {
5658 goto unknown_chunk;
5659 }
5660 stcb->asoc.overall_error_count = 0;
5661 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5662 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5663 stcb->asoc.overall_error_count,
5664 0,
5665 SCTP_FROM_SCTP_INPUT,
5666 __LINE__);
5667 }
5668 *fwd_tsn_seen = 1;
5669 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5670 /* We are not interested anymore */
5671#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5672 so = SCTP_INP_SO(inp);
5673 atomic_add_int(&stcb->asoc.refcnt, 1);
5674 SCTP_TCB_UNLOCK(stcb);
5675 SCTP_SOCKET_LOCK(so, 1);
5676 SCTP_TCB_LOCK(stcb);
5677 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5678#endif
5679 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
5680 SCTP_FROM_SCTP_INPUT + SCTP_LOC_31);
5681#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5682 SCTP_SOCKET_UNLOCK(so, 1);
5683#endif
5684 *offset = length;
5685 return (NULL);
5686 }
5687 /*
5688 * For sending a SACK this looks like DATA
5689 * chunks.
5690 */
5691 stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
5692 sctp_handle_forward_tsn(stcb,
5693 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
5694 if (abort_flag) {
5695 *offset = length;
5696 return (NULL);
5697 } else {
5698 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
5699 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
5700 stcb->asoc.overall_error_count,
5701 0,
5702 SCTP_FROM_SCTP_INPUT,
5703 __LINE__);
5704 }
5705 stcb->asoc.overall_error_count = 0;
5706 }
5707
5708 }
5709 break;
5710 case SCTP_STREAM_RESET:
5711 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
5712 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
5713 /* Its not ours */
5714 if (locked_tcb) {
5715 SCTP_TCB_UNLOCK(locked_tcb);
5716 }
5717 *offset = length;
5718 return (NULL);
5719 }
5720 if (stcb->asoc.reconfig_supported == 0) {
5721 goto unknown_chunk;
5722 }
5723 if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
5724 /* stop processing */
5725 *offset = length;
5726 return (NULL);
5727 }
5728 break;
5729 case SCTP_PACKET_DROPPED:
5730 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
5731 /* re-get it all please */
5732 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
5733 /* Its not ours */
5734 if (locked_tcb) {
5735 SCTP_TCB_UNLOCK(locked_tcb);
5736 }
5737 *offset = length;
5738 return (NULL);
5739 }
5740
5741
5742 if (ch && (stcb) && netp && (*netp)) {
5743 if (stcb->asoc.pktdrop_supported == 0) {
5744 goto unknown_chunk;
5745 }
5746 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
5747 stcb, *netp,
5748 min(chk_length, (sizeof(chunk_buf) - 4)));
5749
5750 }
5751
5752 break;
5753 case SCTP_AUTHENTICATION:
5754 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
5755 if (stcb == NULL) {
5756 /* save the first AUTH for later processing */
5757 if (auth_skipped == 0) {
5758 auth_offset = *offset;
5759 auth_len = chk_length;
5760 auth_skipped = 1;
5761 }
5762 /* skip this chunk (temporarily) */
5763 goto next_chunk;
5764 }
5765 if (stcb->asoc.auth_supported == 0) {
5766 goto unknown_chunk;
5767 }
5768 if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
5769 (chk_length > (sizeof(struct sctp_auth_chunk) +
5770 SCTP_AUTH_DIGEST_LEN_MAX))) {
5771 /* Its not ours */
5772 if (locked_tcb) {
5773 SCTP_TCB_UNLOCK(locked_tcb);
5774 }
5775 *offset = length;
5776 return (NULL);
5777 }
5778 if (got_auth == 1) {
5779 /* skip this chunk... it's already auth'd */
5780 goto next_chunk;
5781 }
5782 got_auth = 1;
5783 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
5784 m, *offset)) {
5785 /* auth HMAC failed so dump the packet */
5786 *offset = length;
5787 return (stcb);
5788 } else {
5789 /* remaining chunks are HMAC checked */
5790 stcb->asoc.authenticated = 1;
5791 }
5792 break;
5793
5794 default:
5795 unknown_chunk:
5796 /* it's an unknown chunk! */
5797 if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
5798 struct sctp_gen_error_cause *cause;
5799 int len;
5800
5801 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
5802 0, M_NOWAIT, 1, MT_DATA);
5803 if (op_err != NULL) {
5804 len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset));
5805 cause = mtod(op_err, struct sctp_gen_error_cause *);
5806 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
5807 cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause)));
5808 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5809 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
5810 if (SCTP_BUF_NEXT(op_err) != NULL) {
5811#ifdef SCTP_MBUF_LOGGING
5812 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5813 sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY);
5814 }
5815#endif
5816 sctp_queue_op_err(stcb, op_err);
5817 } else {
5818 sctp_m_freem(op_err);
5819 }
5820 }
5821 }
5822 if ((ch->chunk_type & 0x80) == 0) {
5823 /* discard this packet */
5824 *offset = length;
5825 return (stcb);
5826 } /* else skip this bad chunk and continue... */
5827 break;
5828 } /* switch (ch->chunk_type) */
5829
5830
5831 next_chunk:
5832 /* get the next chunk */
5833 *offset += SCTP_SIZE32(chk_length);
5834 if (*offset >= length) {
5835 /* no more data left in the mbuf chain */
5836 break;
5837 }
5838 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
5839 sizeof(struct sctp_chunkhdr), chunk_buf);
5840 if (ch == NULL) {
5841 if (locked_tcb) {
5842 SCTP_TCB_UNLOCK(locked_tcb);
5843 }
5844 *offset = length;
5845 return (NULL);
5846 }
5847 } /* while */
5848
5849 if (asconf_cnt > 0 && stcb != NULL) {
5850 sctp_send_asconf_ack(stcb);
5851 }
5852 return (stcb);
5853}
5854
5855
5856/*
5857 * common input chunk processing (v4 and v6)
5858 */
5859void
5860sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
5861 struct sockaddr *src, struct sockaddr *dst,
5862 struct sctphdr *sh, struct sctp_chunkhdr *ch,
5863#if !defined(SCTP_WITH_NO_CSUM)
5864 uint8_t compute_crc,
5865#endif
5866 uint8_t ecn_bits,
5867#if defined(__FreeBSD__)
5868 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
5869#endif
5870 uint32_t vrf_id, uint16_t port)
5871{
5872 uint32_t high_tsn;
5873 int fwd_tsn_seen = 0, data_processed = 0;
5874 struct mbuf *m = *mm, *op_err;
5875 char msg[SCTP_DIAG_INFO_LEN];
5876 int un_sent;
5877 int cnt_ctrl_ready = 0;
5878 struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
5879 struct sctp_tcb *stcb = NULL;
5880 struct sctp_nets *net = NULL;
5881#if defined(__Userspace__)
5882 struct socket *upcall_socket = NULL;
5883#endif
5884 SCTP_STAT_INCR(sctps_recvdatagrams);
5885#ifdef SCTP_AUDITING_ENABLED
5886 sctp_audit_log(0xE0, 1);
5887 sctp_auditing(0, inp, stcb, net);
5888#endif
5889#if !defined(SCTP_WITH_NO_CSUM)
5890 if (compute_crc != 0) {
5891 uint32_t check, calc_check;
5892
5893 check = sh->checksum;
5894 sh->checksum = 0;
5895 calc_check = sctp_calculate_cksum(m, iphlen);
5896 sh->checksum = check;
5897 if (calc_check != check) {
5898 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
5899 calc_check, check, (void *)m, length, iphlen);
5900 stcb = sctp_findassociation_addr(m, offset, src, dst,
5901 sh, ch, &inp, &net, vrf_id);
5902#if defined(INET) || defined(INET6)
5903 if ((ch->chunk_type != SCTP_INITIATION) &&
5904 (net != NULL) && (net->port != port)) {
5905 if (net->port == 0) {
5906 /* UDP encapsulation turned on. */
5907 net->mtu -= sizeof(struct udphdr);
5908 if (stcb->asoc.smallest_mtu > net->mtu) {
5909 sctp_pathmtu_adjustment(stcb, net->mtu);
5910 }
5911 } else if (port == 0) {
5912 /* UDP encapsulation turned off. */
5913 net->mtu += sizeof(struct udphdr);
5914 /* XXX Update smallest_mtu */
5915 }
5916 net->port = port;
5917 }
5918#endif
5919#if defined(__FreeBSD__)
5920 if (net != NULL) {
5921 net->flowtype = mflowtype;
5922 net->flowid = mflowid;
5923 }
5924#endif
5925 if ((inp != NULL) && (stcb != NULL)) {
5926 sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
5927 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
5928 } else if ((inp != NULL) && (stcb == NULL)) {
5929 inp_decr = inp;
5930 }
5931 SCTP_STAT_INCR(sctps_badsum);
5932 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
5933 goto out;
5934 }
5935 }
5936#endif
5937 /* Destination port of 0 is illegal, based on RFC4960. */
5938 if (sh->dest_port == 0) {
5939 SCTP_STAT_INCR(sctps_hdrops);
5940 goto out;
5941 }
5942 stcb = sctp_findassociation_addr(m, offset, src, dst,
5943 sh, ch, &inp, &net, vrf_id);
5944#if defined(INET) || defined(INET6)
5945 if ((ch->chunk_type != SCTP_INITIATION) &&
5946 (net != NULL) && (net->port != port)) {
5947 if (net->port == 0) {
5948 /* UDP encapsulation turned on. */
5949 net->mtu -= sizeof(struct udphdr);
5950 if (stcb->asoc.smallest_mtu > net->mtu) {
5951 sctp_pathmtu_adjustment(stcb, net->mtu);
5952 }
5953 } else if (port == 0) {
5954 /* UDP encapsulation turned off. */
5955 net->mtu += sizeof(struct udphdr);
5956 /* XXX Update smallest_mtu */
5957 }
5958 net->port = port;
5959 }
5960#endif
5961#if defined(__FreeBSD__)
5962 if (net != NULL) {
5963 net->flowtype = mflowtype;
5964 net->flowid = mflowid;
5965 }
5966#endif
5967 if (inp == NULL) {
5968 SCTP_STAT_INCR(sctps_noport);
5969#if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
5970 if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
5971 goto out;
5972 }
5973#endif
5974 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
5975 sctp_send_shutdown_complete2(src, dst, sh,
5976#if defined(__FreeBSD__)
5977 mflowtype, mflowid, fibnum,
5978#endif
5979 vrf_id, port);
5980 goto out;
5981 }
5982 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
5983 goto out;
5984 }
5985 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
5986 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
5987 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
5988 (ch->chunk_type != SCTP_INIT))) {
5989 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5990 "Out of the blue");
5991 sctp_send_abort(m, iphlen, src, dst,
5992 sh, 0, op_err,
5993#if defined(__FreeBSD__)
5994 mflowtype, mflowid, fibnum,
5995#endif
5996 vrf_id, port);
5997 }
5998 }
5999 goto out;
6000 } else if (stcb == NULL) {
6001 inp_decr = inp;
6002 }
6003#ifdef IPSEC
6004 /*-
6005 * I very much doubt any of the IPSEC stuff will work but I have no
6006 * idea, so I will leave it in place.
6007 */
6008 if (inp != NULL) {
6009 switch (dst->sa_family) {
6010#ifdef INET
6011 case AF_INET:
6012 if (ipsec4_in_reject(m, &inp->ip_inp.inp)) {
6013 SCTP_STAT_INCR(sctps_hdrops);
6014 goto out;
6015 }
6016 break;
6017#endif
6018#ifdef INET6
6019 case AF_INET6:
6020 if (ipsec6_in_reject(m, &inp->ip_inp.inp)) {
6021 SCTP_STAT_INCR(sctps_hdrops);
6022 goto out;
6023 }
6024 break;
6025#endif
6026 default:
6027 break;
6028 }
6029 }
6030#endif
6031 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
6032 (void *)m, iphlen, offset, length, (void *)stcb);
6033 if (stcb) {
6034 /* always clear this before beginning a packet */
6035 stcb->asoc.authenticated = 0;
6036 stcb->asoc.seen_a_sack_this_pkt = 0;
6037 SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
6038 (void *)stcb, stcb->asoc.state);
6039
6040 if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
6041 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
6042 /*-
6043 * If we hit here, we had a ref count
6044 * up when the assoc was aborted and the
6045 * timer is clearing out the assoc, we should
6046 * NOT respond to any packet.. its OOTB.
6047 */
6048 SCTP_TCB_UNLOCK(stcb);
6049 stcb = NULL;
6050 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
6051 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6052 msg);
6053 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
6054#if defined(__FreeBSD__)
6055 mflowtype, mflowid, inp->fibnum,
6056#endif
6057 vrf_id, port);
6058 goto out;
6059 }
6060
6061 }
6062#if defined(__Userspace__)
6063 if (stcb && !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
6064 if (stcb->sctp_socket != NULL) {
6065 if (stcb->sctp_socket->so_head != NULL) {
6066 upcall_socket = stcb->sctp_socket->so_head;
6067 } else {
6068 upcall_socket = stcb->sctp_socket;
6069 }
6070 SOCK_LOCK(upcall_socket);
6071 soref(upcall_socket);
6072 SOCK_UNLOCK(upcall_socket);
6073 }
6074 }
6075#endif
6076 if (IS_SCTP_CONTROL(ch)) {
6077 /* process the control portion of the SCTP packet */
6078 /* sa_ignore NO_NULL_CHK */
6079 stcb = sctp_process_control(m, iphlen, &offset, length,
6080 src, dst, sh, ch,
6081 inp, stcb, &net, &fwd_tsn_seen,
6082#if defined(__FreeBSD__)
6083 mflowtype, mflowid, fibnum,
6084#endif
6085 vrf_id, port);
6086 if (stcb) {
6087 /* This covers us if the cookie-echo was there
6088 * and it changes our INP.
6089 */
6090 inp = stcb->sctp_ep;
6091#if defined(INET) || defined(INET6)
6092 if ((ch->chunk_type != SCTP_INITIATION) &&
6093 (net != NULL) && (net->port != port)) {
6094 if (net->port == 0) {
6095 /* UDP encapsulation turned on. */
6096 net->mtu -= sizeof(struct udphdr);
6097 if (stcb->asoc.smallest_mtu > net->mtu) {
6098 sctp_pathmtu_adjustment(stcb, net->mtu);
6099 }
6100 } else if (port == 0) {
6101 /* UDP encapsulation turned off. */
6102 net->mtu += sizeof(struct udphdr);
6103 /* XXX Update smallest_mtu */
6104 }
6105 net->port = port;
6106 }
6107#endif
6108 }
6109 } else {
6110 /*
6111 * no control chunks, so pre-process DATA chunks (these
6112 * checks are taken care of by control processing)
6113 */
6114
6115 /*
6116 * if DATA only packet, and auth is required, then punt...
6117 * can't have authenticated without any AUTH (control)
6118 * chunks
6119 */
6120 if ((stcb != NULL) &&
6121 (stcb->asoc.auth_supported == 1) &&
6122 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
6123 /* "silently" ignore */
6124 SCTP_STAT_INCR(sctps_recvauthmissing);
6125 goto out;
6126 }
6127 if (stcb == NULL) {
6128 /* out of the blue DATA chunk */
6129 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
6130 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6131 msg);
6132 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
6133#if defined(__FreeBSD__)
6134 mflowtype, mflowid, fibnum,
6135#endif
6136 vrf_id, port);
6137 goto out;
6138 }
6139 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
6140 /* v_tag mismatch! */
6141 SCTP_STAT_INCR(sctps_badvtag);
6142 goto out;
6143 }
6144 }
6145
6146 if (stcb == NULL) {
6147 /*
6148 * no valid TCB for this packet, or we found it's a bad
6149 * packet while processing control, or we're done with this
6150 * packet (done or skip rest of data), so we drop it...
6151 */
6152 goto out;
6153 }
6154#if defined(__Userspace__)
6155 if (stcb && upcall_socket == NULL && !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
6156 if (stcb->sctp_socket != NULL) {
6157 if (stcb->sctp_socket->so_head != NULL) {
6158 upcall_socket = stcb->sctp_socket->so_head;
6159 } else {
6160 upcall_socket = stcb->sctp_socket;
6161 }
6162 SOCK_LOCK(upcall_socket);
6163 soref(upcall_socket);
6164 SOCK_UNLOCK(upcall_socket);
6165 }
6166 }
6167#endif
6168 /*
6169 * DATA chunk processing
6170 */
6171 /* plow through the data chunks while length > offset */
6172
6173 /*
6174 * Rest should be DATA only. Check authentication state if AUTH for
6175 * DATA is required.
6176 */
6177 if ((length > offset) &&
6178 (stcb != NULL) &&
6179 (stcb->asoc.auth_supported == 1) &&
6180 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
6181 !stcb->asoc.authenticated) {
6182 /* "silently" ignore */
6183 SCTP_STAT_INCR(sctps_recvauthmissing);
6184 SCTPDBG(SCTP_DEBUG_AUTH1,
6185 "Data chunk requires AUTH, skipped\n");
6186 goto trigger_send;
6187 }
6188 if (length > offset) {
6189 int retval;
6190
6191 /*
6192 * First check to make sure our state is correct. We would
6193 * not get here unless we really did have a tag, so we don't
6194 * abort if this happens, just dump the chunk silently.
6195 */
6196 switch (SCTP_GET_STATE(&stcb->asoc)) {
6197 case SCTP_STATE_COOKIE_ECHOED:
6198 /*
6199 * we consider data with valid tags in this state
6200 * shows us the cookie-ack was lost. Imply it was
6201 * there.
6202 */
6203 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6204 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6205 stcb->asoc.overall_error_count,
6206 0,
6207 SCTP_FROM_SCTP_INPUT,
6208 __LINE__);
6209 }
6210 stcb->asoc.overall_error_count = 0;
6211 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
6212 break;
6213 case SCTP_STATE_COOKIE_WAIT:
6214 /*
6215 * We consider OOTB any data sent during asoc setup.
6216 */
6217 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
6218 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6219 msg);
6220 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
6221#if defined(__FreeBSD__)
6222 mflowtype, mflowid, inp->fibnum,
6223#endif
6224 vrf_id, port);
6225 goto out;
6226 /*sa_ignore NOTREACHED*/
6227 break;
6228 case SCTP_STATE_EMPTY: /* should not happen */
6229 case SCTP_STATE_INUSE: /* should not happen */
6230 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */
6231 case SCTP_STATE_SHUTDOWN_ACK_SENT:
6232 default:
6233 goto out;
6234 /*sa_ignore NOTREACHED*/
6235 break;
6236 case SCTP_STATE_OPEN:
6237 case SCTP_STATE_SHUTDOWN_SENT:
6238 break;
6239 }
6240 /* plow through the data chunks while length > offset */
6241 retval = sctp_process_data(mm, iphlen, &offset, length,
6242 inp, stcb, net, &high_tsn);
6243 if (retval == 2) {
6244 /*
6245 * The association aborted, NO UNLOCK needed since
6246 * the association is destroyed.
6247 */
6248 stcb = NULL;
6249 goto out;
6250 }
6251 data_processed = 1;
6252 /*
6253 * Anything important needs to have been m_copy'ed in
6254 * process_data
6255 */
6256 }
6257
6258 /* take care of ecn */
6259 if ((data_processed == 1) &&
6260 (stcb->asoc.ecn_supported == 1) &&
6261 ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
6262 /* Yep, we need to add a ECNE */
6263 sctp_send_ecn_echo(stcb, net, high_tsn);
6264 }
6265
6266 if ((data_processed == 0) && (fwd_tsn_seen)) {
6267 int was_a_gap;
6268 uint32_t highest_tsn;
6269
6270 if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
6271 highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
6272 } else {
6273 highest_tsn = stcb->asoc.highest_tsn_inside_map;
6274 }
6275 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
6276 stcb->asoc.send_sack = 1;
6277 sctp_sack_check(stcb, was_a_gap);
6278 } else if (fwd_tsn_seen) {
6279 stcb->asoc.send_sack = 1;
6280 }
6281 /* trigger send of any chunks in queue... */
6282trigger_send:
6283#ifdef SCTP_AUDITING_ENABLED
6284 sctp_audit_log(0xE0, 2);
6285 sctp_auditing(1, inp, stcb, net);
6286#endif
6287 SCTPDBG(SCTP_DEBUG_INPUT1,
6288 "Check for chunk output prw:%d tqe:%d tf=%d\n",
6289 stcb->asoc.peers_rwnd,
6290 TAILQ_EMPTY(&stcb->asoc.control_send_queue),
6291 stcb->asoc.total_flight);
6292 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
6293 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
6294 cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
6295 }
6296 if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
6297 cnt_ctrl_ready ||
6298 stcb->asoc.trigger_reset ||
6299 ((un_sent) &&
6300 (stcb->asoc.peers_rwnd > 0 ||
6301 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
6302 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
6303 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
6304 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
6305 }
6306#ifdef SCTP_AUDITING_ENABLED
6307 sctp_audit_log(0xE0, 3);
6308 sctp_auditing(2, inp, stcb, net);
6309#endif
6310 out:
6311 if (stcb != NULL) {
6312 SCTP_TCB_UNLOCK(stcb);
6313 }
6314#if defined(__Userspace__)
6315 if (upcall_socket != NULL) {
6316 if (upcall_socket->so_upcall != NULL) {
6317 (*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
6318 }
6319 ACCEPT_LOCK();
6320 SOCK_LOCK(upcall_socket);
6321 sorele(upcall_socket);
6322 }
6323#endif
6324 if (inp_decr != NULL) {
6325 /* reduce ref-count */
6326 SCTP_INP_WLOCK(inp_decr);
6327 SCTP_INP_DECR_REF(inp_decr);
6328 SCTP_INP_WUNLOCK(inp_decr);
6329 }
6330 return;
6331}
6332
6333#ifdef INET
6334#if !defined(__Userspace__)
/*
 * IPv4 input entry point: pull up the IP/SCTP/first-chunk headers,
 * extract the addresses and ECN bits, decide whether the CRC32c must be
 * verified in software, and hand the packet to
 * sctp_common_input_processing().  The mbuf chain is freed here before
 * returning.  `port` is the UDP encapsulation source port (0 when the
 * packet arrived as plain SCTP); platforms without the _with_port entry
 * default it to 0 below.
 */
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
#elif defined(__Panda__)
void
sctp_input(pakhandle_type i_pak)
#else
void
#if __STDC__
sctp_input(struct mbuf *i_pak,...)
#else
sctp_input(i_pak, va_alist)
	struct mbuf *i_pak;
#endif
#endif
{
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct sockaddr_in src, dst;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_chunkhdr *ch;
	int length, offset;
#if !defined(SCTP_WITH_NO_CSUM)
	uint8_t compute_crc;
#endif
#if defined(__FreeBSD__)
	uint32_t mflowid;
	uint8_t mflowtype;
	uint16_t fibnum;
#endif
#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__))
	uint16_t port = 0;
#endif

#if defined(__Panda__)
	/* This is Evil, but its the only way to make panda work right. */
	iphlen = sizeof(struct ip);
#else
	iphlen = off;
#endif
	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	m = SCTP_HEADER_TO_CHAIN(i_pak);
#ifdef __Panda__
	SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
	(void)SCTP_RELEASE_HEADER(i_pak);
#endif
#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(m, SCTP_MBUF_INPUT);
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
		sctp_packet_log(m);
	}
#endif
#if defined(__FreeBSD__)
#if __FreeBSD_version > 1000049
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	        m->m_pkthdr.len,
	        if_name(m->m_pkthdr.rcvif),
	        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
#elif __FreeBSD_version >= 800000
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        if_name(m->m_pkthdr.rcvif),
	        m->m_pkthdr.csum_flags);
#else
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        m->m_pkthdr.rcvif->if_xname,
	        m->m_pkthdr.csum_flags);
#endif
#endif
#if defined(__APPLE__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        m->m_pkthdr.rcvif->if_name,
	        m->m_pkthdr.rcvif->if_unit,
	        m->m_pkthdr.csum_flags);
#endif
#if defined(__Windows__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	        m->m_pkthdr.len,
	        m->m_pkthdr.rcvif->if_xname,
	        m->m_pkthdr.csum_flags);
#endif
#if defined(__FreeBSD__)
	/* Capture the flow id/type and FIB before the headers are parsed. */
	mflowid = m->m_pkthdr.flowid;
	mflowtype = M_HASHTYPE_GET(m);
	fibnum = M_GETFIB(m);
#endif
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
	}
	ip = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
	/* offset now points at the first chunk header, not past it. */
	offset -= sizeof(struct sctp_chunkhdr);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	src.sin_len = sizeof(struct sockaddr_in);
#endif
	src.sin_port = sh->src_port;
	src.sin_addr = ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	dst.sin_len = sizeof(struct sockaddr_in);
#endif
	dst.sin_port = sh->dest_port;
	dst.sin_addr = ip->ip_dst;
#if defined(__Windows__)
	NTOHS(ip->ip_len);
#endif
#if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
	ip->ip_len = ntohs(ip->ip_len);
#endif
	/* Platforms disagree on whether ip_len includes the IP header. */
#if defined(__FreeBSD__)
#if __FreeBSD_version >= 1000000
	length = ntohs(ip->ip_len);
#else
	length = ip->ip_len + iphlen;
#endif
#elif defined(__APPLE__)
	length = ip->ip_len + iphlen;
#elif defined(__Userspace__)
#if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
	length = ip->ip_len;
#else
	length = ip->ip_len + iphlen;
#endif
#else
	length = ip->ip_len;
#endif
	/* Validate mbuf chain length with IP payload length. */
	if (SCTP_HEADER_LEN(m) != length) {
		SCTPDBG(SCTP_DEBUG_INPUT1,
		        "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
		goto out;
	}
	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
		goto out;
	}
	ecn_bits = ip->ip_tos;
#if defined(SCTP_WITH_NO_CSUM)
	SCTP_STAT_INCR(sctps_recvnocrc);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
	/* Trust the NIC's CRC offload verdict when it is present. */
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
#else
	/* Optionally skip the CRC for loopback traffic (sysctl-controlled). */
	if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
	    ((src.sin_addr.s_addr == dst.sin_addr.s_addr) ||
	     (SCTP_IS_IT_LOOPBACK(m)))) {
		SCTP_STAT_INCR(sctps_recvnocrc);
		compute_crc = 0;
	} else {
#endif
		SCTP_STAT_INCR(sctps_recvswcrc);
		compute_crc = 1;
	}
#endif
	sctp_common_input_processing(&m, iphlen, offset, length,
	                             (struct sockaddr *)&src,
	                             (struct sockaddr *)&dst,
	                             sh, ch,
#if !defined(SCTP_WITH_NO_CSUM)
	                             compute_crc,
#endif
	                             ecn_bits,
#if defined(__FreeBSD__)
	                             mflowtype, mflowid, fibnum,
#endif
	                             vrf_id, port);
 out:
	if (m) {
		sctp_m_freem(m);
	}
	return;
}
6543
6544#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
6545extern int *sctp_cpuarry;
6546#endif
6547
/*
 * Protocol-switch entry point for IPv4 SCTP.  With SCTP_MCORE_INPUT on
 * an SMP kernel the packet is hashed to a per-CPU input queue via
 * sctp_queue_to_mcore(); otherwise it is processed inline through
 * sctp_input_with_port() with UDP encapsulation port 0.  Newer FreeBSD
 * uses the (mbuf **, int *, proto) signature and returns IPPROTO_DONE.
 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
int
sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
{
	struct mbuf *m;
	int off;

	m = *mp;
	off = *offp;
#else
void
sctp_input(struct mbuf *m, int off)
{
#endif
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
	if (mp_ncpus > 1) {
		struct ip *ip;
		struct sctphdr *sh;
		int offset;
		int cpu_to_use;
		uint32_t flowid, tag;

		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			/* Lower layers already computed a flow id; reuse it. */
			flowid = m->m_pkthdr.flowid;
		} else {
			/* No flow id built by lower layers
			 * fix it so we create one.
			 */
			/* Make sure the SCTP common header is contiguous. */
			offset = off + sizeof(struct sctphdr);
			if (SCTP_BUF_LEN(m) < offset) {
				if ((m = m_pullup(m, offset)) == NULL) {
					SCTP_STAT_INCR(sctps_hdrops);
#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
					return (IPPROTO_DONE);
#else
					return;
#endif
				}
			}
			ip = mtod(m, struct ip *);
			sh = (struct sctphdr *)((caddr_t)ip + off);
			tag = htonl(sh->v_tag);
			/* Mix the verification tag with both ports as the hash. */
			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
			m->m_pkthdr.flowid = flowid;
			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
		}
		/* Keep each flow on one CPU to preserve per-association order. */
		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
		sctp_queue_to_mcore(m, off, cpu_to_use);
#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
		return (IPPROTO_DONE);
#else
		return;
#endif
	}
#endif
	sctp_input_with_port(m, off, 0);
#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
	return (IPPROTO_DONE);
#endif
}
6608#endif
6609#endif