blob: 87e7774bac3b8793d7bcebeff4f8853dec2ffb1d [file] [log] [blame]
James Kuszmaul4cb043c2021-01-17 11:25:51 -08001/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#ifdef __FreeBSD__
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 310590 2016-12-26 11:06:41Z tuexen $");
36#endif
37
38#include <netinet/sctp_os.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctputil.h>
41#include <netinet/sctp_var.h>
42#include <netinet/sctp_sysctl.h>
43#ifdef INET6
44#if defined(__Userspace__) || defined(__FreeBSD__)
45#include <netinet6/sctp6_var.h>
46#endif
47#endif
48#include <netinet/sctp_header.h>
49#include <netinet/sctp_output.h>
50#include <netinet/sctp_uio.h>
51#include <netinet/sctp_timer.h>
52#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
53#include <netinet/sctp_auth.h>
54#include <netinet/sctp_asconf.h>
55#include <netinet/sctp_bsd_addr.h>
56#if defined(__Userspace__)
57#include <netinet/sctp_constants.h>
58#endif
59#if defined(__FreeBSD__)
60#include <netinet/udp.h>
61#include <netinet/udp_var.h>
62#include <sys/proc.h>
63#ifdef INET6
64#include <netinet/icmp6.h>
65#endif
66#endif
67
68#if defined(__APPLE__)
69#define APPLE_FILE_NO 8
70#endif
71
72#if defined(__Windows__)
73#if !defined(SCTP_LOCAL_TRACE_BUF)
74#include "eventrace_netinet.h"
75#include "sctputil.tmh" /* this is the file that will be auto generated */
76#endif
77#else
78#ifndef KTR_SCTP
79#define KTR_SCTP KTR_SUBSYS
80#endif
81#endif
82
83extern const struct sctp_cc_functions sctp_cc_functions[];
84extern const struct sctp_ss_functions sctp_ss_functions[];
85
86void
87sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
88{
89#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
90 struct sctp_cwnd_log sctp_clog;
91
92 sctp_clog.x.sb.stcb = stcb;
93 sctp_clog.x.sb.so_sbcc = sb->sb_cc;
94 if (stcb)
95 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
96 else
97 sctp_clog.x.sb.stcb_sbcc = 0;
98 sctp_clog.x.sb.incr = incr;
99 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 SCTP_LOG_EVENT_SB,
101 from,
102 sctp_clog.x.misc.log1,
103 sctp_clog.x.misc.log2,
104 sctp_clog.x.misc.log3,
105 sctp_clog.x.misc.log4);
106#endif
107}
108
109void
110sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
111{
112#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
113 struct sctp_cwnd_log sctp_clog;
114
115 sctp_clog.x.close.inp = (void *)inp;
116 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
117 if (stcb) {
118 sctp_clog.x.close.stcb = (void *)stcb;
119 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
120 } else {
121 sctp_clog.x.close.stcb = 0;
122 sctp_clog.x.close.state = 0;
123 }
124 sctp_clog.x.close.loc = loc;
125 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
126 SCTP_LOG_EVENT_CLOSE,
127 0,
128 sctp_clog.x.misc.log1,
129 sctp_clog.x.misc.log2,
130 sctp_clog.x.misc.log3,
131 sctp_clog.x.misc.log4);
132#endif
133}
134
135void
136rto_logging(struct sctp_nets *net, int from)
137{
138#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
139 struct sctp_cwnd_log sctp_clog;
140
141 memset(&sctp_clog, 0, sizeof(sctp_clog));
142 sctp_clog.x.rto.net = (void *) net;
143 sctp_clog.x.rto.rtt = net->rtt / 1000;
144 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
145 SCTP_LOG_EVENT_RTT,
146 from,
147 sctp_clog.x.misc.log1,
148 sctp_clog.x.misc.log2,
149 sctp_clog.x.misc.log3,
150 sctp_clog.x.misc.log4);
151#endif
152}
153
154void
155sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
156{
157#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
158 struct sctp_cwnd_log sctp_clog;
159
160 sctp_clog.x.strlog.stcb = stcb;
161 sctp_clog.x.strlog.n_tsn = tsn;
162 sctp_clog.x.strlog.n_sseq = sseq;
163 sctp_clog.x.strlog.e_tsn = 0;
164 sctp_clog.x.strlog.e_sseq = 0;
165 sctp_clog.x.strlog.strm = stream;
166 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
167 SCTP_LOG_EVENT_STRM,
168 from,
169 sctp_clog.x.misc.log1,
170 sctp_clog.x.misc.log2,
171 sctp_clog.x.misc.log3,
172 sctp_clog.x.misc.log4);
173#endif
174}
175
176void
177sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
178{
179#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
180 struct sctp_cwnd_log sctp_clog;
181
182 sctp_clog.x.nagle.stcb = (void *)stcb;
183 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
184 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
185 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
186 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
187 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
188 SCTP_LOG_EVENT_NAGLE,
189 action,
190 sctp_clog.x.misc.log1,
191 sctp_clog.x.misc.log2,
192 sctp_clog.x.misc.log3,
193 sctp_clog.x.misc.log4);
194#endif
195}
196
197void
198sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
199{
200#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
201 struct sctp_cwnd_log sctp_clog;
202
203 sctp_clog.x.sack.cumack = cumack;
204 sctp_clog.x.sack.oldcumack = old_cumack;
205 sctp_clog.x.sack.tsn = tsn;
206 sctp_clog.x.sack.numGaps = gaps;
207 sctp_clog.x.sack.numDups = dups;
208 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
209 SCTP_LOG_EVENT_SACK,
210 from,
211 sctp_clog.x.misc.log1,
212 sctp_clog.x.misc.log2,
213 sctp_clog.x.misc.log3,
214 sctp_clog.x.misc.log4);
215#endif
216}
217
218void
219sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
220{
221#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
222 struct sctp_cwnd_log sctp_clog;
223
224 memset(&sctp_clog, 0, sizeof(sctp_clog));
225 sctp_clog.x.map.base = map;
226 sctp_clog.x.map.cum = cum;
227 sctp_clog.x.map.high = high;
228 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
229 SCTP_LOG_EVENT_MAP,
230 from,
231 sctp_clog.x.misc.log1,
232 sctp_clog.x.misc.log2,
233 sctp_clog.x.misc.log3,
234 sctp_clog.x.misc.log4);
235#endif
236}
237
238void
239sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
240{
241#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
242 struct sctp_cwnd_log sctp_clog;
243
244 memset(&sctp_clog, 0, sizeof(sctp_clog));
245 sctp_clog.x.fr.largest_tsn = biggest_tsn;
246 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
247 sctp_clog.x.fr.tsn = tsn;
248 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
249 SCTP_LOG_EVENT_FR,
250 from,
251 sctp_clog.x.misc.log1,
252 sctp_clog.x.misc.log2,
253 sctp_clog.x.misc.log3,
254 sctp_clog.x.misc.log4);
255#endif
256}
257
258#ifdef SCTP_MBUF_LOGGING
259void
260sctp_log_mb(struct mbuf *m, int from)
261{
262#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
263 struct sctp_cwnd_log sctp_clog;
264
265 sctp_clog.x.mb.mp = m;
266 sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
267 sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
268 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
269 if (SCTP_BUF_IS_EXTENDED(m)) {
270 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
271#if defined(__APPLE__)
272 /* APPLE does not use a ref_cnt, but a forward/backward ref queue */
273#else
274 sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
275#endif
276 } else {
277 sctp_clog.x.mb.ext = 0;
278 sctp_clog.x.mb.refcnt = 0;
279 }
280 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
281 SCTP_LOG_EVENT_MBUF,
282 from,
283 sctp_clog.x.misc.log1,
284 sctp_clog.x.misc.log2,
285 sctp_clog.x.misc.log3,
286 sctp_clog.x.misc.log4);
287#endif
288}
289
290void
291sctp_log_mbc(struct mbuf *m, int from)
292{
293 struct mbuf *mat;
294
295 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
296 sctp_log_mb(mat, from);
297 }
298}
299#endif
300
301void
302sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
303{
304#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
305 struct sctp_cwnd_log sctp_clog;
306
307 if (control == NULL) {
308 SCTP_PRINTF("Gak log of NULL?\n");
309 return;
310 }
311 sctp_clog.x.strlog.stcb = control->stcb;
312 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
313 sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
314 sctp_clog.x.strlog.strm = control->sinfo_stream;
315 if (poschk != NULL) {
316 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
317 sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
318 } else {
319 sctp_clog.x.strlog.e_tsn = 0;
320 sctp_clog.x.strlog.e_sseq = 0;
321 }
322 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
323 SCTP_LOG_EVENT_STRM,
324 from,
325 sctp_clog.x.misc.log1,
326 sctp_clog.x.misc.log2,
327 sctp_clog.x.misc.log3,
328 sctp_clog.x.misc.log4);
329#endif
330}
331
332void
333sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
334{
335#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
336 struct sctp_cwnd_log sctp_clog;
337
338 sctp_clog.x.cwnd.net = net;
339 if (stcb->asoc.send_queue_cnt > 255)
340 sctp_clog.x.cwnd.cnt_in_send = 255;
341 else
342 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
343 if (stcb->asoc.stream_queue_cnt > 255)
344 sctp_clog.x.cwnd.cnt_in_str = 255;
345 else
346 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
347
348 if (net) {
349 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
350 sctp_clog.x.cwnd.inflight = net->flight_size;
351 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
352 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
353 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
354 }
355 if (SCTP_CWNDLOG_PRESEND == from) {
356 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
357 }
358 sctp_clog.x.cwnd.cwnd_augment = augment;
359 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
360 SCTP_LOG_EVENT_CWND,
361 from,
362 sctp_clog.x.misc.log1,
363 sctp_clog.x.misc.log2,
364 sctp_clog.x.misc.log3,
365 sctp_clog.x.misc.log4);
366#endif
367}
368
369#ifndef __APPLE__
370void
371sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
372{
373#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
374 struct sctp_cwnd_log sctp_clog;
375
376 memset(&sctp_clog, 0, sizeof(sctp_clog));
377 if (inp) {
378 sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
379
380 } else {
381 sctp_clog.x.lock.sock = (void *) NULL;
382 }
383 sctp_clog.x.lock.inp = (void *) inp;
384#if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
385 if (stcb) {
386 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
387 } else {
388 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
389 }
390 if (inp) {
391 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
392 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
393 } else {
394 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
395 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
396 }
397#if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
398 sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
399#else
400 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
401#endif
402 if (inp && (inp->sctp_socket)) {
403 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
404 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
405 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
406 } else {
407 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
408 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
409 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
410 }
411#endif
412 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
413 SCTP_LOG_LOCK_EVENT,
414 from,
415 sctp_clog.x.misc.log1,
416 sctp_clog.x.misc.log2,
417 sctp_clog.x.misc.log3,
418 sctp_clog.x.misc.log4);
419#endif
420}
421#endif
422
423void
424sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
425{
426#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
427 struct sctp_cwnd_log sctp_clog;
428
429 memset(&sctp_clog, 0, sizeof(sctp_clog));
430 sctp_clog.x.cwnd.net = net;
431 sctp_clog.x.cwnd.cwnd_new_value = error;
432 sctp_clog.x.cwnd.inflight = net->flight_size;
433 sctp_clog.x.cwnd.cwnd_augment = burst;
434 if (stcb->asoc.send_queue_cnt > 255)
435 sctp_clog.x.cwnd.cnt_in_send = 255;
436 else
437 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
438 if (stcb->asoc.stream_queue_cnt > 255)
439 sctp_clog.x.cwnd.cnt_in_str = 255;
440 else
441 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
442 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
443 SCTP_LOG_EVENT_MAXBURST,
444 from,
445 sctp_clog.x.misc.log1,
446 sctp_clog.x.misc.log2,
447 sctp_clog.x.misc.log3,
448 sctp_clog.x.misc.log4);
449#endif
450}
451
452void
453sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
454{
455#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
456 struct sctp_cwnd_log sctp_clog;
457
458 sctp_clog.x.rwnd.rwnd = peers_rwnd;
459 sctp_clog.x.rwnd.send_size = snd_size;
460 sctp_clog.x.rwnd.overhead = overhead;
461 sctp_clog.x.rwnd.new_rwnd = 0;
462 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
463 SCTP_LOG_EVENT_RWND,
464 from,
465 sctp_clog.x.misc.log1,
466 sctp_clog.x.misc.log2,
467 sctp_clog.x.misc.log3,
468 sctp_clog.x.misc.log4);
469#endif
470}
471
472void
473sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
474{
475#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
476 struct sctp_cwnd_log sctp_clog;
477
478 sctp_clog.x.rwnd.rwnd = peers_rwnd;
479 sctp_clog.x.rwnd.send_size = flight_size;
480 sctp_clog.x.rwnd.overhead = overhead;
481 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
482 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
483 SCTP_LOG_EVENT_RWND,
484 from,
485 sctp_clog.x.misc.log1,
486 sctp_clog.x.misc.log2,
487 sctp_clog.x.misc.log3,
488 sctp_clog.x.misc.log4);
489#endif
490}
491
492#ifdef SCTP_MBCNT_LOGGING
493static void
494sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
495{
496#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
497 struct sctp_cwnd_log sctp_clog;
498
499 sctp_clog.x.mbcnt.total_queue_size = total_oq;
500 sctp_clog.x.mbcnt.size_change = book;
501 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
502 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
503 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
504 SCTP_LOG_EVENT_MBCNT,
505 from,
506 sctp_clog.x.misc.log1,
507 sctp_clog.x.misc.log2,
508 sctp_clog.x.misc.log3,
509 sctp_clog.x.misc.log4);
510#endif
511}
512#endif
513
514void
515sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
516{
517#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
518 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
519 SCTP_LOG_MISC_EVENT,
520 from,
521 a, b, c, d);
522#endif
523}
524
525void
526sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
527{
528#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
529 struct sctp_cwnd_log sctp_clog;
530
531 sctp_clog.x.wake.stcb = (void *)stcb;
532 sctp_clog.x.wake.wake_cnt = wake_cnt;
533 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
534 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
535 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
536
537 if (stcb->asoc.stream_queue_cnt < 0xff)
538 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
539 else
540 sctp_clog.x.wake.stream_qcnt = 0xff;
541
542 if (stcb->asoc.chunks_on_out_queue < 0xff)
543 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
544 else
545 sctp_clog.x.wake.chunks_on_oque = 0xff;
546
547 sctp_clog.x.wake.sctpflags = 0;
548 /* set in the defered mode stuff */
549 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
550 sctp_clog.x.wake.sctpflags |= 1;
551 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
552 sctp_clog.x.wake.sctpflags |= 2;
553 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
554 sctp_clog.x.wake.sctpflags |= 4;
555 /* what about the sb */
556 if (stcb->sctp_socket) {
557 struct socket *so = stcb->sctp_socket;
558
559 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
560 } else {
561 sctp_clog.x.wake.sbflags = 0xff;
562 }
563 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
564 SCTP_LOG_EVENT_WAKE,
565 from,
566 sctp_clog.x.misc.log1,
567 sctp_clog.x.misc.log2,
568 sctp_clog.x.misc.log3,
569 sctp_clog.x.misc.log4);
570#endif
571}
572
573void
574sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
575{
576#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
577 struct sctp_cwnd_log sctp_clog;
578
579 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
580 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
581 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
582 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
583 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
584 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
585 sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
586 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
587 SCTP_LOG_EVENT_BLOCK,
588 from,
589 sctp_clog.x.misc.log1,
590 sctp_clog.x.misc.log2,
591 sctp_clog.x.misc.log3,
592 sctp_clog.x.misc.log4);
593#endif
594}
595
596int
597sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
598{
599 /* May need to fix this if ktrdump does not work */
600 return (0);
601}
602
603#ifdef SCTP_AUDITING_ENABLED
604uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
605static int sctp_audit_indx = 0;
606
607static
608void
609sctp_print_audit_report(void)
610{
611 int i;
612 int cnt;
613
614 cnt = 0;
615 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
616 if ((sctp_audit_data[i][0] == 0xe0) &&
617 (sctp_audit_data[i][1] == 0x01)) {
618 cnt = 0;
619 SCTP_PRINTF("\n");
620 } else if (sctp_audit_data[i][0] == 0xf0) {
621 cnt = 0;
622 SCTP_PRINTF("\n");
623 } else if ((sctp_audit_data[i][0] == 0xc0) &&
624 (sctp_audit_data[i][1] == 0x01)) {
625 SCTP_PRINTF("\n");
626 cnt = 0;
627 }
628 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
629 (uint32_t) sctp_audit_data[i][1]);
630 cnt++;
631 if ((cnt % 14) == 0)
632 SCTP_PRINTF("\n");
633 }
634 for (i = 0; i < sctp_audit_indx; i++) {
635 if ((sctp_audit_data[i][0] == 0xe0) &&
636 (sctp_audit_data[i][1] == 0x01)) {
637 cnt = 0;
638 SCTP_PRINTF("\n");
639 } else if (sctp_audit_data[i][0] == 0xf0) {
640 cnt = 0;
641 SCTP_PRINTF("\n");
642 } else if ((sctp_audit_data[i][0] == 0xc0) &&
643 (sctp_audit_data[i][1] == 0x01)) {
644 SCTP_PRINTF("\n");
645 cnt = 0;
646 }
647 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
648 (uint32_t) sctp_audit_data[i][1]);
649 cnt++;
650 if ((cnt % 14) == 0)
651 SCTP_PRINTF("\n");
652 }
653 SCTP_PRINTF("\n");
654}
655
656void
657sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
658 struct sctp_nets *net)
659{
660 int resend_cnt, tot_out, rep, tot_book_cnt;
661 struct sctp_nets *lnet;
662 struct sctp_tmit_chunk *chk;
663
664 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
665 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
666 sctp_audit_indx++;
667 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 sctp_audit_indx = 0;
669 }
670 if (inp == NULL) {
671 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
672 sctp_audit_data[sctp_audit_indx][1] = 0x01;
673 sctp_audit_indx++;
674 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
675 sctp_audit_indx = 0;
676 }
677 return;
678 }
679 if (stcb == NULL) {
680 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
681 sctp_audit_data[sctp_audit_indx][1] = 0x02;
682 sctp_audit_indx++;
683 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
684 sctp_audit_indx = 0;
685 }
686 return;
687 }
688 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
689 sctp_audit_data[sctp_audit_indx][1] =
690 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
691 sctp_audit_indx++;
692 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
693 sctp_audit_indx = 0;
694 }
695 rep = 0;
696 tot_book_cnt = 0;
697 resend_cnt = tot_out = 0;
698 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
699 if (chk->sent == SCTP_DATAGRAM_RESEND) {
700 resend_cnt++;
701 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
702 tot_out += chk->book_size;
703 tot_book_cnt++;
704 }
705 }
706 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
707 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
708 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
709 sctp_audit_indx++;
710 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
711 sctp_audit_indx = 0;
712 }
713 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
714 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
715 rep = 1;
716 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
717 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
718 sctp_audit_data[sctp_audit_indx][1] =
719 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
720 sctp_audit_indx++;
721 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
722 sctp_audit_indx = 0;
723 }
724 }
725 if (tot_out != stcb->asoc.total_flight) {
726 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
727 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
728 sctp_audit_indx++;
729 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
730 sctp_audit_indx = 0;
731 }
732 rep = 1;
733 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
734 (int)stcb->asoc.total_flight);
735 stcb->asoc.total_flight = tot_out;
736 }
737 if (tot_book_cnt != stcb->asoc.total_flight_count) {
738 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
739 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
740 sctp_audit_indx++;
741 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
742 sctp_audit_indx = 0;
743 }
744 rep = 1;
745 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
746
747 stcb->asoc.total_flight_count = tot_book_cnt;
748 }
749 tot_out = 0;
750 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
751 tot_out += lnet->flight_size;
752 }
753 if (tot_out != stcb->asoc.total_flight) {
754 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
755 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
756 sctp_audit_indx++;
757 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
758 sctp_audit_indx = 0;
759 }
760 rep = 1;
761 SCTP_PRINTF("real flight:%d net total was %d\n",
762 stcb->asoc.total_flight, tot_out);
763 /* now corrective action */
764 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
765
766 tot_out = 0;
767 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
768 if ((chk->whoTo == lnet) &&
769 (chk->sent < SCTP_DATAGRAM_RESEND)) {
770 tot_out += chk->book_size;
771 }
772 }
773 if (lnet->flight_size != tot_out) {
774 SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
775 (void *)lnet, lnet->flight_size,
776 tot_out);
777 lnet->flight_size = tot_out;
778 }
779 }
780 }
781 if (rep) {
782 sctp_print_audit_report();
783 }
784}
785
786void
787sctp_audit_log(uint8_t ev, uint8_t fd)
788{
789
790 sctp_audit_data[sctp_audit_indx][0] = ev;
791 sctp_audit_data[sctp_audit_indx][1] = fd;
792 sctp_audit_indx++;
793 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
794 sctp_audit_indx = 0;
795 }
796}
797
798#endif
799
800/*
801 * sctp_stop_timers_for_shutdown() should be called
802 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
803 * state to make sure that all timers are stopped.
804 */
805void
806sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
807{
808 struct sctp_association *asoc;
809 struct sctp_nets *net;
810
811 asoc = &stcb->asoc;
812
813 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
814 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
815 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
816 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
817 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
818 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
819 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
820 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
821 }
822}
823
824/*
825 * a list of sizes based on typical mtu's, used only if next hop size not
826 * returned.
827 */
828static uint32_t sctp_mtu_sizes[] = {
829 68,
830 296,
831 508,
832 512,
833 544,
834 576,
835 1006,
836 1492,
837 1500,
838 1536,
839 2002,
840 2048,
841 4352,
842 4464,
843 8166,
844 17914,
845 32000,
846 65535
847};
848
849/*
850 * Return the largest MTU smaller than val. If there is no
851 * entry, just return val.
852 */
853uint32_t
854sctp_get_prev_mtu(uint32_t val)
855{
856 uint32_t i;
857
858 if (val <= sctp_mtu_sizes[0]) {
859 return (val);
860 }
861 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
862 if (val <= sctp_mtu_sizes[i]) {
863 break;
864 }
865 }
866 return (sctp_mtu_sizes[i - 1]);
867}
868
869/*
870 * Return the smallest MTU larger than val. If there is no
871 * entry, just return val.
872 */
873uint32_t
874sctp_get_next_mtu(uint32_t val)
875{
876 /* select another MTU that is just bigger than this one */
877 uint32_t i;
878
879 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
880 if (val < sctp_mtu_sizes[i]) {
881 return (sctp_mtu_sizes[i]);
882 }
883 }
884 return (val);
885}
886
887void
888sctp_fill_random_store(struct sctp_pcb *m)
889{
890 /*
891 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
892 * our counter. The result becomes our good random numbers and we
893 * then setup to give these out. Note that we do no locking to
894 * protect this. This is ok, since if competing folks call this we
895 * will get more gobbled gook in the random store which is what we
896 * want. There is a danger that two guys will use the same random
897 * numbers, but thats ok too since that is random as well :->
898 */
899 m->store_at = 0;
900 (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
901 sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
902 sizeof(m->random_counter), (uint8_t *)m->random_store);
903 m->random_counter++;
904}
905
906uint32_t
907sctp_select_initial_TSN(struct sctp_pcb *inp)
908{
909 /*
910 * A true implementation should use random selection process to get
911 * the initial stream sequence number, using RFC1750 as a good
912 * guideline
913 */
914 uint32_t x, *xp;
915 uint8_t *p;
916 int store_at, new_store;
917
918 if (inp->initial_sequence_debug != 0) {
919 uint32_t ret;
920
921 ret = inp->initial_sequence_debug;
922 inp->initial_sequence_debug++;
923 return (ret);
924 }
925 retry:
926 store_at = inp->store_at;
927 new_store = store_at + sizeof(uint32_t);
928 if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
929 new_store = 0;
930 }
931 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
932 goto retry;
933 }
934 if (new_store == 0) {
935 /* Refill the random store */
936 sctp_fill_random_store(inp);
937 }
938 p = &inp->random_store[store_at];
939 xp = (uint32_t *)p;
940 x = *xp;
941 return (x);
942}
943
944uint32_t
945sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
946{
947 uint32_t x;
948 struct timeval now;
949
950 if (check) {
951 (void)SCTP_GETTIME_TIMEVAL(&now);
952 }
953 for (;;) {
954 x = sctp_select_initial_TSN(&inp->sctp_ep);
955 if (x == 0) {
956 /* we never use 0 */
957 continue;
958 }
959 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
960 break;
961 }
962 }
963 return (x);
964}
965
966int32_t
967sctp_map_assoc_state(int kernel_state)
968{
969 int32_t user_state;
970
971 if (kernel_state & SCTP_STATE_WAS_ABORTED) {
972 user_state = SCTP_CLOSED;
973 } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
974 user_state = SCTP_SHUTDOWN_PENDING;
975 } else {
976 switch (kernel_state & SCTP_STATE_MASK) {
977 case SCTP_STATE_EMPTY:
978 user_state = SCTP_CLOSED;
979 break;
980 case SCTP_STATE_INUSE:
981 user_state = SCTP_CLOSED;
982 break;
983 case SCTP_STATE_COOKIE_WAIT:
984 user_state = SCTP_COOKIE_WAIT;
985 break;
986 case SCTP_STATE_COOKIE_ECHOED:
987 user_state = SCTP_COOKIE_ECHOED;
988 break;
989 case SCTP_STATE_OPEN:
990 user_state = SCTP_ESTABLISHED;
991 break;
992 case SCTP_STATE_SHUTDOWN_SENT:
993 user_state = SCTP_SHUTDOWN_SENT;
994 break;
995 case SCTP_STATE_SHUTDOWN_RECEIVED:
996 user_state = SCTP_SHUTDOWN_RECEIVED;
997 break;
998 case SCTP_STATE_SHUTDOWN_ACK_SENT:
999 user_state = SCTP_SHUTDOWN_ACK_SENT;
1000 break;
1001 default:
1002 user_state = SCTP_CLOSED;
1003 break;
1004 }
1005 }
1006 return (user_state);
1007}
1008
1009int
1010sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1011 uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1012{
1013 struct sctp_association *asoc;
1014 /*
1015 * Anything set to zero is taken care of by the allocation routine's
1016 * bzero
1017 */
1018
1019 /*
1020 * Up front select what scoping to apply on addresses I tell my peer
1021 * Not sure what to do with these right now, we will need to come up
1022 * with a way to set them. We may need to pass them through from the
1023 * caller in the sctp_aloc_assoc() function.
1024 */
1025 int i;
1026#if defined(SCTP_DETAILED_STR_STATS)
1027 int j;
1028#endif
1029
1030 asoc = &stcb->asoc;
1031 /* init all variables to a known value. */
1032 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
1033 asoc->max_burst = inp->sctp_ep.max_burst;
1034 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1035 asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1036 asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1037 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1038 asoc->ecn_supported = inp->ecn_supported;
1039 asoc->prsctp_supported = inp->prsctp_supported;
1040 asoc->idata_supported = inp->idata_supported;
1041 asoc->auth_supported = inp->auth_supported;
1042 asoc->asconf_supported = inp->asconf_supported;
1043 asoc->reconfig_supported = inp->reconfig_supported;
1044 asoc->nrsack_supported = inp->nrsack_supported;
1045 asoc->pktdrop_supported = inp->pktdrop_supported;
1046 asoc->idata_supported = inp->idata_supported;
1047 asoc->sctp_cmt_pf = (uint8_t)0;
1048 asoc->sctp_frag_point = inp->sctp_frag_point;
1049 asoc->sctp_features = inp->sctp_features;
1050 asoc->default_dscp = inp->sctp_ep.default_dscp;
1051 asoc->max_cwnd = inp->max_cwnd;
1052#ifdef INET6
1053 if (inp->sctp_ep.default_flowlabel) {
1054 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1055 } else {
1056 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1057 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1058 asoc->default_flowlabel &= 0x000fffff;
1059 asoc->default_flowlabel |= 0x80000000;
1060 } else {
1061 asoc->default_flowlabel = 0;
1062 }
1063 }
1064#endif
1065 asoc->sb_send_resv = 0;
1066 if (override_tag) {
1067 asoc->my_vtag = override_tag;
1068 } else {
1069 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1070 }
1071 /* Get the nonce tags */
1072 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1073 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1074 asoc->vrf_id = vrf_id;
1075
1076#ifdef SCTP_ASOCLOG_OF_TSNS
1077 asoc->tsn_in_at = 0;
1078 asoc->tsn_out_at = 0;
1079 asoc->tsn_in_wrapped = 0;
1080 asoc->tsn_out_wrapped = 0;
1081 asoc->cumack_log_at = 0;
1082 asoc->cumack_log_atsnt = 0;
1083#endif
1084#ifdef SCTP_FS_SPEC_LOG
1085 asoc->fs_index = 0;
1086#endif
1087 asoc->refcnt = 0;
1088 asoc->assoc_up_sent = 0;
1089 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1090 sctp_select_initial_TSN(&inp->sctp_ep);
1091 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1092 /* we are optimisitic here */
1093 asoc->peer_supports_nat = 0;
1094 asoc->sent_queue_retran_cnt = 0;
1095
1096 /* for CMT */
1097 asoc->last_net_cmt_send_started = NULL;
1098
1099 /* This will need to be adjusted */
1100 asoc->last_acked_seq = asoc->init_seq_number - 1;
1101 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1102 asoc->asconf_seq_in = asoc->last_acked_seq;
1103
1104 /* here we are different, we hold the next one we expect */
1105 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1106
1107 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1108 asoc->initial_rto = inp->sctp_ep.initial_rto;
1109
1110 asoc->max_init_times = inp->sctp_ep.max_init_times;
1111 asoc->max_send_times = inp->sctp_ep.max_send_times;
1112 asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1113 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1114 asoc->free_chunk_cnt = 0;
1115
1116 asoc->iam_blocking = 0;
1117 asoc->context = inp->sctp_context;
1118 asoc->local_strreset_support = inp->local_strreset_support;
1119 asoc->def_send = inp->def_send;
1120 asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1121 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1122 asoc->pr_sctp_cnt = 0;
1123 asoc->total_output_queue_size = 0;
1124
1125 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1126 asoc->scope.ipv6_addr_legal = 1;
1127 if (SCTP_IPV6_V6ONLY(inp) == 0) {
1128 asoc->scope.ipv4_addr_legal = 1;
1129 } else {
1130 asoc->scope.ipv4_addr_legal = 0;
1131 }
1132#if defined(__Userspace__)
1133 asoc->scope.conn_addr_legal = 0;
1134#endif
1135 } else {
1136 asoc->scope.ipv6_addr_legal = 0;
1137#if defined(__Userspace__)
1138 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
1139 asoc->scope.conn_addr_legal = 1;
1140 asoc->scope.ipv4_addr_legal = 0;
1141 } else {
1142 asoc->scope.conn_addr_legal = 0;
1143 asoc->scope.ipv4_addr_legal = 1;
1144 }
1145#else
1146 asoc->scope.ipv4_addr_legal = 1;
1147#endif
1148 }
1149
1150 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1151 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1152
1153 asoc->smallest_mtu = inp->sctp_frag_point;
1154 asoc->minrto = inp->sctp_ep.sctp_minrto;
1155 asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1156
1157 asoc->stream_locked_on = 0;
1158 asoc->ecn_echo_cnt_onq = 0;
1159 asoc->stream_locked = 0;
1160
1161 asoc->send_sack = 1;
1162
1163 LIST_INIT(&asoc->sctp_restricted_addrs);
1164
1165 TAILQ_INIT(&asoc->nets);
1166 TAILQ_INIT(&asoc->pending_reply_queue);
1167 TAILQ_INIT(&asoc->asconf_ack_sent);
1168 /* Setup to fill the hb random cache at first HB */
1169 asoc->hb_random_idx = 4;
1170
1171 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1172
1173 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1174 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1175
1176 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1177 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1178
1179 /*
1180 * Now the stream parameters, here we allocate space for all streams
1181 * that we request by default.
1182 */
1183 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1184 o_strms;
1185 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1186 asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1187 SCTP_M_STRMO);
1188 if (asoc->strmout == NULL) {
1189 /* big trouble no memory */
1190 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1191 return (ENOMEM);
1192 }
1193 for (i = 0; i < asoc->streamoutcnt; i++) {
1194 /*
1195 * inbound side must be set to 0xffff, also NOTE when we get
1196 * the INIT-ACK back (for INIT sender) we MUST reduce the
1197 * count (streamoutcnt) but first check if we sent to any of
1198 * the upper streams that were dropped (if some were). Those
1199 * that were dropped must be notified to the upper layer as
1200 * failed to send.
1201 */
1202 asoc->strmout[i].next_mid_ordered = 0;
1203 asoc->strmout[i].next_mid_unordered = 0;
1204 TAILQ_INIT(&asoc->strmout[i].outqueue);
1205 asoc->strmout[i].chunks_on_queues = 0;
1206#if defined(SCTP_DETAILED_STR_STATS)
1207 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1208 asoc->strmout[i].abandoned_sent[j] = 0;
1209 asoc->strmout[i].abandoned_unsent[j] = 0;
1210 }
1211#else
1212 asoc->strmout[i].abandoned_sent[0] = 0;
1213 asoc->strmout[i].abandoned_unsent[0] = 0;
1214#endif
1215 asoc->strmout[i].sid = i;
1216 asoc->strmout[i].last_msg_incomplete = 0;
1217 asoc->strmout[i].state = SCTP_STREAM_OPENING;
1218 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1219 }
1220 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1221
1222 /* Now the mapping array */
1223 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1224 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1225 SCTP_M_MAP);
1226 if (asoc->mapping_array == NULL) {
1227 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1228 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1229 return (ENOMEM);
1230 }
1231 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1232 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1233 SCTP_M_MAP);
1234 if (asoc->nr_mapping_array == NULL) {
1235 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1236 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1237 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1238 return (ENOMEM);
1239 }
1240 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1241
1242 /* Now the init of the other outqueues */
1243 TAILQ_INIT(&asoc->free_chunks);
1244 TAILQ_INIT(&asoc->control_send_queue);
1245 TAILQ_INIT(&asoc->asconf_send_queue);
1246 TAILQ_INIT(&asoc->send_queue);
1247 TAILQ_INIT(&asoc->sent_queue);
1248 TAILQ_INIT(&asoc->resetHead);
1249 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1250 TAILQ_INIT(&asoc->asconf_queue);
1251 /* authentication fields */
1252 asoc->authinfo.random = NULL;
1253 asoc->authinfo.active_keyid = 0;
1254 asoc->authinfo.assoc_key = NULL;
1255 asoc->authinfo.assoc_keyid = 0;
1256 asoc->authinfo.recv_key = NULL;
1257 asoc->authinfo.recv_keyid = 0;
1258 LIST_INIT(&asoc->shared_keys);
1259 asoc->marked_retrans = 0;
1260 asoc->port = inp->sctp_ep.port;
1261 asoc->timoinit = 0;
1262 asoc->timodata = 0;
1263 asoc->timosack = 0;
1264 asoc->timoshutdown = 0;
1265 asoc->timoheartbeat = 0;
1266 asoc->timocookie = 0;
1267 asoc->timoshutdownack = 0;
1268 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1269 asoc->discontinuity_time = asoc->start_time;
1270 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1271 asoc->abandoned_unsent[i] = 0;
1272 asoc->abandoned_sent[i] = 0;
1273 }
1274 /* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
1275 * the association is freed.
1276 */
1277 return (0);
1278}
1279
1280void
1281sctp_print_mapping_array(struct sctp_association *asoc)
1282{
1283 unsigned int i, limit;
1284
1285 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1286 asoc->mapping_array_size,
1287 asoc->mapping_array_base_tsn,
1288 asoc->cumulative_tsn,
1289 asoc->highest_tsn_inside_map,
1290 asoc->highest_tsn_inside_nr_map);
1291 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1292 if (asoc->mapping_array[limit - 1] != 0) {
1293 break;
1294 }
1295 }
1296 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1297 for (i = 0; i < limit; i++) {
1298 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1299 }
1300 if (limit % 16)
1301 SCTP_PRINTF("\n");
1302 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1303 if (asoc->nr_mapping_array[limit - 1]) {
1304 break;
1305 }
1306 }
1307 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1308 for (i = 0; i < limit; i++) {
1309 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
1310 }
1311 if (limit % 16)
1312 SCTP_PRINTF("\n");
1313}
1314
1315int
1316sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1317{
1318 /* mapping array needs to grow */
1319 uint8_t *new_array1, *new_array2;
1320 uint32_t new_size;
1321
1322 new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
1323 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1324 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1325 if ((new_array1 == NULL) || (new_array2 == NULL)) {
1326 /* can't get more, forget it */
1327 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1328 if (new_array1) {
1329 SCTP_FREE(new_array1, SCTP_M_MAP);
1330 }
1331 if (new_array2) {
1332 SCTP_FREE(new_array2, SCTP_M_MAP);
1333 }
1334 return (-1);
1335 }
1336 memset(new_array1, 0, new_size);
1337 memset(new_array2, 0, new_size);
1338 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1339 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1340 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1341 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1342 asoc->mapping_array = new_array1;
1343 asoc->nr_mapping_array = new_array2;
1344 asoc->mapping_array_size = new_size;
1345 return (0);
1346}
1347
1348
1349static void
1350sctp_iterator_work(struct sctp_iterator *it)
1351{
1352 int iteration_count = 0;
1353 int inp_skip = 0;
1354 int first_in = 1;
1355 struct sctp_inpcb *tinp;
1356
1357 SCTP_INP_INFO_RLOCK();
1358 SCTP_ITERATOR_LOCK();
1359 sctp_it_ctl.cur_it = it;
1360 if (it->inp) {
1361 SCTP_INP_RLOCK(it->inp);
1362 SCTP_INP_DECR_REF(it->inp);
1363 }
1364 if (it->inp == NULL) {
1365 /* iterator is complete */
1366done_with_iterator:
1367 sctp_it_ctl.cur_it = NULL;
1368 SCTP_ITERATOR_UNLOCK();
1369 SCTP_INP_INFO_RUNLOCK();
1370 if (it->function_atend != NULL) {
1371 (*it->function_atend) (it->pointer, it->val);
1372 }
1373 SCTP_FREE(it, SCTP_M_ITER);
1374 return;
1375 }
1376select_a_new_ep:
1377 if (first_in) {
1378 first_in = 0;
1379 } else {
1380 SCTP_INP_RLOCK(it->inp);
1381 }
1382 while (((it->pcb_flags) &&
1383 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1384 ((it->pcb_features) &&
1385 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1386 /* endpoint flags or features don't match, so keep looking */
1387 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1388 SCTP_INP_RUNLOCK(it->inp);
1389 goto done_with_iterator;
1390 }
1391 tinp = it->inp;
1392 it->inp = LIST_NEXT(it->inp, sctp_list);
1393 SCTP_INP_RUNLOCK(tinp);
1394 if (it->inp == NULL) {
1395 goto done_with_iterator;
1396 }
1397 SCTP_INP_RLOCK(it->inp);
1398 }
1399 /* now go through each assoc which is in the desired state */
1400 if (it->done_current_ep == 0) {
1401 if (it->function_inp != NULL)
1402 inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
1403 it->done_current_ep = 1;
1404 }
1405 if (it->stcb == NULL) {
1406 /* run the per instance function */
1407 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1408 }
1409 if ((inp_skip) || it->stcb == NULL) {
1410 if (it->function_inp_end != NULL) {
1411 inp_skip = (*it->function_inp_end)(it->inp,
1412 it->pointer,
1413 it->val);
1414 }
1415 SCTP_INP_RUNLOCK(it->inp);
1416 goto no_stcb;
1417 }
1418 while (it->stcb) {
1419 SCTP_TCB_LOCK(it->stcb);
1420 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1421 /* not in the right state... keep looking */
1422 SCTP_TCB_UNLOCK(it->stcb);
1423 goto next_assoc;
1424 }
1425 /* see if we have limited out the iterator loop */
1426 iteration_count++;
1427 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1428 /* Pause to let others grab the lock */
1429 atomic_add_int(&it->stcb->asoc.refcnt, 1);
1430 SCTP_TCB_UNLOCK(it->stcb);
1431 SCTP_INP_INCR_REF(it->inp);
1432 SCTP_INP_RUNLOCK(it->inp);
1433 SCTP_ITERATOR_UNLOCK();
1434 SCTP_INP_INFO_RUNLOCK();
1435 SCTP_INP_INFO_RLOCK();
1436 SCTP_ITERATOR_LOCK();
1437 if (sctp_it_ctl.iterator_flags) {
1438 /* We won't be staying here */
1439 SCTP_INP_DECR_REF(it->inp);
1440 atomic_add_int(&it->stcb->asoc.refcnt, -1);
1441#if !defined(__FreeBSD__)
1442 if (sctp_it_ctl.iterator_flags &
1443 SCTP_ITERATOR_MUST_EXIT) {
1444 goto done_with_iterator;
1445 }
1446#endif
1447 if (sctp_it_ctl.iterator_flags &
1448 SCTP_ITERATOR_STOP_CUR_IT) {
1449 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1450 goto done_with_iterator;
1451 }
1452 if (sctp_it_ctl.iterator_flags &
1453 SCTP_ITERATOR_STOP_CUR_INP) {
1454 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1455 goto no_stcb;
1456 }
1457 /* If we reach here huh? */
1458 SCTP_PRINTF("Unknown it ctl flag %x\n",
1459 sctp_it_ctl.iterator_flags);
1460 sctp_it_ctl.iterator_flags = 0;
1461 }
1462 SCTP_INP_RLOCK(it->inp);
1463 SCTP_INP_DECR_REF(it->inp);
1464 SCTP_TCB_LOCK(it->stcb);
1465 atomic_add_int(&it->stcb->asoc.refcnt, -1);
1466 iteration_count = 0;
1467 }
1468
1469 /* run function on this one */
1470 (*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
1471
1472 /*
1473 * we lie here, it really needs to have its own type but
1474 * first I must verify that this won't effect things :-0
1475 */
1476 if (it->no_chunk_output == 0)
1477 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1478
1479 SCTP_TCB_UNLOCK(it->stcb);
1480 next_assoc:
1481 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1482 if (it->stcb == NULL) {
1483 /* Run last function */
1484 if (it->function_inp_end != NULL) {
1485 inp_skip = (*it->function_inp_end)(it->inp,
1486 it->pointer,
1487 it->val);
1488 }
1489 }
1490 }
1491 SCTP_INP_RUNLOCK(it->inp);
1492 no_stcb:
1493 /* done with all assocs on this endpoint, move on to next endpoint */
1494 it->done_current_ep = 0;
1495 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1496 it->inp = NULL;
1497 } else {
1498 it->inp = LIST_NEXT(it->inp, sctp_list);
1499 }
1500 if (it->inp == NULL) {
1501 goto done_with_iterator;
1502 }
1503 goto select_a_new_ep;
1504}
1505
1506void
1507sctp_iterator_worker(void)
1508{
1509 struct sctp_iterator *it, *nit;
1510
1511 /* This function is called with the WQ lock in place */
1512
1513 sctp_it_ctl.iterator_running = 1;
1514 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1515 /* now lets work on this one */
1516 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1517 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1518#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1519 CURVNET_SET(it->vn);
1520#endif
1521 sctp_iterator_work(it);
1522#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1523 CURVNET_RESTORE();
1524#endif
1525 SCTP_IPI_ITERATOR_WQ_LOCK();
1526#if !defined(__FreeBSD__)
1527 if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1528 break;
1529 }
1530#endif
1531 /*sa_ignore FREED_MEMORY*/
1532 }
1533 sctp_it_ctl.iterator_running = 0;
1534 return;
1535}
1536
1537
1538static void
1539sctp_handle_addr_wq(void)
1540{
1541 /* deal with the ADDR wq from the rtsock calls */
1542 struct sctp_laddr *wi, *nwi;
1543 struct sctp_asconf_iterator *asc;
1544
1545 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1546 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1547 if (asc == NULL) {
1548 /* Try later, no memory */
1549 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1550 (struct sctp_inpcb *)NULL,
1551 (struct sctp_tcb *)NULL,
1552 (struct sctp_nets *)NULL);
1553 return;
1554 }
1555 LIST_INIT(&asc->list_of_work);
1556 asc->cnt = 0;
1557
1558 SCTP_WQ_ADDR_LOCK();
1559 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1560 LIST_REMOVE(wi, sctp_nxt_addr);
1561 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1562 asc->cnt++;
1563 }
1564 SCTP_WQ_ADDR_UNLOCK();
1565
1566 if (asc->cnt == 0) {
1567 SCTP_FREE(asc, SCTP_M_ASC_IT);
1568 } else {
1569 int ret;
1570
1571 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1572 sctp_asconf_iterator_stcb,
1573 NULL, /* No ep end for boundall */
1574 SCTP_PCB_FLAGS_BOUNDALL,
1575 SCTP_PCB_ANY_FEATURES,
1576 SCTP_ASOC_ANY_STATE,
1577 (void *)asc, 0,
1578 sctp_asconf_iterator_end, NULL, 0);
1579 if (ret) {
1580 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1581 /* Freeing if we are stopping or put back on the addr_wq. */
1582 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1583 sctp_asconf_iterator_end(asc, 0);
1584 } else {
1585 SCTP_WQ_ADDR_LOCK();
1586 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1587 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1588 }
1589 SCTP_WQ_ADDR_UNLOCK();
1590 SCTP_FREE(asc, SCTP_M_ASC_IT);
1591 }
1592 }
1593 }
1594}
1595
1596void
1597sctp_timeout_handler(void *t)
1598{
1599 struct sctp_inpcb *inp;
1600 struct sctp_tcb *stcb;
1601 struct sctp_nets *net;
1602 struct sctp_timer *tmr;
1603 struct mbuf *op_err;
1604#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1605 struct socket *so;
1606#endif
1607 int did_output;
1608 int type;
1609
1610 tmr = (struct sctp_timer *)t;
1611 inp = (struct sctp_inpcb *)tmr->ep;
1612 stcb = (struct sctp_tcb *)tmr->tcb;
1613 net = (struct sctp_nets *)tmr->net;
1614#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1615 CURVNET_SET((struct vnet *)tmr->vnet);
1616#endif
1617 did_output = 1;
1618
1619#ifdef SCTP_AUDITING_ENABLED
1620 sctp_audit_log(0xF0, (uint8_t) tmr->type);
1621 sctp_auditing(3, inp, stcb, net);
1622#endif
1623
1624 /* sanity checks... */
1625 if (tmr->self != (void *)tmr) {
1626 /*
1627 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1628 * (void *)tmr);
1629 */
1630#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1631 CURVNET_RESTORE();
1632#endif
1633 return;
1634 }
1635 tmr->stopped_from = 0xa001;
1636 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1637 /*
1638 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1639 * tmr->type);
1640 */
1641#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1642 CURVNET_RESTORE();
1643#endif
1644 return;
1645 }
1646 tmr->stopped_from = 0xa002;
1647 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1648#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1649 CURVNET_RESTORE();
1650#endif
1651 return;
1652 }
1653 /* if this is an iterator timeout, get the struct and clear inp */
1654 tmr->stopped_from = 0xa003;
1655 if (inp) {
1656 SCTP_INP_INCR_REF(inp);
1657 if ((inp->sctp_socket == NULL) &&
1658 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1659 (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1660 (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1661 (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1662 (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1663 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1664 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1665 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1666 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1667 ) {
1668 SCTP_INP_DECR_REF(inp);
1669#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1670 CURVNET_RESTORE();
1671#endif
1672 return;
1673 }
1674 }
1675 tmr->stopped_from = 0xa004;
1676 if (stcb) {
1677 atomic_add_int(&stcb->asoc.refcnt, 1);
1678 if (stcb->asoc.state == 0) {
1679 atomic_add_int(&stcb->asoc.refcnt, -1);
1680 if (inp) {
1681 SCTP_INP_DECR_REF(inp);
1682 }
1683#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1684 CURVNET_RESTORE();
1685#endif
1686 return;
1687 }
1688 }
1689 type = tmr->type;
1690 tmr->stopped_from = 0xa005;
1691 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1692 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1693 if (inp) {
1694 SCTP_INP_DECR_REF(inp);
1695 }
1696 if (stcb) {
1697 atomic_add_int(&stcb->asoc.refcnt, -1);
1698 }
1699#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1700 CURVNET_RESTORE();
1701#endif
1702 return;
1703 }
1704 tmr->stopped_from = 0xa006;
1705
1706 if (stcb) {
1707 SCTP_TCB_LOCK(stcb);
1708 atomic_add_int(&stcb->asoc.refcnt, -1);
1709 if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1710 ((stcb->asoc.state == 0) ||
1711 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1712 SCTP_TCB_UNLOCK(stcb);
1713 if (inp) {
1714 SCTP_INP_DECR_REF(inp);
1715 }
1716#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
1717 CURVNET_RESTORE();
1718#endif
1719 return;
1720 }
1721 }
1722 /* record in stopped what t-o occurred */
1723 tmr->stopped_from = type;
1724
1725 /* mark as being serviced now */
1726 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1727 /*
1728 * Callout has been rescheduled.
1729 */
1730 goto get_out;
1731 }
1732 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1733 /*
1734 * Not active, so no action.
1735 */
1736 goto get_out;
1737 }
1738 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1739
1740 /* call the handler for the appropriate timer type */
1741 switch (type) {
1742 case SCTP_TIMER_TYPE_ZERO_COPY:
1743 if (inp == NULL) {
1744 break;
1745 }
1746 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1747 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1748 }
1749 break;
1750 case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1751 if (inp == NULL) {
1752 break;
1753 }
1754 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1755 SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1756 }
1757 break;
1758 case SCTP_TIMER_TYPE_ADDR_WQ:
1759 sctp_handle_addr_wq();
1760 break;
1761 case SCTP_TIMER_TYPE_SEND:
1762 if ((stcb == NULL) || (inp == NULL)) {
1763 break;
1764 }
1765 SCTP_STAT_INCR(sctps_timodata);
1766 stcb->asoc.timodata++;
1767 stcb->asoc.num_send_timers_up--;
1768 if (stcb->asoc.num_send_timers_up < 0) {
1769 stcb->asoc.num_send_timers_up = 0;
1770 }
1771 SCTP_TCB_LOCK_ASSERT(stcb);
1772 if (sctp_t3rxt_timer(inp, stcb, net)) {
1773 /* no need to unlock on tcb its gone */
1774
1775 goto out_decr;
1776 }
1777 SCTP_TCB_LOCK_ASSERT(stcb);
1778#ifdef SCTP_AUDITING_ENABLED
1779 sctp_auditing(4, inp, stcb, net);
1780#endif
1781 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1782 if ((stcb->asoc.num_send_timers_up == 0) &&
1783 (stcb->asoc.sent_queue_cnt > 0)) {
1784 struct sctp_tmit_chunk *chk;
1785
1786 /*
1787 * safeguard. If there on some on the sent queue
1788 * somewhere but no timers running something is
1789 * wrong... so we start a timer on the first chunk
1790 * on the send queue on whatever net it is sent to.
1791 */
1792 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1793 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1794 chk->whoTo);
1795 }
1796 break;
1797 case SCTP_TIMER_TYPE_INIT:
1798 if ((stcb == NULL) || (inp == NULL)) {
1799 break;
1800 }
1801 SCTP_STAT_INCR(sctps_timoinit);
1802 stcb->asoc.timoinit++;
1803 if (sctp_t1init_timer(inp, stcb, net)) {
1804 /* no need to unlock on tcb its gone */
1805 goto out_decr;
1806 }
1807 /* We do output but not here */
1808 did_output = 0;
1809 break;
1810 case SCTP_TIMER_TYPE_RECV:
1811 if ((stcb == NULL) || (inp == NULL)) {
1812 break;
1813 }
1814 SCTP_STAT_INCR(sctps_timosack);
1815 stcb->asoc.timosack++;
1816 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1817#ifdef SCTP_AUDITING_ENABLED
1818 sctp_auditing(4, inp, stcb, net);
1819#endif
1820 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1821 break;
1822 case SCTP_TIMER_TYPE_SHUTDOWN:
1823 if ((stcb == NULL) || (inp == NULL)) {
1824 break;
1825 }
1826 if (sctp_shutdown_timer(inp, stcb, net)) {
1827 /* no need to unlock on tcb its gone */
1828 goto out_decr;
1829 }
1830 SCTP_STAT_INCR(sctps_timoshutdown);
1831 stcb->asoc.timoshutdown++;
1832#ifdef SCTP_AUDITING_ENABLED
1833 sctp_auditing(4, inp, stcb, net);
1834#endif
1835 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1836 break;
1837 case SCTP_TIMER_TYPE_HEARTBEAT:
1838 if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1839 break;
1840 }
1841 SCTP_STAT_INCR(sctps_timoheartbeat);
1842 stcb->asoc.timoheartbeat++;
1843 if (sctp_heartbeat_timer(inp, stcb, net)) {
1844 /* no need to unlock on tcb its gone */
1845 goto out_decr;
1846 }
1847#ifdef SCTP_AUDITING_ENABLED
1848 sctp_auditing(4, inp, stcb, net);
1849#endif
1850 if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1851 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1852 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1853 }
1854 break;
1855 case SCTP_TIMER_TYPE_COOKIE:
1856 if ((stcb == NULL) || (inp == NULL)) {
1857 break;
1858 }
1859
1860 if (sctp_cookie_timer(inp, stcb, net)) {
1861 /* no need to unlock on tcb its gone */
1862 goto out_decr;
1863 }
1864 SCTP_STAT_INCR(sctps_timocookie);
1865 stcb->asoc.timocookie++;
1866#ifdef SCTP_AUDITING_ENABLED
1867 sctp_auditing(4, inp, stcb, net);
1868#endif
1869 /*
1870 * We consider T3 and Cookie timer pretty much the same with
1871 * respect to where from in chunk_output.
1872 */
1873 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1874 break;
1875 case SCTP_TIMER_TYPE_NEWCOOKIE:
1876 {
1877 struct timeval tv;
1878 int i, secret;
1879 if (inp == NULL) {
1880 break;
1881 }
1882 SCTP_STAT_INCR(sctps_timosecret);
1883 (void)SCTP_GETTIME_TIMEVAL(&tv);
1884 SCTP_INP_WLOCK(inp);
1885 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1886 inp->sctp_ep.last_secret_number =
1887 inp->sctp_ep.current_secret_number;
1888 inp->sctp_ep.current_secret_number++;
1889 if (inp->sctp_ep.current_secret_number >=
1890 SCTP_HOW_MANY_SECRETS) {
1891 inp->sctp_ep.current_secret_number = 0;
1892 }
1893 secret = (int)inp->sctp_ep.current_secret_number;
1894 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1895 inp->sctp_ep.secret_key[secret][i] =
1896 sctp_select_initial_TSN(&inp->sctp_ep);
1897 }
1898 SCTP_INP_WUNLOCK(inp);
1899 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1900 }
1901 did_output = 0;
1902 break;
1903 case SCTP_TIMER_TYPE_PATHMTURAISE:
1904 if ((stcb == NULL) || (inp == NULL)) {
1905 break;
1906 }
1907 SCTP_STAT_INCR(sctps_timopathmtu);
1908 sctp_pathmtu_timer(inp, stcb, net);
1909 did_output = 0;
1910 break;
1911 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1912 if ((stcb == NULL) || (inp == NULL)) {
1913 break;
1914 }
1915 if (sctp_shutdownack_timer(inp, stcb, net)) {
1916 /* no need to unlock on tcb its gone */
1917 goto out_decr;
1918 }
1919 SCTP_STAT_INCR(sctps_timoshutdownack);
1920 stcb->asoc.timoshutdownack++;
1921#ifdef SCTP_AUDITING_ENABLED
1922 sctp_auditing(4, inp, stcb, net);
1923#endif
1924 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1925 break;
1926 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1927 if ((stcb == NULL) || (inp == NULL)) {
1928 break;
1929 }
1930 SCTP_STAT_INCR(sctps_timoshutdownguard);
1931 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1932 "Shutdown guard timer expired");
1933 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1934 /* no need to unlock on tcb its gone */
1935 goto out_decr;
1936
1937 case SCTP_TIMER_TYPE_STRRESET:
1938 if ((stcb == NULL) || (inp == NULL)) {
1939 break;
1940 }
1941 if (sctp_strreset_timer(inp, stcb, net)) {
1942 /* no need to unlock on tcb its gone */
1943 goto out_decr;
1944 }
1945 SCTP_STAT_INCR(sctps_timostrmrst);
1946 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1947 break;
1948 case SCTP_TIMER_TYPE_ASCONF:
1949 if ((stcb == NULL) || (inp == NULL)) {
1950 break;
1951 }
1952 if (sctp_asconf_timer(inp, stcb, net)) {
1953 /* no need to unlock on tcb its gone */
1954 goto out_decr;
1955 }
1956 SCTP_STAT_INCR(sctps_timoasconf);
1957#ifdef SCTP_AUDITING_ENABLED
1958 sctp_auditing(4, inp, stcb, net);
1959#endif
1960 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1961 break;
1962 case SCTP_TIMER_TYPE_PRIM_DELETED:
1963 if ((stcb == NULL) || (inp == NULL)) {
1964 break;
1965 }
1966 sctp_delete_prim_timer(inp, stcb, net);
1967 SCTP_STAT_INCR(sctps_timodelprim);
1968 break;
1969
1970 case SCTP_TIMER_TYPE_AUTOCLOSE:
1971 if ((stcb == NULL) || (inp == NULL)) {
1972 break;
1973 }
1974 SCTP_STAT_INCR(sctps_timoautoclose);
1975 sctp_autoclose_timer(inp, stcb, net);
1976 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1977 did_output = 0;
1978 break;
1979 case SCTP_TIMER_TYPE_ASOCKILL:
1980 if ((stcb == NULL) || (inp == NULL)) {
1981 break;
1982 }
1983 SCTP_STAT_INCR(sctps_timoassockill);
1984 /* Can we free it yet? */
1985 SCTP_INP_DECR_REF(inp);
1986 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1987 SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1988#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1989 so = SCTP_INP_SO(inp);
1990 atomic_add_int(&stcb->asoc.refcnt, 1);
1991 SCTP_TCB_UNLOCK(stcb);
1992 SCTP_SOCKET_LOCK(so, 1);
1993 SCTP_TCB_LOCK(stcb);
1994 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1995#endif
1996 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1997 SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1998#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1999 SCTP_SOCKET_UNLOCK(so, 1);
2000#endif
2001 /*
2002 * free asoc, always unlocks (or destroy's) so prevent
2003 * duplicate unlock or unlock of a free mtx :-0
2004 */
2005 stcb = NULL;
2006 goto out_no_decr;
2007 case SCTP_TIMER_TYPE_INPKILL:
2008 SCTP_STAT_INCR(sctps_timoinpkill);
2009 if (inp == NULL) {
2010 break;
2011 }
2012 /*
2013 * special case, take away our increment since WE are the
2014 * killer
2015 */
2016 SCTP_INP_DECR_REF(inp);
2017 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
2018 SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
2019#if defined(__APPLE__)
2020 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
2021#endif
2022 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
2023 SCTP_CALLED_FROM_INPKILL_TIMER);
2024#if defined(__APPLE__)
2025 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
2026#endif
2027 inp = NULL;
2028 goto out_no_decr;
2029 default:
2030 SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
2031 type);
2032 break;
2033 }
2034#ifdef SCTP_AUDITING_ENABLED
2035 sctp_audit_log(0xF1, (uint8_t) type);
2036 if (inp)
2037 sctp_auditing(5, inp, stcb, net);
2038#endif
2039 if ((did_output) && stcb) {
2040 /*
2041 * Now we need to clean up the control chunk chain if an
2042 * ECNE is on it. It must be marked as UNSENT again so next
2043 * call will continue to send it until such time that we get
2044 * a CWR, to remove it. It is, however, less likely that we
2045 * will find a ecn echo on the chain though.
2046 */
2047 sctp_fix_ecn_echo(&stcb->asoc);
2048 }
2049get_out:
2050 if (stcb) {
2051 SCTP_TCB_UNLOCK(stcb);
2052 }
2053
2054out_decr:
2055 if (inp) {
2056 SCTP_INP_DECR_REF(inp);
2057 }
2058
2059out_no_decr:
2060 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
2061#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
2062 CURVNET_RESTORE();
2063#endif
2064}
2065
2066void
2067sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2068 struct sctp_nets *net)
2069{
2070 uint32_t to_ticks;
2071 struct sctp_timer *tmr;
2072
2073 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
2074 return;
2075
2076 tmr = NULL;
2077 if (stcb) {
2078 SCTP_TCB_LOCK_ASSERT(stcb);
2079 }
2080 switch (t_type) {
2081 case SCTP_TIMER_TYPE_ZERO_COPY:
2082 tmr = &inp->sctp_ep.zero_copy_timer;
2083 to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
2084 break;
2085 case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2086 tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2087 to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
2088 break;
2089 case SCTP_TIMER_TYPE_ADDR_WQ:
2090 /* Only 1 tick away :-) */
2091 tmr = &SCTP_BASE_INFO(addr_wq_timer);
2092 to_ticks = SCTP_ADDRESS_TICK_DELAY;
2093 break;
2094 case SCTP_TIMER_TYPE_SEND:
2095 /* Here we use the RTO timer */
2096 {
2097 int rto_val;
2098
2099 if ((stcb == NULL) || (net == NULL)) {
2100 return;
2101 }
2102 tmr = &net->rxt_timer;
2103 if (net->RTO == 0) {
2104 rto_val = stcb->asoc.initial_rto;
2105 } else {
2106 rto_val = net->RTO;
2107 }
2108 to_ticks = MSEC_TO_TICKS(rto_val);
2109 }
2110 break;
2111 case SCTP_TIMER_TYPE_INIT:
2112 /*
2113 * Here we use the INIT timer default usually about 1
2114 * minute.
2115 */
2116 if ((stcb == NULL) || (net == NULL)) {
2117 return;
2118 }
2119 tmr = &net->rxt_timer;
2120 if (net->RTO == 0) {
2121 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2122 } else {
2123 to_ticks = MSEC_TO_TICKS(net->RTO);
2124 }
2125 break;
2126 case SCTP_TIMER_TYPE_RECV:
2127 /*
2128 * Here we use the Delayed-Ack timer value from the inp
2129 * ususually about 200ms.
2130 */
2131 if (stcb == NULL) {
2132 return;
2133 }
2134 tmr = &stcb->asoc.dack_timer;
2135 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2136 break;
2137 case SCTP_TIMER_TYPE_SHUTDOWN:
2138 /* Here we use the RTO of the destination. */
2139 if ((stcb == NULL) || (net == NULL)) {
2140 return;
2141 }
2142 if (net->RTO == 0) {
2143 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2144 } else {
2145 to_ticks = MSEC_TO_TICKS(net->RTO);
2146 }
2147 tmr = &net->rxt_timer;
2148 break;
2149 case SCTP_TIMER_TYPE_HEARTBEAT:
2150 /*
2151 * the net is used here so that we can add in the RTO. Even
2152 * though we use a different timer. We also add the HB timer
2153 * PLUS a random jitter.
2154 */
2155 if ((stcb == NULL) || (net == NULL)) {
2156 return;
2157 } else {
2158 uint32_t rndval;
2159 uint32_t jitter;
2160
2161 if ((net->dest_state & SCTP_ADDR_NOHB) &&
2162 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2163 return;
2164 }
2165 if (net->RTO == 0) {
2166 to_ticks = stcb->asoc.initial_rto;
2167 } else {
2168 to_ticks = net->RTO;
2169 }
2170 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2171 jitter = rndval % to_ticks;
2172 if (jitter >= (to_ticks >> 1)) {
2173 to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2174 } else {
2175 to_ticks = to_ticks - jitter;
2176 }
2177 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2178 !(net->dest_state & SCTP_ADDR_PF)) {
2179 to_ticks += net->heart_beat_delay;
2180 }
2181 /*
2182 * Now we must convert the to_ticks that are now in
2183 * ms to ticks.
2184 */
2185 to_ticks = MSEC_TO_TICKS(to_ticks);
2186 tmr = &net->hb_timer;
2187 }
2188 break;
2189 case SCTP_TIMER_TYPE_COOKIE:
2190 /*
2191 * Here we can use the RTO timer from the network since one
2192 * RTT was compelete. If a retran happened then we will be
2193 * using the RTO initial value.
2194 */
2195 if ((stcb == NULL) || (net == NULL)) {
2196 return;
2197 }
2198 if (net->RTO == 0) {
2199 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2200 } else {
2201 to_ticks = MSEC_TO_TICKS(net->RTO);
2202 }
2203 tmr = &net->rxt_timer;
2204 break;
2205 case SCTP_TIMER_TYPE_NEWCOOKIE:
2206 /*
2207 * nothing needed but the endpoint here ususually about 60
2208 * minutes.
2209 */
2210 tmr = &inp->sctp_ep.signature_change;
2211 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2212 break;
2213 case SCTP_TIMER_TYPE_ASOCKILL:
2214 if (stcb == NULL) {
2215 return;
2216 }
2217 tmr = &stcb->asoc.strreset_timer;
2218 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2219 break;
2220 case SCTP_TIMER_TYPE_INPKILL:
2221 /*
2222 * The inp is setup to die. We re-use the signature_chage
2223 * timer since that has stopped and we are in the GONE
2224 * state.
2225 */
2226 tmr = &inp->sctp_ep.signature_change;
2227 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2228 break;
2229 case SCTP_TIMER_TYPE_PATHMTURAISE:
2230 /*
2231 * Here we use the value found in the EP for PMTU ususually
2232 * about 10 minutes.
2233 */
2234 if ((stcb == NULL) || (net == NULL)) {
2235 return;
2236 }
2237 if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2238 return;
2239 }
2240 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2241 tmr = &net->pmtu_timer;
2242 break;
2243 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2244 /* Here we use the RTO of the destination */
2245 if ((stcb == NULL) || (net == NULL)) {
2246 return;
2247 }
2248 if (net->RTO == 0) {
2249 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2250 } else {
2251 to_ticks = MSEC_TO_TICKS(net->RTO);
2252 }
2253 tmr = &net->rxt_timer;
2254 break;
2255 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2256 /*
2257 * Here we use the endpoints shutdown guard timer usually
2258 * about 3 minutes.
2259 */
2260 if (stcb == NULL) {
2261 return;
2262 }
2263 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2264 to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2265 } else {
2266 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2267 }
2268 tmr = &stcb->asoc.shut_guard_timer;
2269 break;
2270 case SCTP_TIMER_TYPE_STRRESET:
2271 /*
2272 * Here the timer comes from the stcb but its value is from
2273 * the net's RTO.
2274 */
2275 if ((stcb == NULL) || (net == NULL)) {
2276 return;
2277 }
2278 if (net->RTO == 0) {
2279 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2280 } else {
2281 to_ticks = MSEC_TO_TICKS(net->RTO);
2282 }
2283 tmr = &stcb->asoc.strreset_timer;
2284 break;
2285 case SCTP_TIMER_TYPE_ASCONF:
2286 /*
2287 * Here the timer comes from the stcb but its value is from
2288 * the net's RTO.
2289 */
2290 if ((stcb == NULL) || (net == NULL)) {
2291 return;
2292 }
2293 if (net->RTO == 0) {
2294 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2295 } else {
2296 to_ticks = MSEC_TO_TICKS(net->RTO);
2297 }
2298 tmr = &stcb->asoc.asconf_timer;
2299 break;
2300 case SCTP_TIMER_TYPE_PRIM_DELETED:
2301 if ((stcb == NULL) || (net != NULL)) {
2302 return;
2303 }
2304 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2305 tmr = &stcb->asoc.delete_prim_timer;
2306 break;
2307 case SCTP_TIMER_TYPE_AUTOCLOSE:
2308 if (stcb == NULL) {
2309 return;
2310 }
2311 if (stcb->asoc.sctp_autoclose_ticks == 0) {
2312 /*
2313 * Really an error since stcb is NOT set to
2314 * autoclose
2315 */
2316 return;
2317 }
2318 to_ticks = stcb->asoc.sctp_autoclose_ticks;
2319 tmr = &stcb->asoc.autoclose_timer;
2320 break;
2321 default:
2322 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2323 __func__, t_type);
2324 return;
2325 break;
2326 }
2327 if ((to_ticks <= 0) || (tmr == NULL)) {
2328 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2329 __func__, t_type, to_ticks, (void *)tmr);
2330 return;
2331 }
2332 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2333 /*
2334 * we do NOT allow you to have it already running. if it is
2335 * we leave the current one up unchanged
2336 */
2337 return;
2338 }
2339 /* At this point we can proceed */
2340 if (t_type == SCTP_TIMER_TYPE_SEND) {
2341 stcb->asoc.num_send_timers_up++;
2342 }
2343 tmr->stopped_from = 0;
2344 tmr->type = t_type;
2345 tmr->ep = (void *)inp;
2346 tmr->tcb = (void *)stcb;
2347 tmr->net = (void *)net;
2348 tmr->self = (void *)tmr;
2349#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
2350 tmr->vnet = (void *)curvnet;
2351#endif
2352#ifndef __Panda__
2353 tmr->ticks = sctp_get_tick_count();
2354#endif
2355 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2356 return;
2357}
2358
2359void
2360sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2361 struct sctp_nets *net, uint32_t from)
2362{
2363 struct sctp_timer *tmr;
2364
2365 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2366 (inp == NULL))
2367 return;
2368
2369 tmr = NULL;
2370 if (stcb) {
2371 SCTP_TCB_LOCK_ASSERT(stcb);
2372 }
2373 switch (t_type) {
2374 case SCTP_TIMER_TYPE_ZERO_COPY:
2375 tmr = &inp->sctp_ep.zero_copy_timer;
2376 break;
2377 case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2378 tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2379 break;
2380 case SCTP_TIMER_TYPE_ADDR_WQ:
2381 tmr = &SCTP_BASE_INFO(addr_wq_timer);
2382 break;
2383 case SCTP_TIMER_TYPE_SEND:
2384 if ((stcb == NULL) || (net == NULL)) {
2385 return;
2386 }
2387 tmr = &net->rxt_timer;
2388 break;
2389 case SCTP_TIMER_TYPE_INIT:
2390 if ((stcb == NULL) || (net == NULL)) {
2391 return;
2392 }
2393 tmr = &net->rxt_timer;
2394 break;
2395 case SCTP_TIMER_TYPE_RECV:
2396 if (stcb == NULL) {
2397 return;
2398 }
2399 tmr = &stcb->asoc.dack_timer;
2400 break;
2401 case SCTP_TIMER_TYPE_SHUTDOWN:
2402 if ((stcb == NULL) || (net == NULL)) {
2403 return;
2404 }
2405 tmr = &net->rxt_timer;
2406 break;
2407 case SCTP_TIMER_TYPE_HEARTBEAT:
2408 if ((stcb == NULL) || (net == NULL)) {
2409 return;
2410 }
2411 tmr = &net->hb_timer;
2412 break;
2413 case SCTP_TIMER_TYPE_COOKIE:
2414 if ((stcb == NULL) || (net == NULL)) {
2415 return;
2416 }
2417 tmr = &net->rxt_timer;
2418 break;
2419 case SCTP_TIMER_TYPE_NEWCOOKIE:
2420 /* nothing needed but the endpoint here */
2421 tmr = &inp->sctp_ep.signature_change;
2422 /*
2423 * We re-use the newcookie timer for the INP kill timer. We
2424 * must assure that we do not kill it by accident.
2425 */
2426 break;
2427 case SCTP_TIMER_TYPE_ASOCKILL:
2428 /*
2429 * Stop the asoc kill timer.
2430 */
2431 if (stcb == NULL) {
2432 return;
2433 }
2434 tmr = &stcb->asoc.strreset_timer;
2435 break;
2436
2437 case SCTP_TIMER_TYPE_INPKILL:
2438 /*
2439 * The inp is setup to die. We re-use the signature_chage
2440 * timer since that has stopped and we are in the GONE
2441 * state.
2442 */
2443 tmr = &inp->sctp_ep.signature_change;
2444 break;
2445 case SCTP_TIMER_TYPE_PATHMTURAISE:
2446 if ((stcb == NULL) || (net == NULL)) {
2447 return;
2448 }
2449 tmr = &net->pmtu_timer;
2450 break;
2451 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2452 if ((stcb == NULL) || (net == NULL)) {
2453 return;
2454 }
2455 tmr = &net->rxt_timer;
2456 break;
2457 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2458 if (stcb == NULL) {
2459 return;
2460 }
2461 tmr = &stcb->asoc.shut_guard_timer;
2462 break;
2463 case SCTP_TIMER_TYPE_STRRESET:
2464 if (stcb == NULL) {
2465 return;
2466 }
2467 tmr = &stcb->asoc.strreset_timer;
2468 break;
2469 case SCTP_TIMER_TYPE_ASCONF:
2470 if (stcb == NULL) {
2471 return;
2472 }
2473 tmr = &stcb->asoc.asconf_timer;
2474 break;
2475 case SCTP_TIMER_TYPE_PRIM_DELETED:
2476 if (stcb == NULL) {
2477 return;
2478 }
2479 tmr = &stcb->asoc.delete_prim_timer;
2480 break;
2481 case SCTP_TIMER_TYPE_AUTOCLOSE:
2482 if (stcb == NULL) {
2483 return;
2484 }
2485 tmr = &stcb->asoc.autoclose_timer;
2486 break;
2487 default:
2488 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2489 __func__, t_type);
2490 break;
2491 }
2492 if (tmr == NULL) {
2493 return;
2494 }
2495 if ((tmr->type != t_type) && tmr->type) {
2496 /*
2497 * Ok we have a timer that is under joint use. Cookie timer
2498 * per chance with the SEND timer. We therefore are NOT
2499 * running the timer that the caller wants stopped. So just
2500 * return.
2501 */
2502 return;
2503 }
2504 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2505 stcb->asoc.num_send_timers_up--;
2506 if (stcb->asoc.num_send_timers_up < 0) {
2507 stcb->asoc.num_send_timers_up = 0;
2508 }
2509 }
2510 tmr->self = NULL;
2511 tmr->stopped_from = from;
2512 (void)SCTP_OS_TIMER_STOP(&tmr->timer);
2513 return;
2514}
2515
2516uint32_t
2517sctp_calculate_len(struct mbuf *m)
2518{
2519 uint32_t tlen = 0;
2520 struct mbuf *at;
2521
2522 at = m;
2523 while (at) {
2524 tlen += SCTP_BUF_LEN(at);
2525 at = SCTP_BUF_NEXT(at);
2526 }
2527 return (tlen);
2528}
2529
2530void
2531sctp_mtu_size_reset(struct sctp_inpcb *inp,
2532 struct sctp_association *asoc, uint32_t mtu)
2533{
2534 /*
2535 * Reset the P-MTU size on this association, this involves changing
2536 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2537 * allow the DF flag to be cleared.
2538 */
2539 struct sctp_tmit_chunk *chk;
2540 unsigned int eff_mtu, ovh;
2541
2542 asoc->smallest_mtu = mtu;
2543 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2544 ovh = SCTP_MIN_OVERHEAD;
2545 } else {
2546 ovh = SCTP_MIN_V4_OVERHEAD;
2547 }
2548 eff_mtu = mtu - ovh;
2549 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2550 if (chk->send_size > eff_mtu) {
2551 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2552 }
2553 }
2554 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2555 if (chk->send_size > eff_mtu) {
2556 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2557 }
2558 }
2559}
2560
2561
2562/*
2563 * given an association and starting time of the current RTT period return
2564 * RTO in number of msecs net should point to the current network
2565 */
2566
2567uint32_t
2568sctp_calculate_rto(struct sctp_tcb *stcb,
2569 struct sctp_association *asoc,
2570 struct sctp_nets *net,
2571 struct timeval *told,
2572 int safe, int rtt_from_sack)
2573{
2574 /*-
2575 * given an association and the starting time of the current RTT
2576 * period (in value1/value2) return RTO in number of msecs.
2577 */
2578 int32_t rtt; /* RTT in ms */
2579 uint32_t new_rto;
2580 int first_measure = 0;
2581 struct timeval now, then, *old;
2582
2583 /* Copy it out for sparc64 */
2584 if (safe == sctp_align_unsafe_makecopy) {
2585 old = &then;
2586 memcpy(&then, told, sizeof(struct timeval));
2587 } else if (safe == sctp_align_safe_nocopy) {
2588 old = told;
2589 } else {
2590 /* error */
2591 SCTP_PRINTF("Huh, bad rto calc call\n");
2592 return (0);
2593 }
2594 /************************/
2595 /* 1. calculate new RTT */
2596 /************************/
2597 /* get the current time */
2598 if (stcb->asoc.use_precise_time) {
2599 (void)SCTP_GETPTIME_TIMEVAL(&now);
2600 } else {
2601 (void)SCTP_GETTIME_TIMEVAL(&now);
2602 }
2603 timevalsub(&now, old);
2604 /* store the current RTT in us */
2605 net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
2606 (uint64_t)now.tv_usec;
2607 /* compute rtt in ms */
2608 rtt = (int32_t)(net->rtt / 1000);
2609 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2610 /* Tell the CC module that a new update has just occurred from a sack */
2611 (*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
2612 }
2613 /* Do we need to determine the lan? We do this only
2614 * on sacks i.e. RTT being determined from data not
2615 * non-data (HB/INIT->INITACK).
2616 */
2617 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2618 (net->lan_type == SCTP_LAN_UNKNOWN)) {
2619 if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2620 net->lan_type = SCTP_LAN_INTERNET;
2621 } else {
2622 net->lan_type = SCTP_LAN_LOCAL;
2623 }
2624 }
2625
2626 /***************************/
2627 /* 2. update RTTVAR & SRTT */
2628 /***************************/
2629 /*-
2630 * Compute the scaled average lastsa and the
2631 * scaled variance lastsv as described in van Jacobson
2632 * Paper "Congestion Avoidance and Control", Annex A.
2633 *
2634 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2635 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2636 */
2637 if (net->RTO_measured) {
2638 rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2639 net->lastsa += rtt;
2640 if (rtt < 0) {
2641 rtt = -rtt;
2642 }
2643 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2644 net->lastsv += rtt;
2645 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2646 rto_logging(net, SCTP_LOG_RTTVAR);
2647 }
2648 } else {
2649 /* First RTO measurment */
2650 net->RTO_measured = 1;
2651 first_measure = 1;
2652 net->lastsa = rtt << SCTP_RTT_SHIFT;
2653 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2654 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2655 rto_logging(net, SCTP_LOG_INITIAL_RTT);
2656 }
2657 }
2658 if (net->lastsv == 0) {
2659 net->lastsv = SCTP_CLOCK_GRANULARITY;
2660 }
2661 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2662 if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2663 (stcb->asoc.sat_network_lockout == 0)) {
2664 stcb->asoc.sat_network = 1;
2665 } else if ((!first_measure) && stcb->asoc.sat_network) {
2666 stcb->asoc.sat_network = 0;
2667 stcb->asoc.sat_network_lockout = 1;
2668 }
2669 /* bound it, per C6/C7 in Section 5.3.1 */
2670 if (new_rto < stcb->asoc.minrto) {
2671 new_rto = stcb->asoc.minrto;
2672 }
2673 if (new_rto > stcb->asoc.maxrto) {
2674 new_rto = stcb->asoc.maxrto;
2675 }
2676 /* we are now returning the RTO */
2677 return (new_rto);
2678}
2679
2680/*
2681 * return a pointer to a contiguous piece of data from the given mbuf chain
2682 * starting at 'off' for 'len' bytes. If the desired piece spans more than
2683 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2684 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2685 */
2686caddr_t
2687sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2688{
2689 uint32_t count;
2690 uint8_t *ptr;
2691
2692 ptr = in_ptr;
2693 if ((off < 0) || (len <= 0))
2694 return (NULL);
2695
2696 /* find the desired start location */
2697 while ((m != NULL) && (off > 0)) {
2698 if (off < SCTP_BUF_LEN(m))
2699 break;
2700 off -= SCTP_BUF_LEN(m);
2701 m = SCTP_BUF_NEXT(m);
2702 }
2703 if (m == NULL)
2704 return (NULL);
2705
2706 /* is the current mbuf large enough (eg. contiguous)? */
2707 if ((SCTP_BUF_LEN(m) - off) >= len) {
2708 return (mtod(m, caddr_t) + off);
2709 } else {
2710 /* else, it spans more than one mbuf, so save a temp copy... */
2711 while ((m != NULL) && (len > 0)) {
2712 count = min(SCTP_BUF_LEN(m) - off, len);
2713 bcopy(mtod(m, caddr_t) + off, ptr, count);
2714 len -= count;
2715 ptr += count;
2716 off = 0;
2717 m = SCTP_BUF_NEXT(m);
2718 }
2719 if ((m == NULL) && (len > 0))
2720 return (NULL);
2721 else
2722 return ((caddr_t)in_ptr);
2723 }
2724}
2725
2726
2727
2728struct sctp_paramhdr *
2729sctp_get_next_param(struct mbuf *m,
2730 int offset,
2731 struct sctp_paramhdr *pull,
2732 int pull_limit)
2733{
2734 /* This just provides a typed signature to Peter's Pull routine */
2735 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2736 (uint8_t *) pull));
2737}
2738
2739
2740struct mbuf *
2741sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2742{
2743 struct mbuf *m_last;
2744 caddr_t dp;
2745
2746 if (padlen > 3) {
2747 return (NULL);
2748 }
2749 if (padlen <= M_TRAILINGSPACE(m)) {
2750 /*
2751 * The easy way. We hope the majority of the time we hit
2752 * here :)
2753 */
2754 m_last = m;
2755 } else {
2756 /* Hard way we must grow the mbuf chain */
2757 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2758 if (m_last == NULL) {
2759 return (NULL);
2760 }
2761 SCTP_BUF_LEN(m_last) = 0;
2762 SCTP_BUF_NEXT(m_last) = NULL;
2763 SCTP_BUF_NEXT(m) = m_last;
2764 }
2765 dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
2766 SCTP_BUF_LEN(m_last) += padlen;
2767 memset(dp, 0, padlen);
2768 return (m_last);
2769}
2770
2771struct mbuf *
2772sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2773{
2774 /* find the last mbuf in chain and pad it */
2775 struct mbuf *m_at;
2776
2777 if (last_mbuf != NULL) {
2778 return (sctp_add_pad_tombuf(last_mbuf, padval));
2779 } else {
2780 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2781 if (SCTP_BUF_NEXT(m_at) == NULL) {
2782 return (sctp_add_pad_tombuf(m_at, padval));
2783 }
2784 }
2785 }
2786 return (NULL);
2787}
2788
2789static void
2790sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2791 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2792#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2793 SCTP_UNUSED
2794#endif
2795 )
2796{
2797 struct mbuf *m_notify;
2798 struct sctp_assoc_change *sac;
2799 struct sctp_queued_to_read *control;
2800 unsigned int notif_len;
2801 uint16_t abort_len;
2802 unsigned int i;
2803#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2804 struct socket *so;
2805#endif
2806
2807 if (stcb == NULL) {
2808 return;
2809 }
2810 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2811 notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2812 if (abort != NULL) {
2813 abort_len = ntohs(abort->ch.chunk_length);
2814 } else {
2815 abort_len = 0;
2816 }
2817 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2818 notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2819 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2820 notif_len += abort_len;
2821 }
2822 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2823 if (m_notify == NULL) {
2824 /* Retry with smaller value. */
2825 notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2826 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2827 if (m_notify == NULL) {
2828 goto set_error;
2829 }
2830 }
2831 SCTP_BUF_NEXT(m_notify) = NULL;
2832 sac = mtod(m_notify, struct sctp_assoc_change *);
2833 memset(sac, 0, notif_len);
2834 sac->sac_type = SCTP_ASSOC_CHANGE;
2835 sac->sac_flags = 0;
2836 sac->sac_length = sizeof(struct sctp_assoc_change);
2837 sac->sac_state = state;
2838 sac->sac_error = error;
2839 /* XXX verify these stream counts */
2840 sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2841 sac->sac_inbound_streams = stcb->asoc.streamincnt;
2842 sac->sac_assoc_id = sctp_get_associd(stcb);
2843 if (notif_len > sizeof(struct sctp_assoc_change)) {
2844 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2845 i = 0;
2846 if (stcb->asoc.prsctp_supported == 1) {
2847 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2848 }
2849 if (stcb->asoc.auth_supported == 1) {
2850 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2851 }
2852 if (stcb->asoc.asconf_supported == 1) {
2853 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2854 }
2855 if (stcb->asoc.idata_supported == 1) {
2856 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2857 }
2858 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2859 if (stcb->asoc.reconfig_supported == 1) {
2860 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2861 }
2862 sac->sac_length += i;
2863 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2864 memcpy(sac->sac_info, abort, abort_len);
2865 sac->sac_length += abort_len;
2866 }
2867 }
2868 SCTP_BUF_LEN(m_notify) = sac->sac_length;
2869 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2870 0, 0, stcb->asoc.context, 0, 0, 0,
2871 m_notify);
2872 if (control != NULL) {
2873 control->length = SCTP_BUF_LEN(m_notify);
2874 /* not that we need this */
2875 control->tail_mbuf = m_notify;
2876 control->spec_flags = M_NOTIFICATION;
2877 sctp_add_to_readq(stcb->sctp_ep, stcb,
2878 control,
2879 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2880 so_locked);
2881 } else {
2882 sctp_m_freem(m_notify);
2883 }
2884 }
2885 /*
2886 * For 1-to-1 style sockets, we send up and error when an ABORT
2887 * comes in.
2888 */
2889set_error:
2890 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2891 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2892 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2893 SOCK_LOCK(stcb->sctp_socket);
2894 if (from_peer) {
2895 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2896 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2897 stcb->sctp_socket->so_error = ECONNREFUSED;
2898 } else {
2899 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2900 stcb->sctp_socket->so_error = ECONNRESET;
2901 }
2902 } else {
2903 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2904 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2905 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2906 stcb->sctp_socket->so_error = ETIMEDOUT;
2907 } else {
2908 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2909 stcb->sctp_socket->so_error = ECONNABORTED;
2910 }
2911 }
2912 }
2913 /* Wake ANY sleepers */
2914#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2915 so = SCTP_INP_SO(stcb->sctp_ep);
2916 if (!so_locked) {
2917 atomic_add_int(&stcb->asoc.refcnt, 1);
2918 SCTP_TCB_UNLOCK(stcb);
2919 SCTP_SOCKET_LOCK(so, 1);
2920 SCTP_TCB_LOCK(stcb);
2921 atomic_subtract_int(&stcb->asoc.refcnt, 1);
2922 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2923 SCTP_SOCKET_UNLOCK(so, 1);
2924 return;
2925 }
2926 }
2927#endif
2928 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2929 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2930 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2931#if defined(__APPLE__)
2932 socantrcvmore(stcb->sctp_socket);
2933#else
2934 socantrcvmore_locked(stcb->sctp_socket);
2935#endif
2936 }
2937 sorwakeup(stcb->sctp_socket);
2938 sowwakeup(stcb->sctp_socket);
2939#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2940 if (!so_locked) {
2941 SCTP_SOCKET_UNLOCK(so, 1);
2942 }
2943#endif
2944}
2945
2946static void
2947sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2948 struct sockaddr *sa, uint32_t error, int so_locked
2949#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2950 SCTP_UNUSED
2951#endif
2952)
2953{
2954 struct mbuf *m_notify;
2955 struct sctp_paddr_change *spc;
2956 struct sctp_queued_to_read *control;
2957
2958 if ((stcb == NULL) ||
2959 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2960 /* event not enabled */
2961 return;
2962 }
2963 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2964 if (m_notify == NULL)
2965 return;
2966 SCTP_BUF_LEN(m_notify) = 0;
2967 spc = mtod(m_notify, struct sctp_paddr_change *);
2968 memset(spc, 0, sizeof(struct sctp_paddr_change));
2969 spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2970 spc->spc_flags = 0;
2971 spc->spc_length = sizeof(struct sctp_paddr_change);
2972 switch (sa->sa_family) {
2973#ifdef INET
2974 case AF_INET:
2975#ifdef INET6
2976 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2977 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2978 (struct sockaddr_in6 *)&spc->spc_aaddr);
2979 } else {
2980 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2981 }
2982#else
2983 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2984#endif
2985 break;
2986#endif
2987#ifdef INET6
2988 case AF_INET6:
2989 {
2990#ifdef SCTP_EMBEDDED_V6_SCOPE
2991 struct sockaddr_in6 *sin6;
2992#endif /* SCTP_EMBEDDED_V6_SCOPE */
2993 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2994
2995#ifdef SCTP_EMBEDDED_V6_SCOPE
2996 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2997 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2998 if (sin6->sin6_scope_id == 0) {
2999 /* recover scope_id for user */
3000#ifdef SCTP_KAME
3001 (void)sa6_recoverscope(sin6);
3002#else
3003 (void)in6_recoverscope(sin6, &sin6->sin6_addr,
3004 NULL);
3005#endif
3006 } else {
3007 /* clear embedded scope_id for user */
3008 in6_clearscope(&sin6->sin6_addr);
3009 }
3010 }
3011#endif /* SCTP_EMBEDDED_V6_SCOPE */
3012 break;
3013 }
3014#endif
3015#if defined(__Userspace__)
3016 case AF_CONN:
3017 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
3018 break;
3019#endif
3020 default:
3021 /* TSNH */
3022 break;
3023 }
3024 spc->spc_state = state;
3025 spc->spc_error = error;
3026 spc->spc_assoc_id = sctp_get_associd(stcb);
3027
3028 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3029 SCTP_BUF_NEXT(m_notify) = NULL;
3030
3031 /* append to socket */
3032 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3033 0, 0, stcb->asoc.context, 0, 0, 0,
3034 m_notify);
3035 if (control == NULL) {
3036 /* no memory */
3037 sctp_m_freem(m_notify);
3038 return;
3039 }
3040 control->length = SCTP_BUF_LEN(m_notify);
3041 control->spec_flags = M_NOTIFICATION;
3042 /* not that we need this */
3043 control->tail_mbuf = m_notify;
3044 sctp_add_to_readq(stcb->sctp_ep, stcb,
3045 control,
3046 &stcb->sctp_socket->so_rcv, 1,
3047 SCTP_READ_LOCK_NOT_HELD,
3048 so_locked);
3049}
3050
3051
3052static void
3053sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
3054 struct sctp_tmit_chunk *chk, int so_locked
3055#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3056 SCTP_UNUSED
3057#endif
3058 )
3059{
3060 struct mbuf *m_notify;
3061 struct sctp_send_failed *ssf;
3062 struct sctp_send_failed_event *ssfe;
3063 struct sctp_queued_to_read *control;
3064 struct sctp_chunkhdr *chkhdr;
3065 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
3066
3067 if ((stcb == NULL) ||
3068 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3069 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3070 /* event not enabled */
3071 return;
3072 }
3073
3074 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3075 notifhdr_len = sizeof(struct sctp_send_failed_event);
3076 } else {
3077 notifhdr_len = sizeof(struct sctp_send_failed);
3078 }
3079 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3080 if (m_notify == NULL)
3081 /* no space left */
3082 return;
3083 SCTP_BUF_LEN(m_notify) = notifhdr_len;
3084 if (stcb->asoc.idata_supported) {
3085 chkhdr_len = sizeof(struct sctp_idata_chunk);
3086 } else {
3087 chkhdr_len = sizeof(struct sctp_data_chunk);
3088 }
3089 /* Use some defaults in case we can't access the chunk header */
3090 if (chk->send_size >= chkhdr_len) {
3091 payload_len = chk->send_size - chkhdr_len;
3092 } else {
3093 payload_len = 0;
3094 }
3095 padding_len = 0;
3096 if (chk->data != NULL) {
3097 chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
3098 if (chkhdr != NULL) {
3099 chk_len = ntohs(chkhdr->chunk_length);
3100 if ((chk_len >= chkhdr_len) &&
3101 (chk->send_size >= chk_len) &&
3102 (chk->send_size - chk_len < 4)) {
3103 padding_len = chk->send_size - chk_len;
3104 payload_len = chk->send_size - chkhdr_len - padding_len;
3105 }
3106 }
3107 }
3108 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3109 ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3110 memset(ssfe, 0, notifhdr_len);
3111 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3112 if (sent) {
3113 ssfe->ssfe_flags = SCTP_DATA_SENT;
3114 } else {
3115 ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3116 }
3117 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
3118 ssfe->ssfe_error = error;
3119 /* not exactly what the user sent in, but should be close :) */
3120 ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
3121 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
3122 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
3123 ssfe->ssfe_info.snd_context = chk->rec.data.context;
3124 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3125 ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3126 } else {
3127 ssf = mtod(m_notify, struct sctp_send_failed *);
3128 memset(ssf, 0, notifhdr_len);
3129 ssf->ssf_type = SCTP_SEND_FAILED;
3130 if (sent) {
3131 ssf->ssf_flags = SCTP_DATA_SENT;
3132 } else {
3133 ssf->ssf_flags = SCTP_DATA_UNSENT;
3134 }
3135 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3136 ssf->ssf_error = error;
3137 /* not exactly what the user sent in, but should be close :) */
3138 ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3139 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3140 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3141 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3142 ssf->ssf_info.sinfo_context = chk->rec.data.context;
3143 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3144 ssf->ssf_assoc_id = sctp_get_associd(stcb);
3145 }
3146 if (chk->data != NULL) {
3147 /* Trim off the sctp chunk header (it should be there) */
3148 if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3149 m_adj(chk->data, chkhdr_len);
3150 m_adj(chk->data, -padding_len);
3151 sctp_mbuf_crush(chk->data);
3152 chk->send_size -= (chkhdr_len + padding_len);
3153 }
3154 }
3155 SCTP_BUF_NEXT(m_notify) = chk->data;
3156 /* Steal off the mbuf */
3157 chk->data = NULL;
3158 /*
3159 * For this case, we check the actual socket buffer, since the assoc
3160 * is going away we don't want to overfill the socket buffer for a
3161 * non-reader
3162 */
3163 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3164 sctp_m_freem(m_notify);
3165 return;
3166 }
3167 /* append to socket */
3168 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3169 0, 0, stcb->asoc.context, 0, 0, 0,
3170 m_notify);
3171 if (control == NULL) {
3172 /* no memory */
3173 sctp_m_freem(m_notify);
3174 return;
3175 }
3176 control->spec_flags = M_NOTIFICATION;
3177 sctp_add_to_readq(stcb->sctp_ep, stcb,
3178 control,
3179 &stcb->sctp_socket->so_rcv, 1,
3180 SCTP_READ_LOCK_NOT_HELD,
3181 so_locked);
3182}
3183
3184
3185static void
3186sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3187 struct sctp_stream_queue_pending *sp, int so_locked
3188#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3189 SCTP_UNUSED
3190#endif
3191 )
3192{
3193 struct mbuf *m_notify;
3194 struct sctp_send_failed *ssf;
3195 struct sctp_send_failed_event *ssfe;
3196 struct sctp_queued_to_read *control;
3197 int notifhdr_len;
3198
3199 if ((stcb == NULL) ||
3200 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3201 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3202 /* event not enabled */
3203 return;
3204 }
3205 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3206 notifhdr_len = sizeof(struct sctp_send_failed_event);
3207 } else {
3208 notifhdr_len = sizeof(struct sctp_send_failed);
3209 }
3210 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3211 if (m_notify == NULL) {
3212 /* no space left */
3213 return;
3214 }
3215 SCTP_BUF_LEN(m_notify) = notifhdr_len;
3216 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3217 ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3218 memset(ssfe, 0, notifhdr_len);
3219 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3220 ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3221 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3222 ssfe->ssfe_error = error;
3223 /* not exactly what the user sent in, but should be close :) */
3224 ssfe->ssfe_info.snd_sid = sp->sid;
3225 if (sp->some_taken) {
3226 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3227 } else {
3228 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3229 }
3230 ssfe->ssfe_info.snd_ppid = sp->ppid;
3231 ssfe->ssfe_info.snd_context = sp->context;
3232 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3233 ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3234 } else {
3235 ssf = mtod(m_notify, struct sctp_send_failed *);
3236 memset(ssf, 0, notifhdr_len);
3237 ssf->ssf_type = SCTP_SEND_FAILED;
3238 ssf->ssf_flags = SCTP_DATA_UNSENT;
3239 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3240 ssf->ssf_error = error;
3241 /* not exactly what the user sent in, but should be close :) */
3242 ssf->ssf_info.sinfo_stream = sp->sid;
3243 ssf->ssf_info.sinfo_ssn = 0;
3244 if (sp->some_taken) {
3245 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3246 } else {
3247 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3248 }
3249 ssf->ssf_info.sinfo_ppid = sp->ppid;
3250 ssf->ssf_info.sinfo_context = sp->context;
3251 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3252 ssf->ssf_assoc_id = sctp_get_associd(stcb);
3253 }
3254 SCTP_BUF_NEXT(m_notify) = sp->data;
3255
3256 /* Steal off the mbuf */
3257 sp->data = NULL;
3258 /*
3259 * For this case, we check the actual socket buffer, since the assoc
3260 * is going away we don't want to overfill the socket buffer for a
3261 * non-reader
3262 */
3263 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3264 sctp_m_freem(m_notify);
3265 return;
3266 }
3267 /* append to socket */
3268 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3269 0, 0, stcb->asoc.context, 0, 0, 0,
3270 m_notify);
3271 if (control == NULL) {
3272 /* no memory */
3273 sctp_m_freem(m_notify);
3274 return;
3275 }
3276 control->spec_flags = M_NOTIFICATION;
3277 sctp_add_to_readq(stcb->sctp_ep, stcb,
3278 control,
3279 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3280}
3281
3282
3283
3284static void
3285sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3286{
3287 struct mbuf *m_notify;
3288 struct sctp_adaptation_event *sai;
3289 struct sctp_queued_to_read *control;
3290
3291 if ((stcb == NULL) ||
3292 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3293 /* event not enabled */
3294 return;
3295 }
3296
3297 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3298 if (m_notify == NULL)
3299 /* no space left */
3300 return;
3301 SCTP_BUF_LEN(m_notify) = 0;
3302 sai = mtod(m_notify, struct sctp_adaptation_event *);
3303 memset(sai, 0, sizeof(struct sctp_adaptation_event));
3304 sai->sai_type = SCTP_ADAPTATION_INDICATION;
3305 sai->sai_flags = 0;
3306 sai->sai_length = sizeof(struct sctp_adaptation_event);
3307 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3308 sai->sai_assoc_id = sctp_get_associd(stcb);
3309
3310 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3311 SCTP_BUF_NEXT(m_notify) = NULL;
3312
3313 /* append to socket */
3314 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3315 0, 0, stcb->asoc.context, 0, 0, 0,
3316 m_notify);
3317 if (control == NULL) {
3318 /* no memory */
3319 sctp_m_freem(m_notify);
3320 return;
3321 }
3322 control->length = SCTP_BUF_LEN(m_notify);
3323 control->spec_flags = M_NOTIFICATION;
3324 /* not that we need this */
3325 control->tail_mbuf = m_notify;
3326 sctp_add_to_readq(stcb->sctp_ep, stcb,
3327 control,
3328 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3329}
3330
3331/* This always must be called with the read-queue LOCKED in the INP */
3332static void
3333sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3334 uint32_t val, int so_locked
3335#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3336 SCTP_UNUSED
3337#endif
3338 )
3339{
3340 struct mbuf *m_notify;
3341 struct sctp_pdapi_event *pdapi;
3342 struct sctp_queued_to_read *control;
3343 struct sockbuf *sb;
3344
3345 if ((stcb == NULL) ||
3346 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3347 /* event not enabled */
3348 return;
3349 }
3350 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3351 return;
3352 }
3353
3354 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3355 if (m_notify == NULL)
3356 /* no space left */
3357 return;
3358 SCTP_BUF_LEN(m_notify) = 0;
3359 pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3360 memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3361 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3362 pdapi->pdapi_flags = 0;
3363 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3364 pdapi->pdapi_indication = error;
3365 pdapi->pdapi_stream = (val >> 16);
3366 pdapi->pdapi_seq = (val & 0x0000ffff);
3367 pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3368
3369 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3370 SCTP_BUF_NEXT(m_notify) = NULL;
3371 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3372 0, 0, stcb->asoc.context, 0, 0, 0,
3373 m_notify);
3374 if (control == NULL) {
3375 /* no memory */
3376 sctp_m_freem(m_notify);
3377 return;
3378 }
3379 control->spec_flags = M_NOTIFICATION;
3380 control->length = SCTP_BUF_LEN(m_notify);
3381 /* not that we need this */
3382 control->tail_mbuf = m_notify;
3383 control->held_length = 0;
3384 control->length = 0;
3385 sb = &stcb->sctp_socket->so_rcv;
3386 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3387 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3388 }
3389 sctp_sballoc(stcb, sb, m_notify);
3390 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3391 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
3392 }
3393 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3394 control->end_added = 1;
3395 if (stcb->asoc.control_pdapi)
3396 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3397 else {
3398 /* we really should not see this case */
3399 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3400 }
3401 if (stcb->sctp_ep && stcb->sctp_socket) {
3402 /* This should always be the case */
3403#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3404 struct socket *so;
3405
3406 so = SCTP_INP_SO(stcb->sctp_ep);
3407 if (!so_locked) {
3408 atomic_add_int(&stcb->asoc.refcnt, 1);
3409 SCTP_TCB_UNLOCK(stcb);
3410 SCTP_SOCKET_LOCK(so, 1);
3411 SCTP_TCB_LOCK(stcb);
3412 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3413 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3414 SCTP_SOCKET_UNLOCK(so, 1);
3415 return;
3416 }
3417 }
3418#endif
3419 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3420#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3421 if (!so_locked) {
3422 SCTP_SOCKET_UNLOCK(so, 1);
3423 }
3424#endif
3425 }
3426}
3427
3428static void
3429sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3430{
3431 struct mbuf *m_notify;
3432 struct sctp_shutdown_event *sse;
3433 struct sctp_queued_to_read *control;
3434
3435 /*
3436 * For TCP model AND UDP connected sockets we will send an error up
3437 * when an SHUTDOWN completes
3438 */
3439 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3440 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3441 /* mark socket closed for read/write and wakeup! */
3442#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3443 struct socket *so;
3444
3445 so = SCTP_INP_SO(stcb->sctp_ep);
3446 atomic_add_int(&stcb->asoc.refcnt, 1);
3447 SCTP_TCB_UNLOCK(stcb);
3448 SCTP_SOCKET_LOCK(so, 1);
3449 SCTP_TCB_LOCK(stcb);
3450 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3451 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3452 SCTP_SOCKET_UNLOCK(so, 1);
3453 return;
3454 }
3455#endif
3456 socantsendmore(stcb->sctp_socket);
3457#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3458 SCTP_SOCKET_UNLOCK(so, 1);
3459#endif
3460 }
3461 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3462 /* event not enabled */
3463 return;
3464 }
3465
3466 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3467 if (m_notify == NULL)
3468 /* no space left */
3469 return;
3470 sse = mtod(m_notify, struct sctp_shutdown_event *);
3471 memset(sse, 0, sizeof(struct sctp_shutdown_event));
3472 sse->sse_type = SCTP_SHUTDOWN_EVENT;
3473 sse->sse_flags = 0;
3474 sse->sse_length = sizeof(struct sctp_shutdown_event);
3475 sse->sse_assoc_id = sctp_get_associd(stcb);
3476
3477 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3478 SCTP_BUF_NEXT(m_notify) = NULL;
3479
3480 /* append to socket */
3481 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3482 0, 0, stcb->asoc.context, 0, 0, 0,
3483 m_notify);
3484 if (control == NULL) {
3485 /* no memory */
3486 sctp_m_freem(m_notify);
3487 return;
3488 }
3489 control->spec_flags = M_NOTIFICATION;
3490 control->length = SCTP_BUF_LEN(m_notify);
3491 /* not that we need this */
3492 control->tail_mbuf = m_notify;
3493 sctp_add_to_readq(stcb->sctp_ep, stcb,
3494 control,
3495 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3496}
3497
3498static void
3499sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3500 int so_locked
3501#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3502 SCTP_UNUSED
3503#endif
3504 )
3505{
3506 struct mbuf *m_notify;
3507 struct sctp_sender_dry_event *event;
3508 struct sctp_queued_to_read *control;
3509
3510 if ((stcb == NULL) ||
3511 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3512 /* event not enabled */
3513 return;
3514 }
3515
3516 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3517 if (m_notify == NULL) {
3518 /* no space left */
3519 return;
3520 }
3521 SCTP_BUF_LEN(m_notify) = 0;
3522 event = mtod(m_notify, struct sctp_sender_dry_event *);
3523 memset(event, 0, sizeof(struct sctp_sender_dry_event));
3524 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3525 event->sender_dry_flags = 0;
3526 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3527 event->sender_dry_assoc_id = sctp_get_associd(stcb);
3528
3529 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3530 SCTP_BUF_NEXT(m_notify) = NULL;
3531
3532 /* append to socket */
3533 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3534 0, 0, stcb->asoc.context, 0, 0, 0,
3535 m_notify);
3536 if (control == NULL) {
3537 /* no memory */
3538 sctp_m_freem(m_notify);
3539 return;
3540 }
3541 control->length = SCTP_BUF_LEN(m_notify);
3542 control->spec_flags = M_NOTIFICATION;
3543 /* not that we need this */
3544 control->tail_mbuf = m_notify;
3545 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3546 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3547}
3548
3549
3550void
3551sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3552{
3553 struct mbuf *m_notify;
3554 struct sctp_queued_to_read *control;
3555 struct sctp_stream_change_event *stradd;
3556
3557 if ((stcb == NULL) ||
3558 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3559 /* event not enabled */
3560 return;
3561 }
3562 if ((stcb->asoc.peer_req_out) && flag) {
3563 /* Peer made the request, don't tell the local user */
3564 stcb->asoc.peer_req_out = 0;
3565 return;
3566 }
3567 stcb->asoc.peer_req_out = 0;
3568 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3569 if (m_notify == NULL)
3570 /* no space left */
3571 return;
3572 SCTP_BUF_LEN(m_notify) = 0;
3573 stradd = mtod(m_notify, struct sctp_stream_change_event *);
3574 memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3575 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3576 stradd->strchange_flags = flag;
3577 stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3578 stradd->strchange_assoc_id = sctp_get_associd(stcb);
3579 stradd->strchange_instrms = numberin;
3580 stradd->strchange_outstrms = numberout;
3581 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3582 SCTP_BUF_NEXT(m_notify) = NULL;
3583 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3584 /* no space */
3585 sctp_m_freem(m_notify);
3586 return;
3587 }
3588 /* append to socket */
3589 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3590 0, 0, stcb->asoc.context, 0, 0, 0,
3591 m_notify);
3592 if (control == NULL) {
3593 /* no memory */
3594 sctp_m_freem(m_notify);
3595 return;
3596 }
3597 control->spec_flags = M_NOTIFICATION;
3598 control->length = SCTP_BUF_LEN(m_notify);
3599 /* not that we need this */
3600 control->tail_mbuf = m_notify;
3601 sctp_add_to_readq(stcb->sctp_ep, stcb,
3602 control,
3603 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3604}
3605
3606void
3607sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3608{
3609 struct mbuf *m_notify;
3610 struct sctp_queued_to_read *control;
3611 struct sctp_assoc_reset_event *strasoc;
3612
3613 if ((stcb == NULL) ||
3614 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3615 /* event not enabled */
3616 return;
3617 }
3618 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3619 if (m_notify == NULL)
3620 /* no space left */
3621 return;
3622 SCTP_BUF_LEN(m_notify) = 0;
3623 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3624 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3625 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3626 strasoc->assocreset_flags = flag;
3627 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3628 strasoc->assocreset_assoc_id= sctp_get_associd(stcb);
3629 strasoc->assocreset_local_tsn = sending_tsn;
3630 strasoc->assocreset_remote_tsn = recv_tsn;
3631 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3632 SCTP_BUF_NEXT(m_notify) = NULL;
3633 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3634 /* no space */
3635 sctp_m_freem(m_notify);
3636 return;
3637 }
3638 /* append to socket */
3639 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3640 0, 0, stcb->asoc.context, 0, 0, 0,
3641 m_notify);
3642 if (control == NULL) {
3643 /* no memory */
3644 sctp_m_freem(m_notify);
3645 return;
3646 }
3647 control->spec_flags = M_NOTIFICATION;
3648 control->length = SCTP_BUF_LEN(m_notify);
3649 /* not that we need this */
3650 control->tail_mbuf = m_notify;
3651 sctp_add_to_readq(stcb->sctp_ep, stcb,
3652 control,
3653 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3654}
3655
3656
3657
3658static void
3659sctp_notify_stream_reset(struct sctp_tcb *stcb,
3660 int number_entries, uint16_t * list, int flag)
3661{
3662 struct mbuf *m_notify;
3663 struct sctp_queued_to_read *control;
3664 struct sctp_stream_reset_event *strreset;
3665 int len;
3666
3667 if ((stcb == NULL) ||
3668 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3669 /* event not enabled */
3670 return;
3671 }
3672
3673 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3674 if (m_notify == NULL)
3675 /* no space left */
3676 return;
3677 SCTP_BUF_LEN(m_notify) = 0;
3678 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3679 if (len > M_TRAILINGSPACE(m_notify)) {
3680 /* never enough room */
3681 sctp_m_freem(m_notify);
3682 return;
3683 }
3684 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3685 memset(strreset, 0, len);
3686 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3687 strreset->strreset_flags = flag;
3688 strreset->strreset_length = len;
3689 strreset->strreset_assoc_id = sctp_get_associd(stcb);
3690 if (number_entries) {
3691 int i;
3692
3693 for (i = 0; i < number_entries; i++) {
3694 strreset->strreset_stream_list[i] = ntohs(list[i]);
3695 }
3696 }
3697 SCTP_BUF_LEN(m_notify) = len;
3698 SCTP_BUF_NEXT(m_notify) = NULL;
3699 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3700 /* no space */
3701 sctp_m_freem(m_notify);
3702 return;
3703 }
3704 /* append to socket */
3705 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3706 0, 0, stcb->asoc.context, 0, 0, 0,
3707 m_notify);
3708 if (control == NULL) {
3709 /* no memory */
3710 sctp_m_freem(m_notify);
3711 return;
3712 }
3713 control->spec_flags = M_NOTIFICATION;
3714 control->length = SCTP_BUF_LEN(m_notify);
3715 /* not that we need this */
3716 control->tail_mbuf = m_notify;
3717 sctp_add_to_readq(stcb->sctp_ep, stcb,
3718 control,
3719 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3720}
3721
3722
3723static void
3724sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3725{
3726 struct mbuf *m_notify;
3727 struct sctp_remote_error *sre;
3728 struct sctp_queued_to_read *control;
3729 unsigned int notif_len;
3730 uint16_t chunk_len;
3731
3732 if ((stcb == NULL) ||
3733 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3734 return;
3735 }
3736 if (chunk != NULL) {
3737 chunk_len = ntohs(chunk->ch.chunk_length);
3738 } else {
3739 chunk_len = 0;
3740 }
3741 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3742 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3743 if (m_notify == NULL) {
3744 /* Retry with smaller value. */
3745 notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3746 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3747 if (m_notify == NULL) {
3748 return;
3749 }
3750 }
3751 SCTP_BUF_NEXT(m_notify) = NULL;
3752 sre = mtod(m_notify, struct sctp_remote_error *);
3753 memset(sre, 0, notif_len);
3754 sre->sre_type = SCTP_REMOTE_ERROR;
3755 sre->sre_flags = 0;
3756 sre->sre_length = sizeof(struct sctp_remote_error);
3757 sre->sre_error = error;
3758 sre->sre_assoc_id = sctp_get_associd(stcb);
3759 if (notif_len > sizeof(struct sctp_remote_error)) {
3760 memcpy(sre->sre_data, chunk, chunk_len);
3761 sre->sre_length += chunk_len;
3762 }
3763 SCTP_BUF_LEN(m_notify) = sre->sre_length;
3764 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3765 0, 0, stcb->asoc.context, 0, 0, 0,
3766 m_notify);
3767 if (control != NULL) {
3768 control->length = SCTP_BUF_LEN(m_notify);
3769 /* not that we need this */
3770 control->tail_mbuf = m_notify;
3771 control->spec_flags = M_NOTIFICATION;
3772 sctp_add_to_readq(stcb->sctp_ep, stcb,
3773 control,
3774 &stcb->sctp_socket->so_rcv, 1,
3775 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3776 } else {
3777 sctp_m_freem(m_notify);
3778 }
3779}
3780
3781
3782void
3783sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3784 uint32_t error, void *data, int so_locked
3785#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3786 SCTP_UNUSED
3787#endif
3788 )
3789{
3790 if ((stcb == NULL) ||
3791 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3792 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3793 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3794 /* If the socket is gone we are out of here */
3795 return;
3796 }
3797#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
3798 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3799#else
3800 if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
3801#endif
3802 return;
3803 }
3804#if defined(__APPLE__)
3805 if (so_locked) {
3806 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
3807 } else {
3808 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
3809 }
3810#endif
3811 if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3812 (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3813 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3814 (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3815 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3816 /* Don't report these in front states */
3817 return;
3818 }
3819 }
3820 switch (notification) {
3821 case SCTP_NOTIFY_ASSOC_UP:
3822 if (stcb->asoc.assoc_up_sent == 0) {
3823 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3824 stcb->asoc.assoc_up_sent = 1;
3825 }
3826 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3827 sctp_notify_adaptation_layer(stcb);
3828 }
3829 if (stcb->asoc.auth_supported == 0) {
3830 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3831 NULL, so_locked);
3832 }
3833 break;
3834 case SCTP_NOTIFY_ASSOC_DOWN:
3835 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3836#if defined(__Userspace__)
3837 if (stcb->sctp_ep->recv_callback) {
3838 if (stcb->sctp_socket) {
3839 union sctp_sockstore addr;
3840 struct sctp_rcvinfo rcv;
3841
3842 memset(&addr, 0, sizeof(union sctp_sockstore));
3843 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
3844 atomic_add_int(&stcb->asoc.refcnt, 1);
3845 SCTP_TCB_UNLOCK(stcb);
3846 stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
3847 SCTP_TCB_LOCK(stcb);
3848 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3849 }
3850 }
3851#endif
3852 break;
3853 case SCTP_NOTIFY_INTERFACE_DOWN:
3854 {
3855 struct sctp_nets *net;
3856
3857 net = (struct sctp_nets *)data;
3858 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3859 (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3860 break;
3861 }
3862 case SCTP_NOTIFY_INTERFACE_UP:
3863 {
3864 struct sctp_nets *net;
3865
3866 net = (struct sctp_nets *)data;
3867 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3868 (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3869 break;
3870 }
3871 case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3872 {
3873 struct sctp_nets *net;
3874
3875 net = (struct sctp_nets *)data;
3876 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3877 (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3878 break;
3879 }
3880 case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3881 sctp_notify_send_failed2(stcb, error,
3882 (struct sctp_stream_queue_pending *)data, so_locked);
3883 break;
3884 case SCTP_NOTIFY_SENT_DG_FAIL:
3885 sctp_notify_send_failed(stcb, 1, error,
3886 (struct sctp_tmit_chunk *)data, so_locked);
3887 break;
3888 case SCTP_NOTIFY_UNSENT_DG_FAIL:
3889 sctp_notify_send_failed(stcb, 0, error,
3890 (struct sctp_tmit_chunk *)data, so_locked);
3891 break;
3892 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3893 {
3894 uint32_t val;
3895 val = *((uint32_t *)data);
3896
3897 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3898 break;
3899 }
3900 case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3901 if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3902 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3903 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3904 } else {
3905 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3906 }
3907 break;
3908 case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3909 if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3910 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3911 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3912 } else {
3913 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3914 }
3915 break;
3916 case SCTP_NOTIFY_ASSOC_RESTART:
3917 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3918 if (stcb->asoc.auth_supported == 0) {
3919 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3920 NULL, so_locked);
3921 }
3922 break;
3923 case SCTP_NOTIFY_STR_RESET_SEND:
3924 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3925 break;
3926 case SCTP_NOTIFY_STR_RESET_RECV:
3927 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3928 break;
3929 case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3930 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3931 (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
3932 break;
3933 case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3934 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3935 (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
3936 break;
3937 case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3938 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3939 (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
3940 break;
3941 case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3942 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3943 (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
3944 break;
3945 case SCTP_NOTIFY_ASCONF_ADD_IP:
3946 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3947 error, so_locked);
3948 break;
3949 case SCTP_NOTIFY_ASCONF_DELETE_IP:
3950 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3951 error, so_locked);
3952 break;
3953 case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3954 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3955 error, so_locked);
3956 break;
3957 case SCTP_NOTIFY_PEER_SHUTDOWN:
3958 sctp_notify_shutdown_event(stcb);
3959 break;
3960 case SCTP_NOTIFY_AUTH_NEW_KEY:
3961 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3962 (uint16_t)(uintptr_t)data,
3963 so_locked);
3964 break;
3965 case SCTP_NOTIFY_AUTH_FREE_KEY:
3966 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3967 (uint16_t)(uintptr_t)data,
3968 so_locked);
3969 break;
3970 case SCTP_NOTIFY_NO_PEER_AUTH:
3971 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3972 (uint16_t)(uintptr_t)data,
3973 so_locked);
3974 break;
3975 case SCTP_NOTIFY_SENDER_DRY:
3976 sctp_notify_sender_dry_event(stcb, so_locked);
3977 break;
3978 case SCTP_NOTIFY_REMOTE_ERROR:
3979 sctp_notify_remote_error(stcb, error, data);
3980 break;
3981 default:
3982 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3983 __func__, notification, notification);
3984 break;
3985 } /* end switch */
3986}
3987
3988void
3989sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3990#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3991 SCTP_UNUSED
3992#endif
3993 )
3994{
3995 struct sctp_association *asoc;
3996 struct sctp_stream_out *outs;
3997 struct sctp_tmit_chunk *chk, *nchk;
3998 struct sctp_stream_queue_pending *sp, *nsp;
3999 int i;
4000
4001 if (stcb == NULL) {
4002 return;
4003 }
4004 asoc = &stcb->asoc;
4005 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4006 /* already being freed */
4007 return;
4008 }
4009#if defined(__APPLE__)
4010 if (so_locked) {
4011 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
4012 } else {
4013 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
4014 }
4015#endif
4016 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4017 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4018 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
4019 return;
4020 }
4021 /* now through all the gunk freeing chunks */
4022 if (holds_lock == 0) {
4023 SCTP_TCB_SEND_LOCK(stcb);
4024 }
4025 /* sent queue SHOULD be empty */
4026 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
4027 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
4028 asoc->sent_queue_cnt--;
4029 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
4030 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4031 asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4032#ifdef INVARIANTS
4033 } else {
4034 panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4035#endif
4036 }
4037 }
4038 if (chk->data != NULL) {
4039 sctp_free_bufspace(stcb, asoc, chk, 1);
4040 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
4041 error, chk, so_locked);
4042 if (chk->data) {
4043 sctp_m_freem(chk->data);
4044 chk->data = NULL;
4045 }
4046 }
4047 sctp_free_a_chunk(stcb, chk, so_locked);
4048 /*sa_ignore FREED_MEMORY*/
4049 }
4050 /* pending send queue SHOULD be empty */
4051 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
4052 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
4053 asoc->send_queue_cnt--;
4054 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4055 asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4056#ifdef INVARIANTS
4057 } else {
4058 panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4059#endif
4060 }
4061 if (chk->data != NULL) {
4062 sctp_free_bufspace(stcb, asoc, chk, 1);
4063 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
4064 error, chk, so_locked);
4065 if (chk->data) {
4066 sctp_m_freem(chk->data);
4067 chk->data = NULL;
4068 }
4069 }
4070 sctp_free_a_chunk(stcb, chk, so_locked);
4071 /*sa_ignore FREED_MEMORY*/
4072 }
4073 for (i = 0; i < asoc->streamoutcnt; i++) {
4074 /* For each stream */
4075 outs = &asoc->strmout[i];
4076 /* clean up any sends there */
4077 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
4078 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
4079 TAILQ_REMOVE(&outs->outqueue, sp, next);
4080 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
4081 sctp_free_spbufspace(stcb, asoc, sp);
4082 if (sp->data) {
4083 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
4084 error, (void *)sp, so_locked);
4085 if (sp->data) {
4086 sctp_m_freem(sp->data);
4087 sp->data = NULL;
4088 sp->tail_mbuf = NULL;
4089 sp->length = 0;
4090 }
4091 }
4092 if (sp->net) {
4093 sctp_free_remote_addr(sp->net);
4094 sp->net = NULL;
4095 }
4096 /* Free the chunk */
4097 sctp_free_a_strmoq(stcb, sp, so_locked);
4098 /*sa_ignore FREED_MEMORY*/
4099 }
4100 }
4101
4102 if (holds_lock == 0) {
4103 SCTP_TCB_SEND_UNLOCK(stcb);
4104 }
4105}
4106
4107void
4108sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4109 struct sctp_abort_chunk *abort, int so_locked
4110#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4111 SCTP_UNUSED
4112#endif
4113 )
4114{
4115 if (stcb == NULL) {
4116 return;
4117 }
4118#if defined(__APPLE__)
4119 if (so_locked) {
4120 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
4121 } else {
4122 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
4123 }
4124#endif
4125 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4126 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4127 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4128 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4129 }
4130 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4131 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4132 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4133 return;
4134 }
4135 /* Tell them we lost the asoc */
4136 sctp_report_all_outbound(stcb, error, 1, so_locked);
4137 if (from_peer) {
4138 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4139 } else {
4140 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4141 }
4142}
4143
4144void
4145sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4146 struct mbuf *m, int iphlen,
4147 struct sockaddr *src, struct sockaddr *dst,
4148 struct sctphdr *sh, struct mbuf *op_err,
4149#if defined(__FreeBSD__)
4150 uint8_t mflowtype, uint32_t mflowid,
4151#endif
4152 uint32_t vrf_id, uint16_t port)
4153{
4154 uint32_t vtag;
4155#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4156 struct socket *so;
4157#endif
4158
4159 vtag = 0;
4160 if (stcb != NULL) {
4161 vtag = stcb->asoc.peer_vtag;
4162 vrf_id = stcb->asoc.vrf_id;
4163 }
4164 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4165#if defined(__FreeBSD__)
4166 mflowtype, mflowid, inp->fibnum,
4167#endif
4168 vrf_id, port);
4169 if (stcb != NULL) {
4170 /* We have a TCB to abort, send notification too */
4171 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4172 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4173 /* Ok, now lets free it */
4174#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4175 so = SCTP_INP_SO(inp);
4176 atomic_add_int(&stcb->asoc.refcnt, 1);
4177 SCTP_TCB_UNLOCK(stcb);
4178 SCTP_SOCKET_LOCK(so, 1);
4179 SCTP_TCB_LOCK(stcb);
4180 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4181#endif
4182 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4183 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4184 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4185 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4186 }
4187 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4188 SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4189#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4190 SCTP_SOCKET_UNLOCK(so, 1);
4191#endif
4192 }
4193}
4194#ifdef SCTP_ASOCLOG_OF_TSNS
4195void
4196sctp_print_out_track_log(struct sctp_tcb *stcb)
4197{
4198#ifdef NOSIY_PRINTS
4199 int i;
4200 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4201 SCTP_PRINTF("IN bound TSN log-aaa\n");
4202 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4203 SCTP_PRINTF("None rcvd\n");
4204 goto none_in;
4205 }
4206 if (stcb->asoc.tsn_in_wrapped) {
4207 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4208 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4209 stcb->asoc.in_tsnlog[i].tsn,
4210 stcb->asoc.in_tsnlog[i].strm,
4211 stcb->asoc.in_tsnlog[i].seq,
4212 stcb->asoc.in_tsnlog[i].flgs,
4213 stcb->asoc.in_tsnlog[i].sz);
4214 }
4215 }
4216 if (stcb->asoc.tsn_in_at) {
4217 for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4218 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4219 stcb->asoc.in_tsnlog[i].tsn,
4220 stcb->asoc.in_tsnlog[i].strm,
4221 stcb->asoc.in_tsnlog[i].seq,
4222 stcb->asoc.in_tsnlog[i].flgs,
4223 stcb->asoc.in_tsnlog[i].sz);
4224 }
4225 }
4226 none_in:
4227 SCTP_PRINTF("OUT bound TSN log-aaa\n");
4228 if ((stcb->asoc.tsn_out_at == 0) &&
4229 (stcb->asoc.tsn_out_wrapped == 0)) {
4230 SCTP_PRINTF("None sent\n");
4231 }
4232 if (stcb->asoc.tsn_out_wrapped) {
4233 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4234 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4235 stcb->asoc.out_tsnlog[i].tsn,
4236 stcb->asoc.out_tsnlog[i].strm,
4237 stcb->asoc.out_tsnlog[i].seq,
4238 stcb->asoc.out_tsnlog[i].flgs,
4239 stcb->asoc.out_tsnlog[i].sz);
4240 }
4241 }
4242 if (stcb->asoc.tsn_out_at) {
4243 for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4244 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4245 stcb->asoc.out_tsnlog[i].tsn,
4246 stcb->asoc.out_tsnlog[i].strm,
4247 stcb->asoc.out_tsnlog[i].seq,
4248 stcb->asoc.out_tsnlog[i].flgs,
4249 stcb->asoc.out_tsnlog[i].sz);
4250 }
4251 }
4252#endif
4253}
4254#endif
4255
4256void
4257sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4258 struct mbuf *op_err,
4259 int so_locked
4260#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4261 SCTP_UNUSED
4262#endif
4263)
4264{
4265#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4266 struct socket *so;
4267#endif
4268
4269#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4270 so = SCTP_INP_SO(inp);
4271#endif
4272#if defined(__APPLE__)
4273 if (so_locked) {
4274 sctp_lock_assert(SCTP_INP_SO(inp));
4275 } else {
4276 sctp_unlock_assert(SCTP_INP_SO(inp));
4277 }
4278#endif
4279 if (stcb == NULL) {
4280 /* Got to have a TCB */
4281 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4282 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4283#if defined(__APPLE__)
4284 if (!so_locked) {
4285 SCTP_SOCKET_LOCK(so, 1);
4286 }
4287#endif
4288 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4289 SCTP_CALLED_DIRECTLY_NOCMPSET);
4290#if defined(__APPLE__)
4291 if (!so_locked) {
4292 SCTP_SOCKET_UNLOCK(so, 1);
4293 }
4294#endif
4295 }
4296 }
4297 return;
4298 } else {
4299 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4300 }
4301 /* notify the peer */
4302 sctp_send_abort_tcb(stcb, op_err, so_locked);
4303 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4304 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4305 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4306 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4307 }
4308 /* notify the ulp */
4309 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4310 sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4311 }
4312 /* now free the asoc */
4313#ifdef SCTP_ASOCLOG_OF_TSNS
4314 sctp_print_out_track_log(stcb);
4315#endif
4316#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4317 if (!so_locked) {
4318 atomic_add_int(&stcb->asoc.refcnt, 1);
4319 SCTP_TCB_UNLOCK(stcb);
4320 SCTP_SOCKET_LOCK(so, 1);
4321 SCTP_TCB_LOCK(stcb);
4322 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4323 }
4324#endif
4325 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4326 SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4327#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4328 if (!so_locked) {
4329 SCTP_SOCKET_UNLOCK(so, 1);
4330 }
4331#endif
4332}
4333
4334void
4335sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4336 struct sockaddr *src, struct sockaddr *dst,
4337 struct sctphdr *sh, struct sctp_inpcb *inp,
4338 struct mbuf *cause,
4339#if defined(__FreeBSD__)
4340 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4341#endif
4342 uint32_t vrf_id, uint16_t port)
4343{
4344 struct sctp_chunkhdr *ch, chunk_buf;
4345 unsigned int chk_length;
4346 int contains_init_chunk;
4347
4348 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4349 /* Generate a TO address for future reference */
4350 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4351 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4352#if defined(__APPLE__)
4353 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
4354#endif
4355 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4356 SCTP_CALLED_DIRECTLY_NOCMPSET);
4357#if defined(__APPLE__)
4358 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
4359#endif
4360 }
4361 }
4362 contains_init_chunk = 0;
4363 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4364 sizeof(*ch), (uint8_t *) & chunk_buf);
4365 while (ch != NULL) {
4366 chk_length = ntohs(ch->chunk_length);
4367 if (chk_length < sizeof(*ch)) {
4368 /* break to abort land */
4369 break;
4370 }
4371 switch (ch->chunk_type) {
4372 case SCTP_INIT:
4373 contains_init_chunk = 1;
4374 break;
4375 case SCTP_PACKET_DROPPED:
4376 /* we don't respond to pkt-dropped */
4377 return;
4378 case SCTP_ABORT_ASSOCIATION:
4379 /* we don't respond with an ABORT to an ABORT */
4380 return;
4381 case SCTP_SHUTDOWN_COMPLETE:
4382 /*
4383 * we ignore it since we are not waiting for it and
4384 * peer is gone
4385 */
4386 return;
4387 case SCTP_SHUTDOWN_ACK:
4388 sctp_send_shutdown_complete2(src, dst, sh,
4389#if defined(__FreeBSD__)
4390 mflowtype, mflowid, fibnum,
4391#endif
4392 vrf_id, port);
4393 return;
4394 default:
4395 break;
4396 }
4397 offset += SCTP_SIZE32(chk_length);
4398 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4399 sizeof(*ch), (uint8_t *) & chunk_buf);
4400 }
4401 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4402 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4403 (contains_init_chunk == 0))) {
4404 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4405#if defined(__FreeBSD__)
4406 mflowtype, mflowid, fibnum,
4407#endif
4408 vrf_id, port);
4409 }
4410}
4411
4412/*
4413 * check the inbound datagram to make sure there is not an abort inside it,
4414 * if there is return 1, else return 0.
4415 */
4416int
4417sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4418{
4419 struct sctp_chunkhdr *ch;
4420 struct sctp_init_chunk *init_chk, chunk_buf;
4421 int offset;
4422 unsigned int chk_length;
4423
4424 offset = iphlen + sizeof(struct sctphdr);
4425 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4426 (uint8_t *) & chunk_buf);
4427 while (ch != NULL) {
4428 chk_length = ntohs(ch->chunk_length);
4429 if (chk_length < sizeof(*ch)) {
4430 /* packet is probably corrupt */
4431 break;
4432 }
4433 /* we seem to be ok, is it an abort? */
4434 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4435 /* yep, tell them */
4436 return (1);
4437 }
4438 if (ch->chunk_type == SCTP_INITIATION) {
4439 /* need to update the Vtag */
4440 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4441 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4442 if (init_chk != NULL) {
4443 *vtagfill = ntohl(init_chk->init.initiate_tag);
4444 }
4445 }
4446 /* Nope, move to the next chunk */
4447 offset += SCTP_SIZE32(chk_length);
4448 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4449 sizeof(*ch), (uint8_t *) & chunk_buf);
4450 }
4451 return (0);
4452}
4453
4454/*
4455 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4456 * set (i.e. it's 0) so, create this function to compare link local scopes
4457 */
4458#ifdef INET6
4459uint32_t
4460sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4461{
4462#if defined(__Userspace__)
4463 /*__Userspace__ Returning 1 here always */
4464#endif
4465#if defined(SCTP_EMBEDDED_V6_SCOPE)
4466 struct sockaddr_in6 a, b;
4467
4468 /* save copies */
4469 a = *addr1;
4470 b = *addr2;
4471
4472 if (a.sin6_scope_id == 0)
4473#ifdef SCTP_KAME
4474 if (sa6_recoverscope(&a)) {
4475#else
4476 if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
4477#endif /* SCTP_KAME */
4478 /* can't get scope, so can't match */
4479 return (0);
4480 }
4481 if (b.sin6_scope_id == 0)
4482#ifdef SCTP_KAME
4483 if (sa6_recoverscope(&b)) {
4484#else
4485 if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
4486#endif /* SCTP_KAME */
4487 /* can't get scope, so can't match */
4488 return (0);
4489 }
4490 if (a.sin6_scope_id != b.sin6_scope_id)
4491 return (0);
4492#else
4493 if (addr1->sin6_scope_id != addr2->sin6_scope_id)
4494 return (0);
4495#endif /* SCTP_EMBEDDED_V6_SCOPE */
4496
4497 return (1);
4498}
4499
4500#if defined(SCTP_EMBEDDED_V6_SCOPE)
4501/*
4502 * returns a sockaddr_in6 with embedded scope recovered and removed
4503 */
4504struct sockaddr_in6 *
4505sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4506{
4507 /* check and strip embedded scope junk */
4508 if (addr->sin6_family == AF_INET6) {
4509 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4510 if (addr->sin6_scope_id == 0) {
4511 *store = *addr;
4512#ifdef SCTP_KAME
4513 if (!sa6_recoverscope(store)) {
4514#else
4515 if (!in6_recoverscope(store, &store->sin6_addr,
4516 NULL)) {
4517#endif /* SCTP_KAME */
4518 /* use the recovered scope */
4519 addr = store;
4520 }
4521 } else {
4522 /* else, return the original "to" addr */
4523 in6_clearscope(&addr->sin6_addr);
4524 }
4525 }
4526 }
4527 return (addr);
4528}
4529#endif /* SCTP_EMBEDDED_V6_SCOPE */
4530#endif
4531
4532/*
4533 * are the two addresses the same? currently a "scopeless" check returns: 1
4534 * if same, 0 if not
4535 */
4536int
4537sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4538{
4539
4540 /* must be valid */
4541 if (sa1 == NULL || sa2 == NULL)
4542 return (0);
4543
4544 /* must be the same family */
4545 if (sa1->sa_family != sa2->sa_family)
4546 return (0);
4547
4548 switch (sa1->sa_family) {
4549#ifdef INET6
4550 case AF_INET6:
4551 {
4552 /* IPv6 addresses */
4553 struct sockaddr_in6 *sin6_1, *sin6_2;
4554
4555 sin6_1 = (struct sockaddr_in6 *)sa1;
4556 sin6_2 = (struct sockaddr_in6 *)sa2;
4557 return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4558 sin6_2));
4559 }
4560#endif
4561#ifdef INET
4562 case AF_INET:
4563 {
4564 /* IPv4 addresses */
4565 struct sockaddr_in *sin_1, *sin_2;
4566
4567 sin_1 = (struct sockaddr_in *)sa1;
4568 sin_2 = (struct sockaddr_in *)sa2;
4569 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4570 }
4571#endif
4572#if defined(__Userspace__)
4573 case AF_CONN:
4574 {
4575 struct sockaddr_conn *sconn_1, *sconn_2;
4576
4577 sconn_1 = (struct sockaddr_conn *)sa1;
4578 sconn_2 = (struct sockaddr_conn *)sa2;
4579 return (sconn_1->sconn_addr == sconn_2->sconn_addr);
4580 }
4581#endif
4582 default:
4583 /* we don't do these... */
4584 return (0);
4585 }
4586}
4587
4588void
4589sctp_print_address(struct sockaddr *sa)
4590{
4591#ifdef INET6
4592#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
4593 char ip6buf[INET6_ADDRSTRLEN];
4594#endif
4595#endif
4596
4597 switch (sa->sa_family) {
4598#ifdef INET6
4599 case AF_INET6:
4600 {
4601 struct sockaddr_in6 *sin6;
4602
4603 sin6 = (struct sockaddr_in6 *)sa;
4604#if defined(__Userspace__)
4605 SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
4606 ntohs(sin6->sin6_addr.s6_addr16[0]),
4607 ntohs(sin6->sin6_addr.s6_addr16[1]),
4608 ntohs(sin6->sin6_addr.s6_addr16[2]),
4609 ntohs(sin6->sin6_addr.s6_addr16[3]),
4610 ntohs(sin6->sin6_addr.s6_addr16[4]),
4611 ntohs(sin6->sin6_addr.s6_addr16[5]),
4612 ntohs(sin6->sin6_addr.s6_addr16[6]),
4613 ntohs(sin6->sin6_addr.s6_addr16[7]),
4614 ntohs(sin6->sin6_port),
4615 sin6->sin6_scope_id);
4616#else
4617#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
4618 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4619 ip6_sprintf(ip6buf, &sin6->sin6_addr),
4620 ntohs(sin6->sin6_port),
4621 sin6->sin6_scope_id);
4622#else
4623 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4624 ip6_sprintf(&sin6->sin6_addr),
4625 ntohs(sin6->sin6_port),
4626 sin6->sin6_scope_id);
4627#endif
4628#endif
4629 break;
4630 }
4631#endif
4632#ifdef INET
4633 case AF_INET:
4634 {
4635 struct sockaddr_in *sin;
4636 unsigned char *p;
4637
4638 sin = (struct sockaddr_in *)sa;
4639 p = (unsigned char *)&sin->sin_addr;
4640 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4641 p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4642 break;
4643 }
4644#endif
4645#if defined(__Userspace__)
4646 case AF_CONN:
4647 {
4648 struct sockaddr_conn *sconn;
4649
4650 sconn = (struct sockaddr_conn *)sa;
4651 SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
4652 break;
4653 }
4654#endif
4655 default:
4656 SCTP_PRINTF("?\n");
4657 break;
4658 }
4659}
4660
4661void
4662sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4663 struct sctp_inpcb *new_inp,
4664 struct sctp_tcb *stcb,
4665 int waitflags)
4666{
4667 /*
4668 * go through our old INP and pull off any control structures that
4669 * belong to stcb and move then to the new inp.
4670 */
4671 struct socket *old_so, *new_so;
4672 struct sctp_queued_to_read *control, *nctl;
4673 struct sctp_readhead tmp_queue;
4674 struct mbuf *m;
4675 int error = 0;
4676
4677 old_so = old_inp->sctp_socket;
4678 new_so = new_inp->sctp_socket;
4679 TAILQ_INIT(&tmp_queue);
4680#if defined(__FreeBSD__) && __FreeBSD_version < 700000
4681 SOCKBUF_LOCK(&(old_so->so_rcv));
4682#endif
4683#if defined(__FreeBSD__) || defined(__APPLE__)
4684 error = sblock(&old_so->so_rcv, waitflags);
4685#endif
4686#if defined(__FreeBSD__) && __FreeBSD_version < 700000
4687 SOCKBUF_UNLOCK(&(old_so->so_rcv));
4688#endif
4689 if (error) {
4690 /* Gak, can't get sblock, we have a problem.
4691 * data will be left stranded.. and we
4692 * don't dare look at it since the
4693 * other thread may be reading something.
4694 * Oh well, its a screwed up app that does
4695 * a peeloff OR a accept while reading
4696 * from the main socket... actually its
4697 * only the peeloff() case, since I think
4698 * read will fail on a listening socket..
4699 */
4700 return;
4701 }
4702 /* lock the socket buffers */
4703 SCTP_INP_READ_LOCK(old_inp);
4704 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4705 /* Pull off all for out target stcb */
4706 if (control->stcb == stcb) {
4707 /* remove it we want it */
4708 TAILQ_REMOVE(&old_inp->read_queue, control, next);
4709 TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4710 m = control->data;
4711 while (m) {
4712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4713 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
4714 }
4715 sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4716 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4717 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4718 }
4719 m = SCTP_BUF_NEXT(m);
4720 }
4721 }
4722 }
4723 SCTP_INP_READ_UNLOCK(old_inp);
4724 /* Remove the sb-lock on the old socket */
4725#if defined(__FreeBSD__) && __FreeBSD_version < 700000
4726 SOCKBUF_LOCK(&(old_so->so_rcv));
4727#endif
4728#if defined(__APPLE__)
4729 sbunlock(&old_so->so_rcv, 1);
4730#endif
4731
4732#if defined(__FreeBSD__)
4733 sbunlock(&old_so->so_rcv);
4734#endif
4735#if defined(__FreeBSD__) && __FreeBSD_version < 700000
4736 SOCKBUF_UNLOCK(&(old_so->so_rcv));
4737#endif
4738 /* Now we move them over to the new socket buffer */
4739 SCTP_INP_READ_LOCK(new_inp);
4740 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4741 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4742 m = control->data;
4743 while (m) {
4744 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4745 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4746 }
4747 sctp_sballoc(stcb, &new_so->so_rcv, m);
4748 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4749 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4750 }
4751 m = SCTP_BUF_NEXT(m);
4752 }
4753 }
4754 SCTP_INP_READ_UNLOCK(new_inp);
4755}
4756
4757void
4758sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4759 struct sctp_tcb *stcb,
4760 int so_locked
4761#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4762 SCTP_UNUSED
4763#endif
4764)
4765{
4766 if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4767 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4768 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4769 } else {
4770#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4771 struct socket *so;
4772
4773 so = SCTP_INP_SO(inp);
4774 if (!so_locked) {
4775 if (stcb) {
4776 atomic_add_int(&stcb->asoc.refcnt, 1);
4777 SCTP_TCB_UNLOCK(stcb);
4778 }
4779 SCTP_SOCKET_LOCK(so, 1);
4780 if (stcb) {
4781 SCTP_TCB_LOCK(stcb);
4782 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4783 }
4784 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4785 SCTP_SOCKET_UNLOCK(so, 1);
4786 return;
4787 }
4788 }
4789#endif
4790 sctp_sorwakeup(inp, inp->sctp_socket);
4791#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4792 if (!so_locked) {
4793 SCTP_SOCKET_UNLOCK(so, 1);
4794 }
4795#endif
4796 }
4797 }
4798}
4799#if defined(__Userspace__)
4800
4801void
4802sctp_invoke_recv_callback(struct sctp_inpcb *inp,
4803 struct sctp_tcb *stcb,
4804 struct sctp_queued_to_read *control,
4805 int inp_read_lock_held)
4806{
4807 uint32_t pd_point, length;
4808
4809 if ((inp->recv_callback == NULL) ||
4810 (stcb == NULL) ||
4811 (stcb->sctp_socket == NULL)) {
4812 return;
4813 }
4814
4815 length = control->length;
4816 if (stcb != NULL && stcb->sctp_socket != NULL) {
4817 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
4818 stcb->sctp_ep->partial_delivery_point);
4819 } else {
4820 pd_point = inp->partial_delivery_point;
4821 }
4822 if ((control->end_added == 1) || (length >= pd_point)) {
4823 struct socket *so;
4824 struct mbuf *m;
4825 char *buffer;
4826 struct sctp_rcvinfo rcv;
4827 union sctp_sockstore addr;
4828 int flags;
4829
4830 if ((buffer = malloc(length)) == NULL) {
4831 return;
4832 }
4833 if (inp_read_lock_held == 0) {
4834 SCTP_INP_READ_LOCK(inp);
4835 }
4836 so = stcb->sctp_socket;
4837 for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
4838 sctp_sbfree(control, control->stcb, &so->so_rcv, m);
4839 }
4840 m_copydata(control->data, 0, length, buffer);
4841 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
4842 rcv.rcv_sid = control->sinfo_stream;
4843 rcv.rcv_ssn = (uint16_t)control->mid;
4844 rcv.rcv_flags = control->sinfo_flags;
4845 rcv.rcv_ppid = control->sinfo_ppid;
4846 rcv.rcv_tsn = control->sinfo_tsn;
4847 rcv.rcv_cumtsn = control->sinfo_cumtsn;
4848 rcv.rcv_context = control->sinfo_context;
4849 rcv.rcv_assoc_id = control->sinfo_assoc_id;
4850 memset(&addr, 0, sizeof(union sctp_sockstore));
4851 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
4852#ifdef INET
4853 case AF_INET:
4854 addr.sin = control->whoFrom->ro._l_addr.sin;
4855 break;
4856#endif
4857#ifdef INET6
4858 case AF_INET6:
4859 addr.sin6 = control->whoFrom->ro._l_addr.sin6;
4860 break;
4861#endif
4862 case AF_CONN:
4863 addr.sconn = control->whoFrom->ro._l_addr.sconn;
4864 break;
4865 default:
4866 addr.sa = control->whoFrom->ro._l_addr.sa;
4867 break;
4868 }
4869 flags = 0;
4870 if (control->end_added == 1) {
4871 flags |= MSG_EOR;
4872 }
4873 if (control->spec_flags & M_NOTIFICATION) {
4874 flags |= MSG_NOTIFICATION;
4875 }
4876 sctp_m_freem(control->data);
4877 control->data = NULL;
4878 control->tail_mbuf = NULL;
4879 control->length = 0;
4880 if (control->end_added) {
4881 TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
4882 control->on_read_q = 0;
4883 sctp_free_remote_addr(control->whoFrom);
4884 control->whoFrom = NULL;
4885 sctp_free_a_readq(stcb, control);
4886 }
4887 atomic_add_int(&stcb->asoc.refcnt, 1);
4888 SCTP_TCB_UNLOCK(stcb);
4889 if (inp_read_lock_held == 0) {
4890 SCTP_INP_READ_UNLOCK(inp);
4891 }
4892 inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
4893 SCTP_TCB_LOCK(stcb);
4894 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4895 }
4896}
4897#endif
4898
4899void
4900sctp_add_to_readq(struct sctp_inpcb *inp,
4901 struct sctp_tcb *stcb,
4902 struct sctp_queued_to_read *control,
4903 struct sockbuf *sb,
4904 int end,
4905 int inp_read_lock_held,
4906 int so_locked
4907#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4908 SCTP_UNUSED
4909#endif
4910 )
4911{
4912 /*
4913 * Here we must place the control on the end of the socket read
4914 * queue AND increment sb_cc so that select will work properly on
4915 * read.
4916 */
4917 struct mbuf *m, *prev = NULL;
4918
4919 if (inp == NULL) {
4920 /* Gak, TSNH!! */
4921#ifdef INVARIANTS
4922 panic("Gak, inp NULL on add_to_readq");
4923#endif
4924 return;
4925 }
4926#if defined(__APPLE__)
4927 if (so_locked) {
4928 sctp_lock_assert(SCTP_INP_SO(inp));
4929 } else {
4930 sctp_unlock_assert(SCTP_INP_SO(inp));
4931 }
4932#endif
4933 if (inp_read_lock_held == 0)
4934 SCTP_INP_READ_LOCK(inp);
4935 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4936 sctp_free_remote_addr(control->whoFrom);
4937 if (control->data) {
4938 sctp_m_freem(control->data);
4939 control->data = NULL;
4940 }
4941 sctp_free_a_readq(stcb, control);
4942 if (inp_read_lock_held == 0)
4943 SCTP_INP_READ_UNLOCK(inp);
4944 return;
4945 }
4946 if (!(control->spec_flags & M_NOTIFICATION)) {
4947 atomic_add_int(&inp->total_recvs, 1);
4948 if (!control->do_not_ref_stcb) {
4949 atomic_add_int(&stcb->total_recvs, 1);
4950 }
4951 }
4952 m = control->data;
4953 control->held_length = 0;
4954 control->length = 0;
4955 while (m) {
4956 if (SCTP_BUF_LEN(m) == 0) {
4957 /* Skip mbufs with NO length */
4958 if (prev == NULL) {
4959 /* First one */
4960 control->data = sctp_m_free(m);
4961 m = control->data;
4962 } else {
4963 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4964 m = SCTP_BUF_NEXT(prev);
4965 }
4966 if (m == NULL) {
4967 control->tail_mbuf = prev;
4968 }
4969 continue;
4970 }
4971 prev = m;
4972 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4973 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4974 }
4975 sctp_sballoc(stcb, sb, m);
4976 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4977 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
4978 }
4979 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4980 m = SCTP_BUF_NEXT(m);
4981 }
4982 if (prev != NULL) {
4983 control->tail_mbuf = prev;
4984 } else {
4985 /* Everything got collapsed out?? */
4986 sctp_free_remote_addr(control->whoFrom);
4987 sctp_free_a_readq(stcb, control);
4988 if (inp_read_lock_held == 0)
4989 SCTP_INP_READ_UNLOCK(inp);
4990 return;
4991 }
4992 if (end) {
4993 control->end_added = 1;
4994 }
4995 TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4996 control->on_read_q = 1;
4997 if (inp_read_lock_held == 0)
4998 SCTP_INP_READ_UNLOCK(inp);
4999#if defined(__Userspace__)
5000 sctp_invoke_recv_callback(inp, stcb, control, inp_read_lock_held);
5001#endif
5002 if (inp && inp->sctp_socket) {
5003 sctp_wakeup_the_read_socket(inp, stcb, so_locked);
5004 }
5005}
5006
5007/*************HOLD THIS COMMENT FOR PATCH FILE OF
5008 *************ALTERNATE ROUTING CODE
5009 */
5010
5011/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5012 *************ALTERNATE ROUTING CODE
5013 */
5014
5015struct mbuf *
5016sctp_generate_cause(uint16_t code, char *info)
5017{
5018 struct mbuf *m;
5019 struct sctp_gen_error_cause *cause;
5020 size_t info_len;
5021 uint16_t len;
5022
5023 if ((code == 0) || (info == NULL)) {
5024 return (NULL);
5025 }
5026 info_len = strlen(info);
5027 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
5028 return (NULL);
5029 }
5030 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
5031 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5032 if (m != NULL) {
5033 SCTP_BUF_LEN(m) = len;
5034 cause = mtod(m, struct sctp_gen_error_cause *);
5035 cause->code = htons(code);
5036 cause->length = htons(len);
5037 memcpy(cause->info, info, info_len);
5038 }
5039 return (m);
5040}
5041
5042struct mbuf *
5043sctp_generate_no_user_data_cause(uint32_t tsn)
5044{
5045 struct mbuf *m;
5046 struct sctp_error_no_user_data *no_user_data_cause;
5047 uint16_t len;
5048
5049 len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5050 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5051 if (m != NULL) {
5052 SCTP_BUF_LEN(m) = len;
5053 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5054 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5055 no_user_data_cause->cause.length = htons(len);
5056 no_user_data_cause->tsn = htonl(tsn);
5057 }
5058 return (m);
5059}
5060
5061#ifdef SCTP_MBCNT_LOGGING
5062void
5063sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5064 struct sctp_tmit_chunk *tp1, int chk_cnt)
5065{
5066 if (tp1->data == NULL) {
5067 return;
5068 }
5069 asoc->chunks_on_out_queue -= chk_cnt;
5070 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5071 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5072 asoc->total_output_queue_size,
5073 tp1->book_size,
5074 0,
5075 tp1->mbcnt);
5076 }
5077 if (asoc->total_output_queue_size >= tp1->book_size) {
5078 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5079 } else {
5080 asoc->total_output_queue_size = 0;
5081 }
5082
5083 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5084 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5085 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5086 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5087 } else {
5088 stcb->sctp_socket->so_snd.sb_cc = 0;
5089
5090 }
5091 }
5092}
5093
5094#endif
5095
5096int
5097sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
5098 uint8_t sent, int so_locked
5099#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5100 SCTP_UNUSED
5101#endif
5102 )
5103{
5104 struct sctp_stream_out *strq;
5105 struct sctp_tmit_chunk *chk = NULL, *tp2;
5106 struct sctp_stream_queue_pending *sp;
5107 uint32_t mid;
5108 uint16_t sid;
5109 uint8_t foundeom = 0;
5110 int ret_sz = 0;
5111 int notdone;
5112 int do_wakeup_routine = 0;
5113
5114#if defined(__APPLE__)
5115 if (so_locked) {
5116 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
5117 } else {
5118 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
5119 }
5120#endif
5121 sid = tp1->rec.data.sid;
5122 mid = tp1->rec.data.mid;
5123 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5124 stcb->asoc.abandoned_sent[0]++;
5125 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5126 stcb->asoc.strmout[sid].abandoned_sent[0]++;
5127#if defined(SCTP_DETAILED_STR_STATS)
5128 stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5129#endif
5130 } else {
5131 stcb->asoc.abandoned_unsent[0]++;
5132 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5133 stcb->asoc.strmout[sid].abandoned_unsent[0]++;
5134#if defined(SCTP_DETAILED_STR_STATS)
5135 stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5136#endif
5137 }
5138 do {
5139 ret_sz += tp1->book_size;
5140 if (tp1->data != NULL) {
5141 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5142 sctp_flight_size_decrease(tp1);
5143 sctp_total_flight_decrease(stcb, tp1);
5144 }
5145 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5146 stcb->asoc.peers_rwnd += tp1->send_size;
5147 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
5148 if (sent) {
5149 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5150 } else {
5151 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5152 }
5153 if (tp1->data) {
5154 sctp_m_freem(tp1->data);
5155 tp1->data = NULL;
5156 }
5157 do_wakeup_routine = 1;
5158 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5159 stcb->asoc.sent_queue_cnt_removeable--;
5160 }
5161 }
5162 tp1->sent = SCTP_FORWARD_TSN_SKIP;
5163 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
5164 SCTP_DATA_NOT_FRAG) {
5165 /* not frag'ed we ae done */
5166 notdone = 0;
5167 foundeom = 1;
5168 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5169 /* end of frag, we are done */
5170 notdone = 0;
5171 foundeom = 1;
5172 } else {
5173 /*
5174 * Its a begin or middle piece, we must mark all of
5175 * it
5176 */
5177 notdone = 1;
5178 tp1 = TAILQ_NEXT(tp1, sctp_next);
5179 }
5180 } while (tp1 && notdone);
5181 if (foundeom == 0) {
5182 /*
5183 * The multi-part message was scattered across the send and
5184 * sent queue.
5185 */
5186 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
5187 if ((tp1->rec.data.sid != sid) ||
5188 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
5189 break;
5190 }
5191 /* save to chk in case we have some on stream out
5192 * queue. If so and we have an un-transmitted one
5193 * we don't have to fudge the TSN.
5194 */
5195 chk = tp1;
5196 ret_sz += tp1->book_size;
5197 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5198 if (sent) {
5199 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5200 } else {
5201 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5202 }
5203 if (tp1->data) {
5204 sctp_m_freem(tp1->data);
5205 tp1->data = NULL;
5206 }
5207 /* No flight involved here book the size to 0 */
5208 tp1->book_size = 0;
5209 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5210 foundeom = 1;
5211 }
5212 do_wakeup_routine = 1;
5213 tp1->sent = SCTP_FORWARD_TSN_SKIP;
5214 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
5215 /* on to the sent queue so we can wait for it to be passed by. */
5216 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
5217 sctp_next);
5218 stcb->asoc.send_queue_cnt--;
5219 stcb->asoc.sent_queue_cnt++;
5220 }
5221 }
5222 if (foundeom == 0) {
5223 /*
5224 * Still no eom found. That means there
5225 * is stuff left on the stream out queue.. yuck.
5226 */
5227 SCTP_TCB_SEND_LOCK(stcb);
5228 strq = &stcb->asoc.strmout[sid];
5229 sp = TAILQ_FIRST(&strq->outqueue);
5230 if (sp != NULL) {
5231 sp->discard_rest = 1;
5232 /*
5233 * We may need to put a chunk on the
5234 * queue that holds the TSN that
5235 * would have been sent with the LAST
5236 * bit.
5237 */
5238 if (chk == NULL) {
5239 /* Yep, we have to */
5240 sctp_alloc_a_chunk(stcb, chk);
5241 if (chk == NULL) {
5242 /* we are hosed. All we can
5243 * do is nothing.. which will
5244 * cause an abort if the peer is
5245 * paying attention.
5246 */
5247 goto oh_well;
5248 }
5249 memset(chk, 0, sizeof(*chk));
5250 chk->rec.data.rcv_flags = 0;
5251 chk->sent = SCTP_FORWARD_TSN_SKIP;
5252 chk->asoc = &stcb->asoc;
5253 if (stcb->asoc.idata_supported == 0) {
5254 if (sp->sinfo_flags & SCTP_UNORDERED) {
5255 chk->rec.data.mid = 0;
5256 } else {
5257 chk->rec.data.mid = strq->next_mid_ordered;
5258 }
5259 } else {
5260 if (sp->sinfo_flags & SCTP_UNORDERED) {
5261 chk->rec.data.mid = strq->next_mid_unordered;
5262 } else {
5263 chk->rec.data.mid = strq->next_mid_ordered;
5264 }
5265 }
5266 chk->rec.data.sid = sp->sid;
5267 chk->rec.data.ppid = sp->ppid;
5268 chk->rec.data.context = sp->context;
5269 chk->flags = sp->act_flags;
5270 chk->whoTo = NULL;
5271#if defined(__FreeBSD__) || defined(__Panda__)
5272 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
5273#else
5274 chk->rec.data.tsn = stcb->asoc.sending_seq++;
5275#endif
5276 strq->chunks_on_queues++;
5277 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
5278 stcb->asoc.sent_queue_cnt++;
5279 stcb->asoc.pr_sctp_cnt++;
5280 }
5281 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
5282 if (sp->sinfo_flags & SCTP_UNORDERED) {
5283 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
5284 }
5285 if (stcb->asoc.idata_supported == 0) {
5286 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
5287 strq->next_mid_ordered++;
5288 }
5289 } else {
5290 if (sp->sinfo_flags & SCTP_UNORDERED) {
5291 strq->next_mid_unordered++;
5292 } else {
5293 strq->next_mid_ordered++;
5294 }
5295 }
5296 oh_well:
5297 if (sp->data) {
5298 /* Pull any data to free up the SB and
5299 * allow sender to "add more" while we
5300 * will throw away :-)
5301 */
5302 sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5303 ret_sz += sp->length;
5304 do_wakeup_routine = 1;
5305 sp->some_taken = 1;
5306 sctp_m_freem(sp->data);
5307 sp->data = NULL;
5308 sp->tail_mbuf = NULL;
5309 sp->length = 0;
5310 }
5311 }
5312 SCTP_TCB_SEND_UNLOCK(stcb);
5313 }
5314 if (do_wakeup_routine) {
5315#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5316 struct socket *so;
5317
5318 so = SCTP_INP_SO(stcb->sctp_ep);
5319 if (!so_locked) {
5320 atomic_add_int(&stcb->asoc.refcnt, 1);
5321 SCTP_TCB_UNLOCK(stcb);
5322 SCTP_SOCKET_LOCK(so, 1);
5323 SCTP_TCB_LOCK(stcb);
5324 atomic_subtract_int(&stcb->asoc.refcnt, 1);
5325 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5326 /* assoc was freed while we were unlocked */
5327 SCTP_SOCKET_UNLOCK(so, 1);
5328 return (ret_sz);
5329 }
5330 }
5331#endif
5332 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5333#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5334 if (!so_locked) {
5335 SCTP_SOCKET_UNLOCK(so, 1);
5336 }
5337#endif
5338 }
5339 return (ret_sz);
5340}
5341
5342/*
5343 * checks to see if the given address, sa, is one that is currently known by
5344 * the kernel note: can't distinguish the same address on multiple interfaces
5345 * and doesn't handle multiple addresses with different zone/scope id's note:
5346 * ifa_ifwithaddr() compares the entire sockaddr struct
5347 */
5348struct sctp_ifa *
5349sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5350 int holds_lock)
5351{
5352 struct sctp_laddr *laddr;
5353
5354 if (holds_lock == 0) {
5355 SCTP_INP_RLOCK(inp);
5356 }
5357
5358 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5359 if (laddr->ifa == NULL)
5360 continue;
5361 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5362 continue;
5363#ifdef INET
5364 if (addr->sa_family == AF_INET) {
5365 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5366 laddr->ifa->address.sin.sin_addr.s_addr) {
5367 /* found him. */
5368 if (holds_lock == 0) {
5369 SCTP_INP_RUNLOCK(inp);
5370 }
5371 return (laddr->ifa);
5372 break;
5373 }
5374 }
5375#endif
5376#ifdef INET6
5377 if (addr->sa_family == AF_INET6) {
5378 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5379 &laddr->ifa->address.sin6)) {
5380 /* found him. */
5381 if (holds_lock == 0) {
5382 SCTP_INP_RUNLOCK(inp);
5383 }
5384 return (laddr->ifa);
5385 break;
5386 }
5387 }
5388#endif
5389#if defined(__Userspace__)
5390 if (addr->sa_family == AF_CONN) {
5391 if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
5392 /* found him. */
5393 if (holds_lock == 0) {
5394 SCTP_INP_RUNLOCK(inp);
5395 }
5396 return (laddr->ifa);
5397 break;
5398 }
5399 }
5400#endif
5401 }
5402 if (holds_lock == 0) {
5403 SCTP_INP_RUNLOCK(inp);
5404 }
5405 return (NULL);
5406}
5407
5408uint32_t
5409sctp_get_ifa_hash_val(struct sockaddr *addr)
5410{
5411 switch (addr->sa_family) {
5412#ifdef INET
5413 case AF_INET:
5414 {
5415 struct sockaddr_in *sin;
5416
5417 sin = (struct sockaddr_in *)addr;
5418 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5419 }
5420#endif
5421#ifdef INET6
5422 case AF_INET6:
5423 {
5424 struct sockaddr_in6 *sin6;
5425 uint32_t hash_of_addr;
5426
5427 sin6 = (struct sockaddr_in6 *)addr;
5428#if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows)
5429 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5430 sin6->sin6_addr.s6_addr32[1] +
5431 sin6->sin6_addr.s6_addr32[2] +
5432 sin6->sin6_addr.s6_addr32[3]);
5433#else
5434 hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
5435 ((uint32_t *)&sin6->sin6_addr)[1] +
5436 ((uint32_t *)&sin6->sin6_addr)[2] +
5437 ((uint32_t *)&sin6->sin6_addr)[3]);
5438#endif
5439 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5440 return (hash_of_addr);
5441 }
5442#endif
5443#if defined(__Userspace__)
5444 case AF_CONN:
5445 {
5446 struct sockaddr_conn *sconn;
5447 uintptr_t temp;
5448
5449 sconn = (struct sockaddr_conn *)addr;
5450 temp = (uintptr_t)sconn->sconn_addr;
5451 return ((uint32_t)(temp ^ (temp >> 16)));
5452 }
5453#endif
5454 default:
5455 break;
5456 }
5457 return (0);
5458}
5459
5460struct sctp_ifa *
5461sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5462{
5463 struct sctp_ifa *sctp_ifap;
5464 struct sctp_vrf *vrf;
5465 struct sctp_ifalist *hash_head;
5466 uint32_t hash_of_addr;
5467
5468 if (holds_lock == 0)
5469 SCTP_IPI_ADDR_RLOCK();
5470
5471 vrf = sctp_find_vrf(vrf_id);
5472 if (vrf == NULL) {
5473 if (holds_lock == 0)
5474 SCTP_IPI_ADDR_RUNLOCK();
5475 return (NULL);
5476 }
5477
5478 hash_of_addr = sctp_get_ifa_hash_val(addr);
5479
5480 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5481 if (hash_head == NULL) {
5482 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5483 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5484 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5485 sctp_print_address(addr);
5486 SCTP_PRINTF("No such bucket for address\n");
5487 if (holds_lock == 0)
5488 SCTP_IPI_ADDR_RUNLOCK();
5489
5490 return (NULL);
5491 }
5492 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5493 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5494 continue;
5495#ifdef INET
5496 if (addr->sa_family == AF_INET) {
5497 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5498 sctp_ifap->address.sin.sin_addr.s_addr) {
5499 /* found him. */
5500 if (holds_lock == 0)
5501 SCTP_IPI_ADDR_RUNLOCK();
5502 return (sctp_ifap);
5503 break;
5504 }
5505 }
5506#endif
5507#ifdef INET6
5508 if (addr->sa_family == AF_INET6) {
5509 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5510 &sctp_ifap->address.sin6)) {
5511 /* found him. */
5512 if (holds_lock == 0)
5513 SCTP_IPI_ADDR_RUNLOCK();
5514 return (sctp_ifap);
5515 break;
5516 }
5517 }
5518#endif
5519#if defined(__Userspace__)
5520 if (addr->sa_family == AF_CONN) {
5521 if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
5522 /* found him. */
5523 if (holds_lock == 0)
5524 SCTP_IPI_ADDR_RUNLOCK();
5525 return (sctp_ifap);
5526 break;
5527 }
5528 }
5529#endif
5530 }
5531 if (holds_lock == 0)
5532 SCTP_IPI_ADDR_RUNLOCK();
5533 return (NULL);
5534}
5535
5536static void
5537sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5538 uint32_t rwnd_req)
5539{
5540 /* User pulled some data, do we need a rwnd update? */
5541 int r_unlocked = 0;
5542 uint32_t dif, rwnd;
5543 struct socket *so = NULL;
5544
5545 if (stcb == NULL)
5546 return;
5547
5548 atomic_add_int(&stcb->asoc.refcnt, 1);
5549
5550 if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5551 SCTP_STATE_SHUTDOWN_RECEIVED |
5552 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5553 /* Pre-check If we are freeing no update */
5554 goto no_lock;
5555 }
5556 SCTP_INP_INCR_REF(stcb->sctp_ep);
5557 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5558 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5559 goto out;
5560 }
5561 so = stcb->sctp_socket;
5562 if (so == NULL) {
5563 goto out;
5564 }
5565 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5566 /* Have you have freed enough to look */
5567 *freed_so_far = 0;
5568 /* Yep, its worth a look and the lock overhead */
5569
5570 /* Figure out what the rwnd would be */
5571 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5572 if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5573 dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5574 } else {
5575 dif = 0;
5576 }
5577 if (dif >= rwnd_req) {
5578 if (hold_rlock) {
5579 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5580 r_unlocked = 1;
5581 }
5582 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5583 /*
5584 * One last check before we allow the guy possibly
5585 * to get in. There is a race, where the guy has not
5586 * reached the gate. In that case
5587 */
5588 goto out;
5589 }
5590 SCTP_TCB_LOCK(stcb);
5591 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5592 /* No reports here */
5593 SCTP_TCB_UNLOCK(stcb);
5594 goto out;
5595 }
5596 SCTP_STAT_INCR(sctps_wu_sacks_sent);
5597 sctp_send_sack(stcb, SCTP_SO_LOCKED);
5598
5599 sctp_chunk_output(stcb->sctp_ep, stcb,
5600 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5601 /* make sure no timer is running */
5602 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5603 SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5604 SCTP_TCB_UNLOCK(stcb);
5605 } else {
5606 /* Update how much we have pending */
5607 stcb->freed_by_sorcv_sincelast = dif;
5608 }
5609 out:
5610 if (so && r_unlocked && hold_rlock) {
5611 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5612 }
5613
5614 SCTP_INP_DECR_REF(stcb->sctp_ep);
5615 no_lock:
5616 atomic_add_int(&stcb->asoc.refcnt, -1);
5617 return;
5618}
5619
5620int
5621sctp_sorecvmsg(struct socket *so,
5622 struct uio *uio,
5623 struct mbuf **mp,
5624 struct sockaddr *from,
5625 int fromlen,
5626 int *msg_flags,
5627 struct sctp_sndrcvinfo *sinfo,
5628 int filling_sinfo)
5629{
5630 /*
5631 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5632 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5633 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5634 * On the way out we may send out any combination of:
5635 * MSG_NOTIFICATION MSG_EOR
5636 *
5637 */
5638 struct sctp_inpcb *inp = NULL;
5639 int my_len = 0;
5640 int cp_len = 0, error = 0;
5641 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5642 struct mbuf *m = NULL;
5643 struct sctp_tcb *stcb = NULL;
5644 int wakeup_read_socket = 0;
5645 int freecnt_applied = 0;
5646 int out_flags = 0, in_flags = 0;
5647 int block_allowed = 1;
5648 uint32_t freed_so_far = 0;
5649 uint32_t copied_so_far = 0;
5650 int in_eeor_mode = 0;
5651 int no_rcv_needed = 0;
5652 uint32_t rwnd_req = 0;
5653 int hold_sblock = 0;
5654 int hold_rlock = 0;
5655 ssize_t slen = 0;
5656 uint32_t held_length = 0;
5657#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
5658 int sockbuf_lock = 0;
5659#endif
5660
5661 if (uio == NULL) {
5662 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5663 return (EINVAL);
5664 }
5665
5666 if (msg_flags) {
5667 in_flags = *msg_flags;
5668 if (in_flags & MSG_PEEK)
5669 SCTP_STAT_INCR(sctps_read_peeks);
5670 } else {
5671 in_flags = 0;
5672 }
5673#if defined(__APPLE__)
5674#if defined(APPLE_LEOPARD)
5675 slen = uio->uio_resid;
5676#else
5677 slen = uio_resid(uio);
5678#endif
5679#else
5680 slen = uio->uio_resid;
5681#endif
5682
5683 /* Pull in and set up our int flags */
5684 if (in_flags & MSG_OOB) {
5685 /* Out of band's NOT supported */
5686 return (EOPNOTSUPP);
5687 }
5688 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5689 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5690 return (EINVAL);
5691 }
5692 if ((in_flags & (MSG_DONTWAIT
5693#if defined(__FreeBSD__) && __FreeBSD_version > 500000
5694 | MSG_NBIO
5695#endif
5696 )) ||
5697 SCTP_SO_IS_NBIO(so)) {
5698 block_allowed = 0;
5699 }
5700 /* setup the endpoint */
5701 inp = (struct sctp_inpcb *)so->so_pcb;
5702 if (inp == NULL) {
5703 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5704 return (EFAULT);
5705 }
5706 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5707 /* Must be at least a MTU's worth */
5708 if (rwnd_req < SCTP_MIN_RWND)
5709 rwnd_req = SCTP_MIN_RWND;
5710 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5711 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5712#if defined(__APPLE__)
5713#if defined(APPLE_LEOPARD)
5714 sctp_misc_ints(SCTP_SORECV_ENTER,
5715 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5716#else
5717 sctp_misc_ints(SCTP_SORECV_ENTER,
5718 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
5719#endif
5720#else
5721 sctp_misc_ints(SCTP_SORECV_ENTER,
5722 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5723#endif
5724 }
5725#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5726 SOCKBUF_LOCK(&so->so_rcv);
5727 hold_sblock = 1;
5728#endif
5729 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
5730#if defined(__APPLE__)
5731#if defined(APPLE_LEOPARD)
5732 sctp_misc_ints(SCTP_SORECV_ENTERPL,
5733 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5734#else
5735 sctp_misc_ints(SCTP_SORECV_ENTERPL,
5736 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
5737#endif
5738#else
5739 sctp_misc_ints(SCTP_SORECV_ENTERPL,
5740 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5741#endif
5742 }
5743
5744#if defined(__APPLE__)
5745 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5746#endif
5747
5748#if defined(__FreeBSD__)
5749 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5750#endif
5751 if (error) {
5752 goto release_unlocked;
5753 }
5754#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
5755 sockbuf_lock = 1;
5756#endif
5757 restart:
5758#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
5759 if (hold_sblock == 0) {
5760 SOCKBUF_LOCK(&so->so_rcv);
5761 hold_sblock = 1;
5762 }
5763#endif
5764#if defined(__APPLE__)
5765 sbunlock(&so->so_rcv, 1);
5766#endif
5767
5768#if defined(__FreeBSD__) && __FreeBSD_version < 700000
5769 sbunlock(&so->so_rcv);
5770#endif
5771
5772 restart_nosblocks:
5773 if (hold_sblock == 0) {
5774 SOCKBUF_LOCK(&so->so_rcv);
5775 hold_sblock = 1;
5776 }
5777 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5778 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5779 goto out;
5780 }
5781#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
5782 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5783#else
5784 if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5785#endif
5786 if (so->so_error) {
5787 error = so->so_error;
5788 if ((in_flags & MSG_PEEK) == 0)
5789 so->so_error = 0;
5790 goto out;
5791 } else {
5792 if (so->so_rcv.sb_cc == 0) {
5793 /* indicate EOF */
5794 error = 0;
5795 goto out;
5796 }
5797 }
5798 }
5799 if (so->so_rcv.sb_cc <= held_length) {
5800 if (so->so_error) {
5801 error = so->so_error;
5802 if ((in_flags & MSG_PEEK) == 0) {
5803 so->so_error = 0;
5804 }
5805 goto out;
5806 }
5807 if ((so->so_rcv.sb_cc == 0) &&
5808 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5809 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5810 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5811 /* For active open side clear flags for re-use
5812 * passive open is blocked by connect.
5813 */
5814 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5815 /* You were aborted, passive side always hits here */
5816 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5817 error = ECONNRESET;
5818 }
5819 so->so_state &= ~(SS_ISCONNECTING |
5820 SS_ISDISCONNECTING |
5821 SS_ISCONFIRMING |
5822 SS_ISCONNECTED);
5823 if (error == 0) {
5824 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5825 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5826 error = ENOTCONN;
5827 }
5828 }
5829 goto out;
5830 }
5831 }
5832 if (block_allowed) {
5833 error = sbwait(&so->so_rcv);
5834 if (error) {
5835 goto out;
5836 }
5837 held_length = 0;
5838 goto restart_nosblocks;
5839 } else {
5840 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5841 error = EWOULDBLOCK;
5842 goto out;
5843 }
5844 }
5845 if (hold_sblock == 1) {
5846 SOCKBUF_UNLOCK(&so->so_rcv);
5847 hold_sblock = 0;
5848 }
5849#if defined(__APPLE__)
5850 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
5851#endif
5852#if defined(__FreeBSD__) && __FreeBSD_version < 700000
5853 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
5854#endif
5855 /* we possibly have data we can read */
5856 /*sa_ignore FREED_MEMORY*/
5857 control = TAILQ_FIRST(&inp->read_queue);
5858 if (control == NULL) {
5859 /* This could be happening since
5860 * the appender did the increment but as not
5861 * yet did the tailq insert onto the read_queue
5862 */
5863 if (hold_rlock == 0) {
5864 SCTP_INP_READ_LOCK(inp);
5865 }
5866 control = TAILQ_FIRST(&inp->read_queue);
5867 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5868#ifdef INVARIANTS
5869 panic("Huh, its non zero and nothing on control?");
5870#endif
5871 so->so_rcv.sb_cc = 0;
5872 }
5873 SCTP_INP_READ_UNLOCK(inp);
5874 hold_rlock = 0;
5875 goto restart;
5876 }
5877
5878 if ((control->length == 0) &&
5879 (control->do_not_ref_stcb)) {
5880 /* Clean up code for freeing assoc that left behind a pdapi..
5881 * maybe a peer in EEOR that just closed after sending and
5882 * never indicated a EOR.
5883 */
5884 if (hold_rlock == 0) {
5885 hold_rlock = 1;
5886 SCTP_INP_READ_LOCK(inp);
5887 }
5888 control->held_length = 0;
5889 if (control->data) {
5890 /* Hmm there is data here .. fix */
5891 struct mbuf *m_tmp;
5892 int cnt = 0;
5893 m_tmp = control->data;
5894 while (m_tmp) {
5895 cnt += SCTP_BUF_LEN(m_tmp);
5896 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5897 control->tail_mbuf = m_tmp;
5898 control->end_added = 1;
5899 }
5900 m_tmp = SCTP_BUF_NEXT(m_tmp);
5901 }
5902 control->length = cnt;
5903 } else {
5904 /* remove it */
5905 TAILQ_REMOVE(&inp->read_queue, control, next);
5906 /* Add back any hiddend data */
5907 sctp_free_remote_addr(control->whoFrom);
5908 sctp_free_a_readq(stcb, control);
5909 }
5910 if (hold_rlock) {
5911 hold_rlock = 0;
5912 SCTP_INP_READ_UNLOCK(inp);
5913 }
5914 goto restart;
5915 }
5916 if ((control->length == 0) &&
5917 (control->end_added == 1)) {
5918 /* Do we also need to check for (control->pdapi_aborted == 1)? */
5919 if (hold_rlock == 0) {
5920 hold_rlock = 1;
5921 SCTP_INP_READ_LOCK(inp);
5922 }
5923 TAILQ_REMOVE(&inp->read_queue, control, next);
5924 if (control->data) {
5925#ifdef INVARIANTS
5926 panic("control->data not null but control->length == 0");
5927#else
5928 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5929 sctp_m_freem(control->data);
5930 control->data = NULL;
5931#endif
5932 }
5933 if (control->aux_data) {
5934 sctp_m_free (control->aux_data);
5935 control->aux_data = NULL;
5936 }
5937#ifdef INVARIANTS
5938 if (control->on_strm_q) {
5939 panic("About to free ctl:%p so:%p and its in %d",
5940 control, so, control->on_strm_q);
5941 }
5942#endif
5943 sctp_free_remote_addr(control->whoFrom);
5944 sctp_free_a_readq(stcb, control);
5945 if (hold_rlock) {
5946 hold_rlock = 0;
5947 SCTP_INP_READ_UNLOCK(inp);
5948 }
5949 goto restart;
5950 }
5951 if (control->length == 0) {
5952 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5953 (filling_sinfo)) {
5954 /* find a more suitable one then this */
5955 ctl = TAILQ_NEXT(control, next);
5956 while (ctl) {
5957 if ((ctl->stcb != control->stcb) && (ctl->length) &&
5958 (ctl->some_taken ||
5959 (ctl->spec_flags & M_NOTIFICATION) ||
5960 ((ctl->do_not_ref_stcb == 0) &&
5961 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5962 ) {
5963 /*-
5964 * If we have a different TCB next, and there is data
5965 * present. If we have already taken some (pdapi), OR we can
5966 * ref the tcb and no delivery as started on this stream, we
5967 * take it. Note we allow a notification on a different
5968 * assoc to be delivered..
5969 */
5970 control = ctl;
5971 goto found_one;
5972 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5973 (ctl->length) &&
5974 ((ctl->some_taken) ||
5975 ((ctl->do_not_ref_stcb == 0) &&
5976 ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5977 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5978 /*-
5979 * If we have the same tcb, and there is data present, and we
5980 * have the strm interleave feature present. Then if we have
5981 * taken some (pdapi) or we can refer to tht tcb AND we have
5982 * not started a delivery for this stream, we can take it.
5983 * Note we do NOT allow a notificaiton on the same assoc to
5984 * be delivered.
5985 */
5986 control = ctl;
5987 goto found_one;
5988 }
5989 ctl = TAILQ_NEXT(ctl, next);
5990 }
5991 }
5992 /*
5993 * if we reach here, not suitable replacement is available
5994 * <or> fragment interleave is NOT on. So stuff the sb_cc
5995 * into the our held count, and its time to sleep again.
5996 */
5997 held_length = so->so_rcv.sb_cc;
5998 control->held_length = so->so_rcv.sb_cc;
5999 goto restart;
6000 }
6001 /* Clear the held length since there is something to read */
6002 control->held_length = 0;
6003 found_one:
6004 /*
6005 * If we reach here, control has a some data for us to read off.
6006 * Note that stcb COULD be NULL.
6007 */
6008 if (hold_rlock == 0) {
6009 hold_rlock = 1;
6010 SCTP_INP_READ_LOCK(inp);
6011 }
6012 control->some_taken++;
6013 stcb = control->stcb;
6014 if (stcb) {
6015 if ((control->do_not_ref_stcb == 0) &&
6016 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
6017 if (freecnt_applied == 0)
6018 stcb = NULL;
6019 } else if (control->do_not_ref_stcb == 0) {
6020 /* you can't free it on me please */
6021 /*
6022 * The lock on the socket buffer protects us so the
6023 * free code will stop. But since we used the socketbuf
6024 * lock and the sender uses the tcb_lock to increment,
6025 * we need to use the atomic add to the refcnt
6026 */
6027 if (freecnt_applied) {
6028#ifdef INVARIANTS
6029 panic("refcnt already incremented");
6030#else
6031 SCTP_PRINTF("refcnt already incremented?\n");
6032#endif
6033 } else {
6034 atomic_add_int(&stcb->asoc.refcnt, 1);
6035 freecnt_applied = 1;
6036 }
6037 /*
6038 * Setup to remember how much we have not yet told
6039 * the peer our rwnd has opened up. Note we grab
6040 * the value from the tcb from last time.
6041 * Note too that sack sending clears this when a sack
6042 * is sent, which is fine. Once we hit the rwnd_req,
6043 * we then will go to the sctp_user_rcvd() that will
6044 * not lock until it KNOWs it MUST send a WUP-SACK.
6045 */
6046 freed_so_far = stcb->freed_by_sorcv_sincelast;
6047 stcb->freed_by_sorcv_sincelast = 0;
6048 }
6049 }
6050 if (stcb &&
6051 ((control->spec_flags & M_NOTIFICATION) == 0) &&
6052 control->do_not_ref_stcb == 0) {
6053 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6054 }
6055
6056 /* First lets get off the sinfo and sockaddr info */
6057 if ((sinfo != NULL) && (filling_sinfo != 0)) {
6058 sinfo->sinfo_stream = control->sinfo_stream;
6059 sinfo->sinfo_ssn = (uint16_t)control->mid;
6060 sinfo->sinfo_flags = control->sinfo_flags;
6061 sinfo->sinfo_ppid = control->sinfo_ppid;
6062 sinfo->sinfo_context =control->sinfo_context;
6063 sinfo->sinfo_timetolive = control->sinfo_timetolive;
6064 sinfo->sinfo_tsn = control->sinfo_tsn;
6065 sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
6066 sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
6067 nxt = TAILQ_NEXT(control, next);
6068 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6069 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6070 struct sctp_extrcvinfo *s_extra;
6071 s_extra = (struct sctp_extrcvinfo *)sinfo;
6072 if ((nxt) &&
6073 (nxt->length)) {
6074 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6075 if (nxt->sinfo_flags & SCTP_UNORDERED) {
6076 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6077 }
6078 if (nxt->spec_flags & M_NOTIFICATION) {
6079 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6080 }
6081 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
6082 s_extra->serinfo_next_length = nxt->length;
6083 s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
6084 s_extra->serinfo_next_stream = nxt->sinfo_stream;
6085 if (nxt->tail_mbuf != NULL) {
6086 if (nxt->end_added) {
6087 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6088 }
6089 }
6090 } else {
6091 /* we explicitly 0 this, since the memcpy got
6092 * some other things beyond the older sinfo_
6093 * that is on the control's structure :-D
6094 */
6095 nxt = NULL;
6096 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6097 s_extra->serinfo_next_aid = 0;
6098 s_extra->serinfo_next_length = 0;
6099 s_extra->serinfo_next_ppid = 0;
6100 s_extra->serinfo_next_stream = 0;
6101 }
6102 }
6103 /*
6104 * update off the real current cum-ack, if we have an stcb.
6105 */
6106 if ((control->do_not_ref_stcb == 0) && stcb)
6107 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6108 /*
6109 * mask off the high bits, we keep the actual chunk bits in
6110 * there.
6111 */
6112 sinfo->sinfo_flags &= 0x00ff;
6113 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6114 sinfo->sinfo_flags |= SCTP_UNORDERED;
6115 }
6116 }
6117#ifdef SCTP_ASOCLOG_OF_TSNS
6118 {
6119 int index, newindex;
6120 struct sctp_pcbtsn_rlog *entry;
6121 do {
6122 index = inp->readlog_index;
6123 newindex = index + 1;
6124 if (newindex >= SCTP_READ_LOG_SIZE) {
6125 newindex = 0;
6126 }
6127 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6128 entry = &inp->readlog[index];
6129 entry->vtag = control->sinfo_assoc_id;
6130 entry->strm = control->sinfo_stream;
6131 entry->seq = (uint16_t)control->mid;
6132 entry->sz = control->length;
6133 entry->flgs = control->sinfo_flags;
6134 }
6135#endif
6136 if ((fromlen > 0) && (from != NULL)) {
6137 union sctp_sockstore store;
6138 size_t len;
6139
6140 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6141#ifdef INET6
6142 case AF_INET6:
6143 len = sizeof(struct sockaddr_in6);
6144 store.sin6 = control->whoFrom->ro._l_addr.sin6;
6145 store.sin6.sin6_port = control->port_from;
6146 break;
6147#endif
6148#ifdef INET
6149 case AF_INET:
6150#ifdef INET6
6151 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6152 len = sizeof(struct sockaddr_in6);
6153 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6154 &store.sin6);
6155 store.sin6.sin6_port = control->port_from;
6156 } else {
6157 len = sizeof(struct sockaddr_in);
6158 store.sin = control->whoFrom->ro._l_addr.sin;
6159 store.sin.sin_port = control->port_from;
6160 }
6161#else
6162 len = sizeof(struct sockaddr_in);
6163 store.sin = control->whoFrom->ro._l_addr.sin;
6164 store.sin.sin_port = control->port_from;
6165#endif
6166 break;
6167#endif
6168#if defined(__Userspace__)
6169 case AF_CONN:
6170 len = sizeof(struct sockaddr_conn);
6171 store.sconn = control->whoFrom->ro._l_addr.sconn;
6172 store.sconn.sconn_port = control->port_from;
6173 break;
6174#endif
6175 default:
6176 len = 0;
6177 break;
6178 }
6179 memcpy(from, &store, min((size_t)fromlen, len));
6180#if defined(SCTP_EMBEDDED_V6_SCOPE)
6181#ifdef INET6
6182 {
6183 struct sockaddr_in6 lsa6, *from6;
6184
6185 from6 = (struct sockaddr_in6 *)from;
6186 sctp_recover_scope_mac(from6, (&lsa6));
6187 }
6188#endif
6189#endif
6190 }
6191 if (hold_rlock) {
6192 SCTP_INP_READ_UNLOCK(inp);
6193 hold_rlock = 0;
6194 }
6195 if (hold_sblock) {
6196 SOCKBUF_UNLOCK(&so->so_rcv);
6197 hold_sblock = 0;
6198 }
6199 /* now copy out what data we can */
6200 if (mp == NULL) {
6201 /* copy out each mbuf in the chain up to length */
6202 get_more_data:
6203 m = control->data;
6204 while (m) {
6205 /* Move out all we can */
6206#if defined(__APPLE__)
6207#if defined(APPLE_LEOPARD)
6208 cp_len = (int)uio->uio_resid;
6209#else
6210 cp_len = (int)uio_resid(uio);
6211#endif
6212#else
6213 cp_len = (int)uio->uio_resid;
6214#endif
6215 my_len = (int)SCTP_BUF_LEN(m);
6216 if (cp_len > my_len) {
6217 /* not enough in this buf */
6218 cp_len = my_len;
6219 }
6220 if (hold_rlock) {
6221 SCTP_INP_READ_UNLOCK(inp);
6222 hold_rlock = 0;
6223 }
6224#if defined(__APPLE__)
6225 SCTP_SOCKET_UNLOCK(so, 0);
6226#endif
6227 if (cp_len > 0)
6228 error = uiomove(mtod(m, char *), cp_len, uio);
6229#if defined(__APPLE__)
6230 SCTP_SOCKET_LOCK(so, 0);
6231#endif
6232 /* re-read */
6233 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6234 goto release;
6235 }
6236
6237 if ((control->do_not_ref_stcb == 0) && stcb &&
6238 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6239 no_rcv_needed = 1;
6240 }
6241 if (error) {
6242 /* error we are out of here */
6243 goto release;
6244 }
6245 SCTP_INP_READ_LOCK(inp);
6246 hold_rlock = 1;
6247 if (cp_len == SCTP_BUF_LEN(m)) {
6248 if ((SCTP_BUF_NEXT(m)== NULL) &&
6249 (control->end_added)) {
6250 out_flags |= MSG_EOR;
6251 if ((control->do_not_ref_stcb == 0) &&
6252 (control->stcb != NULL) &&
6253 ((control->spec_flags & M_NOTIFICATION) == 0))
6254 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6255 }
6256 if (control->spec_flags & M_NOTIFICATION) {
6257 out_flags |= MSG_NOTIFICATION;
6258 }
6259 /* we ate up the mbuf */
6260 if (in_flags & MSG_PEEK) {
6261 /* just looking */
6262 m = SCTP_BUF_NEXT(m);
6263 copied_so_far += cp_len;
6264 } else {
6265 /* dispose of the mbuf */
6266 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6267 sctp_sblog(&so->so_rcv,
6268 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6269 }
6270 sctp_sbfree(control, stcb, &so->so_rcv, m);
6271 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6272 sctp_sblog(&so->so_rcv,
6273 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6274 }
6275 copied_so_far += cp_len;
6276 freed_so_far += cp_len;
6277 freed_so_far += MSIZE;
6278 atomic_subtract_int(&control->length, cp_len);
6279 control->data = sctp_m_free(m);
6280 m = control->data;
6281 /* been through it all, must hold sb lock ok to null tail */
6282 if (control->data == NULL) {
6283#ifdef INVARIANTS
6284#if defined(__FreeBSD__)
6285 if ((control->end_added == 0) ||
6286 (TAILQ_NEXT(control, next) == NULL)) {
6287 /* If the end is not added, OR the
6288 * next is NOT null we MUST have the lock.
6289 */
6290 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6291 panic("Hmm we don't own the lock?");
6292 }
6293 }
6294#endif
6295#endif
6296 control->tail_mbuf = NULL;
6297#ifdef INVARIANTS
6298 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6299 panic("end_added, nothing left and no MSG_EOR");
6300 }
6301#endif
6302 }
6303 }
6304 } else {
6305 /* Do we need to trim the mbuf? */
6306 if (control->spec_flags & M_NOTIFICATION) {
6307 out_flags |= MSG_NOTIFICATION;
6308 }
6309 if ((in_flags & MSG_PEEK) == 0) {
6310 SCTP_BUF_RESV_UF(m, cp_len);
6311 SCTP_BUF_LEN(m) -= cp_len;
6312 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6313 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, cp_len);
6314 }
6315 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6316 if ((control->do_not_ref_stcb == 0) &&
6317 stcb) {
6318 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6319 }
6320 copied_so_far += cp_len;
6321 freed_so_far += cp_len;
6322 freed_so_far += MSIZE;
6323 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6324 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
6325 SCTP_LOG_SBRESULT, 0);
6326 }
6327 atomic_subtract_int(&control->length, cp_len);
6328 } else {
6329 copied_so_far += cp_len;
6330 }
6331 }
6332#if defined(__APPLE__)
6333#if defined(APPLE_LEOPARD)
6334 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6335#else
6336 if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
6337#endif
6338#else
6339 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6340#endif
6341 break;
6342 }
6343 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6344 (control->do_not_ref_stcb == 0) &&
6345 (freed_so_far >= rwnd_req)) {
6346 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6347 }
6348 } /* end while(m) */
6349 /*
6350 * At this point we have looked at it all and we either have
6351 * a MSG_EOR/or read all the user wants... <OR>
6352 * control->length == 0.
6353 */
6354 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6355 /* we are done with this control */
6356 if (control->length == 0) {
6357 if (control->data) {
6358#ifdef INVARIANTS
6359 panic("control->data not null at read eor?");
6360#else
6361 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
6362 sctp_m_freem(control->data);
6363 control->data = NULL;
6364#endif
6365 }
6366 done_with_control:
6367 if (hold_rlock == 0) {
6368 SCTP_INP_READ_LOCK(inp);
6369 hold_rlock = 1;
6370 }
6371 TAILQ_REMOVE(&inp->read_queue, control, next);
6372 /* Add back any hiddend data */
6373 if (control->held_length) {
6374 held_length = 0;
6375 control->held_length = 0;
6376 wakeup_read_socket = 1;
6377 }
6378 if (control->aux_data) {
6379 sctp_m_free (control->aux_data);
6380 control->aux_data = NULL;
6381 }
6382 no_rcv_needed = control->do_not_ref_stcb;
6383 sctp_free_remote_addr(control->whoFrom);
6384 control->data = NULL;
6385#ifdef INVARIANTS
6386 if (control->on_strm_q) {
6387 panic("About to free ctl:%p so:%p and its in %d",
6388 control, so, control->on_strm_q);
6389 }
6390#endif
6391 sctp_free_a_readq(stcb, control);
6392 control = NULL;
6393 if ((freed_so_far >= rwnd_req) &&
6394 (no_rcv_needed == 0))
6395 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6396
6397 } else {
6398 /*
6399 * The user did not read all of this
6400 * message, turn off the returned MSG_EOR
6401 * since we are leaving more behind on the
6402 * control to read.
6403 */
6404#ifdef INVARIANTS
6405 if (control->end_added &&
6406 (control->data == NULL) &&
6407 (control->tail_mbuf == NULL)) {
6408 panic("Gak, control->length is corrupt?");
6409 }
6410#endif
6411 no_rcv_needed = control->do_not_ref_stcb;
6412 out_flags &= ~MSG_EOR;
6413 }
6414 }
6415 if (out_flags & MSG_EOR) {
6416 goto release;
6417 }
6418#if defined(__APPLE__)
6419#if defined(APPLE_LEOPARD)
6420 if ((uio->uio_resid == 0) ||
6421#else
6422 if ((uio_resid(uio) == 0) ||
6423#endif
6424#else
6425 if ((uio->uio_resid == 0) ||
6426#endif
6427 ((in_eeor_mode) &&
6428 (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
6429 goto release;
6430 }
6431 /*
6432 * If I hit here the receiver wants more and this message is
6433 * NOT done (pd-api). So two questions. Can we block? if not
6434 * we are done. Did the user NOT set MSG_WAITALL?
6435 */
6436 if (block_allowed == 0) {
6437 goto release;
6438 }
6439 /*
6440 * We need to wait for more data a few things: - We don't
6441 * sbunlock() so we don't get someone else reading. - We
6442 * must be sure to account for the case where what is added
6443 * is NOT to our control when we wakeup.
6444 */
6445
6446 /* Do we need to tell the transport a rwnd update might be
6447 * needed before we go to sleep?
6448 */
6449 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6450 ((freed_so_far >= rwnd_req) &&
6451 (control->do_not_ref_stcb == 0) &&
6452 (no_rcv_needed == 0))) {
6453 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6454 }
6455 wait_some_more:
6456#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
6457 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6458 goto release;
6459 }
6460#else
6461 if (so->so_state & SS_CANTRCVMORE) {
6462 goto release;
6463 }
6464#endif
6465
6466 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6467 goto release;
6468
6469 if (hold_rlock == 1) {
6470 SCTP_INP_READ_UNLOCK(inp);
6471 hold_rlock = 0;
6472 }
6473 if (hold_sblock == 0) {
6474 SOCKBUF_LOCK(&so->so_rcv);
6475 hold_sblock = 1;
6476 }
6477 if ((copied_so_far) && (control->length == 0) &&
6478 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6479 goto release;
6480 }
6481#if defined(__APPLE__)
6482 sbunlock(&so->so_rcv, 1);
6483#endif
6484 if (so->so_rcv.sb_cc <= control->held_length) {
6485 error = sbwait(&so->so_rcv);
6486 if (error) {
6487#if defined(__FreeBSD__)
6488 goto release;
6489#else
6490 goto release_unlocked;
6491#endif
6492 }
6493 control->held_length = 0;
6494 }
6495#if defined(__APPLE__)
6496 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
6497#endif
6498 if (hold_sblock) {
6499 SOCKBUF_UNLOCK(&so->so_rcv);
6500 hold_sblock = 0;
6501 }
6502 if (control->length == 0) {
6503 /* still nothing here */
6504 if (control->end_added == 1) {
6505 /* he aborted, or is done i.e.did a shutdown */
6506 out_flags |= MSG_EOR;
6507 if (control->pdapi_aborted) {
6508 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6509 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6510
6511 out_flags |= MSG_TRUNC;
6512 } else {
6513 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6514 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6515 }
6516 goto done_with_control;
6517 }
6518 if (so->so_rcv.sb_cc > held_length) {
6519 control->held_length = so->so_rcv.sb_cc;
6520 held_length = 0;
6521 }
6522 goto wait_some_more;
6523 } else if (control->data == NULL) {
6524 /* we must re-sync since data
6525 * is probably being added
6526 */
6527 SCTP_INP_READ_LOCK(inp);
6528 if ((control->length > 0) && (control->data == NULL)) {
6529 /* big trouble.. we have the lock and its corrupt? */
6530#ifdef INVARIANTS
6531 panic ("Impossible data==NULL length !=0");
6532#endif
6533 out_flags |= MSG_EOR;
6534 out_flags |= MSG_TRUNC;
6535 control->length = 0;
6536 SCTP_INP_READ_UNLOCK(inp);
6537 goto done_with_control;
6538 }
6539 SCTP_INP_READ_UNLOCK(inp);
6540 /* We will fall around to get more data */
6541 }
6542 goto get_more_data;
6543 } else {
6544 /*-
6545 * Give caller back the mbuf chain,
6546 * store in uio_resid the length
6547 */
6548 wakeup_read_socket = 0;
6549 if ((control->end_added == 0) ||
6550 (TAILQ_NEXT(control, next) == NULL)) {
6551 /* Need to get rlock */
6552 if (hold_rlock == 0) {
6553 SCTP_INP_READ_LOCK(inp);
6554 hold_rlock = 1;
6555 }
6556 }
6557 if (control->end_added) {
6558 out_flags |= MSG_EOR;
6559 if ((control->do_not_ref_stcb == 0) &&
6560 (control->stcb != NULL) &&
6561 ((control->spec_flags & M_NOTIFICATION) == 0))
6562 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6563 }
6564 if (control->spec_flags & M_NOTIFICATION) {
6565 out_flags |= MSG_NOTIFICATION;
6566 }
6567#if defined(__APPLE__)
6568#if defined(APPLE_LEOPARD)
6569 uio->uio_resid = control->length;
6570#else
6571 uio_setresid(uio, control->length);
6572#endif
6573#else
6574 uio->uio_resid = control->length;
6575#endif
6576 *mp = control->data;
6577 m = control->data;
6578 while (m) {
6579 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6580 sctp_sblog(&so->so_rcv,
6581 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6582 }
6583 sctp_sbfree(control, stcb, &so->so_rcv, m);
6584 freed_so_far += SCTP_BUF_LEN(m);
6585 freed_so_far += MSIZE;
6586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6587 sctp_sblog(&so->so_rcv,
6588 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
6589 }
6590 m = SCTP_BUF_NEXT(m);
6591 }
6592 control->data = control->tail_mbuf = NULL;
6593 control->length = 0;
6594 if (out_flags & MSG_EOR) {
6595 /* Done with this control */
6596 goto done_with_control;
6597 }
6598 }
6599 release:
6600 if (hold_rlock == 1) {
6601 SCTP_INP_READ_UNLOCK(inp);
6602 hold_rlock = 0;
6603 }
6604#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
6605 if (hold_sblock == 0) {
6606 SOCKBUF_LOCK(&so->so_rcv);
6607 hold_sblock = 1;
6608 }
6609#else
6610 if (hold_sblock == 1) {
6611 SOCKBUF_UNLOCK(&so->so_rcv);
6612 hold_sblock = 0;
6613 }
6614#endif
6615#if defined(__APPLE__)
6616 sbunlock(&so->so_rcv, 1);
6617#endif
6618
6619#if defined(__FreeBSD__)
6620 sbunlock(&so->so_rcv);
6621#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6622 sockbuf_lock = 0;
6623#endif
6624#endif
6625
6626 release_unlocked:
6627 if (hold_sblock) {
6628 SOCKBUF_UNLOCK(&so->so_rcv);
6629 hold_sblock = 0;
6630 }
6631 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6632 if ((freed_so_far >= rwnd_req) &&
6633 (control && (control->do_not_ref_stcb == 0)) &&
6634 (no_rcv_needed == 0))
6635 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6636 }
6637 out:
6638 if (msg_flags) {
6639 *msg_flags = out_flags;
6640 }
6641 if (((out_flags & MSG_EOR) == 0) &&
6642 ((in_flags & MSG_PEEK) == 0) &&
6643 (sinfo) &&
6644 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6645 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6646 struct sctp_extrcvinfo *s_extra;
6647 s_extra = (struct sctp_extrcvinfo *)sinfo;
6648 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6649 }
6650 if (hold_rlock == 1) {
6651 SCTP_INP_READ_UNLOCK(inp);
6652 }
6653 if (hold_sblock) {
6654 SOCKBUF_UNLOCK(&so->so_rcv);
6655 }
6656#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
6657 if (sockbuf_lock) {
6658 sbunlock(&so->so_rcv);
6659 }
6660#endif
6661
6662 if (freecnt_applied) {
6663 /*
6664 * The lock on the socket buffer protects us so the free
6665 * code will stop. But since we used the socketbuf lock and
6666 * the sender uses the tcb_lock to increment, we need to use
6667 * the atomic add to the refcnt.
6668 */
6669 if (stcb == NULL) {
6670#ifdef INVARIANTS
6671 panic("stcb for refcnt has gone NULL?");
6672 goto stage_left;
6673#else
6674 goto stage_left;
6675#endif
6676 }
6677 /* Save the value back for next time */
6678 stcb->freed_by_sorcv_sincelast = freed_so_far;
6679 atomic_add_int(&stcb->asoc.refcnt, -1);
6680 }
6681 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
6682 if (stcb) {
6683 sctp_misc_ints(SCTP_SORECV_DONE,
6684 freed_so_far,
6685#if defined(__APPLE__)
6686#if defined(APPLE_LEOPARD)
6687 ((uio) ? (slen - uio->uio_resid) : slen),
6688#else
6689 ((uio) ? (slen - uio_resid(uio)) : slen),
6690#endif
6691#else
6692 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6693#endif
6694 stcb->asoc.my_rwnd,
6695 so->so_rcv.sb_cc);
6696 } else {
6697 sctp_misc_ints(SCTP_SORECV_DONE,
6698 freed_so_far,
6699#if defined(__APPLE__)
6700#if defined(APPLE_LEOPARD)
6701 ((uio) ? (slen - uio->uio_resid) : slen),
6702#else
6703 ((uio) ? (slen - uio_resid(uio)) : slen),
6704#endif
6705#else
6706 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6707#endif
6708 0,
6709 so->so_rcv.sb_cc);
6710 }
6711 }
6712 stage_left:
6713 if (wakeup_read_socket) {
6714 sctp_sorwakeup(inp, so);
6715 }
6716 return (error);
6717}
6718
6719
6720#ifdef SCTP_MBUF_LOGGING
6721struct mbuf *
6722sctp_m_free(struct mbuf *m)
6723{
6724 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6725 sctp_log_mb(m, SCTP_MBUF_IFREE);
6726 }
6727 return (m_free(m));
6728}
6729
6730void sctp_m_freem(struct mbuf *mb)
6731{
6732 while (mb != NULL)
6733 mb = sctp_m_free(mb);
6734}
6735
6736#endif
6737
6738int
6739sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6740{
6741 /* Given a local address. For all associations
6742 * that holds the address, request a peer-set-primary.
6743 */
6744 struct sctp_ifa *ifa;
6745 struct sctp_laddr *wi;
6746
6747 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6748 if (ifa == NULL) {
6749 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6750 return (EADDRNOTAVAIL);
6751 }
6752 /* Now that we have the ifa we must awaken the
6753 * iterator with this message.
6754 */
6755 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6756 if (wi == NULL) {
6757 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6758 return (ENOMEM);
6759 }
6760 /* Now incr the count and int wi structure */
6761 SCTP_INCR_LADDR_COUNT();
6762 bzero(wi, sizeof(*wi));
6763 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6764 wi->ifa = ifa;
6765 wi->action = SCTP_SET_PRIM_ADDR;
6766 atomic_add_int(&ifa->refcount, 1);
6767
6768 /* Now add it to the work queue */
6769 SCTP_WQ_ADDR_LOCK();
6770 /*
6771 * Should this really be a tailq? As it is we will process the
6772 * newest first :-0
6773 */
6774 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6775 SCTP_WQ_ADDR_UNLOCK();
6776 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6777 (struct sctp_inpcb *)NULL,
6778 (struct sctp_tcb *)NULL,
6779 (struct sctp_nets *)NULL);
6780 return (0);
6781}
6782
6783#if defined(__Userspace__)
6784/* no sctp_soreceive for __Userspace__ now */
6785#endif
6786
6787#if !defined(__Userspace__)
6788int
6789sctp_soreceive( struct socket *so,
6790 struct sockaddr **psa,
6791 struct uio *uio,
6792 struct mbuf **mp0,
6793 struct mbuf **controlp,
6794 int *flagsp)
6795{
6796 int error, fromlen;
6797 uint8_t sockbuf[256];
6798 struct sockaddr *from;
6799 struct sctp_extrcvinfo sinfo;
6800 int filling_sinfo = 1;
6801 struct sctp_inpcb *inp;
6802
6803 inp = (struct sctp_inpcb *)so->so_pcb;
6804 /* pickup the assoc we are reading from */
6805 if (inp == NULL) {
6806 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6807 return (EINVAL);
6808 }
6809 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6810 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6811 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6812 (controlp == NULL)) {
6813 /* user does not want the sndrcv ctl */
6814 filling_sinfo = 0;
6815 }
6816 if (psa) {
6817 from = (struct sockaddr *)sockbuf;
6818 fromlen = sizeof(sockbuf);
6819#ifdef HAVE_SA_LEN
6820 from->sa_len = 0;
6821#endif
6822 } else {
6823 from = NULL;
6824 fromlen = 0;
6825 }
6826
6827#if defined(__APPLE__)
6828 SCTP_SOCKET_LOCK(so, 1);
6829#endif
6830 if (filling_sinfo) {
6831 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6832 }
6833 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6834 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6835 if (controlp != NULL) {
6836 /* copy back the sinfo in a CMSG format */
6837 if (filling_sinfo)
6838 *controlp = sctp_build_ctl_nchunk(inp,
6839 (struct sctp_sndrcvinfo *)&sinfo);
6840 else
6841 *controlp = NULL;
6842 }
6843 if (psa) {
6844 /* copy back the address info */
6845#ifdef HAVE_SA_LEN
6846 if (from && from->sa_len) {
6847#else
6848 if (from) {
6849#endif
6850#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
6851 *psa = sodupsockaddr(from, M_NOWAIT);
6852#else
6853 *psa = dup_sockaddr(from, mp0 == 0);
6854#endif
6855 } else {
6856 *psa = NULL;
6857 }
6858 }
6859#if defined(__APPLE__)
6860 SCTP_SOCKET_UNLOCK(so, 1);
6861#endif
6862 return (error);
6863}
6864
6865
6866#if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__)
6867/*
6868 * General routine to allocate a hash table with control of memory flags.
6869 * is in 7.0 and beyond for sure :-)
6870 */
6871void *
6872sctp_hashinit_flags(int elements, struct malloc_type *type,
6873 u_long *hashmask, int flags)
6874{
6875 long hashsize;
6876 LIST_HEAD(generic, generic) *hashtbl;
6877 int i;
6878
6879
6880 if (elements <= 0) {
6881#ifdef INVARIANTS
6882 panic("hashinit: bad elements");
6883#else
6884 SCTP_PRINTF("hashinit: bad elements?");
6885 elements = 1;
6886#endif
6887 }
6888 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
6889 continue;
6890 hashsize >>= 1;
6891 if (flags & HASH_WAITOK)
6892 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
6893 else if (flags & HASH_NOWAIT)
6894 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
6895 else {
6896#ifdef INVARIANTS
6897 panic("flag incorrect in hashinit_flags");
6898#else
6899 return (NULL);
6900#endif
6901 }
6902
6903 /* no memory? */
6904 if (hashtbl == NULL)
6905 return (NULL);
6906
6907 for (i = 0; i < hashsize; i++)
6908 LIST_INIT(&hashtbl[i]);
6909 *hashmask = hashsize - 1;
6910 return (hashtbl);
6911}
6912#endif
6913
6914#else /* __Userspace__ ifdef above sctp_soreceive */
6915/*
6916 * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
6917 * NOTE: We don't want multiple definitions here. So sctp_hashinit_flags() above for
6918 *__FreeBSD__ must be excluded.
6919 *
6920 */
6921
6922void *
6923sctp_hashinit_flags(int elements, struct malloc_type *type,
6924 u_long *hashmask, int flags)
6925{
6926 long hashsize;
6927 LIST_HEAD(generic, generic) *hashtbl;
6928 int i;
6929
6930 if (elements <= 0) {
6931 SCTP_PRINTF("hashinit: bad elements?");
6932#ifdef INVARIANTS
6933 return (NULL);
6934#else
6935 elements = 1;
6936#endif
6937 }
6938 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
6939 continue;
6940 hashsize >>= 1;
6941 /*cannot use MALLOC here because it has to be declared or defined
6942 using MALLOC_DECLARE or MALLOC_DEFINE first. */
6943 if (flags & HASH_WAITOK)
6944 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
6945 else if (flags & HASH_NOWAIT)
6946 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
6947 else {
6948#ifdef INVARIANTS
6949 SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
6950#endif
6951 return (NULL);
6952 }
6953
6954 /* no memory? */
6955 if (hashtbl == NULL)
6956 return (NULL);
6957
6958 for (i = 0; i < hashsize; i++)
6959 LIST_INIT(&hashtbl[i]);
6960 *hashmask = hashsize - 1;
6961 return (hashtbl);
6962}
6963
6964
6965void
6966sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
6967{
6968 LIST_HEAD(generic, generic) *hashtbl, *hp;
6969
6970 hashtbl = vhashtbl;
6971 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
6972 if (!LIST_EMPTY(hp)) {
6973 SCTP_PRINTF("hashdestroy: hash not empty.\n");
6974 return;
6975 }
6976 FREE(hashtbl, type);
6977}
6978
6979
6980void
6981sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
6982{
6983 LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
6984 /*
6985 LIST_ENTRY(type) *start, *temp;
6986 */
6987 hashtbl = vhashtbl;
6988 /* Apparently temp is not dynamically allocated, so attempts to
6989 free it results in error.
6990 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
6991 if (!LIST_EMPTY(hp)) {
6992 start = LIST_FIRST(hp);
6993 while (start != NULL) {
6994 temp = start;
6995 start = start->le_next;
6996 SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
6997 FREE(temp, type);
6998 }
6999 }
7000 */
7001 FREE(hashtbl, type);
7002}
7003
7004
7005#endif
7006
7007
7008int
7009sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
7010 int totaddr, int *error)
7011{
7012 int added = 0;
7013 int i;
7014 struct sctp_inpcb *inp;
7015 struct sockaddr *sa;
7016 size_t incr = 0;
7017#ifdef INET
7018 struct sockaddr_in *sin;
7019#endif
7020#ifdef INET6
7021 struct sockaddr_in6 *sin6;
7022#endif
7023
7024 sa = addr;
7025 inp = stcb->sctp_ep;
7026 *error = 0;
7027 for (i = 0; i < totaddr; i++) {
7028 switch (sa->sa_family) {
7029#ifdef INET
7030 case AF_INET:
7031 incr = sizeof(struct sockaddr_in);
7032 sin = (struct sockaddr_in *)sa;
7033 if ((sin->sin_addr.s_addr == INADDR_ANY) ||
7034 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
7035 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
7036 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7037 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7038 SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
7039 *error = EINVAL;
7040 goto out_now;
7041 }
7042 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
7043 SCTP_DONOT_SETSCOPE,
7044 SCTP_ADDR_IS_CONFIRMED)) {
7045 /* assoc gone no un-lock */
7046 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7047 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7048 SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
7049 *error = ENOBUFS;
7050 goto out_now;
7051 }
7052 added++;
7053 break;
7054#endif
7055#ifdef INET6
7056 case AF_INET6:
7057 incr = sizeof(struct sockaddr_in6);
7058 sin6 = (struct sockaddr_in6 *)sa;
7059 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
7060 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
7061 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7062 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7063 SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
7064 *error = EINVAL;
7065 goto out_now;
7066 }
7067 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
7068 SCTP_DONOT_SETSCOPE,
7069 SCTP_ADDR_IS_CONFIRMED)) {
7070 /* assoc gone no un-lock */
7071 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7072 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7073 SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
7074 *error = ENOBUFS;
7075 goto out_now;
7076 }
7077 added++;
7078 break;
7079#endif
7080#if defined(__Userspace__)
7081 case AF_CONN:
7082 incr = sizeof(struct sockaddr_in6);
7083 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
7084 SCTP_DONOT_SETSCOPE,
7085 SCTP_ADDR_IS_CONFIRMED)) {
7086 /* assoc gone no un-lock */
7087 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
7088 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
7089 SCTP_FROM_SCTPUTIL + SCTP_LOC_11);
7090 *error = ENOBUFS;
7091 goto out_now;
7092 }
7093 added++;
7094 break;
7095#endif
7096 default:
7097 break;
7098 }
7099 sa = (struct sockaddr *)((caddr_t)sa + incr);
7100 }
7101 out_now:
7102 return (added);
7103}
7104
7105struct sctp_tcb *
7106sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
7107 unsigned int *totaddr,
7108 unsigned int *num_v4, unsigned int *num_v6, int *error,
7109 unsigned int limit, int *bad_addr)
7110{
7111 struct sockaddr *sa;
7112 struct sctp_tcb *stcb = NULL;
7113 unsigned int incr, at, i;
7114
7115 at = 0;
7116 sa = addr;
7117 *error = *num_v6 = *num_v4 = 0;
7118 /* account and validate addresses */
7119 for (i = 0; i < *totaddr; i++) {
7120 switch (sa->sa_family) {
7121#ifdef INET
7122 case AF_INET:
7123 incr = (unsigned int)sizeof(struct sockaddr_in);
7124#ifdef HAVE_SA_LEN
7125 if (sa->sa_len != incr) {
7126 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7127 *error = EINVAL;
7128 *bad_addr = 1;
7129 return (NULL);
7130 }
7131#endif
7132 (*num_v4) += 1;
7133 break;
7134#endif
7135#ifdef INET6
7136 case AF_INET6:
7137 {
7138 struct sockaddr_in6 *sin6;
7139
7140 sin6 = (struct sockaddr_in6 *)sa;
7141 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7142 /* Must be non-mapped for connectx */
7143 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7144 *error = EINVAL;
7145 *bad_addr = 1;
7146 return (NULL);
7147 }
7148 incr = (unsigned int)sizeof(struct sockaddr_in6);
7149#ifdef HAVE_SA_LEN
7150 if (sa->sa_len != incr) {
7151 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7152 *error = EINVAL;
7153 *bad_addr = 1;
7154 return (NULL);
7155 }
7156#endif
7157 (*num_v6) += 1;
7158 break;
7159 }
7160#endif
7161 default:
7162 *totaddr = i;
7163 incr = 0;
7164 /* we are done */
7165 break;
7166 }
7167 if (i == *totaddr) {
7168 break;
7169 }
7170 SCTP_INP_INCR_REF(inp);
7171 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
7172 if (stcb != NULL) {
7173 /* Already have or am bring up an association */
7174 return (stcb);
7175 } else {
7176 SCTP_INP_DECR_REF(inp);
7177 }
7178 if ((at + incr) > limit) {
7179 *totaddr = i;
7180 break;
7181 }
7182 sa = (struct sockaddr *)((caddr_t)sa + incr);
7183 }
7184 return ((struct sctp_tcb *)NULL);
7185}
7186
7187/*
7188 * sctp_bindx(ADD) for one address.
7189 * assumes all arguments are valid/checked by caller.
7190 */
7191void
7192sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
7193 struct sockaddr *sa, sctp_assoc_t assoc_id,
7194 uint32_t vrf_id, int *error, void *p)
7195{
7196 struct sockaddr *addr_touse;
7197#if defined(INET) && defined(INET6)
7198 struct sockaddr_in sin;
7199#endif
7200#ifdef SCTP_MVRF
7201 int i, fnd = 0;
7202#endif
7203
7204 /* see if we're bound all already! */
7205 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7206 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7207 *error = EINVAL;
7208 return;
7209 }
7210#ifdef SCTP_MVRF
7211 /* Is the VRF one we have */
7212 for (i = 0; i < inp->num_vrfs; i++) {
7213 if (vrf_id == inp->m_vrf_ids[i]) {
7214 fnd = 1;
7215 break;
7216 }
7217 }
7218 if (!fnd) {
7219 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7220 *error = EINVAL;
7221 return;
7222 }
7223#endif
7224 addr_touse = sa;
7225#ifdef INET6
7226 if (sa->sa_family == AF_INET6) {
7227#ifdef INET
7228 struct sockaddr_in6 *sin6;
7229
7230#endif
7231#ifdef HAVE_SA_LEN
7232 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7233 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7234 *error = EINVAL;
7235 return;
7236 }
7237#endif
7238 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7239 /* can only bind v6 on PF_INET6 sockets */
7240 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7241 *error = EINVAL;
7242 return;
7243 }
7244#ifdef INET
7245 sin6 = (struct sockaddr_in6 *)addr_touse;
7246 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7247 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7248 SCTP_IPV6_V6ONLY(inp)) {
7249 /* can't bind v4-mapped on PF_INET sockets */
7250 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7251 *error = EINVAL;
7252 return;
7253 }
7254 in6_sin6_2_sin(&sin, sin6);
7255 addr_touse = (struct sockaddr *)&sin;
7256 }
7257#endif
7258 }
7259#endif
7260#ifdef INET
7261 if (sa->sa_family == AF_INET) {
7262#ifdef HAVE_SA_LEN
7263 if (sa->sa_len != sizeof(struct sockaddr_in)) {
7264 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7265 *error = EINVAL;
7266 return;
7267 }
7268#endif
7269 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7270 SCTP_IPV6_V6ONLY(inp)) {
7271 /* can't bind v4 on PF_INET sockets */
7272 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7273 *error = EINVAL;
7274 return;
7275 }
7276 }
7277#endif
7278 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
7279#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
7280 if (p == NULL) {
7281 /* Can't get proc for Net/Open BSD */
7282 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7283 *error = EINVAL;
7284 return;
7285 }
7286#endif
7287 *error = sctp_inpcb_bind(so, addr_touse, NULL, p);
7288 return;
7289 }
7290 /*
7291 * No locks required here since bind and mgmt_ep_sa
7292 * all do their own locking. If we do something for
7293 * the FIX: below we may need to lock in that case.
7294 */
7295 if (assoc_id == 0) {
7296 /* add the address */
7297 struct sctp_inpcb *lep;
7298 struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
7299
7300 /* validate the incoming port */
7301 if ((lsin->sin_port != 0) &&
7302 (lsin->sin_port != inp->sctp_lport)) {
7303 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7304 *error = EINVAL;
7305 return;
7306 } else {
7307 /* user specified 0 port, set it to existing port */
7308 lsin->sin_port = inp->sctp_lport;
7309 }
7310
7311 lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
7312 if (lep != NULL) {
7313 /*
7314 * We must decrement the refcount
7315 * since we have the ep already and
7316 * are binding. No remove going on
7317 * here.
7318 */
7319 SCTP_INP_DECR_REF(lep);
7320 }
7321 if (lep == inp) {
7322 /* already bound to it.. ok */
7323 return;
7324 } else if (lep == NULL) {
7325 ((struct sockaddr_in *)addr_touse)->sin_port = 0;
7326 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7327 SCTP_ADD_IP_ADDRESS,
7328 vrf_id, NULL);
7329 } else {
7330 *error = EADDRINUSE;
7331 }
7332 if (*error)
7333 return;
7334 } else {
7335 /*
7336 * FIX: decide whether we allow assoc based
7337 * bindx
7338 */
7339 }
7340}
7341
7342/*
7343 * sctp_bindx(DELETE) for one address.
7344 * assumes all arguments are valid/checked by caller.
7345 */
7346void
7347sctp_bindx_delete_address(struct sctp_inpcb *inp,
7348 struct sockaddr *sa, sctp_assoc_t assoc_id,
7349 uint32_t vrf_id, int *error)
7350{
7351 struct sockaddr *addr_touse;
7352#if defined(INET) && defined(INET6)
7353 struct sockaddr_in sin;
7354#endif
7355#ifdef SCTP_MVRF
7356 int i, fnd = 0;
7357#endif
7358
7359 /* see if we're bound all already! */
7360 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7361 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7362 *error = EINVAL;
7363 return;
7364 }
7365#ifdef SCTP_MVRF
7366 /* Is the VRF one we have */
7367 for (i = 0; i < inp->num_vrfs; i++) {
7368 if (vrf_id == inp->m_vrf_ids[i]) {
7369 fnd = 1;
7370 break;
7371 }
7372 }
7373 if (!fnd) {
7374 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7375 *error = EINVAL;
7376 return;
7377 }
7378#endif
7379 addr_touse = sa;
7380#ifdef INET6
7381 if (sa->sa_family == AF_INET6) {
7382#ifdef INET
7383 struct sockaddr_in6 *sin6;
7384#endif
7385
7386#ifdef HAVE_SA_LEN
7387 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7388 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7389 *error = EINVAL;
7390 return;
7391 }
7392#endif
7393 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7394 /* can only bind v6 on PF_INET6 sockets */
7395 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7396 *error = EINVAL;
7397 return;
7398 }
7399#ifdef INET
7400 sin6 = (struct sockaddr_in6 *)addr_touse;
7401 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7402 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7403 SCTP_IPV6_V6ONLY(inp)) {
7404 /* can't bind mapped-v4 on PF_INET sockets */
7405 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7406 *error = EINVAL;
7407 return;
7408 }
7409 in6_sin6_2_sin(&sin, sin6);
7410 addr_touse = (struct sockaddr *)&sin;
7411 }
7412#endif
7413 }
7414#endif
7415#ifdef INET
7416 if (sa->sa_family == AF_INET) {
7417#ifdef HAVE_SA_LEN
7418 if (sa->sa_len != sizeof(struct sockaddr_in)) {
7419 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7420 *error = EINVAL;
7421 return;
7422 }
7423#endif
7424 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7425 SCTP_IPV6_V6ONLY(inp)) {
7426 /* can't bind v4 on PF_INET sockets */
7427 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7428 *error = EINVAL;
7429 return;
7430 }
7431 }
7432#endif
7433 /*
7434 * No lock required mgmt_ep_sa does its own locking.
7435 * If the FIX: below is ever changed we may need to
7436 * lock before calling association level binding.
7437 */
7438 if (assoc_id == 0) {
7439 /* delete the address */
7440 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7441 SCTP_DEL_IP_ADDRESS,
7442 vrf_id, NULL);
7443 } else {
7444 /*
7445 * FIX: decide whether we allow assoc based
7446 * bindx
7447 */
7448 }
7449}
7450
7451/*
7452 * returns the valid local address count for an assoc, taking into account
7453 * all scoping rules
7454 */
7455int
7456sctp_local_addr_count(struct sctp_tcb *stcb)
7457{
7458 int loopback_scope;
7459#if defined(INET)
7460 int ipv4_local_scope, ipv4_addr_legal;
7461#endif
7462#if defined (INET6)
7463 int local_scope, site_scope, ipv6_addr_legal;
7464#endif
7465#if defined(__Userspace__)
7466 int conn_addr_legal;
7467#endif
7468 struct sctp_vrf *vrf;
7469 struct sctp_ifn *sctp_ifn;
7470 struct sctp_ifa *sctp_ifa;
7471 int count = 0;
7472
7473 /* Turn on all the appropriate scopes */
7474 loopback_scope = stcb->asoc.scope.loopback_scope;
7475#if defined(INET)
7476 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
7477 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
7478#endif
7479#if defined(INET6)
7480 local_scope = stcb->asoc.scope.local_scope;
7481 site_scope = stcb->asoc.scope.site_scope;
7482 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
7483#endif
7484#if defined(__Userspace__)
7485 conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
7486#endif
7487 SCTP_IPI_ADDR_RLOCK();
7488 vrf = sctp_find_vrf(stcb->asoc.vrf_id);
7489 if (vrf == NULL) {
7490 /* no vrf, no addresses */
7491 SCTP_IPI_ADDR_RUNLOCK();
7492 return (0);
7493 }
7494
7495 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7496 /*
7497 * bound all case: go through all ifns on the vrf
7498 */
7499 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
7500 if ((loopback_scope == 0) &&
7501 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
7502 continue;
7503 }
7504 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
7505 if (sctp_is_addr_restricted(stcb, sctp_ifa))
7506 continue;
7507 switch (sctp_ifa->address.sa.sa_family) {
7508#ifdef INET
7509 case AF_INET:
7510 if (ipv4_addr_legal) {
7511 struct sockaddr_in *sin;
7512
7513 sin = &sctp_ifa->address.sin;
7514 if (sin->sin_addr.s_addr == 0) {
7515 /* skip unspecified addrs */
7516 continue;
7517 }
7518#if defined(__FreeBSD__)
7519 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
7520 &sin->sin_addr) != 0) {
7521 continue;
7522 }
7523#endif
7524 if ((ipv4_local_scope == 0) &&
7525 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
7526 continue;
7527 }
7528 /* count this one */
7529 count++;
7530 } else {
7531 continue;
7532 }
7533 break;
7534#endif
7535#ifdef INET6
7536 case AF_INET6:
7537 if (ipv6_addr_legal) {
7538 struct sockaddr_in6 *sin6;
7539
7540#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
7541 struct sockaddr_in6 lsa6;
7542#endif
7543 sin6 = &sctp_ifa->address.sin6;
7544 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
7545 continue;
7546 }
7547#if defined(__FreeBSD__)
7548 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
7549 &sin6->sin6_addr) != 0) {
7550 continue;
7551 }
7552#endif
7553 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
7554 if (local_scope == 0)
7555 continue;
7556#if defined(SCTP_EMBEDDED_V6_SCOPE)
7557 if (sin6->sin6_scope_id == 0) {
7558#ifdef SCTP_KAME
7559 if (sa6_recoverscope(sin6) != 0)
7560 /*
7561 * bad link
7562 * local
7563 * address
7564 */
7565 continue;
7566#else
7567 lsa6 = *sin6;
7568 if (in6_recoverscope(&lsa6,
7569 &lsa6.sin6_addr,
7570 NULL))
7571 /*
7572 * bad link
7573 * local
7574 * address
7575 */
7576 continue;
7577 sin6 = &lsa6;
7578#endif /* SCTP_KAME */
7579 }
7580#endif /* SCTP_EMBEDDED_V6_SCOPE */
7581 }
7582 if ((site_scope == 0) &&
7583 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
7584 continue;
7585 }
7586 /* count this one */
7587 count++;
7588 }
7589 break;
7590#endif
7591#if defined(__Userspace__)
7592 case AF_CONN:
7593 if (conn_addr_legal) {
7594 count++;
7595 }
7596 break;
7597#endif
7598 default:
7599 /* TSNH */
7600 break;
7601 }
7602 }
7603 }
7604 } else {
7605 /*
7606 * subset bound case
7607 */
7608 struct sctp_laddr *laddr;
7609 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
7610 sctp_nxt_addr) {
7611 if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
7612 continue;
7613 }
7614 /* count this one */
7615 count++;
7616 }
7617 }
7618 SCTP_IPI_ADDR_RUNLOCK();
7619 return (count);
7620}
7621
7622#if defined(SCTP_LOCAL_TRACE_BUF)
7623
7624void
7625sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
7626{
7627 uint32_t saveindex, newindex;
7628
7629#if defined(__Windows__)
7630 if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
7631 return;
7632 }
7633 do {
7634 saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
7635 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7636 newindex = 1;
7637 } else {
7638 newindex = saveindex + 1;
7639 }
7640 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
7641 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7642 saveindex = 0;
7643 }
7644 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7645 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
7646 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
7647 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
7648 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
7649 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
7650 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
7651 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
7652#else
7653 do {
7654 saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
7655 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7656 newindex = 1;
7657 } else {
7658 newindex = saveindex + 1;
7659 }
7660 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
7661 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7662 saveindex = 0;
7663 }
7664 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7665 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
7666 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
7667 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
7668 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
7669 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
7670 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
7671 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
7672#endif
7673}
7674
7675#endif
7676#if defined(__FreeBSD__)
7677#if __FreeBSD_version >= 800044
7678static void
7679sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
7680 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
7681{
7682 struct ip *iph;
7683#ifdef INET6
7684 struct ip6_hdr *ip6;
7685#endif
7686 struct mbuf *sp, *last;
7687 struct udphdr *uhdr;
7688 uint16_t port;
7689
7690 if ((m->m_flags & M_PKTHDR) == 0) {
7691 /* Can't handle one that is not a pkt hdr */
7692 goto out;
7693 }
7694 /* Pull the src port */
7695 iph = mtod(m, struct ip *);
7696 uhdr = (struct udphdr *)((caddr_t)iph + off);
7697 port = uhdr->uh_sport;
7698 /* Split out the mbuf chain. Leave the
7699 * IP header in m, place the
7700 * rest in the sp.
7701 */
7702 sp = m_split(m, off, M_NOWAIT);
7703 if (sp == NULL) {
7704 /* Gak, drop packet, we can't do a split */
7705 goto out;
7706 }
7707 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
7708 /* Gak, packet can't have an SCTP header in it - too small */
7709 m_freem(sp);
7710 goto out;
7711 }
7712 /* Now pull up the UDP header and SCTP header together */
7713 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
7714 if (sp == NULL) {
7715 /* Gak pullup failed */
7716 goto out;
7717 }
7718 /* Trim out the UDP header */
7719 m_adj(sp, sizeof(struct udphdr));
7720
7721 /* Now reconstruct the mbuf chain */
7722 for (last = m; last->m_next; last = last->m_next);
7723 last->m_next = sp;
7724 m->m_pkthdr.len += sp->m_pkthdr.len;
7725 /*
7726 * The CSUM_DATA_VALID flags indicates that the HW checked the
7727 * UDP checksum and it was valid.
7728 * Since CSUM_DATA_VALID == CSUM_SCTP_VALID this would imply that
7729 * the HW also verified the SCTP checksum. Therefore, clear the bit.
7730 */
7731#if __FreeBSD_version > 1000049
7732 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
7733 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
7734 m->m_pkthdr.len,
7735 if_name(m->m_pkthdr.rcvif),
7736 (int)m->m_pkthdr.csum_flags, CSUM_BITS);
7737#else
7738 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
7739 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%x.\n",
7740 m->m_pkthdr.len,
7741 if_name(m->m_pkthdr.rcvif),
7742 m->m_pkthdr.csum_flags);
7743#endif
7744 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
7745 iph = mtod(m, struct ip *);
7746 switch (iph->ip_v) {
7747#ifdef INET
7748 case IPVERSION:
7749#if __FreeBSD_version >= 1000000
7750 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
7751#else
7752 iph->ip_len -= sizeof(struct udphdr);
7753#endif
7754 sctp_input_with_port(m, off, port);
7755 break;
7756#endif
7757#ifdef INET6
7758 case IPV6_VERSION >> 4:
7759 ip6 = mtod(m, struct ip6_hdr *);
7760 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
7761 sctp6_input_with_port(&m, &off, port);
7762 break;
7763#endif
7764 default:
7765 goto out;
7766 break;
7767 }
7768 return;
7769 out:
7770 m_freem(m);
7771}
7772#endif
7773
7774#if __FreeBSD_version >= 1100000
7775#ifdef INET
7776static void
7777sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
7778{
7779 struct ip *outer_ip, *inner_ip;
7780 struct sctphdr *sh;
7781 struct icmp *icmp;
7782 struct udphdr *udp;
7783 struct sctp_inpcb *inp;
7784 struct sctp_tcb *stcb;
7785 struct sctp_nets *net;
7786 struct sctp_init_chunk *ch;
7787 struct sockaddr_in src, dst;
7788 uint8_t type, code;
7789
7790 inner_ip = (struct ip *)vip;
7791 icmp = (struct icmp *)((caddr_t)inner_ip -
7792 (sizeof(struct icmp) - sizeof(struct ip)));
7793 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
7794 if (ntohs(outer_ip->ip_len) <
7795 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
7796 return;
7797 }
7798 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
7799 sh = (struct sctphdr *)(udp + 1);
7800 memset(&src, 0, sizeof(struct sockaddr_in));
7801 src.sin_family = AF_INET;
7802#ifdef HAVE_SIN_LEN
7803 src.sin_len = sizeof(struct sockaddr_in);
7804#endif
7805 src.sin_port = sh->src_port;
7806 src.sin_addr = inner_ip->ip_src;
7807 memset(&dst, 0, sizeof(struct sockaddr_in));
7808 dst.sin_family = AF_INET;
7809#ifdef HAVE_SIN_LEN
7810 dst.sin_len = sizeof(struct sockaddr_in);
7811#endif
7812 dst.sin_port = sh->dest_port;
7813 dst.sin_addr = inner_ip->ip_dst;
7814 /*
7815 * 'dst' holds the dest of the packet that failed to be sent.
7816 * 'src' holds our local endpoint address. Thus we reverse
7817 * the dst and the src in the lookup.
7818 */
7819 inp = NULL;
7820 net = NULL;
7821 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7822 (struct sockaddr *)&src,
7823 &inp, &net, 1,
7824 SCTP_DEFAULT_VRFID);
7825 if ((stcb != NULL) &&
7826 (net != NULL) &&
7827 (inp != NULL)) {
7828 /* Check the UDP port numbers */
7829 if ((udp->uh_dport != net->port) ||
7830 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7831 SCTP_TCB_UNLOCK(stcb);
7832 return;
7833 }
7834 /* Check the verification tag */
7835 if (ntohl(sh->v_tag) != 0) {
7836 /*
7837 * This must be the verification tag used
7838 * for sending out packets. We don't
7839 * consider packets reflecting the
7840 * verification tag.
7841 */
7842 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
7843 SCTP_TCB_UNLOCK(stcb);
7844 return;
7845 }
7846 } else {
7847 if (ntohs(outer_ip->ip_len) >=
7848 sizeof(struct ip) +
7849 8 + (inner_ip->ip_hl << 2) + 8 + 20) {
7850 /*
7851 * In this case we can check if we
7852 * got an INIT chunk and if the
7853 * initiate tag matches.
7854 */
7855 ch = (struct sctp_init_chunk *)(sh + 1);
7856 if ((ch->ch.chunk_type != SCTP_INITIATION) ||
7857 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
7858 SCTP_TCB_UNLOCK(stcb);
7859 return;
7860 }
7861 } else {
7862 SCTP_TCB_UNLOCK(stcb);
7863 return;
7864 }
7865 }
7866 type = icmp->icmp_type;
7867 code = icmp->icmp_code;
7868 if ((type == ICMP_UNREACH) &&
7869 (code == ICMP_UNREACH_PORT)) {
7870 code = ICMP_UNREACH_PROTOCOL;
7871 }
7872 sctp_notify(inp, stcb, net, type, code,
7873 ntohs(inner_ip->ip_len),
7874 ntohs(icmp->icmp_nextmtu));
7875 } else {
7876#if defined(__FreeBSD__) && __FreeBSD_version < 500000
7877 /*
7878 * XXX must be fixed for 5.x and higher, leave for
7879 * 4.x
7880 */
7881 if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) {
7882 in_rtchange((struct inpcb *)inp,
7883 inetctlerrmap[cmd]);
7884 }
7885#endif
7886 if ((stcb == NULL) && (inp != NULL)) {
7887 /* reduce ref-count */
7888 SCTP_INP_WLOCK(inp);
7889 SCTP_INP_DECR_REF(inp);
7890 SCTP_INP_WUNLOCK(inp);
7891 }
7892 if (stcb) {
7893 SCTP_TCB_UNLOCK(stcb);
7894 }
7895 }
7896 return;
7897}
7898#endif
7899
7900#ifdef INET6
7901static void
7902sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7903{
7904 struct ip6ctlparam *ip6cp;
7905 struct sctp_inpcb *inp;
7906 struct sctp_tcb *stcb;
7907 struct sctp_nets *net;
7908 struct sctphdr sh;
7909 struct udphdr udp;
7910 struct sockaddr_in6 src, dst;
7911 uint8_t type, code;
7912
7913 ip6cp = (struct ip6ctlparam *)d;
7914 /*
7915 * XXX: We assume that when IPV6 is non NULL, M and OFF are
7916 * valid.
7917 */
7918 if (ip6cp->ip6c_m == NULL) {
7919 return;
7920 }
7921 /* Check if we can safely examine the ports and the
7922 * verification tag of the SCTP common header.
7923 */
7924 if (ip6cp->ip6c_m->m_pkthdr.len <
7925 ip6cp->ip6c_off + sizeof(struct udphdr)+ offsetof(struct sctphdr, checksum)) {
7926 return;
7927 }
7928 /* Copy out the UDP header. */
7929 memset(&udp, 0, sizeof(struct udphdr));
7930 m_copydata(ip6cp->ip6c_m,
7931 ip6cp->ip6c_off,
7932 sizeof(struct udphdr),
7933 (caddr_t)&udp);
7934 /* Copy out the port numbers and the verification tag. */
7935 memset(&sh, 0, sizeof(struct sctphdr));
7936 m_copydata(ip6cp->ip6c_m,
7937 ip6cp->ip6c_off + sizeof(struct udphdr),
7938 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7939 (caddr_t)&sh);
7940 memset(&src, 0, sizeof(struct sockaddr_in6));
7941 src.sin6_family = AF_INET6;
7942#ifdef HAVE_SIN6_LEN
7943 src.sin6_len = sizeof(struct sockaddr_in6);
7944#endif
7945 src.sin6_port = sh.src_port;
7946 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7947#if defined(__FreeBSD__)
7948 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7949 return;
7950 }
7951#endif
7952 memset(&dst, 0, sizeof(struct sockaddr_in6));
7953 dst.sin6_family = AF_INET6;
7954#ifdef HAVE_SIN6_LEN
7955 dst.sin6_len = sizeof(struct sockaddr_in6);
7956#endif
7957 dst.sin6_port = sh.dest_port;
7958 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7959#if defined(__FreeBSD__)
7960 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7961 return;
7962 }
7963#endif
7964 inp = NULL;
7965 net = NULL;
7966 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7967 (struct sockaddr *)&src,
7968 &inp, &net, 1, SCTP_DEFAULT_VRFID);
7969 if ((stcb != NULL) &&
7970 (net != NULL) &&
7971 (inp != NULL)) {
7972 /* Check the UDP port numbers */
7973 if ((udp.uh_dport != net->port) ||
7974 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7975 SCTP_TCB_UNLOCK(stcb);
7976 return;
7977 }
7978 /* Check the verification tag */
7979 if (ntohl(sh.v_tag) != 0) {
7980 /*
7981 * This must be the verification tag used for
7982 * sending out packets. We don't consider
7983 * packets reflecting the verification tag.
7984 */
7985 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7986 SCTP_TCB_UNLOCK(stcb);
7987 return;
7988 }
7989 } else {
7990#if defined(__FreeBSD__)
7991 if (ip6cp->ip6c_m->m_pkthdr.len >=
7992 ip6cp->ip6c_off + sizeof(struct udphdr) +
7993 sizeof(struct sctphdr) +
7994 sizeof(struct sctp_chunkhdr) +
7995 offsetof(struct sctp_init, a_rwnd)) {
7996 /*
7997 * In this case we can check if we
7998 * got an INIT chunk and if the
7999 * initiate tag matches.
8000 */
8001 uint32_t initiate_tag;
8002 uint8_t chunk_type;
8003
8004 m_copydata(ip6cp->ip6c_m,
8005 ip6cp->ip6c_off +
8006 sizeof(struct udphdr) +
8007 sizeof(struct sctphdr),
8008 sizeof(uint8_t),
8009 (caddr_t)&chunk_type);
8010 m_copydata(ip6cp->ip6c_m,
8011 ip6cp->ip6c_off +
8012 sizeof(struct udphdr) +
8013 sizeof(struct sctphdr) +
8014 sizeof(struct sctp_chunkhdr),
8015 sizeof(uint32_t),
8016 (caddr_t)&initiate_tag);
8017 if ((chunk_type != SCTP_INITIATION) ||
8018 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
8019 SCTP_TCB_UNLOCK(stcb);
8020 return;
8021 }
8022 } else {
8023 SCTP_TCB_UNLOCK(stcb);
8024 return;
8025 }
8026#else
8027 SCTP_TCB_UNLOCK(stcb);
8028 return;
8029#endif
8030 }
8031 type = ip6cp->ip6c_icmp6->icmp6_type;
8032 code = ip6cp->ip6c_icmp6->icmp6_code;
8033 if ((type == ICMP6_DST_UNREACH) &&
8034 (code == ICMP6_DST_UNREACH_NOPORT)) {
8035 type = ICMP6_PARAM_PROB;
8036 code = ICMP6_PARAMPROB_NEXTHEADER;
8037 }
8038 sctp6_notify(inp, stcb, net, type, code,
8039 (uint16_t)ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
8040 } else {
8041#if defined(__FreeBSD__) && __FreeBSD_version < 500000
8042 if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) {
8043 in6_rtchange((struct in6pcb *)inp,
8044 inet6ctlerrmap[cmd]);
8045 }
8046#endif
8047 if ((stcb == NULL) && (inp != NULL)) {
8048 /* reduce inp's ref-count */
8049 SCTP_INP_WLOCK(inp);
8050 SCTP_INP_DECR_REF(inp);
8051 SCTP_INP_WUNLOCK(inp);
8052 }
8053 if (stcb) {
8054 SCTP_TCB_UNLOCK(stcb);
8055 }
8056 }
8057}
8058#endif
8059#endif
8060
8061void
8062sctp_over_udp_stop(void)
8063{
8064 /*
8065 * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writting!
8066 */
8067#ifdef INET
8068 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
8069 soclose(SCTP_BASE_INFO(udp4_tun_socket));
8070 SCTP_BASE_INFO(udp4_tun_socket) = NULL;
8071 }
8072#endif
8073#ifdef INET6
8074 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
8075 soclose(SCTP_BASE_INFO(udp6_tun_socket));
8076 SCTP_BASE_INFO(udp6_tun_socket) = NULL;
8077 }
8078#endif
8079}
8080
8081int
8082sctp_over_udp_start(void)
8083{
8084#if __FreeBSD_version >= 800044
8085 uint16_t port;
8086 int ret;
8087#ifdef INET
8088 struct sockaddr_in sin;
8089#endif
8090#ifdef INET6
8091 struct sockaddr_in6 sin6;
8092#endif
8093 /*
8094 * This function assumes sysctl caller holds sctp_sysctl_info_lock() for writting!
8095 */
8096 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
8097 if (ntohs(port) == 0) {
8098 /* Must have a port set */
8099 return (EINVAL);
8100 }
8101#ifdef INET
8102 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
8103 /* Already running -- must stop first */
8104 return (EALREADY);
8105 }
8106#endif
8107#ifdef INET6
8108 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
8109 /* Already running -- must stop first */
8110 return (EALREADY);
8111 }
8112#endif
8113#ifdef INET
8114 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
8115 SOCK_DGRAM, IPPROTO_UDP,
8116 curthread->td_ucred, curthread))) {
8117 sctp_over_udp_stop();
8118 return (ret);
8119 }
8120 /* Call the special UDP hook. */
8121 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
8122 sctp_recv_udp_tunneled_packet,
8123#if __FreeBSD_version >= 1100000
8124 sctp_recv_icmp_tunneled_packet,
8125#endif
8126 NULL))) {
8127 sctp_over_udp_stop();
8128 return (ret);
8129 }
8130 /* Ok, we have a socket, bind it to the port. */
8131 memset(&sin, 0, sizeof(struct sockaddr_in));
8132 sin.sin_len = sizeof(struct sockaddr_in);
8133 sin.sin_family = AF_INET;
8134 sin.sin_port = htons(port);
8135 if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
8136 (struct sockaddr *)&sin, curthread))) {
8137 sctp_over_udp_stop();
8138 return (ret);
8139 }
8140#endif
8141#ifdef INET6
8142 if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
8143 SOCK_DGRAM, IPPROTO_UDP,
8144 curthread->td_ucred, curthread))) {
8145 sctp_over_udp_stop();
8146 return (ret);
8147 }
8148 /* Call the special UDP hook. */
8149 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
8150 sctp_recv_udp_tunneled_packet,
8151#if __FreeBSD_version >= 1100000
8152 sctp_recv_icmp6_tunneled_packet,
8153#endif
8154 NULL))) {
8155 sctp_over_udp_stop();
8156 return (ret);
8157 }
8158 /* Ok, we have a socket, bind it to the port. */
8159 memset(&sin6, 0, sizeof(struct sockaddr_in6));
8160 sin6.sin6_len = sizeof(struct sockaddr_in6);
8161 sin6.sin6_family = AF_INET6;
8162 sin6.sin6_port = htons(port);
8163 if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
8164 (struct sockaddr *)&sin6, curthread))) {
8165 sctp_over_udp_stop();
8166 return (ret);
8167 }
8168#endif
8169 return (0);
8170#else
8171 return (ENOTSUP);
8172#endif
8173}
8174#endif