blob: dc4b908de4bd03c62b9998ee2db793e7a7a54dba [file] [log] [blame]
James Kuszmaul4cb043c2021-01-17 11:25:51 -08001/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#ifdef __FreeBSD__
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 310642 2016-12-27 22:14:41Z tuexen $");
36#endif
37
38#include <netinet/sctp_os.h>
39#ifdef __FreeBSD__
40#include <sys/proc.h>
41#endif
42#include <netinet/sctp_var.h>
43#include <netinet/sctp_sysctl.h>
44#include <netinet/sctp_header.h>
45#include <netinet/sctp_pcb.h>
46#include <netinet/sctputil.h>
47#include <netinet/sctp_output.h>
48#include <netinet/sctp_uio.h>
49#include <netinet/sctputil.h>
50#include <netinet/sctp_auth.h>
51#include <netinet/sctp_timer.h>
52#include <netinet/sctp_asconf.h>
53#include <netinet/sctp_indata.h>
54#include <netinet/sctp_bsd_addr.h>
55#include <netinet/sctp_input.h>
56#include <netinet/sctp_crc32.h>
57#if defined(__Userspace_os_Linux)
58#define __FAVOR_BSD /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */
59#endif
60#if defined(INET) || defined(INET6)
61#if !defined(__Userspace_os_Windows)
62#include <netinet/udp.h>
63#endif
64#endif
65#if defined(__APPLE__)
66#include <netinet/in.h>
67#endif
68#if defined(__FreeBSD__)
69#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
70#include <netinet/udp_var.h>
71#endif
72#include <machine/in_cksum.h>
73#endif
74#if defined(__Userspace__) && defined(INET6)
75#include <netinet6/sctp6_var.h>
76#endif
77
78#if defined(__APPLE__)
79#define APPLE_FILE_NO 3
80#endif
81
82#if defined(__APPLE__)
83#if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
84#define SCTP_MAX_LINKHDR 16
85#endif
86#endif
87
#define SCTP_MAX_GAPS_INARRAY 4
/*
 * Precomputed decomposition of one byte of the SACK mapping array.
 * Each entry describes the runs of set bits ("gap ack blocks") in a
 * single 8-bit value; see sack_array[256] below, indexed by that byte.
 */
struct sack_track {
	uint8_t right_edge;	/* mergable on the right edge */
	uint8_t left_edge;	/* mergable on the left edge */
	uint8_t num_entries;	/* number of valid runs in gaps[] */
	uint8_t spare;		/* padding, always 0 */
	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};
96
97const struct sack_track sack_array[256] = {
98 {0, 0, 0, 0, /* 0x00 */
99 {{0, 0},
100 {0, 0},
101 {0, 0},
102 {0, 0}
103 }
104 },
105 {1, 0, 1, 0, /* 0x01 */
106 {{0, 0},
107 {0, 0},
108 {0, 0},
109 {0, 0}
110 }
111 },
112 {0, 0, 1, 0, /* 0x02 */
113 {{1, 1},
114 {0, 0},
115 {0, 0},
116 {0, 0}
117 }
118 },
119 {1, 0, 1, 0, /* 0x03 */
120 {{0, 1},
121 {0, 0},
122 {0, 0},
123 {0, 0}
124 }
125 },
126 {0, 0, 1, 0, /* 0x04 */
127 {{2, 2},
128 {0, 0},
129 {0, 0},
130 {0, 0}
131 }
132 },
133 {1, 0, 2, 0, /* 0x05 */
134 {{0, 0},
135 {2, 2},
136 {0, 0},
137 {0, 0}
138 }
139 },
140 {0, 0, 1, 0, /* 0x06 */
141 {{1, 2},
142 {0, 0},
143 {0, 0},
144 {0, 0}
145 }
146 },
147 {1, 0, 1, 0, /* 0x07 */
148 {{0, 2},
149 {0, 0},
150 {0, 0},
151 {0, 0}
152 }
153 },
154 {0, 0, 1, 0, /* 0x08 */
155 {{3, 3},
156 {0, 0},
157 {0, 0},
158 {0, 0}
159 }
160 },
161 {1, 0, 2, 0, /* 0x09 */
162 {{0, 0},
163 {3, 3},
164 {0, 0},
165 {0, 0}
166 }
167 },
168 {0, 0, 2, 0, /* 0x0a */
169 {{1, 1},
170 {3, 3},
171 {0, 0},
172 {0, 0}
173 }
174 },
175 {1, 0, 2, 0, /* 0x0b */
176 {{0, 1},
177 {3, 3},
178 {0, 0},
179 {0, 0}
180 }
181 },
182 {0, 0, 1, 0, /* 0x0c */
183 {{2, 3},
184 {0, 0},
185 {0, 0},
186 {0, 0}
187 }
188 },
189 {1, 0, 2, 0, /* 0x0d */
190 {{0, 0},
191 {2, 3},
192 {0, 0},
193 {0, 0}
194 }
195 },
196 {0, 0, 1, 0, /* 0x0e */
197 {{1, 3},
198 {0, 0},
199 {0, 0},
200 {0, 0}
201 }
202 },
203 {1, 0, 1, 0, /* 0x0f */
204 {{0, 3},
205 {0, 0},
206 {0, 0},
207 {0, 0}
208 }
209 },
210 {0, 0, 1, 0, /* 0x10 */
211 {{4, 4},
212 {0, 0},
213 {0, 0},
214 {0, 0}
215 }
216 },
217 {1, 0, 2, 0, /* 0x11 */
218 {{0, 0},
219 {4, 4},
220 {0, 0},
221 {0, 0}
222 }
223 },
224 {0, 0, 2, 0, /* 0x12 */
225 {{1, 1},
226 {4, 4},
227 {0, 0},
228 {0, 0}
229 }
230 },
231 {1, 0, 2, 0, /* 0x13 */
232 {{0, 1},
233 {4, 4},
234 {0, 0},
235 {0, 0}
236 }
237 },
238 {0, 0, 2, 0, /* 0x14 */
239 {{2, 2},
240 {4, 4},
241 {0, 0},
242 {0, 0}
243 }
244 },
245 {1, 0, 3, 0, /* 0x15 */
246 {{0, 0},
247 {2, 2},
248 {4, 4},
249 {0, 0}
250 }
251 },
252 {0, 0, 2, 0, /* 0x16 */
253 {{1, 2},
254 {4, 4},
255 {0, 0},
256 {0, 0}
257 }
258 },
259 {1, 0, 2, 0, /* 0x17 */
260 {{0, 2},
261 {4, 4},
262 {0, 0},
263 {0, 0}
264 }
265 },
266 {0, 0, 1, 0, /* 0x18 */
267 {{3, 4},
268 {0, 0},
269 {0, 0},
270 {0, 0}
271 }
272 },
273 {1, 0, 2, 0, /* 0x19 */
274 {{0, 0},
275 {3, 4},
276 {0, 0},
277 {0, 0}
278 }
279 },
280 {0, 0, 2, 0, /* 0x1a */
281 {{1, 1},
282 {3, 4},
283 {0, 0},
284 {0, 0}
285 }
286 },
287 {1, 0, 2, 0, /* 0x1b */
288 {{0, 1},
289 {3, 4},
290 {0, 0},
291 {0, 0}
292 }
293 },
294 {0, 0, 1, 0, /* 0x1c */
295 {{2, 4},
296 {0, 0},
297 {0, 0},
298 {0, 0}
299 }
300 },
301 {1, 0, 2, 0, /* 0x1d */
302 {{0, 0},
303 {2, 4},
304 {0, 0},
305 {0, 0}
306 }
307 },
308 {0, 0, 1, 0, /* 0x1e */
309 {{1, 4},
310 {0, 0},
311 {0, 0},
312 {0, 0}
313 }
314 },
315 {1, 0, 1, 0, /* 0x1f */
316 {{0, 4},
317 {0, 0},
318 {0, 0},
319 {0, 0}
320 }
321 },
322 {0, 0, 1, 0, /* 0x20 */
323 {{5, 5},
324 {0, 0},
325 {0, 0},
326 {0, 0}
327 }
328 },
329 {1, 0, 2, 0, /* 0x21 */
330 {{0, 0},
331 {5, 5},
332 {0, 0},
333 {0, 0}
334 }
335 },
336 {0, 0, 2, 0, /* 0x22 */
337 {{1, 1},
338 {5, 5},
339 {0, 0},
340 {0, 0}
341 }
342 },
343 {1, 0, 2, 0, /* 0x23 */
344 {{0, 1},
345 {5, 5},
346 {0, 0},
347 {0, 0}
348 }
349 },
350 {0, 0, 2, 0, /* 0x24 */
351 {{2, 2},
352 {5, 5},
353 {0, 0},
354 {0, 0}
355 }
356 },
357 {1, 0, 3, 0, /* 0x25 */
358 {{0, 0},
359 {2, 2},
360 {5, 5},
361 {0, 0}
362 }
363 },
364 {0, 0, 2, 0, /* 0x26 */
365 {{1, 2},
366 {5, 5},
367 {0, 0},
368 {0, 0}
369 }
370 },
371 {1, 0, 2, 0, /* 0x27 */
372 {{0, 2},
373 {5, 5},
374 {0, 0},
375 {0, 0}
376 }
377 },
378 {0, 0, 2, 0, /* 0x28 */
379 {{3, 3},
380 {5, 5},
381 {0, 0},
382 {0, 0}
383 }
384 },
385 {1, 0, 3, 0, /* 0x29 */
386 {{0, 0},
387 {3, 3},
388 {5, 5},
389 {0, 0}
390 }
391 },
392 {0, 0, 3, 0, /* 0x2a */
393 {{1, 1},
394 {3, 3},
395 {5, 5},
396 {0, 0}
397 }
398 },
399 {1, 0, 3, 0, /* 0x2b */
400 {{0, 1},
401 {3, 3},
402 {5, 5},
403 {0, 0}
404 }
405 },
406 {0, 0, 2, 0, /* 0x2c */
407 {{2, 3},
408 {5, 5},
409 {0, 0},
410 {0, 0}
411 }
412 },
413 {1, 0, 3, 0, /* 0x2d */
414 {{0, 0},
415 {2, 3},
416 {5, 5},
417 {0, 0}
418 }
419 },
420 {0, 0, 2, 0, /* 0x2e */
421 {{1, 3},
422 {5, 5},
423 {0, 0},
424 {0, 0}
425 }
426 },
427 {1, 0, 2, 0, /* 0x2f */
428 {{0, 3},
429 {5, 5},
430 {0, 0},
431 {0, 0}
432 }
433 },
434 {0, 0, 1, 0, /* 0x30 */
435 {{4, 5},
436 {0, 0},
437 {0, 0},
438 {0, 0}
439 }
440 },
441 {1, 0, 2, 0, /* 0x31 */
442 {{0, 0},
443 {4, 5},
444 {0, 0},
445 {0, 0}
446 }
447 },
448 {0, 0, 2, 0, /* 0x32 */
449 {{1, 1},
450 {4, 5},
451 {0, 0},
452 {0, 0}
453 }
454 },
455 {1, 0, 2, 0, /* 0x33 */
456 {{0, 1},
457 {4, 5},
458 {0, 0},
459 {0, 0}
460 }
461 },
462 {0, 0, 2, 0, /* 0x34 */
463 {{2, 2},
464 {4, 5},
465 {0, 0},
466 {0, 0}
467 }
468 },
469 {1, 0, 3, 0, /* 0x35 */
470 {{0, 0},
471 {2, 2},
472 {4, 5},
473 {0, 0}
474 }
475 },
476 {0, 0, 2, 0, /* 0x36 */
477 {{1, 2},
478 {4, 5},
479 {0, 0},
480 {0, 0}
481 }
482 },
483 {1, 0, 2, 0, /* 0x37 */
484 {{0, 2},
485 {4, 5},
486 {0, 0},
487 {0, 0}
488 }
489 },
490 {0, 0, 1, 0, /* 0x38 */
491 {{3, 5},
492 {0, 0},
493 {0, 0},
494 {0, 0}
495 }
496 },
497 {1, 0, 2, 0, /* 0x39 */
498 {{0, 0},
499 {3, 5},
500 {0, 0},
501 {0, 0}
502 }
503 },
504 {0, 0, 2, 0, /* 0x3a */
505 {{1, 1},
506 {3, 5},
507 {0, 0},
508 {0, 0}
509 }
510 },
511 {1, 0, 2, 0, /* 0x3b */
512 {{0, 1},
513 {3, 5},
514 {0, 0},
515 {0, 0}
516 }
517 },
518 {0, 0, 1, 0, /* 0x3c */
519 {{2, 5},
520 {0, 0},
521 {0, 0},
522 {0, 0}
523 }
524 },
525 {1, 0, 2, 0, /* 0x3d */
526 {{0, 0},
527 {2, 5},
528 {0, 0},
529 {0, 0}
530 }
531 },
532 {0, 0, 1, 0, /* 0x3e */
533 {{1, 5},
534 {0, 0},
535 {0, 0},
536 {0, 0}
537 }
538 },
539 {1, 0, 1, 0, /* 0x3f */
540 {{0, 5},
541 {0, 0},
542 {0, 0},
543 {0, 0}
544 }
545 },
546 {0, 0, 1, 0, /* 0x40 */
547 {{6, 6},
548 {0, 0},
549 {0, 0},
550 {0, 0}
551 }
552 },
553 {1, 0, 2, 0, /* 0x41 */
554 {{0, 0},
555 {6, 6},
556 {0, 0},
557 {0, 0}
558 }
559 },
560 {0, 0, 2, 0, /* 0x42 */
561 {{1, 1},
562 {6, 6},
563 {0, 0},
564 {0, 0}
565 }
566 },
567 {1, 0, 2, 0, /* 0x43 */
568 {{0, 1},
569 {6, 6},
570 {0, 0},
571 {0, 0}
572 }
573 },
574 {0, 0, 2, 0, /* 0x44 */
575 {{2, 2},
576 {6, 6},
577 {0, 0},
578 {0, 0}
579 }
580 },
581 {1, 0, 3, 0, /* 0x45 */
582 {{0, 0},
583 {2, 2},
584 {6, 6},
585 {0, 0}
586 }
587 },
588 {0, 0, 2, 0, /* 0x46 */
589 {{1, 2},
590 {6, 6},
591 {0, 0},
592 {0, 0}
593 }
594 },
595 {1, 0, 2, 0, /* 0x47 */
596 {{0, 2},
597 {6, 6},
598 {0, 0},
599 {0, 0}
600 }
601 },
602 {0, 0, 2, 0, /* 0x48 */
603 {{3, 3},
604 {6, 6},
605 {0, 0},
606 {0, 0}
607 }
608 },
609 {1, 0, 3, 0, /* 0x49 */
610 {{0, 0},
611 {3, 3},
612 {6, 6},
613 {0, 0}
614 }
615 },
616 {0, 0, 3, 0, /* 0x4a */
617 {{1, 1},
618 {3, 3},
619 {6, 6},
620 {0, 0}
621 }
622 },
623 {1, 0, 3, 0, /* 0x4b */
624 {{0, 1},
625 {3, 3},
626 {6, 6},
627 {0, 0}
628 }
629 },
630 {0, 0, 2, 0, /* 0x4c */
631 {{2, 3},
632 {6, 6},
633 {0, 0},
634 {0, 0}
635 }
636 },
637 {1, 0, 3, 0, /* 0x4d */
638 {{0, 0},
639 {2, 3},
640 {6, 6},
641 {0, 0}
642 }
643 },
644 {0, 0, 2, 0, /* 0x4e */
645 {{1, 3},
646 {6, 6},
647 {0, 0},
648 {0, 0}
649 }
650 },
651 {1, 0, 2, 0, /* 0x4f */
652 {{0, 3},
653 {6, 6},
654 {0, 0},
655 {0, 0}
656 }
657 },
658 {0, 0, 2, 0, /* 0x50 */
659 {{4, 4},
660 {6, 6},
661 {0, 0},
662 {0, 0}
663 }
664 },
665 {1, 0, 3, 0, /* 0x51 */
666 {{0, 0},
667 {4, 4},
668 {6, 6},
669 {0, 0}
670 }
671 },
672 {0, 0, 3, 0, /* 0x52 */
673 {{1, 1},
674 {4, 4},
675 {6, 6},
676 {0, 0}
677 }
678 },
679 {1, 0, 3, 0, /* 0x53 */
680 {{0, 1},
681 {4, 4},
682 {6, 6},
683 {0, 0}
684 }
685 },
686 {0, 0, 3, 0, /* 0x54 */
687 {{2, 2},
688 {4, 4},
689 {6, 6},
690 {0, 0}
691 }
692 },
693 {1, 0, 4, 0, /* 0x55 */
694 {{0, 0},
695 {2, 2},
696 {4, 4},
697 {6, 6}
698 }
699 },
700 {0, 0, 3, 0, /* 0x56 */
701 {{1, 2},
702 {4, 4},
703 {6, 6},
704 {0, 0}
705 }
706 },
707 {1, 0, 3, 0, /* 0x57 */
708 {{0, 2},
709 {4, 4},
710 {6, 6},
711 {0, 0}
712 }
713 },
714 {0, 0, 2, 0, /* 0x58 */
715 {{3, 4},
716 {6, 6},
717 {0, 0},
718 {0, 0}
719 }
720 },
721 {1, 0, 3, 0, /* 0x59 */
722 {{0, 0},
723 {3, 4},
724 {6, 6},
725 {0, 0}
726 }
727 },
728 {0, 0, 3, 0, /* 0x5a */
729 {{1, 1},
730 {3, 4},
731 {6, 6},
732 {0, 0}
733 }
734 },
735 {1, 0, 3, 0, /* 0x5b */
736 {{0, 1},
737 {3, 4},
738 {6, 6},
739 {0, 0}
740 }
741 },
742 {0, 0, 2, 0, /* 0x5c */
743 {{2, 4},
744 {6, 6},
745 {0, 0},
746 {0, 0}
747 }
748 },
749 {1, 0, 3, 0, /* 0x5d */
750 {{0, 0},
751 {2, 4},
752 {6, 6},
753 {0, 0}
754 }
755 },
756 {0, 0, 2, 0, /* 0x5e */
757 {{1, 4},
758 {6, 6},
759 {0, 0},
760 {0, 0}
761 }
762 },
763 {1, 0, 2, 0, /* 0x5f */
764 {{0, 4},
765 {6, 6},
766 {0, 0},
767 {0, 0}
768 }
769 },
770 {0, 0, 1, 0, /* 0x60 */
771 {{5, 6},
772 {0, 0},
773 {0, 0},
774 {0, 0}
775 }
776 },
777 {1, 0, 2, 0, /* 0x61 */
778 {{0, 0},
779 {5, 6},
780 {0, 0},
781 {0, 0}
782 }
783 },
784 {0, 0, 2, 0, /* 0x62 */
785 {{1, 1},
786 {5, 6},
787 {0, 0},
788 {0, 0}
789 }
790 },
791 {1, 0, 2, 0, /* 0x63 */
792 {{0, 1},
793 {5, 6},
794 {0, 0},
795 {0, 0}
796 }
797 },
798 {0, 0, 2, 0, /* 0x64 */
799 {{2, 2},
800 {5, 6},
801 {0, 0},
802 {0, 0}
803 }
804 },
805 {1, 0, 3, 0, /* 0x65 */
806 {{0, 0},
807 {2, 2},
808 {5, 6},
809 {0, 0}
810 }
811 },
812 {0, 0, 2, 0, /* 0x66 */
813 {{1, 2},
814 {5, 6},
815 {0, 0},
816 {0, 0}
817 }
818 },
819 {1, 0, 2, 0, /* 0x67 */
820 {{0, 2},
821 {5, 6},
822 {0, 0},
823 {0, 0}
824 }
825 },
826 {0, 0, 2, 0, /* 0x68 */
827 {{3, 3},
828 {5, 6},
829 {0, 0},
830 {0, 0}
831 }
832 },
833 {1, 0, 3, 0, /* 0x69 */
834 {{0, 0},
835 {3, 3},
836 {5, 6},
837 {0, 0}
838 }
839 },
840 {0, 0, 3, 0, /* 0x6a */
841 {{1, 1},
842 {3, 3},
843 {5, 6},
844 {0, 0}
845 }
846 },
847 {1, 0, 3, 0, /* 0x6b */
848 {{0, 1},
849 {3, 3},
850 {5, 6},
851 {0, 0}
852 }
853 },
854 {0, 0, 2, 0, /* 0x6c */
855 {{2, 3},
856 {5, 6},
857 {0, 0},
858 {0, 0}
859 }
860 },
861 {1, 0, 3, 0, /* 0x6d */
862 {{0, 0},
863 {2, 3},
864 {5, 6},
865 {0, 0}
866 }
867 },
868 {0, 0, 2, 0, /* 0x6e */
869 {{1, 3},
870 {5, 6},
871 {0, 0},
872 {0, 0}
873 }
874 },
875 {1, 0, 2, 0, /* 0x6f */
876 {{0, 3},
877 {5, 6},
878 {0, 0},
879 {0, 0}
880 }
881 },
882 {0, 0, 1, 0, /* 0x70 */
883 {{4, 6},
884 {0, 0},
885 {0, 0},
886 {0, 0}
887 }
888 },
889 {1, 0, 2, 0, /* 0x71 */
890 {{0, 0},
891 {4, 6},
892 {0, 0},
893 {0, 0}
894 }
895 },
896 {0, 0, 2, 0, /* 0x72 */
897 {{1, 1},
898 {4, 6},
899 {0, 0},
900 {0, 0}
901 }
902 },
903 {1, 0, 2, 0, /* 0x73 */
904 {{0, 1},
905 {4, 6},
906 {0, 0},
907 {0, 0}
908 }
909 },
910 {0, 0, 2, 0, /* 0x74 */
911 {{2, 2},
912 {4, 6},
913 {0, 0},
914 {0, 0}
915 }
916 },
917 {1, 0, 3, 0, /* 0x75 */
918 {{0, 0},
919 {2, 2},
920 {4, 6},
921 {0, 0}
922 }
923 },
924 {0, 0, 2, 0, /* 0x76 */
925 {{1, 2},
926 {4, 6},
927 {0, 0},
928 {0, 0}
929 }
930 },
931 {1, 0, 2, 0, /* 0x77 */
932 {{0, 2},
933 {4, 6},
934 {0, 0},
935 {0, 0}
936 }
937 },
938 {0, 0, 1, 0, /* 0x78 */
939 {{3, 6},
940 {0, 0},
941 {0, 0},
942 {0, 0}
943 }
944 },
945 {1, 0, 2, 0, /* 0x79 */
946 {{0, 0},
947 {3, 6},
948 {0, 0},
949 {0, 0}
950 }
951 },
952 {0, 0, 2, 0, /* 0x7a */
953 {{1, 1},
954 {3, 6},
955 {0, 0},
956 {0, 0}
957 }
958 },
959 {1, 0, 2, 0, /* 0x7b */
960 {{0, 1},
961 {3, 6},
962 {0, 0},
963 {0, 0}
964 }
965 },
966 {0, 0, 1, 0, /* 0x7c */
967 {{2, 6},
968 {0, 0},
969 {0, 0},
970 {0, 0}
971 }
972 },
973 {1, 0, 2, 0, /* 0x7d */
974 {{0, 0},
975 {2, 6},
976 {0, 0},
977 {0, 0}
978 }
979 },
980 {0, 0, 1, 0, /* 0x7e */
981 {{1, 6},
982 {0, 0},
983 {0, 0},
984 {0, 0}
985 }
986 },
987 {1, 0, 1, 0, /* 0x7f */
988 {{0, 6},
989 {0, 0},
990 {0, 0},
991 {0, 0}
992 }
993 },
994 {0, 1, 1, 0, /* 0x80 */
995 {{7, 7},
996 {0, 0},
997 {0, 0},
998 {0, 0}
999 }
1000 },
1001 {1, 1, 2, 0, /* 0x81 */
1002 {{0, 0},
1003 {7, 7},
1004 {0, 0},
1005 {0, 0}
1006 }
1007 },
1008 {0, 1, 2, 0, /* 0x82 */
1009 {{1, 1},
1010 {7, 7},
1011 {0, 0},
1012 {0, 0}
1013 }
1014 },
1015 {1, 1, 2, 0, /* 0x83 */
1016 {{0, 1},
1017 {7, 7},
1018 {0, 0},
1019 {0, 0}
1020 }
1021 },
1022 {0, 1, 2, 0, /* 0x84 */
1023 {{2, 2},
1024 {7, 7},
1025 {0, 0},
1026 {0, 0}
1027 }
1028 },
1029 {1, 1, 3, 0, /* 0x85 */
1030 {{0, 0},
1031 {2, 2},
1032 {7, 7},
1033 {0, 0}
1034 }
1035 },
1036 {0, 1, 2, 0, /* 0x86 */
1037 {{1, 2},
1038 {7, 7},
1039 {0, 0},
1040 {0, 0}
1041 }
1042 },
1043 {1, 1, 2, 0, /* 0x87 */
1044 {{0, 2},
1045 {7, 7},
1046 {0, 0},
1047 {0, 0}
1048 }
1049 },
1050 {0, 1, 2, 0, /* 0x88 */
1051 {{3, 3},
1052 {7, 7},
1053 {0, 0},
1054 {0, 0}
1055 }
1056 },
1057 {1, 1, 3, 0, /* 0x89 */
1058 {{0, 0},
1059 {3, 3},
1060 {7, 7},
1061 {0, 0}
1062 }
1063 },
1064 {0, 1, 3, 0, /* 0x8a */
1065 {{1, 1},
1066 {3, 3},
1067 {7, 7},
1068 {0, 0}
1069 }
1070 },
1071 {1, 1, 3, 0, /* 0x8b */
1072 {{0, 1},
1073 {3, 3},
1074 {7, 7},
1075 {0, 0}
1076 }
1077 },
1078 {0, 1, 2, 0, /* 0x8c */
1079 {{2, 3},
1080 {7, 7},
1081 {0, 0},
1082 {0, 0}
1083 }
1084 },
1085 {1, 1, 3, 0, /* 0x8d */
1086 {{0, 0},
1087 {2, 3},
1088 {7, 7},
1089 {0, 0}
1090 }
1091 },
1092 {0, 1, 2, 0, /* 0x8e */
1093 {{1, 3},
1094 {7, 7},
1095 {0, 0},
1096 {0, 0}
1097 }
1098 },
1099 {1, 1, 2, 0, /* 0x8f */
1100 {{0, 3},
1101 {7, 7},
1102 {0, 0},
1103 {0, 0}
1104 }
1105 },
1106 {0, 1, 2, 0, /* 0x90 */
1107 {{4, 4},
1108 {7, 7},
1109 {0, 0},
1110 {0, 0}
1111 }
1112 },
1113 {1, 1, 3, 0, /* 0x91 */
1114 {{0, 0},
1115 {4, 4},
1116 {7, 7},
1117 {0, 0}
1118 }
1119 },
1120 {0, 1, 3, 0, /* 0x92 */
1121 {{1, 1},
1122 {4, 4},
1123 {7, 7},
1124 {0, 0}
1125 }
1126 },
1127 {1, 1, 3, 0, /* 0x93 */
1128 {{0, 1},
1129 {4, 4},
1130 {7, 7},
1131 {0, 0}
1132 }
1133 },
1134 {0, 1, 3, 0, /* 0x94 */
1135 {{2, 2},
1136 {4, 4},
1137 {7, 7},
1138 {0, 0}
1139 }
1140 },
1141 {1, 1, 4, 0, /* 0x95 */
1142 {{0, 0},
1143 {2, 2},
1144 {4, 4},
1145 {7, 7}
1146 }
1147 },
1148 {0, 1, 3, 0, /* 0x96 */
1149 {{1, 2},
1150 {4, 4},
1151 {7, 7},
1152 {0, 0}
1153 }
1154 },
1155 {1, 1, 3, 0, /* 0x97 */
1156 {{0, 2},
1157 {4, 4},
1158 {7, 7},
1159 {0, 0}
1160 }
1161 },
1162 {0, 1, 2, 0, /* 0x98 */
1163 {{3, 4},
1164 {7, 7},
1165 {0, 0},
1166 {0, 0}
1167 }
1168 },
1169 {1, 1, 3, 0, /* 0x99 */
1170 {{0, 0},
1171 {3, 4},
1172 {7, 7},
1173 {0, 0}
1174 }
1175 },
1176 {0, 1, 3, 0, /* 0x9a */
1177 {{1, 1},
1178 {3, 4},
1179 {7, 7},
1180 {0, 0}
1181 }
1182 },
1183 {1, 1, 3, 0, /* 0x9b */
1184 {{0, 1},
1185 {3, 4},
1186 {7, 7},
1187 {0, 0}
1188 }
1189 },
1190 {0, 1, 2, 0, /* 0x9c */
1191 {{2, 4},
1192 {7, 7},
1193 {0, 0},
1194 {0, 0}
1195 }
1196 },
1197 {1, 1, 3, 0, /* 0x9d */
1198 {{0, 0},
1199 {2, 4},
1200 {7, 7},
1201 {0, 0}
1202 }
1203 },
1204 {0, 1, 2, 0, /* 0x9e */
1205 {{1, 4},
1206 {7, 7},
1207 {0, 0},
1208 {0, 0}
1209 }
1210 },
1211 {1, 1, 2, 0, /* 0x9f */
1212 {{0, 4},
1213 {7, 7},
1214 {0, 0},
1215 {0, 0}
1216 }
1217 },
1218 {0, 1, 2, 0, /* 0xa0 */
1219 {{5, 5},
1220 {7, 7},
1221 {0, 0},
1222 {0, 0}
1223 }
1224 },
1225 {1, 1, 3, 0, /* 0xa1 */
1226 {{0, 0},
1227 {5, 5},
1228 {7, 7},
1229 {0, 0}
1230 }
1231 },
1232 {0, 1, 3, 0, /* 0xa2 */
1233 {{1, 1},
1234 {5, 5},
1235 {7, 7},
1236 {0, 0}
1237 }
1238 },
1239 {1, 1, 3, 0, /* 0xa3 */
1240 {{0, 1},
1241 {5, 5},
1242 {7, 7},
1243 {0, 0}
1244 }
1245 },
1246 {0, 1, 3, 0, /* 0xa4 */
1247 {{2, 2},
1248 {5, 5},
1249 {7, 7},
1250 {0, 0}
1251 }
1252 },
1253 {1, 1, 4, 0, /* 0xa5 */
1254 {{0, 0},
1255 {2, 2},
1256 {5, 5},
1257 {7, 7}
1258 }
1259 },
1260 {0, 1, 3, 0, /* 0xa6 */
1261 {{1, 2},
1262 {5, 5},
1263 {7, 7},
1264 {0, 0}
1265 }
1266 },
1267 {1, 1, 3, 0, /* 0xa7 */
1268 {{0, 2},
1269 {5, 5},
1270 {7, 7},
1271 {0, 0}
1272 }
1273 },
1274 {0, 1, 3, 0, /* 0xa8 */
1275 {{3, 3},
1276 {5, 5},
1277 {7, 7},
1278 {0, 0}
1279 }
1280 },
1281 {1, 1, 4, 0, /* 0xa9 */
1282 {{0, 0},
1283 {3, 3},
1284 {5, 5},
1285 {7, 7}
1286 }
1287 },
1288 {0, 1, 4, 0, /* 0xaa */
1289 {{1, 1},
1290 {3, 3},
1291 {5, 5},
1292 {7, 7}
1293 }
1294 },
1295 {1, 1, 4, 0, /* 0xab */
1296 {{0, 1},
1297 {3, 3},
1298 {5, 5},
1299 {7, 7}
1300 }
1301 },
1302 {0, 1, 3, 0, /* 0xac */
1303 {{2, 3},
1304 {5, 5},
1305 {7, 7},
1306 {0, 0}
1307 }
1308 },
1309 {1, 1, 4, 0, /* 0xad */
1310 {{0, 0},
1311 {2, 3},
1312 {5, 5},
1313 {7, 7}
1314 }
1315 },
1316 {0, 1, 3, 0, /* 0xae */
1317 {{1, 3},
1318 {5, 5},
1319 {7, 7},
1320 {0, 0}
1321 }
1322 },
1323 {1, 1, 3, 0, /* 0xaf */
1324 {{0, 3},
1325 {5, 5},
1326 {7, 7},
1327 {0, 0}
1328 }
1329 },
1330 {0, 1, 2, 0, /* 0xb0 */
1331 {{4, 5},
1332 {7, 7},
1333 {0, 0},
1334 {0, 0}
1335 }
1336 },
1337 {1, 1, 3, 0, /* 0xb1 */
1338 {{0, 0},
1339 {4, 5},
1340 {7, 7},
1341 {0, 0}
1342 }
1343 },
1344 {0, 1, 3, 0, /* 0xb2 */
1345 {{1, 1},
1346 {4, 5},
1347 {7, 7},
1348 {0, 0}
1349 }
1350 },
1351 {1, 1, 3, 0, /* 0xb3 */
1352 {{0, 1},
1353 {4, 5},
1354 {7, 7},
1355 {0, 0}
1356 }
1357 },
1358 {0, 1, 3, 0, /* 0xb4 */
1359 {{2, 2},
1360 {4, 5},
1361 {7, 7},
1362 {0, 0}
1363 }
1364 },
1365 {1, 1, 4, 0, /* 0xb5 */
1366 {{0, 0},
1367 {2, 2},
1368 {4, 5},
1369 {7, 7}
1370 }
1371 },
1372 {0, 1, 3, 0, /* 0xb6 */
1373 {{1, 2},
1374 {4, 5},
1375 {7, 7},
1376 {0, 0}
1377 }
1378 },
1379 {1, 1, 3, 0, /* 0xb7 */
1380 {{0, 2},
1381 {4, 5},
1382 {7, 7},
1383 {0, 0}
1384 }
1385 },
1386 {0, 1, 2, 0, /* 0xb8 */
1387 {{3, 5},
1388 {7, 7},
1389 {0, 0},
1390 {0, 0}
1391 }
1392 },
1393 {1, 1, 3, 0, /* 0xb9 */
1394 {{0, 0},
1395 {3, 5},
1396 {7, 7},
1397 {0, 0}
1398 }
1399 },
1400 {0, 1, 3, 0, /* 0xba */
1401 {{1, 1},
1402 {3, 5},
1403 {7, 7},
1404 {0, 0}
1405 }
1406 },
1407 {1, 1, 3, 0, /* 0xbb */
1408 {{0, 1},
1409 {3, 5},
1410 {7, 7},
1411 {0, 0}
1412 }
1413 },
1414 {0, 1, 2, 0, /* 0xbc */
1415 {{2, 5},
1416 {7, 7},
1417 {0, 0},
1418 {0, 0}
1419 }
1420 },
1421 {1, 1, 3, 0, /* 0xbd */
1422 {{0, 0},
1423 {2, 5},
1424 {7, 7},
1425 {0, 0}
1426 }
1427 },
1428 {0, 1, 2, 0, /* 0xbe */
1429 {{1, 5},
1430 {7, 7},
1431 {0, 0},
1432 {0, 0}
1433 }
1434 },
1435 {1, 1, 2, 0, /* 0xbf */
1436 {{0, 5},
1437 {7, 7},
1438 {0, 0},
1439 {0, 0}
1440 }
1441 },
1442 {0, 1, 1, 0, /* 0xc0 */
1443 {{6, 7},
1444 {0, 0},
1445 {0, 0},
1446 {0, 0}
1447 }
1448 },
1449 {1, 1, 2, 0, /* 0xc1 */
1450 {{0, 0},
1451 {6, 7},
1452 {0, 0},
1453 {0, 0}
1454 }
1455 },
1456 {0, 1, 2, 0, /* 0xc2 */
1457 {{1, 1},
1458 {6, 7},
1459 {0, 0},
1460 {0, 0}
1461 }
1462 },
1463 {1, 1, 2, 0, /* 0xc3 */
1464 {{0, 1},
1465 {6, 7},
1466 {0, 0},
1467 {0, 0}
1468 }
1469 },
1470 {0, 1, 2, 0, /* 0xc4 */
1471 {{2, 2},
1472 {6, 7},
1473 {0, 0},
1474 {0, 0}
1475 }
1476 },
1477 {1, 1, 3, 0, /* 0xc5 */
1478 {{0, 0},
1479 {2, 2},
1480 {6, 7},
1481 {0, 0}
1482 }
1483 },
1484 {0, 1, 2, 0, /* 0xc6 */
1485 {{1, 2},
1486 {6, 7},
1487 {0, 0},
1488 {0, 0}
1489 }
1490 },
1491 {1, 1, 2, 0, /* 0xc7 */
1492 {{0, 2},
1493 {6, 7},
1494 {0, 0},
1495 {0, 0}
1496 }
1497 },
1498 {0, 1, 2, 0, /* 0xc8 */
1499 {{3, 3},
1500 {6, 7},
1501 {0, 0},
1502 {0, 0}
1503 }
1504 },
1505 {1, 1, 3, 0, /* 0xc9 */
1506 {{0, 0},
1507 {3, 3},
1508 {6, 7},
1509 {0, 0}
1510 }
1511 },
1512 {0, 1, 3, 0, /* 0xca */
1513 {{1, 1},
1514 {3, 3},
1515 {6, 7},
1516 {0, 0}
1517 }
1518 },
1519 {1, 1, 3, 0, /* 0xcb */
1520 {{0, 1},
1521 {3, 3},
1522 {6, 7},
1523 {0, 0}
1524 }
1525 },
1526 {0, 1, 2, 0, /* 0xcc */
1527 {{2, 3},
1528 {6, 7},
1529 {0, 0},
1530 {0, 0}
1531 }
1532 },
1533 {1, 1, 3, 0, /* 0xcd */
1534 {{0, 0},
1535 {2, 3},
1536 {6, 7},
1537 {0, 0}
1538 }
1539 },
1540 {0, 1, 2, 0, /* 0xce */
1541 {{1, 3},
1542 {6, 7},
1543 {0, 0},
1544 {0, 0}
1545 }
1546 },
1547 {1, 1, 2, 0, /* 0xcf */
1548 {{0, 3},
1549 {6, 7},
1550 {0, 0},
1551 {0, 0}
1552 }
1553 },
1554 {0, 1, 2, 0, /* 0xd0 */
1555 {{4, 4},
1556 {6, 7},
1557 {0, 0},
1558 {0, 0}
1559 }
1560 },
1561 {1, 1, 3, 0, /* 0xd1 */
1562 {{0, 0},
1563 {4, 4},
1564 {6, 7},
1565 {0, 0}
1566 }
1567 },
1568 {0, 1, 3, 0, /* 0xd2 */
1569 {{1, 1},
1570 {4, 4},
1571 {6, 7},
1572 {0, 0}
1573 }
1574 },
1575 {1, 1, 3, 0, /* 0xd3 */
1576 {{0, 1},
1577 {4, 4},
1578 {6, 7},
1579 {0, 0}
1580 }
1581 },
1582 {0, 1, 3, 0, /* 0xd4 */
1583 {{2, 2},
1584 {4, 4},
1585 {6, 7},
1586 {0, 0}
1587 }
1588 },
1589 {1, 1, 4, 0, /* 0xd5 */
1590 {{0, 0},
1591 {2, 2},
1592 {4, 4},
1593 {6, 7}
1594 }
1595 },
1596 {0, 1, 3, 0, /* 0xd6 */
1597 {{1, 2},
1598 {4, 4},
1599 {6, 7},
1600 {0, 0}
1601 }
1602 },
1603 {1, 1, 3, 0, /* 0xd7 */
1604 {{0, 2},
1605 {4, 4},
1606 {6, 7},
1607 {0, 0}
1608 }
1609 },
1610 {0, 1, 2, 0, /* 0xd8 */
1611 {{3, 4},
1612 {6, 7},
1613 {0, 0},
1614 {0, 0}
1615 }
1616 },
1617 {1, 1, 3, 0, /* 0xd9 */
1618 {{0, 0},
1619 {3, 4},
1620 {6, 7},
1621 {0, 0}
1622 }
1623 },
1624 {0, 1, 3, 0, /* 0xda */
1625 {{1, 1},
1626 {3, 4},
1627 {6, 7},
1628 {0, 0}
1629 }
1630 },
1631 {1, 1, 3, 0, /* 0xdb */
1632 {{0, 1},
1633 {3, 4},
1634 {6, 7},
1635 {0, 0}
1636 }
1637 },
1638 {0, 1, 2, 0, /* 0xdc */
1639 {{2, 4},
1640 {6, 7},
1641 {0, 0},
1642 {0, 0}
1643 }
1644 },
1645 {1, 1, 3, 0, /* 0xdd */
1646 {{0, 0},
1647 {2, 4},
1648 {6, 7},
1649 {0, 0}
1650 }
1651 },
1652 {0, 1, 2, 0, /* 0xde */
1653 {{1, 4},
1654 {6, 7},
1655 {0, 0},
1656 {0, 0}
1657 }
1658 },
1659 {1, 1, 2, 0, /* 0xdf */
1660 {{0, 4},
1661 {6, 7},
1662 {0, 0},
1663 {0, 0}
1664 }
1665 },
1666 {0, 1, 1, 0, /* 0xe0 */
1667 {{5, 7},
1668 {0, 0},
1669 {0, 0},
1670 {0, 0}
1671 }
1672 },
1673 {1, 1, 2, 0, /* 0xe1 */
1674 {{0, 0},
1675 {5, 7},
1676 {0, 0},
1677 {0, 0}
1678 }
1679 },
1680 {0, 1, 2, 0, /* 0xe2 */
1681 {{1, 1},
1682 {5, 7},
1683 {0, 0},
1684 {0, 0}
1685 }
1686 },
1687 {1, 1, 2, 0, /* 0xe3 */
1688 {{0, 1},
1689 {5, 7},
1690 {0, 0},
1691 {0, 0}
1692 }
1693 },
1694 {0, 1, 2, 0, /* 0xe4 */
1695 {{2, 2},
1696 {5, 7},
1697 {0, 0},
1698 {0, 0}
1699 }
1700 },
1701 {1, 1, 3, 0, /* 0xe5 */
1702 {{0, 0},
1703 {2, 2},
1704 {5, 7},
1705 {0, 0}
1706 }
1707 },
1708 {0, 1, 2, 0, /* 0xe6 */
1709 {{1, 2},
1710 {5, 7},
1711 {0, 0},
1712 {0, 0}
1713 }
1714 },
1715 {1, 1, 2, 0, /* 0xe7 */
1716 {{0, 2},
1717 {5, 7},
1718 {0, 0},
1719 {0, 0}
1720 }
1721 },
1722 {0, 1, 2, 0, /* 0xe8 */
1723 {{3, 3},
1724 {5, 7},
1725 {0, 0},
1726 {0, 0}
1727 }
1728 },
1729 {1, 1, 3, 0, /* 0xe9 */
1730 {{0, 0},
1731 {3, 3},
1732 {5, 7},
1733 {0, 0}
1734 }
1735 },
1736 {0, 1, 3, 0, /* 0xea */
1737 {{1, 1},
1738 {3, 3},
1739 {5, 7},
1740 {0, 0}
1741 }
1742 },
1743 {1, 1, 3, 0, /* 0xeb */
1744 {{0, 1},
1745 {3, 3},
1746 {5, 7},
1747 {0, 0}
1748 }
1749 },
1750 {0, 1, 2, 0, /* 0xec */
1751 {{2, 3},
1752 {5, 7},
1753 {0, 0},
1754 {0, 0}
1755 }
1756 },
1757 {1, 1, 3, 0, /* 0xed */
1758 {{0, 0},
1759 {2, 3},
1760 {5, 7},
1761 {0, 0}
1762 }
1763 },
1764 {0, 1, 2, 0, /* 0xee */
1765 {{1, 3},
1766 {5, 7},
1767 {0, 0},
1768 {0, 0}
1769 }
1770 },
1771 {1, 1, 2, 0, /* 0xef */
1772 {{0, 3},
1773 {5, 7},
1774 {0, 0},
1775 {0, 0}
1776 }
1777 },
1778 {0, 1, 1, 0, /* 0xf0 */
1779 {{4, 7},
1780 {0, 0},
1781 {0, 0},
1782 {0, 0}
1783 }
1784 },
1785 {1, 1, 2, 0, /* 0xf1 */
1786 {{0, 0},
1787 {4, 7},
1788 {0, 0},
1789 {0, 0}
1790 }
1791 },
1792 {0, 1, 2, 0, /* 0xf2 */
1793 {{1, 1},
1794 {4, 7},
1795 {0, 0},
1796 {0, 0}
1797 }
1798 },
1799 {1, 1, 2, 0, /* 0xf3 */
1800 {{0, 1},
1801 {4, 7},
1802 {0, 0},
1803 {0, 0}
1804 }
1805 },
1806 {0, 1, 2, 0, /* 0xf4 */
1807 {{2, 2},
1808 {4, 7},
1809 {0, 0},
1810 {0, 0}
1811 }
1812 },
1813 {1, 1, 3, 0, /* 0xf5 */
1814 {{0, 0},
1815 {2, 2},
1816 {4, 7},
1817 {0, 0}
1818 }
1819 },
1820 {0, 1, 2, 0, /* 0xf6 */
1821 {{1, 2},
1822 {4, 7},
1823 {0, 0},
1824 {0, 0}
1825 }
1826 },
1827 {1, 1, 2, 0, /* 0xf7 */
1828 {{0, 2},
1829 {4, 7},
1830 {0, 0},
1831 {0, 0}
1832 }
1833 },
1834 {0, 1, 1, 0, /* 0xf8 */
1835 {{3, 7},
1836 {0, 0},
1837 {0, 0},
1838 {0, 0}
1839 }
1840 },
1841 {1, 1, 2, 0, /* 0xf9 */
1842 {{0, 0},
1843 {3, 7},
1844 {0, 0},
1845 {0, 0}
1846 }
1847 },
1848 {0, 1, 2, 0, /* 0xfa */
1849 {{1, 1},
1850 {3, 7},
1851 {0, 0},
1852 {0, 0}
1853 }
1854 },
1855 {1, 1, 2, 0, /* 0xfb */
1856 {{0, 1},
1857 {3, 7},
1858 {0, 0},
1859 {0, 0}
1860 }
1861 },
1862 {0, 1, 1, 0, /* 0xfc */
1863 {{2, 7},
1864 {0, 0},
1865 {0, 0},
1866 {0, 0}
1867 }
1868 },
1869 {1, 1, 2, 0, /* 0xfd */
1870 {{0, 0},
1871 {2, 7},
1872 {0, 0},
1873 {0, 0}
1874 }
1875 },
1876 {0, 1, 1, 0, /* 0xfe */
1877 {{1, 7},
1878 {0, 0},
1879 {0, 0},
1880 {0, 0}
1881 }
1882 },
1883 {1, 1, 1, 0, /* 0xff */
1884 {{0, 7},
1885 {0, 0},
1886 {0, 0},
1887 {0, 0}
1888 }
1889 }
1890};
1891
1892
/*
 * Decide whether the address held by 'ifa' is usable under the given
 * scoping rules.  Returns 1 if the address is in scope, 0 otherwise.
 *
 * ifa       - interface address to test (family read from ifa->address).
 * scope     - per-association scoping flags (loopback/ipv4/ipv6/site/...).
 * do_update - non-zero to refresh the IPv6 localifa_flags from the kernel
 *             before testing them (IPv6 path only).
 */
int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
                         struct sctp_scoping *scope,
                         int do_update)
{
	if ((scope->loopback_scope == 0) &&
	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
		/*
		 * skip loopback if not in scope *
		 */
		return (0);
	}
	switch (ifa->address.sa.sa_family) {
#ifdef INET
	case AF_INET:
		if (scope->ipv4_addr_legal) {
			struct sockaddr_in *sin;

			sin = &ifa->address.sin;
			if (sin->sin_addr.s_addr == 0) {
				/* not in scope , unspecified */
				return (0);
			}
			if ((scope->ipv4_local_scope == 0) &&
			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
				/* private address not in scope */
				return (0);
			}
		} else {
			/* IPv4 not allowed at all for this scope */
			return (0);
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (scope->ipv6_addr_legal) {
			struct sockaddr_in6 *sin6;

#if !defined(__Panda__)
			/* Must update the flags, bummer, which
			 * means any IFA locks must now be applied HERE <->
			 */
			if (do_update) {
				sctp_gather_internal_ifa_flags(ifa);
			}
#endif
			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
				return (0);
			}
			/* ok to use deprecated addresses? */
			sin6 = &ifa->address.sin6;
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				/* skip unspecifed addresses */
				return (0);
			}
			if ( /* (local_scope == 0) && */
			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
				/* link-local addresses are never advertised */
				return (0);
			}
			if ((scope->site_scope == 0) &&
			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
				return (0);
			}
		} else {
			/* IPv6 not allowed at all for this scope */
			return (0);
		}
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		if (!scope->conn_addr_legal) {
			return (0);
		}
		break;
#endif
	default:
		/* unknown address family: never in scope */
		return (0);
	}
	return (1);
}
1973
/*
 * Append an address parameter (SCTP_IPV4_ADDRESS or SCTP_IPV6_ADDRESS TLV)
 * for 'ifa' to the mbuf chain 'm'.  Returns the mbuf that now holds the
 * parameter (may be a newly appended mbuf), or 'm' unchanged on failure or
 * for an unsupported address family.  If 'len' is non-NULL it is
 * incremented by the number of bytes added.
 */
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
{
#if defined(INET) || defined(INET6)
	struct sctp_paramhdr *parmh;
	struct mbuf *mret;
	uint16_t plen;
#endif

	/* parameter length depends on the address family */
	switch (ifa->address.sa.sa_family) {
#ifdef INET
	case AF_INET:
		plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
		break;
#endif
	default:
		return (m);
	}
#if defined(INET) || defined(INET6)
	if (M_TRAILINGSPACE(m) >= plen) {
		/* easy side we just drop it on the end */
		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
		mret = m;
	} else {
		/* Need more space */
		mret = m;
		/* walk to the tail of the chain before appending */
		while (SCTP_BUF_NEXT(mret) != NULL) {
			mret = SCTP_BUF_NEXT(mret);
		}
		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
		if (SCTP_BUF_NEXT(mret) == NULL) {
			/* We are hosed, can't add more addresses */
			return (m);
		}
		mret = SCTP_BUF_NEXT(mret);
		parmh = mtod(mret, struct sctp_paramhdr *);
	}
	/* now add the parameter */
	switch (ifa->address.sa.sa_family) {
#ifdef INET
	case AF_INET:
	{
		struct sctp_ipv4addr_param *ipv4p;
		struct sockaddr_in *sin;

		sin = &ifa->address.sin;
		ipv4p = (struct sctp_ipv4addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV4_ADDRESS);
		parmh->param_length = htons(plen);
		/* sin_addr is already in network byte order */
		ipv4p->addr = sin->sin_addr.s_addr;
		SCTP_BUF_LEN(mret) += plen;
		break;
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		struct sctp_ipv6addr_param *ipv6p;
		struct sockaddr_in6 *sin6;

		sin6 = &ifa->address.sin6;
		ipv6p = (struct sctp_ipv6addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV6_ADDRESS);
		parmh->param_length = htons(plen);
		memcpy(ipv6p->addr, &sin6->sin6_addr,
		    sizeof(ipv6p->addr));
#if defined(SCTP_EMBEDDED_V6_SCOPE)
		/* clear embedded scope in the address */
		in6_clearscope((struct in6_addr *)ipv6p->addr);
#endif
		SCTP_BUF_LEN(mret) += plen;
		break;
	}
#endif
	default:
		return (m);
	}
	if (len != NULL) {
		*len += plen;
	}
	return (mret);
#endif
}
2062
2063
/*
 * Append this endpoint's usable local addresses to the INIT/INIT-ACK
 * chunk being built in the mbuf chain m_at, as IPv4/IPv6 address
 * parameters (via sctp_add_addr_to_mbuf()).
 *
 * inp/stcb     - endpoint and (optional) association the chunk is for.
 * scope        - scoping rules deciding which addresses may be listed.
 * m_at         - mbuf chain holding the chunk under construction.
 * cnt_inits_to - addresses the INIT already went to; counts toward the
 *                SCTP_ADDRESS_LIMIT cap.
 * padding_len  - in/out: pending pad bytes to emit before the next
 *                parameter; zeroed once the pad is written.
 * chunk_len    - in/out: running chunk length, advanced per parameter.
 *
 * Returns the (possibly extended) mbuf chain.  The whole scan runs
 * under SCTP_IPI_ADDR_RLOCK.  Addresses are only listed when more than
 * one would qualify; with a single address we stay silent so the peer
 * derives our address from the packet source (helps NAT traversal).
 */
struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                           struct sctp_scoping *scope,
                           struct mbuf *m_at, int cnt_inits_to,
                           uint16_t *padding_len, uint16_t *chunk_len)
{
	struct sctp_vrf *vrf = NULL;
	int cnt, limit_out = 0, total_count;
	uint32_t vrf_id;

	vrf_id = inp->def_vrf_id;
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		/* No VRF: nothing to list; chain returned unchanged. */
		SCTP_IPI_ADDR_RUNLOCK();
		return (m_at);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifa *sctp_ifap;
		struct sctp_ifn *sctp_ifnp;

		cnt = cnt_inits_to;
		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
			/*
			 * Too many addresses to count one by one; assume
			 * the cap is hit and enforce per-interface limits
			 * in the emit pass below.
			 */
			limit_out = 1;
			cnt = SCTP_ADDRESS_LIMIT;
			goto skip_count;
		}
		/* Pass 1: count how many addresses would be listed. */
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if loopback_scope
				 * not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
				/* Skip addresses the jail may not use. */
				if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
				    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
				                      &sctp_ifap->address.sin.sin_addr) != 0)) {
					continue;
				}
#endif
#ifdef INET6
				if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
				    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
				                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
					continue;
				}
#endif
#endif
				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
					continue;
				}
#if defined(__Userspace__)
				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
					continue;
				}
#endif
				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
					continue;
				}
				cnt++;
				if (cnt > SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
 skip_count:
		if (cnt > 1) {
			/* Pass 2: actually emit the parameters. */
			total_count = 0;
			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
				cnt = 0;
				if ((scope->loopback_scope == 0) &&
				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
					/*
					 * Skip loopback devices if
					 * loopback_scope not set
					 */
					continue;
				}
				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
					if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
					                      &sctp_ifap->address.sin.sin_addr) != 0)) {
						continue;
					}
#endif
#ifdef INET6
					if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
					                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
						continue;
					}
#endif
#endif
					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
						continue;
					}
#if defined(__Userspace__)
					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
						continue;
					}
#endif
					if (sctp_is_address_in_scope(sctp_ifap,
					                             scope, 0) == 0) {
						continue;
					}
					/* Flush any pending padding before appending. */
					if ((chunk_len != NULL) &&
					    (padding_len != NULL) &&
					    (*padding_len > 0)) {
						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
						SCTP_BUF_LEN(m_at) += *padding_len;
						*chunk_len += *padding_len;
						*padding_len = 0;
					}
					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
					if (limit_out) {
						cnt++;
						total_count++;
						if (cnt >= 2) {
							/* two from each address */
							break;
						}
						if (total_count > SCTP_ADDRESS_LIMIT) {
							/* No more addresses */
							break;
						}
					}
				}
			}
		}
	} else {
		/* Bound-specific endpoint: list from the bound-address list. */
		struct sctp_laddr *laddr;

		cnt = cnt_inits_to;
		/* First, how many ? */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				continue;
			}
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/* Address being deleted by the system, dont
				 * list.
				 */
				continue;
			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
				/* Address being deleted on this ep
				 * don't list.
				 */
				continue;
			}
#if defined(__Userspace__)
			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
				continue;
			}
#endif
			if (sctp_is_address_in_scope(laddr->ifa,
			                             scope, 1) == 0) {
				continue;
			}
			cnt++;
		}
		/*
		 * To get through a NAT we only list addresses if we have
		 * more than one. That way if you just bind a single address
		 * we let the source of the init dictate our address.
		 */
		if (cnt > 1) {
			cnt = cnt_inits_to;
			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
				if (laddr->ifa == NULL) {
					continue;
				}
				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
					continue;
				}
#if defined(__Userspace__)
				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
					continue;
				}
#endif
				if (sctp_is_address_in_scope(laddr->ifa,
				                             scope, 0) == 0) {
					continue;
				}
				/* Flush any pending padding before appending. */
				if ((chunk_len != NULL) &&
				    (padding_len != NULL) &&
				    (*padding_len > 0)) {
					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
					SCTP_BUF_LEN(m_at) += *padding_len;
					*chunk_len += *padding_len;
					*padding_len = 0;
				}
				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
				cnt++;
				if (cnt >= SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (m_at);
}
2276
2277static struct sctp_ifa *
2278sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2279 uint8_t dest_is_loop,
2280 uint8_t dest_is_priv,
2281 sa_family_t fam)
2282{
2283 uint8_t dest_is_global = 0;
2284 /* dest_is_priv is true if destination is a private address */
2285 /* dest_is_loop is true if destination is a loopback addresses */
2286
2287 /**
2288 * Here we determine if its a preferred address. A preferred address
2289 * means it is the same scope or higher scope then the destination.
2290 * L = loopback, P = private, G = global
2291 * -----------------------------------------
2292 * src | dest | result
2293 * ----------------------------------------
2294 * L | L | yes
2295 * -----------------------------------------
2296 * P | L | yes-v4 no-v6
2297 * -----------------------------------------
2298 * G | L | yes-v4 no-v6
2299 * -----------------------------------------
2300 * L | P | no
2301 * -----------------------------------------
2302 * P | P | yes
2303 * -----------------------------------------
2304 * G | P | no
2305 * -----------------------------------------
2306 * L | G | no
2307 * -----------------------------------------
2308 * P | G | no
2309 * -----------------------------------------
2310 * G | G | yes
2311 * -----------------------------------------
2312 */
2313
2314 if (ifa->address.sa.sa_family != fam) {
2315 /* forget mis-matched family */
2316 return (NULL);
2317 }
2318 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2319 dest_is_global = 1;
2320 }
2321 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2322 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2323 /* Ok the address may be ok */
2324#ifdef INET6
2325 if (fam == AF_INET6) {
2326 /* ok to use deprecated addresses? no lets not! */
2327 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2328 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2329 return (NULL);
2330 }
2331 if (ifa->src_is_priv && !ifa->src_is_loop) {
2332 if (dest_is_loop) {
2333 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2334 return (NULL);
2335 }
2336 }
2337 if (ifa->src_is_glob) {
2338 if (dest_is_loop) {
2339 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2340 return (NULL);
2341 }
2342 }
2343 }
2344#endif
2345 /* Now that we know what is what, implement or table
2346 * this could in theory be done slicker (it used to be), but this
2347 * is straightforward and easier to validate :-)
2348 */
2349 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2350 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2351 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2352 dest_is_loop, dest_is_priv, dest_is_global);
2353
2354 if ((ifa->src_is_loop) && (dest_is_priv)) {
2355 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2356 return (NULL);
2357 }
2358 if ((ifa->src_is_glob) && (dest_is_priv)) {
2359 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2360 return (NULL);
2361 }
2362 if ((ifa->src_is_loop) && (dest_is_global)) {
2363 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2364 return (NULL);
2365 }
2366 if ((ifa->src_is_priv) && (dest_is_global)) {
2367 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2368 return (NULL);
2369 }
2370 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2371 /* its a preferred address */
2372 return (ifa);
2373}
2374
2375static struct sctp_ifa *
2376sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2377 uint8_t dest_is_loop,
2378 uint8_t dest_is_priv,
2379 sa_family_t fam)
2380{
2381 uint8_t dest_is_global = 0;
2382
2383 /**
2384 * Here we determine if its a acceptable address. A acceptable
2385 * address means it is the same scope or higher scope but we can
2386 * allow for NAT which means its ok to have a global dest and a
2387 * private src.
2388 *
2389 * L = loopback, P = private, G = global
2390 * -----------------------------------------
2391 * src | dest | result
2392 * -----------------------------------------
2393 * L | L | yes
2394 * -----------------------------------------
2395 * P | L | yes-v4 no-v6
2396 * -----------------------------------------
2397 * G | L | yes
2398 * -----------------------------------------
2399 * L | P | no
2400 * -----------------------------------------
2401 * P | P | yes
2402 * -----------------------------------------
2403 * G | P | yes - May not work
2404 * -----------------------------------------
2405 * L | G | no
2406 * -----------------------------------------
2407 * P | G | yes - May not work
2408 * -----------------------------------------
2409 * G | G | yes
2410 * -----------------------------------------
2411 */
2412
2413 if (ifa->address.sa.sa_family != fam) {
2414 /* forget non matching family */
2415 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2416 ifa->address.sa.sa_family, fam);
2417 return (NULL);
2418 }
2419 /* Ok the address may be ok */
2420 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2421 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2422 dest_is_loop, dest_is_priv);
2423 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2424 dest_is_global = 1;
2425 }
2426#ifdef INET6
2427 if (fam == AF_INET6) {
2428 /* ok to use deprecated addresses? */
2429 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2430 return (NULL);
2431 }
2432 if (ifa->src_is_priv) {
2433 /* Special case, linklocal to loop */
2434 if (dest_is_loop)
2435 return (NULL);
2436 }
2437 }
2438#endif
2439 /*
2440 * Now that we know what is what, implement our table.
2441 * This could in theory be done slicker (it used to be), but this
2442 * is straightforward and easier to validate :-)
2443 */
2444 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2445 ifa->src_is_loop,
2446 dest_is_priv);
2447 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2448 return (NULL);
2449 }
2450 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2451 ifa->src_is_loop,
2452 dest_is_global);
2453 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2454 return (NULL);
2455 }
2456 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2457 /* its an acceptable address */
2458 return (ifa);
2459}
2460
2461int
2462sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2463{
2464 struct sctp_laddr *laddr;
2465
2466 if (stcb == NULL) {
2467 /* There are no restrictions, no TCB :-) */
2468 return (0);
2469 }
2470 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2471 if (laddr->ifa == NULL) {
2472 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2473 __func__);
2474 continue;
2475 }
2476 if (laddr->ifa == ifa) {
2477 /* Yes it is on the list */
2478 return (1);
2479 }
2480 }
2481 return (0);
2482}
2483
2484
2485int
2486sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2487{
2488 struct sctp_laddr *laddr;
2489
2490 if (ifa == NULL)
2491 return (0);
2492 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2493 if (laddr->ifa == NULL) {
2494 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2495 __func__);
2496 continue;
2497 }
2498 if ((laddr->ifa == ifa) && laddr->action == 0)
2499 /* same pointer */
2500 return (1);
2501 }
2502 return (0);
2503}
2504
2505
2506
/*
 * Choose a source address for a bound-specific (non-BOUNDALL) endpoint
 * with no association context.  Selection order:
 *   1. a "preferred" bound address on the interface the route emits on;
 *   2. any "preferred" bound address, rotating through the list via
 *      inp->next_addr_touse so successive calls spread the choice;
 *   3. any merely "acceptable" bound address, same rotation.
 * Returns the chosen ifa with its refcount bumped, or NULL when no
 * bound address can serve the destination.
 * NOTE(review): appears to rely on the caller holding the address
 * read lock while the lists are walked — confirm at call sites.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
                              sctp_route_t *ro,
                              uint32_t vrf_id,
                              int non_asoc_addr_ok,
                              uint8_t dest_is_priv,
                              uint8_t dest_is_loop,
                              sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	int resettotop = 0;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want such an address. Note that we first looked for a
	 * preferred address.
	 */
	if (sctp_ifn) {
		/* is a preferred one on the interface we route out? */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
			/* Skip addresses the jail may not use. */
			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
				continue;
			}
#endif
#ifdef INET6
			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
				continue;
			}
#endif
#endif
			/* Deferred-use addresses only when explicitly allowed. */
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
							  dest_is_loop,
							  dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, sifa)) {
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * ok, now we now need to find one on the list of the addresses.
	 * We can't get one on the emitting interface so let's find first
	 * a preferred one. If not that an acceptable one otherwise...
	 * we return NULL.
	 */
	starting_point = inp->next_addr_touse;
once_again:
	if (inp->next_addr_touse == NULL) {
		/* Wrapped (or first use): restart from the head once. */
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	for (laddr = inp->next_addr_touse; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
						  dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		/* Only scanned the tail so far; rescan from the head. */
		inp->next_addr_touse = NULL;
		goto once_again;
	}

	/* No preferred address found; retry accepting "acceptable" ones. */
	inp->next_addr_touse = starting_point;
	resettotop = 0;
once_again_too:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}

	/* ok, what about an acceptable address in the inp */
	for (laddr = inp->next_addr_touse; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
						   dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again_too;
	}

	/*
	 * no address bound can be a source for the destination we are in
	 * trouble
	 */
	return (NULL);
}
2640
2641
2642
/*
 * Choose a source address for a bound-specific endpoint when an
 * association (stcb) exists.  Selection order:
 *   1. a "preferred" endpoint-bound address on the emit interface;
 *   2. an "acceptable" endpoint-bound address on the emit interface;
 *   3. any "preferred" bound address, rotating via
 *      stcb->asoc.last_used_address;
 *   4. any "acceptable" bound address, same rotation.
 * Addresses on the association's restricted list are skipped unless
 * non_asoc_addr_ok permits them (and then only if also pending).
 * Returns the chosen ifa with its refcount bumped, or NULL.
 * NOTE(review): appears to rely on the caller holding the address
 * read lock while the lists are walked — confirm at call sites.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
                               struct sctp_tcb *stcb,
                               sctp_route_t *ro,
                               uint32_t vrf_id,
                               uint8_t dest_is_priv,
                               uint8_t dest_is_loop,
                               int non_asoc_addr_ok,
                               sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint8_t start_at_beginning = 0;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want that one.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn( ifn, ifn_index);

	/*
	 * first question, is the ifn we will emit on in our list? If so,
	 * we want that one. First we look for a preferred. Second, we go
	 * for an acceptable.
	 */
	if (sctp_ifn) {
		/* first try for a preferred address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
			/* Skip addresses the jail may not use. */
			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
				continue;
			}
#endif
#ifdef INET6
			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
				continue;
			}
#endif
#endif
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				     (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				     (sctp_is_addr_restricted(stcb, sifa)) &&
				     (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
		/* next try for an acceptable address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
				continue;
			}
#endif
#ifdef INET6
			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
				continue;
			}
#endif
#endif
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa= sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv,fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				     (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				     (sctp_is_addr_restricted(stcb, sifa)) &&
				     (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}

	}
	/*
	 * if we can't find one like that then we must look at all
	 * addresses bound to pick one at first preferable then
	 * secondly acceptable.
	 */
	starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
	if (stcb->asoc.last_used_address == NULL) {
		/* Wrapped: restart at the list head exactly once. */
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		     (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		     (sctp_is_addr_restricted(stcb, sifa)) &&
		     (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		/* Remember where to resume for the next selection. */
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top;
	}
	/* now try for any higher scope than the destination */
	stcb->asoc.last_used_address = starting_point;
	start_at_beginning = 0;
sctp_from_the_top2:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
						   dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		     (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		     (sctp_is_addr_restricted(stcb, sifa)) &&
		     (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top2;
	}
	return (NULL);
}
2835
/*
 * Return the addr_wanted-th (0-based) "preferred" source address on
 * interface ifn for the given destination scope/family, applying the
 * same eligibility filters as sctp_count_num_preferred_boundall():
 * jail checks, DEFER_USE, scope table, v6 scope-id matching, mobility
 * next-hop matching, and (with stcb) association scope/restriction.
 * Returns the ifa WITHOUT taking a refcount (callers bump it), or NULL
 * if fewer than addr_wanted+1 addresses qualify.
 */
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
#if defined(__FreeBSD__)
                                                 struct sctp_inpcb *inp,
#else
                                                 struct sctp_inpcb *inp SCTP_UNUSED,
#endif
                                                 struct sctp_tcb *stcb,
                                                 int non_asoc_addr_ok,
                                                 uint8_t dest_is_loop,
                                                 uint8_t dest_is_priv,
                                                 int addr_wanted,
                                                 sa_family_t fam,
                                                 sctp_route_t *ro
                                                 )
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;
#ifdef INET6
#ifdef SCTP_EMBEDDED_V6_SCOPE
	struct sockaddr_in6 sin6, lsa6;

	if (fam == AF_INET6) {
		/* Recover the destination's scope id for link-local matching. */
		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
#ifdef SCTP_KAME
		(void)sa6_recoverscope(&sin6);
#else
		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
#endif /* SCTP_KAME */
	}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif /* INET6 */
	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
		/* Skip addresses the jail may not use. */
		if ((ifa->address.sa.sa_family == AF_INET) &&
		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
		                      &ifa->address.sin.sin_addr) != 0)) {
			continue;
		}
#endif
#ifdef INET6
		if ((ifa->address.sa.sa_family == AF_INET6) &&
		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
		                      &ifa->address.sin6.sin6_addr) != 0)) {
			continue;
		}
#endif
#endif
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
						  dest_is_priv, fam);
		if (sifa == NULL)
			continue;
#ifdef INET6
		if (fam == AF_INET6 &&
		    dest_is_loop &&
		    sifa->src_is_loop && sifa->src_is_priv) {
			/* don't allow fe80::1 to be a src on loop ::1, we don't list it
			 * to the peer so we will get an abort.
			 */
			continue;
		}
#ifdef SCTP_EMBEDDED_V6_SCOPE
		if (fam == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
			/* link-local <-> link-local must belong to the same scope. */
			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
#ifdef SCTP_KAME
			(void)sa6_recoverscope(&lsa6);
#else
			(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
#endif /* SCTP_KAME */
			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
				continue;
			}
		}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif /* INET6 */

#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
		/* Check if the IPv6 address matches to next-hop.
		   In the mobile case, old IPv6 address may be not deleted
		   from the interface. Then, the interface has previous and
		   new addresses. We should use one corresponding to the
		   next-hop. (by micchie)
		 */
#ifdef INET6
		if (stcb && fam == AF_INET6 &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
			    == 0) {
				continue;
			}
		}
#endif
#ifdef INET
		/* Avoid topologically incorrect IPv4 address */
		if (stcb && fam == AF_INET &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
				continue;
			}
		}
#endif
#endif
		if (stcb) {
			/* With an association, also honor its scope and
			 * restricted-address rules. */
			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
				continue;
			}
			if (((non_asoc_addr_ok == 0) &&
			     (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			     (sctp_is_addr_restricted(stcb, sifa)) &&
			     (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		if (num_eligible_addr >= addr_wanted) {
			/* This is the nth eligible address: return it. */
			return (sifa);
		}
		num_eligible_addr++;
	}
	return (NULL);
}
2968
2969
2970static int
2971sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2972#if defined(__FreeBSD__)
2973 struct sctp_inpcb *inp,
2974#else
2975 struct sctp_inpcb *inp SCTP_UNUSED,
2976#endif
2977 struct sctp_tcb *stcb,
2978 int non_asoc_addr_ok,
2979 uint8_t dest_is_loop,
2980 uint8_t dest_is_priv,
2981 sa_family_t fam)
2982{
2983 struct sctp_ifa *ifa, *sifa;
2984 int num_eligible_addr = 0;
2985
2986 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2987#if defined(__FreeBSD__)
2988#ifdef INET
2989 if ((ifa->address.sa.sa_family == AF_INET) &&
2990 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2991 &ifa->address.sin.sin_addr) != 0)) {
2992 continue;
2993 }
2994#endif
2995#ifdef INET6
2996 if ((ifa->address.sa.sa_family == AF_INET6) &&
2997 (stcb != NULL) &&
2998 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2999 &ifa->address.sin6.sin6_addr) != 0)) {
3000 continue;
3001 }
3002#endif
3003#endif
3004 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3005 (non_asoc_addr_ok == 0)) {
3006 continue;
3007 }
3008 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
3009 dest_is_priv, fam);
3010 if (sifa == NULL) {
3011 continue;
3012 }
3013 if (stcb) {
3014 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
3015 continue;
3016 }
3017 if (((non_asoc_addr_ok == 0) &&
3018 (sctp_is_addr_restricted(stcb, sifa))) ||
3019 (non_asoc_addr_ok &&
3020 (sctp_is_addr_restricted(stcb, sifa)) &&
3021 (!sctp_is_addr_pending(stcb, sifa)))) {
3022 /*
3023 * It is restricted for some reason..
3024 * probably not yet added.
3025 */
3026 continue;
3027 }
3028 }
3029 num_eligible_addr++;
3030 }
3031 return (num_eligible_addr);
3032}
3033
3034static struct sctp_ifa *
3035sctp_choose_boundall(struct sctp_inpcb *inp,
3036 struct sctp_tcb *stcb,
3037 struct sctp_nets *net,
3038 sctp_route_t *ro,
3039 uint32_t vrf_id,
3040 uint8_t dest_is_priv,
3041 uint8_t dest_is_loop,
3042 int non_asoc_addr_ok,
3043 sa_family_t fam)
3044{
3045 int cur_addr_num = 0, num_preferred = 0;
3046 void *ifn;
3047 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
3048 struct sctp_ifa *sctp_ifa, *sifa;
3049 uint32_t ifn_index;
3050 struct sctp_vrf *vrf;
3051#ifdef INET
3052 int retried = 0;
3053#endif
3054
3055 /*-
3056 * For boundall we can use any address in the association.
3057 * If non_asoc_addr_ok is set we can use any address (at least in
3058 * theory). So we look for preferred addresses first. If we find one,
3059 * we use it. Otherwise we next try to get an address on the
3060 * interface, which we should be able to do (unless non_asoc_addr_ok
3061 * is false and we are routed out that way). In these cases where we
3062 * can't use the address of the interface we go through all the
3063 * ifn's looking for an address we can use and fill that in. Punting
3064 * means we send back address 0, which will probably cause problems
3065 * actually since then IP will fill in the address of the route ifn,
3066 * which means we probably already rejected it.. i.e. here comes an
3067 * abort :-<.
3068 */
3069 vrf = sctp_find_vrf(vrf_id);
3070 if (vrf == NULL)
3071 return (NULL);
3072
3073 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3074 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
3075 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
3076 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
3077 if (sctp_ifn == NULL) {
3078 /* ?? We don't have this guy ?? */
3079 SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
3080 goto bound_all_plan_b;
3081 }
3082 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
3083 ifn_index, sctp_ifn->ifn_name);
3084
3085 if (net) {
3086 cur_addr_num = net->indx_of_eligible_next_to_use;
3087 }
3088 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3089 inp, stcb,
3090 non_asoc_addr_ok,
3091 dest_is_loop,
3092 dest_is_priv, fam);
3093 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3094 num_preferred, sctp_ifn->ifn_name);
3095 if (num_preferred == 0) {
3096 /*
3097 * no eligible addresses, we must use some other interface
3098 * address if we can find one.
3099 */
3100 goto bound_all_plan_b;
3101 }
3102 /*
3103 * Ok we have num_eligible_addr set with how many we can use, this
3104 * may vary from call to call due to addresses being deprecated
3105 * etc..
3106 */
3107 if (cur_addr_num >= num_preferred) {
3108 cur_addr_num = 0;
3109 }
3110 /*
3111 * select the nth address from the list (where cur_addr_num is the
3112 * nth) and 0 is the first one, 1 is the second one etc...
3113 */
3114 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3115
3116 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3117 dest_is_priv, cur_addr_num, fam, ro);
3118
3119 /* if sctp_ifa is NULL something changed??, fall to plan b. */
3120 if (sctp_ifa) {
3121 atomic_add_int(&sctp_ifa->refcount, 1);
3122 if (net) {
3123 /* save off where the next one we will want */
3124 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3125 }
3126 return (sctp_ifa);
3127 }
3128 /*
3129 * plan_b: Look at all interfaces and find a preferred address. If
3130 * no preferred fall through to plan_c.
3131 */
3132 bound_all_plan_b:
3133 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3134 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3135 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3136 sctp_ifn->ifn_name);
3137 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3138 /* wrong base scope */
3139 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3140 continue;
3141 }
3142 if ((sctp_ifn == looked_at) && looked_at) {
3143 /* already looked at this guy */
3144 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3145 continue;
3146 }
3147 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3148 dest_is_loop, dest_is_priv, fam);
3149 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3150 "Found ifn:%p %d preferred source addresses\n",
3151 ifn, num_preferred);
3152 if (num_preferred == 0) {
3153 /* None on this interface. */
3154 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3155 continue;
3156 }
3157 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3158 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3159 num_preferred, (void *)sctp_ifn, cur_addr_num);
3160
3161 /*
3162 * Ok we have num_eligible_addr set with how many we can
3163 * use, this may vary from call to call due to addresses
3164 * being deprecated etc..
3165 */
3166 if (cur_addr_num >= num_preferred) {
3167 cur_addr_num = 0;
3168 }
3169 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3170 dest_is_priv, cur_addr_num, fam, ro);
3171 if (sifa == NULL)
3172 continue;
3173 if (net) {
3174 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3175 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3176 cur_addr_num);
3177 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3178 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3179 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3180 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3181 }
3182 atomic_add_int(&sifa->refcount, 1);
3183 return (sifa);
3184 }
3185#ifdef INET
3186again_with_private_addresses_allowed:
3187#endif
3188 /* plan_c: do we have an acceptable address on the emit interface */
3189 sifa = NULL;
3190 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
3191 if (emit_ifn == NULL) {
3192 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
3193 goto plan_d;
3194 }
3195 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3196 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3197#if defined(__FreeBSD__)
3198#ifdef INET
3199 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3200 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3201 &sctp_ifa->address.sin.sin_addr) != 0)) {
3202 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3203 continue;
3204 }
3205#endif
3206#ifdef INET6
3207 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3208 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3209 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3210 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
3211 continue;
3212 }
3213#endif
3214#endif
3215 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3216 (non_asoc_addr_ok == 0)) {
3217 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
3218 continue;
3219 }
3220 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3221 dest_is_priv, fam);
3222 if (sifa == NULL) {
3223 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3224 continue;
3225 }
3226 if (stcb) {
3227 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3228 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3229 sifa = NULL;
3230 continue;
3231 }
3232 if (((non_asoc_addr_ok == 0) &&
3233 (sctp_is_addr_restricted(stcb, sifa))) ||
3234 (non_asoc_addr_ok &&
3235 (sctp_is_addr_restricted(stcb, sifa)) &&
3236 (!sctp_is_addr_pending(stcb, sifa)))) {
3237 /*
3238 * It is restricted for some
3239 * reason.. probably not yet added.
3240 */
3241 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3242 sifa = NULL;
3243 continue;
3244 }
3245 }
3246 atomic_add_int(&sifa->refcount, 1);
3247 goto out;
3248 }
3249 plan_d:
3250 /*
3251 * plan_d: We are in trouble. No preferred address on the emit
3252 * interface. And not even a preferred address on all interfaces.
3253 * Go out and see if we can find an acceptable address somewhere
3254 * amongst all interfaces.
3255 */
3256 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3257 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3258 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3259 /* wrong base scope */
3260 continue;
3261 }
3262 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3263#if defined(__FreeBSD__)
3264#ifdef INET
3265 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3266 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3267 &sctp_ifa->address.sin.sin_addr) != 0)) {
3268 continue;
3269 }
3270#endif
3271#ifdef INET6
3272 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3273 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3274 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3275 continue;
3276 }
3277#endif
3278#endif
3279 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3280 (non_asoc_addr_ok == 0))
3281 continue;
3282 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3283 dest_is_loop,
3284 dest_is_priv, fam);
3285 if (sifa == NULL)
3286 continue;
3287 if (stcb) {
3288 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3289 sifa = NULL;
3290 continue;
3291 }
3292 if (((non_asoc_addr_ok == 0) &&
3293 (sctp_is_addr_restricted(stcb, sifa))) ||
3294 (non_asoc_addr_ok &&
3295 (sctp_is_addr_restricted(stcb, sifa)) &&
3296 (!sctp_is_addr_pending(stcb, sifa)))) {
3297 /*
3298 * It is restricted for some
3299 * reason.. probably not yet added.
3300 */
3301 sifa = NULL;
3302 continue;
3303 }
3304 }
3305 goto out;
3306 }
3307 }
3308#ifdef INET
3309 if (stcb) {
3310 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3311 stcb->asoc.scope.ipv4_local_scope = 1;
3312 retried = 1;
3313 goto again_with_private_addresses_allowed;
3314 } else if (retried == 1) {
3315 stcb->asoc.scope.ipv4_local_scope = 0;
3316 }
3317 }
3318#endif
3319out:
3320#ifdef INET
3321 if (sifa) {
3322 if (retried == 1) {
3323 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3324 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3325 /* wrong base scope */
3326 continue;
3327 }
3328 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3329 struct sctp_ifa *tmp_sifa;
3330
3331#if defined(__FreeBSD__)
3332#ifdef INET
3333 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3334 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3335 &sctp_ifa->address.sin.sin_addr) != 0)) {
3336 continue;
3337 }
3338#endif
3339#ifdef INET6
3340 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3341 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3342 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3343 continue;
3344 }
3345#endif
3346#endif
3347 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3348 (non_asoc_addr_ok == 0))
3349 continue;
3350 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3351 dest_is_loop,
3352 dest_is_priv, fam);
3353 if (tmp_sifa == NULL) {
3354 continue;
3355 }
3356 if (tmp_sifa == sifa) {
3357 continue;
3358 }
3359 if (stcb) {
3360 if (sctp_is_address_in_scope(tmp_sifa,
3361 &stcb->asoc.scope, 0) == 0) {
3362 continue;
3363 }
3364 if (((non_asoc_addr_ok == 0) &&
3365 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3366 (non_asoc_addr_ok &&
3367 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3368 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3369 /*
3370 * It is restricted for some
3371 * reason.. probably not yet added.
3372 */
3373 continue;
3374 }
3375 }
3376 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3377 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3378 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3379 }
3380 }
3381 }
3382 }
3383 atomic_add_int(&sifa->refcount, 1);
3384 }
3385#endif
3386 return (sifa);
3387}
3388
3389
3390
/*
 * Select the best source address for sending to the destination cached in
 * ro->ro_dst, allocating a route into *ro first if one is not cached.
 *
 * stcb may be NULL (e.g. when sending before an association exists).
 * net, if non-NULL, is the destination network; it is marked local when the
 * destination turns out to be a loopback address.
 * non_asoc_addr_ok permits addresses not (yet) part of the association.
 *
 * Returns a referenced sctp_ifa, or NULL if no route/address can be found.
 * Called with no address-list lock held; takes SCTP_IPI_ADDR_RLOCK around
 * the actual selection.
 */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
			      struct sctp_tcb *stcb,
			      sctp_route_t *ro,
			      struct sctp_nets *net,
			      int non_asoc_addr_ok, uint32_t vrf_id)
{
	struct sctp_ifa *answer;
	uint8_t dest_is_priv, dest_is_loop;
	sa_family_t fam;
#ifdef INET
	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
#endif
#ifdef INET6
	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
#endif

	/**
	 * Rules:
	 * - Find the route if needed, cache if I can.
	 * - Look at interface address in route, Is it in the bound list. If so we
	 *   have the best source.
	 * - If not we must rotate amongst the addresses.
	 *
	 * Caveats and issues
	 *
	 * Do we need to pay attention to scope. We can have a private address
	 * or a global address we are sourcing or sending to. So if we draw
	 * it out
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 * For V4
	 * ------------------------------------------
	 *      source     *      dest  *  result
	 * -----------------------------------------
	 * <a>  Private    *    Global  *  NAT
	 * -----------------------------------------
	 * <b>  Private    *    Private *  No problem
	 * -----------------------------------------
	 * <c>  Global     *    Private *  Huh, How will this work?
	 * -----------------------------------------
	 * <d>  Global     *    Global  *  No Problem
	 *------------------------------------------
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 * For V6
	 *------------------------------------------
	 *      source     *      dest  *  result
	 * -----------------------------------------
	 * <a>  Linklocal  *    Global  *
	 * -----------------------------------------
	 * <b>  Linklocal  * Linklocal  *  No problem
	 * -----------------------------------------
	 * <c>  Global     * Linklocal  *  Huh, How will this work?
	 * -----------------------------------------
	 * <d>  Global     *    Global  *  No Problem
	 *------------------------------------------
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 *
	 * And then we add to that what happens if there are multiple addresses
	 * assigned to an interface. Remember the ifa on a ifn is a linked
	 * list of addresses. So one interface can have more than one IP
	 * address. What happens if we have both a private and a global
	 * address? Do we then use context of destination to sort out which
	 * one is best? And what about NAT's sending P->G may get you a NAT
	 * translation, or should you select the G that's on the interface in
	 * preference.
	 *
	 * Decisions:
	 *
	 * - count the number of addresses on the interface.
	 * - if it is one, no problem except case <c>.
	 *   For <a> we will assume a NAT out there.
	 * - if there are more than one, then we need to worry about scope P
	 *   or G. We should prefer G -> G and P -> P if possible.
	 *   Then as a secondary fall back to mixed types G->P being a last
	 *   ditch one.
	 * - The above all works for bound all, but bound specific we need to
	 *   use the same concept but instead only consider the bound
	 *   addresses. If the bound set is NOT assigned to the interface then
	 *   we must use rotation amongst the bound addresses..
	 */
	if (ro->ro_rt == NULL) {
		/*
		 * Need a route to cache.
		 */
		SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
	}
	/* Without a route there is no way to pick a source address. */
	if (ro->ro_rt == NULL) {
		return (NULL);
	}
#if defined(__Userspace_os_Windows)
	/* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */
	fam = (sa_family_t)ro->ro_dst.sa_family;
#else
	fam = ro->ro_dst.sa_family;
#endif
	dest_is_priv = dest_is_loop = 0;
	/* Setup our scopes for the destination */
	switch (fam) {
#ifdef INET
	case AF_INET:
		/* Scope based on outbound address */
		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
			dest_is_priv = 1;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/* Scope based on outbound address */
#if defined(__Userspace_os_Windows)
		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
#else
		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
#endif
			/*
			 * If the address is a loopback address, which
			 * consists of "::1" OR "fe80::1%lo0", we are loopback
			 * scope. But we don't use dest_is_priv (link local
			 * addresses).
			 */
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
			dest_is_priv = 1;
		}
		break;
#endif
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
	/* Hold the address list read lock for the duration of the choice. */
	SCTP_IPI_ADDR_RLOCK();
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * Bound all case
		 */
		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
					      dest_is_priv, dest_is_loop,
					      non_asoc_addr_ok, fam);
		SCTP_IPI_ADDR_RUNLOCK();
		return (answer);
	}
	/*
	 * Subset bound case: only addresses explicitly bound to the
	 * endpoint (and, if we have one, valid for the association) may
	 * be used.
	 */
	if (stcb) {
		answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
							vrf_id,	dest_is_priv,
							dest_is_loop,
							non_asoc_addr_ok, fam);
	} else {
		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
						       non_asoc_addr_ok,
						       dest_is_priv,
						       dest_is_loop, fam);
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (answer);
}
3560
/*
 * Scan the control mbuf for a cmsg of type c_type at level IPPROTO_SCTP and
 * copy its payload (cpsize bytes) into data.
 *
 * Special case: when c_type is SCTP_SNDRCV, the components SCTP_SNDINFO,
 * SCTP_PRINFO and SCTP_AUTHINFO are aggregated into the caller's
 * struct sctp_sndrcvinfo (zeroed on first match).
 *
 * Returns 1 if an exact c_type match was copied out; otherwise returns
 * `found` (1 if any SCTP_SNDRCV component was folded in, else 0).  Any
 * malformed or truncated cmsg terminates the scan early.
 */
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
{
#if defined(__Userspace_os_Windows)
	WSACMSGHDR cmh;
#else
	struct cmsghdr cmh;
#endif
	int tlen, at, found;
	struct sctp_sndinfo sndinfo;
	struct sctp_prinfo prinfo;
	struct sctp_authinfo authinfo;

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	found = 0;
	/*
	 * Independent of how many mbufs, find the c_type inside the control
	 * structure and copy out the data.
	 */
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* There is not enough room for one more. */
			return (found);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
			/* We don't have a complete CMSG header. */
			return (found);
		}
		if (((int)cmh.cmsg_len + at) > tlen) {
			/* We don't have the complete CMSG. */
			return (found);
		}
		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
		    ((c_type == cmh.cmsg_type) ||
		     ((c_type == SCTP_SNDRCV) &&
		      ((cmh.cmsg_type == SCTP_SNDINFO) ||
		       (cmh.cmsg_type == SCTP_PRINFO) ||
		       (cmh.cmsg_type == SCTP_AUTHINFO))))) {
			if (c_type == cmh.cmsg_type) {
				/* Payload must be large enough for the caller's buffer. */
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
					return (found);
				}
				/* It is exactly what we want. Copy it out. */
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), (int)cpsize, (caddr_t)data);
				return (1);
			} else {
				struct sctp_sndrcvinfo *sndrcvinfo;

				sndrcvinfo = (struct sctp_sndrcvinfo *)data;
				if (found == 0) {
					/* First component: validate size and start clean. */
					if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
						return (found);
					}
					memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
				}
				switch (cmh.cmsg_type) {
				case SCTP_SNDINFO:
					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
						return (found);
					}
					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
					/* Map sndinfo fields onto the legacy sndrcvinfo layout. */
					sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
					sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
					sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
					sndrcvinfo->sinfo_context = sndinfo.snd_context;
					sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
					break;
				case SCTP_PRINFO:
					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
						return (found);
					}
					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
					if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
						sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
					} else {
						sndrcvinfo->sinfo_timetolive = 0;
					}
					/* PR-SCTP policy is carried in the flags field. */
					sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
					break;
				case SCTP_AUTHINFO:
					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
						return (found);
					}
					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
					sndrcvinfo->sinfo_keynumber_valid = 1;
					sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
					break;
				default:
					return (found);
				}
				found = 1;
			}
		}
		/* Advance to the next aligned cmsg. */
		at += CMSG_ALIGN(cmh.cmsg_len);
	}
	return (found);
}
3660
/*
 * Apply INIT-related control messages (SCTP_INIT, SCTP_DSTADDRV4,
 * SCTP_DSTADDRV6) from the control mbuf to a freshly created stcb:
 * stream counts / retransmit limits from the sctp_initmsg, and additional
 * remote addresses from the destination-address cmsgs.
 *
 * Returns 0 on success; returns 1 and sets *error (EINVAL for malformed
 * input, ENOBUFS when a remote address cannot be added) on failure.
 * Called with the TCB lock held; it is briefly dropped around the stream
 * array allocation (see SCTP_TCB_UNLOCK/LOCK below).
 */
static int
sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
{
#if defined(__Userspace_os_Windows)
	WSACMSGHDR cmh;
#else
	struct cmsghdr cmh;
#endif
	int tlen, at;
	struct sctp_initmsg initmsg;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* There is not enough room for one more. */
			*error = EINVAL;
			return (1);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
			/* We don't have a complete CMSG header. */
			*error = EINVAL;
			return (1);
		}
		if (((int)cmh.cmsg_len + at) > tlen) {
			/* We don't have the complete CMSG. */
			*error = EINVAL;
			return (1);
		}
		if (cmh.cmsg_level == IPPROTO_SCTP) {
			switch (cmh.cmsg_type) {
			case SCTP_INIT:
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
					*error = EINVAL;
					return (1);
				}
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
				/* Zero fields mean "keep the defaults". */
				if (initmsg.sinit_max_attempts)
					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
				if (initmsg.sinit_num_ostreams)
					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
				if (initmsg.sinit_max_instreams)
					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
				if (initmsg.sinit_max_init_timeo)
					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
					/* Grow the outgoing stream array to the requested count. */
					struct sctp_stream_out *tmp_str;
					unsigned int i;
#if defined(SCTP_DETAILED_STR_STATS)
					int j;
#endif

					/* Default is NOT correct */
					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
						stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
					/* Drop the TCB lock while allocating (may sleep). */
					SCTP_TCB_UNLOCK(stcb);
					SCTP_MALLOC(tmp_str,
					            struct sctp_stream_out *,
					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
					            SCTP_M_STRMO);
					SCTP_TCB_LOCK(stcb);
					if (tmp_str != NULL) {
						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
						stcb->asoc.strmout = tmp_str;
						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
					} else {
						/* Allocation failed: fall back to the existing count. */
						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
					}
					/* (Re)initialize every outgoing stream slot. */
					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
						stcb->asoc.strmout[i].chunks_on_queues = 0;
						stcb->asoc.strmout[i].next_mid_ordered = 0;
						stcb->asoc.strmout[i].next_mid_unordered = 0;
#if defined(SCTP_DETAILED_STR_STATS)
						for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
							stcb->asoc.strmout[i].abandoned_sent[j] = 0;
							stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
						}
#else
						stcb->asoc.strmout[i].abandoned_sent[0] = 0;
						stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
#endif
						stcb->asoc.strmout[i].sid = i;
						stcb->asoc.strmout[i].last_msg_incomplete = 0;
						stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
						stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
					}
				}
				break;
#ifdef INET
			case SCTP_DSTADDRV4:
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
					*error = EINVAL;
					return (1);
				}
				memset(&sin, 0, sizeof(struct sockaddr_in));
				sin.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
				sin.sin_len = sizeof(struct sockaddr_in);
#endif
				sin.sin_port = stcb->rport;
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
				/* Reject wildcard, broadcast and multicast destinations. */
				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
					*error = EINVAL;
					return (1);
				}
				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
				                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
					*error = ENOBUFS;
					return (1);
				}
				break;
#endif
#ifdef INET6
			case SCTP_DSTADDRV6:
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
					*error = EINVAL;
					return (1);
				}
				memset(&sin6, 0, sizeof(struct sockaddr_in6));
				sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
				sin6.sin6_len = sizeof(struct sockaddr_in6);
#endif
				sin6.sin6_port = stcb->rport;
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
					*error = EINVAL;
					return (1);
				}
#ifdef INET
				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
					/* V4-mapped addresses are added as plain IPv4. */
					in6_sin6_2_sin(&sin, &sin6);
					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
						*error = EINVAL;
						return (1);
					}
					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
						*error = ENOBUFS;
						return (1);
					}
				} else
#endif
					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
						*error = ENOBUFS;
						return (1);
					}
				break;
#endif
			default:
				break;
			}
		}
		at += CMSG_ALIGN(cmh.cmsg_len);
	}
	return (0);
}
3832
/*
 * Walk the control mbuf looking for SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsgs and
 * try to find an existing association to any of the given destination
 * addresses (using `port` for the peer port).
 *
 * On a match returns the stcb (with *inp_p/*net_p filled in by
 * sctp_findassociation_ep_addr).  Returns NULL if nothing matches, or NULL
 * with *error = EINVAL when a cmsg is malformed/truncated.
 */
static struct sctp_tcb *
sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
                           uint16_t port,
                           struct mbuf *control,
                           struct sctp_nets **net_p,
                           int *error)
{
#if defined(__Userspace_os_Windows)
	WSACMSGHDR cmh;
#else
	struct cmsghdr cmh;
#endif
	int tlen, at;
	struct sctp_tcb *stcb;
	struct sockaddr *addr;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* There is not enough room for one more. */
			*error = EINVAL;
			return (NULL);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
			/* We don't have a complete CMSG header. */
			*error = EINVAL;
			return (NULL);
		}
		if (((int)cmh.cmsg_len + at) > tlen) {
			/* We don't have the complete CMSG. */
			*error = EINVAL;
			return (NULL);
		}
		if (cmh.cmsg_level == IPPROTO_SCTP) {
			switch (cmh.cmsg_type) {
#ifdef INET
			case SCTP_DSTADDRV4:
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
					*error = EINVAL;
					return (NULL);
				}
				/* Build a full sockaddr_in from the raw in_addr payload. */
				memset(&sin, 0, sizeof(struct sockaddr_in));
				sin.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
				sin.sin_len = sizeof(struct sockaddr_in);
#endif
				sin.sin_port = port;
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
				addr = (struct sockaddr *)&sin;
				break;
#endif
#ifdef INET6
			case SCTP_DSTADDRV6:
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
					*error = EINVAL;
					return (NULL);
				}
				memset(&sin6, 0, sizeof(struct sockaddr_in6));
				sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
				sin6.sin6_len = sizeof(struct sockaddr_in6);
#endif
				sin6.sin6_port = port;
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
#ifdef INET
				/* Look up V4-mapped addresses as plain IPv4. */
				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
					in6_sin6_2_sin(&sin, &sin6);
					addr = (struct sockaddr *)&sin;
				} else
#endif
					addr = (struct sockaddr *)&sin6;
				break;
#endif
			default:
				/* Not a destination-address cmsg; nothing to look up. */
				addr = NULL;
				break;
			}
			if (addr) {
				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
				if (stcb != NULL) {
					return (stcb);
				}
			}
		}
		at += CMSG_ALIGN(cmh.cmsg_len);
	}
	return (NULL);
}
3929
/*
 * Build the STATE-COOKIE parameter for an INIT-ACK: a parameter header plus
 * the state cookie data (stc_in), followed by copies of the received INIT
 * and the local INIT-ACK, and terminated by a zeroed signature area.
 *
 * *signature is set to point at the zeroed SCTP_SIGNATURE_SIZE bytes at the
 * end of the chain; the caller is expected to fill in the HMAC there.
 * Returns the head of the new mbuf chain, or NULL on allocation failure
 * (all partial allocations are freed on the error paths).
 */
static struct mbuf *
sctp_add_cookie(struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *foo;
	int sig_offset;
	uint16_t cookie_sz;

	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
				      sizeof(struct sctp_paramhdr)), 0,
				     M_NOWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	/* Copy the INIT chunk starting at init_offset. */
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
	}
#endif
	/* Copy the INIT-ACK chunk starting at initack_offset. */
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_NOWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
	}
#endif
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));

	/*
	 * Tack the INIT and then the INIT-ACK onto the chain, summing the
	 * total cookie length as we walk.  Each loop leaves m_at at the
	 * tail mbuf so the next piece can be linked on.
	 */
	cookie_sz = 0;
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}
	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}
	/* After this loop m_at is the final mbuf; the signature hangs off it. */
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			break;
		}
	}
	/*
	 * Allocate the signature mbuf (sized SCTP_SECRET_SIZE; presumably
	 * SCTP_SECRET_SIZE >= SCTP_SIGNATURE_SIZE -- only SCTP_SIGNATURE_SIZE
	 * bytes are actually used below).
	 */
	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	SCTP_BUF_LEN(sig) = 0;
	SCTP_BUF_NEXT(m_at) = sig;
	sig_offset = 0;
	/* Zero the signature area and hand its address back to the caller. */
	foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset);
	memset(foo, 0, SCTP_SIGNATURE_SIZE);
	*signature = foo;
	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;
	ph->param_length = htons(cookie_sz);
	return (mret);
}
4019
4020
4021static uint8_t
4022sctp_get_ect(struct sctp_tcb *stcb)
4023{
4024 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
4025 return (SCTP_ECT0_BIT);
4026 } else {
4027 return (0);
4028 }
4029}
4030
#if defined(INET) || defined(INET6)
/*
 * React to a packet drop caused by a missing route/source address:
 * log it, mark a confirmed+reachable destination as unreachable (notifying
 * the ULP), and, if the failed net was the primary destination, switch the
 * association's alternate path and force re-selection of the source address.
 */
static void
sctp_handle_no_route(struct sctp_tcb *stcb,
                     struct sctp_nets *net,
                     int so_locked)
{
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");

	if (net == NULL) {
		return;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
	if ((net->dest_state & SCTP_ADDR_CONFIRMED) &&
	    (net->dest_state & SCTP_ADDR_REACHABLE) &&
	    (stcb != NULL)) {
		/* Take the interface down and tell the ULP about it. */
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
		                stcb, 0,
		                (void *)net,
		                so_locked);
		net->dest_state &= ~(SCTP_ADDR_REACHABLE | SCTP_ADDR_PF);
	}
	if ((stcb != NULL) && (net == stcb->asoc.primary_destination)) {
		/* The primary path failed; pick an alternate. */
		struct sctp_nets *alt;

		alt = sctp_find_alternate_net(stcb, net, 0);
		if (alt != net) {
			if (stcb->asoc.alternate != NULL) {
				sctp_free_remote_addr(stcb->asoc.alternate);
			}
			stcb->asoc.alternate = alt;
			atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
			/* Drop the cached source address so it gets re-chosen. */
			if (net->ro._s_addr != NULL) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
			}
			net->src_addr_selected = 0;
		}
	}
}
#endif
4076
4077static int
4078sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
4079 struct sctp_tcb *stcb, /* may be NULL */
4080 struct sctp_nets *net,
4081 struct sockaddr *to,
4082 struct mbuf *m,
4083 uint32_t auth_offset,
4084 struct sctp_auth_chunk *auth,
4085 uint16_t auth_keyid,
4086 int nofragment_flag,
4087 int ecn_ok,
4088 int out_of_asoc_ok,
4089 uint16_t src_port,
4090 uint16_t dest_port,
4091 uint32_t v_tag,
4092 uint16_t port,
4093 union sctp_sockstore *over_addr,
4094#if defined(__FreeBSD__)
4095 uint8_t mflowtype, uint32_t mflowid,
4096#endif
4097#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4098 int so_locked SCTP_UNUSED
4099#else
4100 int so_locked
4101#endif
4102 )
4103/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4104{
4105 /**
4106 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4107 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4108 * - fill in the HMAC digest of any AUTH chunk in the packet.
4109 * - calculate and fill in the SCTP checksum.
4110 * - prepend an IP address header.
4111 * - if boundall use INADDR_ANY.
4112 * - if boundspecific do source address selection.
4113 * - set fragmentation option for ipV4.
4114 * - On return from IP output, check/adjust mtu size of output
4115 * interface and smallest_mtu size as well.
4116 */
4117 /* Will need ifdefs around this */
4118#ifdef __Panda__
4119 pakhandle_type o_pak;
4120#endif
4121 struct mbuf *newm;
4122 struct sctphdr *sctphdr;
4123 int packet_length;
4124 int ret;
4125#if defined(INET) || defined(INET6)
4126 uint32_t vrf_id;
4127#endif
4128#if defined(INET) || defined(INET6)
4129#if !defined(__Panda__)
4130 struct mbuf *o_pak;
4131#endif
4132 sctp_route_t *ro = NULL;
4133 struct udphdr *udp = NULL;
4134#endif
4135 uint8_t tos_value;
4136#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4137 struct socket *so = NULL;
4138#endif
4139
4140#if defined(__APPLE__)
4141 if (so_locked) {
4142 sctp_lock_assert(SCTP_INP_SO(inp));
4143 SCTP_TCB_LOCK_ASSERT(stcb);
4144 } else {
4145 sctp_unlock_assert(SCTP_INP_SO(inp));
4146 }
4147#endif
4148 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4149 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4150 sctp_m_freem(m);
4151 return (EFAULT);
4152 }
4153#if defined(INET) || defined(INET6)
4154 if (stcb) {
4155 vrf_id = stcb->asoc.vrf_id;
4156 } else {
4157 vrf_id = inp->def_vrf_id;
4158 }
4159#endif
4160 /* fill in the HMAC digest for any AUTH chunk in the packet */
4161 if ((auth != NULL) && (stcb != NULL)) {
4162 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4163 }
4164
4165 if (net) {
4166 tos_value = net->dscp;
4167 } else if (stcb) {
4168 tos_value = stcb->asoc.default_dscp;
4169 } else {
4170 tos_value = inp->sctp_ep.default_dscp;
4171 }
4172
4173 switch (to->sa_family) {
4174#ifdef INET
4175 case AF_INET:
4176 {
4177 struct ip *ip = NULL;
4178 sctp_route_t iproute;
4179 int len;
4180
4181 len = SCTP_MIN_V4_OVERHEAD;
4182 if (port) {
4183 len += sizeof(struct udphdr);
4184 }
4185 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4186 if (newm == NULL) {
4187 sctp_m_freem(m);
4188 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4189 return (ENOMEM);
4190 }
4191 SCTP_ALIGN_TO_END(newm, len);
4192 SCTP_BUF_LEN(newm) = len;
4193 SCTP_BUF_NEXT(newm) = m;
4194 m = newm;
4195#if defined(__FreeBSD__)
4196 if (net != NULL) {
4197 m->m_pkthdr.flowid = net->flowid;
4198 M_HASHTYPE_SET(m, net->flowtype);
4199 } else {
4200 m->m_pkthdr.flowid = mflowid;
4201 M_HASHTYPE_SET(m, mflowtype);
4202 }
4203#endif
4204 packet_length = sctp_calculate_len(m);
4205 ip = mtod(m, struct ip *);
4206 ip->ip_v = IPVERSION;
4207 ip->ip_hl = (sizeof(struct ip) >> 2);
4208 if (tos_value == 0) {
4209 /*
4210 * This means especially, that it is not set at the
4211 * SCTP layer. So use the value from the IP layer.
4212 */
4213#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4214 tos_value = inp->ip_inp.inp.inp_ip_tos;
4215#else
4216 tos_value = inp->inp_ip_tos;
4217#endif
4218 }
4219 tos_value &= 0xfc;
4220 if (ecn_ok) {
4221 tos_value |= sctp_get_ect(stcb);
4222 }
4223 if ((nofragment_flag) && (port == 0)) {
4224#if defined(__FreeBSD__)
4225#if __FreeBSD_version >= 1000000
4226 ip->ip_off = htons(IP_DF);
4227#else
4228 ip->ip_off = IP_DF;
4229#endif
4230#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace__)
4231 ip->ip_off = IP_DF;
4232#else
4233 ip->ip_off = htons(IP_DF);
4234#endif
4235 } else {
4236#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4237 ip->ip_off = htons(0);
4238#else
4239 ip->ip_off = 0;
4240#endif
4241 }
4242#if defined(__FreeBSD__)
4243 /* FreeBSD has a function for ip_id's */
4244 ip_fillid(ip);
4245#elif defined(__APPLE__)
4246#if RANDOM_IP_ID
4247 ip->ip_id = ip_randomid();
4248#else
4249 ip->ip_id = htons(ip_id++);
4250#endif
4251#elif defined(__Userspace__)
4252 ip->ip_id = htons(SCTP_IP_ID(inp)++);
4253#else
4254 ip->ip_id = SCTP_IP_ID(inp)++;
4255#endif
4256
4257#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4258 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4259#else
4260 ip->ip_ttl = inp->inp_ip_ttl;
4261#endif
4262#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
4263 ip->ip_len = htons(packet_length);
4264#else
4265 ip->ip_len = packet_length;
4266#endif
4267 ip->ip_tos = tos_value;
4268 if (port) {
4269 ip->ip_p = IPPROTO_UDP;
4270 } else {
4271 ip->ip_p = IPPROTO_SCTP;
4272 }
4273 ip->ip_sum = 0;
4274 if (net == NULL) {
4275 ro = &iproute;
4276 memset(&iproute, 0, sizeof(iproute));
4277#ifdef HAVE_SA_LEN
4278 memcpy(&ro->ro_dst, to, to->sa_len);
4279#else
4280 memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
4281#endif
4282 } else {
4283 ro = (sctp_route_t *)&net->ro;
4284 }
4285 /* Now the address selection part */
4286 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4287
4288 /* call the routine to select the src address */
4289 if (net && out_of_asoc_ok == 0) {
4290 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4291 sctp_free_ifa(net->ro._s_addr);
4292 net->ro._s_addr = NULL;
4293 net->src_addr_selected = 0;
4294 if (ro->ro_rt) {
4295 RTFREE(ro->ro_rt);
4296 ro->ro_rt = NULL;
4297 }
4298 }
4299 if (net->src_addr_selected == 0) {
4300 /* Cache the source address */
4301 net->ro._s_addr = sctp_source_address_selection(inp,stcb,
4302 ro, net, 0,
4303 vrf_id);
4304 net->src_addr_selected = 1;
4305 }
4306 if (net->ro._s_addr == NULL) {
4307 /* No route to host */
4308 net->src_addr_selected = 0;
4309 sctp_handle_no_route(stcb, net, so_locked);
4310 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4311 sctp_m_freem(m);
4312 return (EHOSTUNREACH);
4313 }
4314 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4315 } else {
4316 if (over_addr == NULL) {
4317 struct sctp_ifa *_lsrc;
4318
4319 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4320 net,
4321 out_of_asoc_ok,
4322 vrf_id);
4323 if (_lsrc == NULL) {
4324 sctp_handle_no_route(stcb, net, so_locked);
4325 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4326 sctp_m_freem(m);
4327 return (EHOSTUNREACH);
4328 }
4329 ip->ip_src = _lsrc->address.sin.sin_addr;
4330 sctp_free_ifa(_lsrc);
4331 } else {
4332 ip->ip_src = over_addr->sin.sin_addr;
4333 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4334 }
4335 }
4336 if (port) {
4337 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4338 sctp_handle_no_route(stcb, net, so_locked);
4339 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4340 sctp_m_freem(m);
4341 return (EHOSTUNREACH);
4342 }
4343 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4344 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4345 udp->uh_dport = port;
4346 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4347#if !defined(__Windows__) && !defined(__Userspace__)
4348#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4349 if (V_udp_cksum) {
4350 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4351 } else {
4352 udp->uh_sum = 0;
4353 }
4354#else
4355 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4356#endif
4357#else
4358 udp->uh_sum = 0;
4359#endif
4360 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4361 } else {
4362 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4363 }
4364
4365 sctphdr->src_port = src_port;
4366 sctphdr->dest_port = dest_port;
4367 sctphdr->v_tag = v_tag;
4368 sctphdr->checksum = 0;
4369
4370 /*
4371 * If source address selection fails and we find no route
4372 * then the ip_output should fail as well with a
4373 * NO_ROUTE_TO_HOST type error. We probably should catch
4374 * that somewhere and abort the association right away
4375 * (assuming this is an INIT being sent).
4376 */
4377 if (ro->ro_rt == NULL) {
4378 /*
4379 * src addr selection failed to find a route (or
4380 * valid source addr), so we can't get there from
4381 * here (yet)!
4382 */
4383 sctp_handle_no_route(stcb, net, so_locked);
4384 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4385 sctp_m_freem(m);
4386 return (EHOSTUNREACH);
4387 }
4388 if (ro != &iproute) {
4389 memcpy(&iproute, ro, sizeof(*ro));
4390 }
4391 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4392 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4393 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4394 (uint32_t)(ntohl(ip->ip_dst.s_addr)));
4395 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4396 (void *)ro->ro_rt);
4397
4398 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4399 /* failed to prepend data, give up */
4400 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4401 sctp_m_freem(m);
4402 return (ENOMEM);
4403 }
4404 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4405 if (port) {
4406#if defined(SCTP_WITH_NO_CSUM)
4407 SCTP_STAT_INCR(sctps_sendnocrc);
4408#else
4409 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4410 SCTP_STAT_INCR(sctps_sendswcrc);
4411#endif
4412#if !defined(__Windows__) && !defined(__Userspace__)
4413#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
4414 if (V_udp_cksum) {
4415 SCTP_ENABLE_UDP_CSUM(o_pak);
4416 }
4417#else
4418 SCTP_ENABLE_UDP_CSUM(o_pak);
4419#endif
4420#endif
4421 } else {
4422#if defined(SCTP_WITH_NO_CSUM)
4423 SCTP_STAT_INCR(sctps_sendnocrc);
4424#else
4425#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4426 m->m_pkthdr.csum_flags = CSUM_SCTP;
4427 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4428 SCTP_STAT_INCR(sctps_sendhwcrc);
4429#else
4430 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4431 (stcb) && (stcb->asoc.scope.loopback_scope))) {
4432 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
4433 SCTP_STAT_INCR(sctps_sendswcrc);
4434 } else {
4435 SCTP_STAT_INCR(sctps_sendnocrc);
4436 }
4437#endif
4438#endif
4439 }
4440#ifdef SCTP_PACKET_LOGGING
4441 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4442 sctp_packet_log(o_pak);
4443#endif
4444 /* send it out. table id is taken from stcb */
4445#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4446 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4447 so = SCTP_INP_SO(inp);
4448 SCTP_SOCKET_UNLOCK(so, 0);
4449 }
4450#endif
4451 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4452#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4453 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4454 atomic_add_int(&stcb->asoc.refcnt, 1);
4455 SCTP_TCB_UNLOCK(stcb);
4456 SCTP_SOCKET_LOCK(so, 0);
4457 SCTP_TCB_LOCK(stcb);
4458 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4459 }
4460#endif
4461 SCTP_STAT_INCR(sctps_sendpackets);
4462 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4463 if (ret)
4464 SCTP_STAT_INCR(sctps_senderrors);
4465
4466 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4467 if (net == NULL) {
4468 /* free tempy routes */
4469#if defined(__FreeBSD__) && __FreeBSD_version > 901000
4470 RO_RTFREE(ro);
4471#else
4472 if (ro->ro_rt) {
4473 RTFREE(ro->ro_rt);
4474 ro->ro_rt = NULL;
4475 }
4476#endif
4477 } else {
4478 /* PMTU check versus smallest asoc MTU goes here */
4479 if ((ro->ro_rt != NULL) &&
4480 (net->ro._s_addr)) {
4481 uint32_t mtu;
4482 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4483 if (net->port) {
4484 mtu -= sizeof(struct udphdr);
4485 }
4486 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4487 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4488 net->mtu = mtu;
4489 }
4490 } else if (ro->ro_rt == NULL) {
4491 /* route was freed */
4492 if (net->ro._s_addr &&
4493 net->src_addr_selected) {
4494 sctp_free_ifa(net->ro._s_addr);
4495 net->ro._s_addr = NULL;
4496 }
4497 net->src_addr_selected = 0;
4498 }
4499 }
4500 return (ret);
4501 }
4502#endif
4503#ifdef INET6
4504 case AF_INET6:
4505 {
4506 uint32_t flowlabel, flowinfo;
4507 struct ip6_hdr *ip6h;
4508 struct route_in6 ip6route;
4509#if !(defined(__Panda__) || defined(__Userspace__))
4510 struct ifnet *ifp;
4511#endif
4512 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4513 int prev_scope = 0;
4514#ifdef SCTP_EMBEDDED_V6_SCOPE
4515 struct sockaddr_in6 lsa6_storage;
4516 int error;
4517#endif
4518 u_short prev_port = 0;
4519 int len;
4520
4521 if (net) {
4522 flowlabel = net->flowlabel;
4523 } else if (stcb) {
4524 flowlabel = stcb->asoc.default_flowlabel;
4525 } else {
4526 flowlabel = inp->sctp_ep.default_flowlabel;
4527 }
4528 if (flowlabel == 0) {
4529 /*
4530 * This means especially, that it is not set at the
4531 * SCTP layer. So use the value from the IP layer.
4532 */
4533#if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4534 flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
4535#else
4536 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4537#endif
4538 }
4539 flowlabel &= 0x000fffff;
4540 len = SCTP_MIN_OVERHEAD;
4541 if (port) {
4542 len += sizeof(struct udphdr);
4543 }
4544 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4545 if (newm == NULL) {
4546 sctp_m_freem(m);
4547 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4548 return (ENOMEM);
4549 }
4550 SCTP_ALIGN_TO_END(newm, len);
4551 SCTP_BUF_LEN(newm) = len;
4552 SCTP_BUF_NEXT(newm) = m;
4553 m = newm;
4554#if defined(__FreeBSD__)
4555 if (net != NULL) {
4556 m->m_pkthdr.flowid = net->flowid;
4557 M_HASHTYPE_SET(m, net->flowtype);
4558 } else {
4559 m->m_pkthdr.flowid = mflowid;
4560 M_HASHTYPE_SET(m, mflowtype);
4561 }
4562#endif
4563 packet_length = sctp_calculate_len(m);
4564
4565 ip6h = mtod(m, struct ip6_hdr *);
4566 /* protect *sin6 from overwrite */
4567 sin6 = (struct sockaddr_in6 *)to;
4568 tmp = *sin6;
4569 sin6 = &tmp;
4570
4571#ifdef SCTP_EMBEDDED_V6_SCOPE
4572 /* KAME hack: embed scopeid */
4573#if defined(__APPLE__)
4574#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4575 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4576#else
4577 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4578#endif
4579#elif defined(SCTP_KAME)
4580 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4581#else
4582 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4583#endif
4584 {
4585 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4586 return (EINVAL);
4587 }
4588#endif /* SCTP_EMBEDDED_V6_SCOPE */
4589 if (net == NULL) {
4590 memset(&ip6route, 0, sizeof(ip6route));
4591 ro = (sctp_route_t *)&ip6route;
4592#ifdef HAVE_SIN6_LEN
4593 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4594#else
4595 memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
4596#endif
4597 } else {
4598 ro = (sctp_route_t *)&net->ro;
4599 }
4600 /*
4601 * We assume here that inp_flow is in host byte order within
4602 * the TCB!
4603 */
4604 if (tos_value == 0) {
4605 /*
4606 * This means especially, that it is not set at the
4607 * SCTP layer. So use the value from the IP layer.
4608 */
4609#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
4610#if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
4611 tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
4612#else
4613 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4614#endif
4615#endif
4616 }
4617 tos_value &= 0xfc;
4618 if (ecn_ok) {
4619 tos_value |= sctp_get_ect(stcb);
4620 }
4621 flowinfo = 0x06;
4622 flowinfo <<= 8;
4623 flowinfo |= tos_value;
4624 flowinfo <<= 20;
4625 flowinfo |= flowlabel;
4626 ip6h->ip6_flow = htonl(flowinfo);
4627 if (port) {
4628 ip6h->ip6_nxt = IPPROTO_UDP;
4629 } else {
4630 ip6h->ip6_nxt = IPPROTO_SCTP;
4631 }
4632 ip6h->ip6_plen = (uint16_t)(packet_length - sizeof(struct ip6_hdr));
4633 ip6h->ip6_dst = sin6->sin6_addr;
4634
4635 /*
4636 * Add SRC address selection here: we can only reuse to a
4637 * limited degree the kame src-addr-sel, since we can try
4638 * their selection but it may not be bound.
4639 */
4640 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4641 lsa6_tmp.sin6_family = AF_INET6;
4642#ifdef HAVE_SIN6_LEN
4643 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4644#endif
4645 lsa6 = &lsa6_tmp;
4646 if (net && out_of_asoc_ok == 0) {
4647 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
4648 sctp_free_ifa(net->ro._s_addr);
4649 net->ro._s_addr = NULL;
4650 net->src_addr_selected = 0;
4651 if (ro->ro_rt) {
4652 RTFREE(ro->ro_rt);
4653 ro->ro_rt = NULL;
4654 }
4655 }
4656 if (net->src_addr_selected == 0) {
4657#ifdef SCTP_EMBEDDED_V6_SCOPE
4658 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4659 /* KAME hack: embed scopeid */
4660#if defined(__APPLE__)
4661#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4662 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4663#else
4664 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4665#endif
4666#elif defined(SCTP_KAME)
4667 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4668#else
4669 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4670#endif
4671 {
4672 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4673 return (EINVAL);
4674 }
4675#endif /* SCTP_EMBEDDED_V6_SCOPE */
4676 /* Cache the source address */
4677 net->ro._s_addr = sctp_source_address_selection(inp,
4678 stcb,
4679 ro,
4680 net,
4681 0,
4682 vrf_id);
4683#ifdef SCTP_EMBEDDED_V6_SCOPE
4684#ifdef SCTP_KAME
4685 (void)sa6_recoverscope(sin6);
4686#else
4687 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4688#endif /* SCTP_KAME */
4689#endif /* SCTP_EMBEDDED_V6_SCOPE */
4690 net->src_addr_selected = 1;
4691 }
4692 if (net->ro._s_addr == NULL) {
4693 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4694 net->src_addr_selected = 0;
4695 sctp_handle_no_route(stcb, net, so_locked);
4696 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4697 sctp_m_freem(m);
4698 return (EHOSTUNREACH);
4699 }
4700 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4701 } else {
4702#ifdef SCTP_EMBEDDED_V6_SCOPE
4703 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4704 /* KAME hack: embed scopeid */
4705#if defined(__APPLE__)
4706#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
4707 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
4708#else
4709 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
4710#endif
4711#elif defined(SCTP_KAME)
4712 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
4713#else
4714 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
4715#endif
4716 {
4717 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4718 return (EINVAL);
4719 }
4720#endif /* SCTP_EMBEDDED_V6_SCOPE */
4721 if (over_addr == NULL) {
4722 struct sctp_ifa *_lsrc;
4723
4724 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4725 net,
4726 out_of_asoc_ok,
4727 vrf_id);
4728 if (_lsrc == NULL) {
4729 sctp_handle_no_route(stcb, net, so_locked);
4730 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4731 sctp_m_freem(m);
4732 return (EHOSTUNREACH);
4733 }
4734 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4735 sctp_free_ifa(_lsrc);
4736 } else {
4737 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4738 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4739 }
4740#ifdef SCTP_EMBEDDED_V6_SCOPE
4741#ifdef SCTP_KAME
4742 (void)sa6_recoverscope(sin6);
4743#else
4744 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
4745#endif /* SCTP_KAME */
4746#endif /* SCTP_EMBEDDED_V6_SCOPE */
4747 }
4748 lsa6->sin6_port = inp->sctp_lport;
4749
4750 if (ro->ro_rt == NULL) {
4751 /*
4752 * src addr selection failed to find a route (or
4753 * valid source addr), so we can't get there from
4754 * here!
4755 */
4756 sctp_handle_no_route(stcb, net, so_locked);
4757 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4758 sctp_m_freem(m);
4759 return (EHOSTUNREACH);
4760 }
4761#ifndef SCOPEDROUTING
4762#ifdef SCTP_EMBEDDED_V6_SCOPE
4763 /*
4764 * XXX: sa6 may not have a valid sin6_scope_id in the
4765 * non-SCOPEDROUTING case.
4766 */
4767 bzero(&lsa6_storage, sizeof(lsa6_storage));
4768 lsa6_storage.sin6_family = AF_INET6;
4769#ifdef HAVE_SIN6_LEN
4770 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4771#endif
4772#ifdef SCTP_KAME
4773 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4774 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4775#else
4776 if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
4777 NULL)) != 0) {
4778#endif /* SCTP_KAME */
4779 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4780 sctp_m_freem(m);
4781 return (error);
4782 }
4783 /* XXX */
4784 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4785 lsa6_storage.sin6_port = inp->sctp_lport;
4786 lsa6 = &lsa6_storage;
4787#endif /* SCTP_EMBEDDED_V6_SCOPE */
4788#endif /* SCOPEDROUTING */
4789 ip6h->ip6_src = lsa6->sin6_addr;
4790
4791 if (port) {
4792 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4793 sctp_handle_no_route(stcb, net, so_locked);
4794 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4795 sctp_m_freem(m);
4796 return (EHOSTUNREACH);
4797 }
4798 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4799 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4800 udp->uh_dport = port;
4801 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4802 udp->uh_sum = 0;
4803 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4804 } else {
4805 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4806 }
4807
4808 sctphdr->src_port = src_port;
4809 sctphdr->dest_port = dest_port;
4810 sctphdr->v_tag = v_tag;
4811 sctphdr->checksum = 0;
4812
4813 /*
4814 * We set the hop limit now since there is a good chance
4815 * that our ro pointer is now filled
4816 */
4817 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4818#if !(defined(__Panda__) || defined(__Userspace__))
4819 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4820#endif
4821
4822#ifdef SCTP_DEBUG
4823 /* Copy to be sure something bad is not happening */
4824 sin6->sin6_addr = ip6h->ip6_dst;
4825 lsa6->sin6_addr = ip6h->ip6_src;
4826#endif
4827
4828 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4829 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4830 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4831 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4832 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4833 if (net) {
4834 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4835 /* preserve the port and scope for link local send */
4836 prev_scope = sin6->sin6_scope_id;
4837 prev_port = sin6->sin6_port;
4838 }
4839
4840 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4841 /* failed to prepend data, give up */
4842 sctp_m_freem(m);
4843 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4844 return (ENOMEM);
4845 }
4846 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4847 if (port) {
4848#if defined(SCTP_WITH_NO_CSUM)
4849 SCTP_STAT_INCR(sctps_sendnocrc);
4850#else
4851 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4852 SCTP_STAT_INCR(sctps_sendswcrc);
4853#endif
4854#if defined(__Windows__)
4855 udp->uh_sum = 0;
4856#elif !defined(__Userspace__)
4857 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4858 udp->uh_sum = 0xffff;
4859 }
4860#endif
4861 } else {
4862#if defined(SCTP_WITH_NO_CSUM)
4863 SCTP_STAT_INCR(sctps_sendnocrc);
4864#else
4865#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
4866#if __FreeBSD_version < 900000
4867 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4868 SCTP_STAT_INCR(sctps_sendswcrc);
4869#else
4870#if __FreeBSD_version > 901000
4871 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4872#else
4873 m->m_pkthdr.csum_flags = CSUM_SCTP;
4874#endif
4875 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4876 SCTP_STAT_INCR(sctps_sendhwcrc);
4877#endif
4878#else
4879 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4880 (stcb) && (stcb->asoc.scope.loopback_scope))) {
4881 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
4882 SCTP_STAT_INCR(sctps_sendswcrc);
4883 } else {
4884 SCTP_STAT_INCR(sctps_sendnocrc);
4885 }
4886#endif
4887#endif
4888 }
4889 /* send it out. table id is taken from stcb */
4890#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4891 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4892 so = SCTP_INP_SO(inp);
4893 SCTP_SOCKET_UNLOCK(so, 0);
4894 }
4895#endif
4896#ifdef SCTP_PACKET_LOGGING
4897 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4898 sctp_packet_log(o_pak);
4899#endif
4900#if !(defined(__Panda__) || defined(__Userspace__))
4901 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4902#else
4903 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
4904#endif
4905#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4906 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4907 atomic_add_int(&stcb->asoc.refcnt, 1);
4908 SCTP_TCB_UNLOCK(stcb);
4909 SCTP_SOCKET_LOCK(so, 0);
4910 SCTP_TCB_LOCK(stcb);
4911 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4912 }
4913#endif
4914 if (net) {
4915 /* for link local this must be done */
4916 sin6->sin6_scope_id = prev_scope;
4917 sin6->sin6_port = prev_port;
4918 }
4919 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4920 SCTP_STAT_INCR(sctps_sendpackets);
4921 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4922 if (ret) {
4923 SCTP_STAT_INCR(sctps_senderrors);
4924 }
4925 if (net == NULL) {
4926 /* Now if we had a temp route free it */
4927#if defined(__FreeBSD__) && __FreeBSD_version > 901000
4928 RO_RTFREE(ro);
4929#else
4930 if (ro->ro_rt) {
4931 RTFREE(ro->ro_rt);
4932 ro->ro_rt = NULL;
4933 }
4934#endif
4935 } else {
4936 /* PMTU check versus smallest asoc MTU goes here */
4937 if (ro->ro_rt == NULL) {
4938 /* Route was freed */
4939 if (net->ro._s_addr &&
4940 net->src_addr_selected) {
4941 sctp_free_ifa(net->ro._s_addr);
4942 net->ro._s_addr = NULL;
4943 }
4944 net->src_addr_selected = 0;
4945 }
4946 if ((ro->ro_rt != NULL) &&
4947 (net->ro._s_addr)) {
4948 uint32_t mtu;
4949 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4950 if (mtu &&
4951 (stcb->asoc.smallest_mtu > mtu)) {
4952 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4953 net->mtu = mtu;
4954 if (net->port) {
4955 net->mtu -= sizeof(struct udphdr);
4956 }
4957 }
4958 }
4959#if !defined(__Panda__) && !defined(__Userspace__)
4960 else if (ifp) {
4961#if defined(__Windows__)
4962#define ND_IFINFO(ifp) (ifp)
4963#define linkmtu if_mtu
4964#endif
4965 if (ND_IFINFO(ifp)->linkmtu &&
4966 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4967 sctp_mtu_size_reset(inp,
4968 &stcb->asoc,
4969 ND_IFINFO(ifp)->linkmtu);
4970 }
4971 }
4972#endif
4973 }
4974 return (ret);
4975 }
4976#endif
4977#if defined(__Userspace__)
4978 case AF_CONN:
4979 {
4980 char *buffer;
4981 struct sockaddr_conn *sconn;
4982 int len;
4983
4984 sconn = (struct sockaddr_conn *)to;
4985 len = sizeof(struct sctphdr);
4986 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4987 if (newm == NULL) {
4988 sctp_m_freem(m);
4989 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4990 return (ENOMEM);
4991 }
4992 SCTP_ALIGN_TO_END(newm, len);
4993 SCTP_BUF_LEN(newm) = len;
4994 SCTP_BUF_NEXT(newm) = m;
4995 m = newm;
4996 packet_length = sctp_calculate_len(m);
4997 sctphdr = mtod(m, struct sctphdr *);
4998 sctphdr->src_port = src_port;
4999 sctphdr->dest_port = dest_port;
5000 sctphdr->v_tag = v_tag;
5001 sctphdr->checksum = 0;
5002#if defined(SCTP_WITH_NO_CSUM)
5003 SCTP_STAT_INCR(sctps_sendnocrc);
5004#else
5005 if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
5006 sctphdr->checksum = sctp_calculate_cksum(m, 0);
5007 SCTP_STAT_INCR(sctps_sendswcrc);
5008 } else {
5009 SCTP_STAT_INCR(sctps_sendhwcrc);
5010 }
5011#endif
5012 if (tos_value == 0) {
5013 tos_value = inp->ip_inp.inp.inp_ip_tos;
5014 }
5015 tos_value &= 0xfc;
5016 if (ecn_ok) {
5017 tos_value |= sctp_get_ect(stcb);
5018 }
5019 /* Don't alloc/free for each packet */
5020 if ((buffer = malloc(packet_length)) != NULL) {
5021 m_copydata(m, 0, packet_length, buffer);
5022 ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
5023 free(buffer);
5024 } else {
5025 ret = ENOMEM;
5026 }
5027 sctp_m_freem(m);
5028 return (ret);
5029 }
5030#endif
5031 default:
5032 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
5033 ((struct sockaddr *)to)->sa_family);
5034 sctp_m_freem(m);
5035 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
5036 return (EFAULT);
5037 }
5038}
5039
5040
5041void
5042sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
5043#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5044 SCTP_UNUSED
5045#endif
5046 )
5047{
5048 struct mbuf *m, *m_last;
5049 struct sctp_nets *net;
5050 struct sctp_init_chunk *init;
5051 struct sctp_supported_addr_param *sup_addr;
5052 struct sctp_adaptation_layer_indication *ali;
5053 struct sctp_supported_chunk_types_param *pr_supported;
5054 struct sctp_paramhdr *ph;
5055 int cnt_inits_to = 0;
5056 int error;
5057 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5058
5059#if defined(__APPLE__)
5060 if (so_locked) {
5061 sctp_lock_assert(SCTP_INP_SO(inp));
5062 } else {
5063 sctp_unlock_assert(SCTP_INP_SO(inp));
5064 }
5065#endif
5066 /* INIT's always go to the primary (and usually ONLY address) */
5067 net = stcb->asoc.primary_destination;
5068 if (net == NULL) {
5069 net = TAILQ_FIRST(&stcb->asoc.nets);
5070 if (net == NULL) {
5071 /* TSNH */
5072 return;
5073 }
5074 /* we confirm any address we send an INIT to */
5075 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5076 (void)sctp_set_primary_addr(stcb, NULL, net);
5077 } else {
5078 /* we confirm any address we send an INIT to */
5079 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
5080 }
5081 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
5082#ifdef INET6
5083 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
5084 /*
5085 * special hook, if we are sending to link local it will not
5086 * show up in our private address count.
5087 */
5088 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
5089 cnt_inits_to = 1;
5090 }
5091#endif
5092 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5093 /* This case should not happen */
5094 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
5095 return;
5096 }
5097 /* start the INIT timer */
5098 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
5099
5100 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
5101 if (m == NULL) {
5102 /* No memory, INIT timer will re-attempt. */
5103 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
5104 return;
5105 }
5106 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
5107 padding_len = 0;
5108 /* Now lets put the chunk header in place */
5109 init = mtod(m, struct sctp_init_chunk *);
5110 /* now the chunk header */
5111 init->ch.chunk_type = SCTP_INITIATION;
5112 init->ch.chunk_flags = 0;
5113 /* fill in later from mbuf we build */
5114 init->ch.chunk_length = 0;
5115 /* place in my tag */
5116 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
5117 /* set up some of the credits. */
5118 init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
5119 SCTP_MINIMAL_RWND));
5120 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
5121 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
5122 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
5123
5124 /* Adaptation layer indication parameter */
5125 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5126 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5127 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
5128 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5129 ali->ph.param_length = htons(parameter_len);
5130 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5131 chunk_len += parameter_len;
5132 }
5133
5134 /* ECN parameter */
5135 if (stcb->asoc.ecn_supported == 1) {
5136 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5137 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5138 ph->param_type = htons(SCTP_ECN_CAPABLE);
5139 ph->param_length = htons(parameter_len);
5140 chunk_len += parameter_len;
5141 }
5142
5143 /* PR-SCTP supported parameter */
5144 if (stcb->asoc.prsctp_supported == 1) {
5145 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5146 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5147 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5148 ph->param_length = htons(parameter_len);
5149 chunk_len += parameter_len;
5150 }
5151
5152 /* Add NAT friendly parameter. */
5153 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
5154 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5155 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
5156 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5157 ph->param_length = htons(parameter_len);
5158 chunk_len += parameter_len;
5159 }
5160
5161 /* And now tell the peer which extensions we support */
5162 num_ext = 0;
5163 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
5164 if (stcb->asoc.prsctp_supported == 1) {
5165 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5166 if (stcb->asoc.idata_supported) {
5167 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5168 }
5169 }
5170 if (stcb->asoc.auth_supported == 1) {
5171 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5172 }
5173 if (stcb->asoc.asconf_supported == 1) {
5174 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5175 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5176 }
5177 if (stcb->asoc.reconfig_supported == 1) {
5178 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5179 }
5180 if (stcb->asoc.idata_supported) {
5181 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5182 }
5183 if (stcb->asoc.nrsack_supported == 1) {
5184 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5185 }
5186 if (stcb->asoc.pktdrop_supported == 1) {
5187 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5188 }
5189 if (num_ext > 0) {
5190 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5191 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5192 pr_supported->ph.param_length = htons(parameter_len);
5193 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5194 chunk_len += parameter_len;
5195 }
5196 /* add authentication parameters */
5197 if (stcb->asoc.auth_supported) {
5198 /* attach RANDOM parameter, if available */
5199 if (stcb->asoc.authinfo.random != NULL) {
5200 struct sctp_auth_random *randp;
5201
5202 if (padding_len > 0) {
5203 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5204 chunk_len += padding_len;
5205 padding_len = 0;
5206 }
5207 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
5208 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
5209 /* random key already contains the header */
5210 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
5211 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5212 chunk_len += parameter_len;
5213 }
5214 /* add HMAC_ALGO parameter */
5215 if (stcb->asoc.local_hmacs != NULL) {
5216 struct sctp_auth_hmac_algo *hmacs;
5217
5218 if (padding_len > 0) {
5219 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5220 chunk_len += padding_len;
5221 padding_len = 0;
5222 }
5223 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
5224 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
5225 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
5226 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5227 hmacs->ph.param_length = htons(parameter_len);
5228 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
5229 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5230 chunk_len += parameter_len;
5231 }
5232 /* add CHUNKS parameter */
5233 if (stcb->asoc.local_auth_chunks != NULL) {
5234 struct sctp_auth_chunk_list *chunks;
5235
5236 if (padding_len > 0) {
5237 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5238 chunk_len += padding_len;
5239 padding_len = 0;
5240 }
5241 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
5242 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
5243 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
5244 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5245 chunks->ph.param_length = htons(parameter_len);
5246 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
5247 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5248 chunk_len += parameter_len;
5249 }
5250 }
5251
5252 /* now any cookie time extensions */
5253 if (stcb->asoc.cookie_preserve_req) {
5254 struct sctp_cookie_perserve_param *cookie_preserve;
5255
5256 if (padding_len > 0) {
5257 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5258 chunk_len += padding_len;
5259 padding_len = 0;
5260 }
5261 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
5262 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
5263 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
5264 cookie_preserve->ph.param_length = htons(parameter_len);
5265 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
5266 stcb->asoc.cookie_preserve_req = 0;
5267 chunk_len += parameter_len;
5268 }
5269
5270 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
5271 uint8_t i;
5272
5273 if (padding_len > 0) {
5274 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
5275 chunk_len += padding_len;
5276 padding_len = 0;
5277 }
5278 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5279 if (stcb->asoc.scope.ipv4_addr_legal) {
5280 parameter_len += (uint16_t)sizeof(uint16_t);
5281 }
5282 if (stcb->asoc.scope.ipv6_addr_legal) {
5283 parameter_len += (uint16_t)sizeof(uint16_t);
5284 }
5285 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
5286 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
5287 sup_addr->ph.param_length = htons(parameter_len);
5288 i = 0;
5289 if (stcb->asoc.scope.ipv4_addr_legal) {
5290 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
5291 }
5292 if (stcb->asoc.scope.ipv6_addr_legal) {
5293 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
5294 }
5295 padding_len = 4 - 2 * i;
5296 chunk_len += parameter_len;
5297 }
5298
5299 SCTP_BUF_LEN(m) = chunk_len;
5300 /* now the addresses */
5301 /* To optimize this we could put the scoping stuff
5302 * into a structure and remove the individual uint8's from
5303 * the assoc structure. Then we could just sifa in the
5304 * address within the stcb. But for now this is a quick
5305 * hack to get the address stuff teased apart.
5306 */
5307 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
5308 m, cnt_inits_to,
5309 &padding_len, &chunk_len);
5310
5311 init->ch.chunk_length = htons(chunk_len);
5312 if (padding_len > 0) {
5313 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
5314 sctp_m_freem(m);
5315 return;
5316 }
5317 }
5318 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
5319 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5320 (struct sockaddr *)&net->ro._l_addr,
5321 m, 0, NULL, 0, 0, 0, 0,
5322 inp->sctp_lport, stcb->rport, htonl(0),
5323 net->port, NULL,
5324#if defined(__FreeBSD__)
5325 0, 0,
5326#endif
5327 so_locked))) {
5328 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
5329 if (error == ENOBUFS) {
5330 stcb->asoc.ifp_had_enobuf = 1;
5331 SCTP_STAT_INCR(sctps_lowlevelerr);
5332 }
5333 } else {
5334 stcb->asoc.ifp_had_enobuf = 0;
5335 }
5336 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5337 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5338}
5339
/*
 * Walk the parameter list of an INIT or INIT-ACK chunk and check every
 * parameter against the set this stack understands.
 *
 * in_initpkt       - mbuf chain holding the packet.
 * param_offset     - offset of the first parameter (past the fixed
 *                    INIT/INIT-ACK header).
 * abort_processing - out: set to 1 when a parameter requires that the
 *                    whole chunk be dropped.
 * cp               - chunk header, used only for its chunk_length.
 * nat_friendly     - out: set to 1 when SCTP_HAS_NAT_SUPPORT is seen.
 *
 * Returns an mbuf chain of operational-error causes to report back to
 * the peer (UNRECOGNIZED_PARAMETER / UNRESOLVABLE_ADDR / PROTOCOL_VIOLATION),
 * or NULL when there is nothing to report.  The caller owns the returned
 * mbuf chain.
 */
struct mbuf *
sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
    int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
{
	/*
	 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
	 * being equal to the beginning of the params (i.e. iphlen +
	 * sizeof(struct sctp_init_msg)), parse through the parameters to the
	 * end of the mbuf verifying that all parameters are known.
	 *
	 * For unknown parameters build and return a mbuf with
	 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
	 * processing this chunk stop, and set *abort_processing to 1.
	 *
	 * By having param_offset be pre-set to where parameters begin it is
	 * hoped that this routine may be reused in the future by new
	 * features.
	 */
	struct sctp_paramhdr *phdr, params;

	struct mbuf *mat, *op_err;
	char tempbuf[SCTP_PARAM_BUFFER_SIZE];
	int at, limit, pad_needed;
	uint16_t ptype, plen, padded_size;
	int err_at;		/* current append offset into op_err */

	*abort_processing = 0;
	mat = in_initpkt;
	err_at = 0;
	/* Parameter bytes remaining, per the chunk header's length claim. */
	limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
	at = param_offset;
	op_err = NULL;
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
	phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
	/*
	 * NOTE(review): limit is signed but the loop condition casts it to
	 * size_t, so a negative limit looks huge and the loop keeps going.
	 * limit can only go a few bytes negative (SCTP_SIZE32 rounds plen up
	 * by at most 3) and the signed "plen > limit" check below then trips
	 * on the next pass, but the cast deserves a second look - confirm.
	 */
	while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
			/* wacked parameter: length claims more than remains,
			 * or less than a bare parameter header. */
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
			goto invalid_size;
		}
		limit -= SCTP_SIZE32(plen);
		/*-
		 * All parameters for all chunks that we know/understand are
		 * listed here. We process them other places and make
		 * appropriate stop actions per the upper bits. However this
		 * is the generic routine processor's can call to get back
		 * an operr.. to either incorporate (init-ack) or send.
		 */
		padded_size = SCTP_SIZE32(plen);
		switch (ptype) {
			/* Param's with variable size */
		case SCTP_HEARTBEAT_INFO:
		case SCTP_STATE_COOKIE:
		case SCTP_UNRECOG_PARAM:
		case SCTP_ERROR_CAUSE_IND:
			/* ok skip fwd */
			at += padded_size;
			break;
			/* Param's with variable size within a range */
		case SCTP_CHUNK_LIST:
		case SCTP_SUPPORTED_CHUNK_EXT:
			if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_SUPPORTED_ADDRTYPE:
			if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_RANDOM:
			if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_SET_PRIM_ADDR:
		case SCTP_DEL_IP_ADDRESS:
		case SCTP_ADD_IP_ADDRESS:
			/* ASCONF address params: exactly a v4- or v6-sized body. */
			if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
			    (padded_size != sizeof(struct sctp_asconf_addr_param))) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
			/* Param's with a fixed size */
		case SCTP_IPV4_ADDRESS:
			if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_IPV6_ADDRESS:
			if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_COOKIE_PRESERVE:
			if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_HAS_NAT_SUPPORT:
			/* Record NAT friendliness for the caller, then validate
			 * the size exactly like PRSCTP_SUPPORTED below. */
			*nat_friendly = 1;
			/* fall through */
		case SCTP_PRSCTP_SUPPORTED:
			if (padded_size != sizeof(struct sctp_paramhdr)) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_ECN_CAPABLE:
			if (padded_size != sizeof(struct sctp_paramhdr)) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_ULP_ADAPTATION:
			if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_SUCCESS_REPORT:
			if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
				goto invalid_size;
			}
			at += padded_size;
			break;
		case SCTP_HOSTNAME_ADDRESS:
		{
			/*
			 * We can NOT handle HOST NAME addresses!!  Abort
			 * processing and return an UNRESOLVABLE_ADDR error
			 * cause (allocating op_err here if none exists yet).
			 */
			int l_len;

			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
			*abort_processing = 1;
			if (op_err == NULL) {
				/* Ok need to try to get a mbuf */
#ifdef INET6
				l_len = SCTP_MIN_OVERHEAD;
#else
				l_len = SCTP_MIN_V4_OVERHEAD;
#endif
				l_len += sizeof(struct sctp_chunkhdr);
				l_len += plen;
				l_len += sizeof(struct sctp_paramhdr);
				op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
				if (op_err) {
					SCTP_BUF_LEN(op_err) = 0;
					/*
					 * pre-reserve space for ip and sctp
					 * header and chunk hdr
					 */
#ifdef INET6
					SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
#else
					SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
#endif
					SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
					SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
				}
			}
			if (op_err) {
				/* If we have space */
				struct sctp_paramhdr s;

				if (err_at % 4) {
					/* Error causes are 4-byte aligned;
					 * zero-fill the gap. */
					uint32_t cpthis = 0;

					pad_needed = 4 - (err_at % 4);
					m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
					err_at += pad_needed;
				}
				s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
				s.param_length = htons(sizeof(s) + plen);
				m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
				err_at += sizeof(s);
				/*
				 * NOTE(review): unlike the default case below,
				 * plen is NOT clamped to sizeof(tempbuf) here,
				 * yet the m_copyback after this reads plen
				 * bytes out of tempbuf - looks like a possible
				 * stack over-read for plen > SCTP_PARAM_BUFFER_SIZE;
				 * verify against upstream fixes.
				 */
				phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
				if (phdr == NULL) {
					sctp_m_freem(op_err);
					/*
					 * we are out of memory but we still
					 * need to have a look at what to do
					 * (the system is in trouble
					 * though).
					 */
					return (NULL);
				}
				m_copyback(op_err, err_at, plen, (caddr_t)phdr);
			}
			return (op_err);
			break;
		}
		default:
			/*
			 * we do not recognize the parameter figure out what
			 * we do.  The upper two bits of the parameter type
			 * encode the required action (RFC 4960, section 3.2.1):
			 * 0x4000 = report the parameter, 0x8000 = continue
			 * processing after it.
			 */
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
			if ((ptype & 0x4000) == 0x4000) {
				/* Report bit is set?? */
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
				if (op_err == NULL) {
					int l_len;
					/* Ok need to try to get an mbuf */
#ifdef INET6
					l_len = SCTP_MIN_OVERHEAD;
#else
					l_len = SCTP_MIN_V4_OVERHEAD;
#endif
					l_len += sizeof(struct sctp_chunkhdr);
					l_len += plen;
					l_len += sizeof(struct sctp_paramhdr);
					op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
					if (op_err) {
						SCTP_BUF_LEN(op_err) = 0;
#ifdef INET6
						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
#else
						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
#endif
						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
					}
				}
				if (op_err) {
					/* If we have space */
					struct sctp_paramhdr s;

					if (err_at % 4) {
						/* keep causes 4-byte aligned */
						uint32_t cpthis = 0;

						pad_needed = 4 - (err_at % 4);
						m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
						err_at += pad_needed;
					}
					s.param_type = htons(SCTP_UNRECOG_PARAM);
					s.param_length = htons(sizeof(s) + plen);
					m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
					err_at += sizeof(s);
					if (plen > sizeof(tempbuf)) {
						/* truncate the echoed copy to
						 * what tempbuf can hold */
						plen = sizeof(tempbuf);
					}
					phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
					if (phdr == NULL) {
						sctp_m_freem(op_err);
						/*
						 * we are out of memory but
						 * we still need to have a
						 * look at what to do (the
						 * system is in trouble
						 * though).
						 */
						op_err = NULL;
						goto more_processing;
					}
					m_copyback(op_err, err_at, plen, (caddr_t)phdr);
					err_at += plen;
				}
			}
		more_processing:
			if ((ptype & 0x8000) == 0x0000) {
				/* "stop processing" action: hand back whatever
				 * error causes we accumulated so far. */
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
				return (op_err);
			} else {
				/* skip this chunk and continue processing */
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
				at += SCTP_SIZE32(plen);
			}
			break;

		}
		phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
	}
	return (op_err);
 invalid_size:
	/*
	 * A parameter length failed validation: abort the association setup
	 * and report a PROTOCOL_VIOLATION cause echoing the offending
	 * parameter header.
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
	*abort_processing = 1;
	if ((op_err == NULL) && phdr) {
		int l_len;
#ifdef INET6
		l_len = SCTP_MIN_OVERHEAD;
#else
		l_len = SCTP_MIN_V4_OVERHEAD;
#endif
		l_len += sizeof(struct sctp_chunkhdr);
		l_len += (2 * sizeof(struct sctp_paramhdr));
		op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
		if (op_err) {
			SCTP_BUF_LEN(op_err) = 0;
#ifdef INET6
			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
#else
			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
#endif
			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
		}
	}
	if ((op_err) && phdr) {
		struct sctp_paramhdr s;

		if (err_at % 4) {
			/* keep causes 4-byte aligned */
			uint32_t cpthis = 0;

			pad_needed = 4 - (err_at % 4);
			m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
			err_at += pad_needed;
		}
		s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
		s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
		m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
		err_at += sizeof(s);
		/* Only copy back the p-hdr that caused the issue */
		m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
	}
	return (op_err);
}
5675
/*
 * Check an incoming INIT against the addresses already known for the
 * association.  First the packet's source address, then every IPv4/IPv6
 * address parameter in the INIT, is looked up in asoc->nets.
 *
 * Returns 1 (caller should drop the packet) as soon as an address is
 * found that the association does not already know, or when an address
 * parameter is malformed; returns 0 when no new addresses appear.
 */
static int
sctp_are_there_new_addresses(struct sctp_association *asoc,
    struct mbuf *in_initpkt, int offset, struct sockaddr *src)
{
	/*
	 * Given a INIT packet, look through the packet to verify that there
	 * are NO new addresses.  Return (1) to drop the packet when we find
	 * one.  (The error-reporting for un-understood parameters described
	 * in older comments is handled elsewhere, in
	 * sctp_arethere_unrecognized_parameters.)
	 */
	struct sockaddr *sa_touse;
	struct sockaddr *sa;
	struct sctp_paramhdr *phdr, params;
	uint16_t ptype, plen;
	uint8_t fnd;		/* "found" flag for the lookup loops */
	struct sctp_nets *net;
	int check_src;		/* whether src's family is in scope */
#ifdef INET
	struct sockaddr_in sin4, *sa4;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6, *sa6;
#endif
#if defined(__Userspace__)
	struct sockaddr_conn *sac;
#endif

	/* Template sockaddrs used to compare address parameters below. */
#ifdef INET
	memset(&sin4, 0, sizeof(sin4));
	sin4.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	sin4.sin_len = sizeof(sin4);
#endif
#endif
#ifdef INET6
	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
	sin6.sin6_len = sizeof(sin6);
#endif
#endif
	/* First what about the src address of the pkt ? */
	/* Only check the source when its family is legal for this scope. */
	check_src = 0;
	switch (src->sa_family) {
#ifdef INET
	case AF_INET:
		if (asoc->scope.ipv4_addr_legal) {
			check_src = 1;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (asoc->scope.ipv6_addr_legal) {
			check_src = 1;
		}
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		if (asoc->scope.conn_addr_legal) {
			check_src = 1;
		}
		break;
#endif
	default:
		/* TSNH */
		break;
	}
	if (check_src) {
		/* Look for src among the association's known remote nets. */
		fnd = 0;
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			sa = (struct sockaddr *)&net->ro._l_addr;
			if (sa->sa_family == src->sa_family) {
#ifdef INET
				if (sa->sa_family == AF_INET) {
					struct sockaddr_in *src4;

					sa4 = (struct sockaddr_in *)sa;
					src4 = (struct sockaddr_in *)src;
					if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
						fnd = 1;
						break;
					}
				}
#endif
#ifdef INET6
				if (sa->sa_family == AF_INET6) {
					struct sockaddr_in6 *src6;

					sa6 = (struct sockaddr_in6 *)sa;
					src6 = (struct sockaddr_in6 *)src;
					if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
						fnd = 1;
						break;
					}
				}
#endif
#if defined(__Userspace__)
				if (sa->sa_family == AF_CONN) {
					struct sockaddr_conn *srcc;

					sac = (struct sockaddr_conn *)sa;
					srcc = (struct sockaddr_conn *)src;
					if (sac->sconn_addr == srcc->sconn_addr) {
						fnd = 1;
						break;
					}
				}
#endif
			}
		}
		if (fnd == 0) {
			/* New address added! no need to look further. */
			return (1);
		}
	}
	/* Ok so far lets munge through the rest of the packet */
	offset += sizeof(struct sctp_init_chunk);
	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
	while (phdr) {
		sa_touse = NULL;
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		switch (ptype) {
#ifdef INET
		case SCTP_IPV4_ADDRESS:
		{
			struct sctp_ipv4addr_param *p4, p4_buf;

			phdr = sctp_get_next_param(in_initpkt, offset,
			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
			if (plen != sizeof(struct sctp_ipv4addr_param) ||
			    phdr == NULL) {
				/* malformed parameter: drop the packet */
				return (1);
			}
			if (asoc->scope.ipv4_addr_legal) {
				p4 = (struct sctp_ipv4addr_param *)phdr;
				sin4.sin_addr.s_addr = p4->addr;
				sa_touse = (struct sockaddr *)&sin4;
			}
			break;
		}
#endif
#ifdef INET6
		case SCTP_IPV6_ADDRESS:
		{
			struct sctp_ipv6addr_param *p6, p6_buf;

			phdr = sctp_get_next_param(in_initpkt, offset,
			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
			if (plen != sizeof(struct sctp_ipv6addr_param) ||
			    phdr == NULL) {
				/* malformed parameter: drop the packet */
				return (1);
			}
			if (asoc->scope.ipv6_addr_legal) {
				p6 = (struct sctp_ipv6addr_param *)phdr;
				memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
				    sizeof(p6->addr));
				sa_touse = (struct sockaddr *)&sin6;
			}
			break;
		}
#endif
		default:
			/* non-address parameters are ignored here */
			sa_touse = NULL;
			break;
		}
		if (sa_touse) {
			/* ok, sa_touse points to one to check */
			fnd = 0;
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				sa = (struct sockaddr *)&net->ro._l_addr;
				if (sa->sa_family != sa_touse->sa_family) {
					continue;
				}
#ifdef INET
				if (sa->sa_family == AF_INET) {
					sa4 = (struct sockaddr_in *)sa;
					if (sa4->sin_addr.s_addr ==
					    sin4.sin_addr.s_addr) {
						fnd = 1;
						break;
					}
				}
#endif
#ifdef INET6
				if (sa->sa_family == AF_INET6) {
					sa6 = (struct sockaddr_in6 *)sa;
					if (SCTP6_ARE_ADDR_EQUAL(
					    sa6, &sin6)) {
						fnd = 1;
						break;
					}
				}
#endif
			}
			if (!fnd) {
				/* New addr added! no need to look further */
				return (1);
			}
		}
		offset += SCTP_SIZE32(plen);
		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
	}
	return (0);
}
5884
/*
 * Given an mbuf chain that was sent into us containing an INIT, build an
 * INIT-ACK with a COOKIE and send it back. We assume that the in_initpkt
 * has been pulled up to include the IPv6/IPv4 header, the SCTP common
 * header, and the initial part of the INIT message (i.e. the struct
 * sctp_init_msg).
 */
5891void
5892sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5893 struct sctp_nets *src_net, struct mbuf *init_pkt,
5894 int iphlen, int offset,
5895 struct sockaddr *src, struct sockaddr *dst,
5896 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5897#if defined(__FreeBSD__)
5898 uint8_t mflowtype, uint32_t mflowid,
5899#endif
5900 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5901{
5902 struct sctp_association *asoc;
5903 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5904 struct sctp_init_ack_chunk *initack;
5905 struct sctp_adaptation_layer_indication *ali;
5906 struct sctp_supported_chunk_types_param *pr_supported;
5907 struct sctp_paramhdr *ph;
5908 union sctp_sockstore *over_addr;
5909 struct sctp_scoping scp;
5910#ifdef INET
5911 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5912 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5913 struct sockaddr_in *sin;
5914#endif
5915#ifdef INET6
5916 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5917 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5918 struct sockaddr_in6 *sin6;
5919#endif
5920#if defined(__Userspace__)
5921 struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
5922 struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
5923 struct sockaddr_conn *sconn;
5924#endif
5925 struct sockaddr *to;
5926 struct sctp_state_cookie stc;
5927 struct sctp_nets *net = NULL;
5928 uint8_t *signature = NULL;
5929 int cnt_inits_to = 0;
5930 uint16_t his_limit, i_want;
5931 int abort_flag;
5932 int nat_friendly = 0;
5933 int error;
5934 struct socket *so;
5935 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5936
5937 if (stcb) {
5938 asoc = &stcb->asoc;
5939 } else {
5940 asoc = NULL;
5941 }
5942 if ((asoc != NULL) &&
5943 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT)) {
5944 if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5945 /*
5946 * new addresses, out of here in non-cookie-wait states
5947 *
5948 * Send an ABORT, without the new address error cause.
5949 * This looks no different than if no listener
5950 * was present.
5951 */
5952 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5953 "Address added");
5954 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5955#if defined(__FreeBSD__)
5956 mflowtype, mflowid, inp->fibnum,
5957#endif
5958 vrf_id, port);
5959 return;
5960 }
5961 if (src_net != NULL && (src_net->port != port)) {
5962 /*
5963 * change of remote encapsulation port, out of here in
5964 * non-cookie-wait states
5965 *
5966 * Send an ABORT, without an specific error cause.
5967 * This looks no different than if no listener
5968 * was present.
5969 */
5970 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5971 "Remote encapsulation port changed");
5972 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5973#if defined(__FreeBSD__)
5974 mflowtype, mflowid, inp->fibnum,
5975#endif
5976 vrf_id, port);
5977 return;
5978 }
5979 }
5980 abort_flag = 0;
5981 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5982 (offset + sizeof(struct sctp_init_chunk)),
5983 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5984 if (abort_flag) {
5985 do_a_abort:
5986 if (op_err == NULL) {
5987 char msg[SCTP_DIAG_INFO_LEN];
5988
5989 snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
5990 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5991 msg);
5992 }
5993 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5994 init_chk->init.initiate_tag, op_err,
5995#if defined(__FreeBSD__)
5996 mflowtype, mflowid, inp->fibnum,
5997#endif
5998 vrf_id, port);
5999 return;
6000 }
6001 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
6002 if (m == NULL) {
6003 /* No memory, INIT timer will re-attempt. */
6004 if (op_err)
6005 sctp_m_freem(op_err);
6006 return;
6007 }
6008 chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
6009 padding_len = 0;
6010
6011 /*
6012 * We might not overwrite the identification[] completely and on
6013 * some platforms time_entered will contain some padding.
6014 * Therefore zero out the cookie to avoid putting
6015 * uninitialized memory on the wire.
6016 */
6017 memset(&stc, 0, sizeof(struct sctp_state_cookie));
6018
6019 /* the time I built cookie */
6020 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
6021
6022 /* populate any tie tags */
6023 if (asoc != NULL) {
6024 /* unlock before tag selections */
6025 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
6026 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
6027 stc.cookie_life = asoc->cookie_life;
6028 net = asoc->primary_destination;
6029 } else {
6030 stc.tie_tag_my_vtag = 0;
6031 stc.tie_tag_peer_vtag = 0;
6032 /* life I will award this cookie */
6033 stc.cookie_life = inp->sctp_ep.def_cookie_life;
6034 }
6035
6036 /* copy in the ports for later check */
6037 stc.myport = sh->dest_port;
6038 stc.peerport = sh->src_port;
6039
6040 /*
6041 * If we wanted to honor cookie life extensions, we would add to
6042 * stc.cookie_life. For now we should NOT honor any extension
6043 */
6044 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
6045 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6046 stc.ipv6_addr_legal = 1;
6047 if (SCTP_IPV6_V6ONLY(inp)) {
6048 stc.ipv4_addr_legal = 0;
6049 } else {
6050 stc.ipv4_addr_legal = 1;
6051 }
6052#if defined(__Userspace__)
6053 stc.conn_addr_legal = 0;
6054#endif
6055 } else {
6056 stc.ipv6_addr_legal = 0;
6057#if defined(__Userspace__)
6058 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
6059 stc.conn_addr_legal = 1;
6060 stc.ipv4_addr_legal = 0;
6061 } else {
6062 stc.conn_addr_legal = 0;
6063 stc.ipv4_addr_legal = 1;
6064 }
6065#else
6066 stc.ipv4_addr_legal = 1;
6067#endif
6068 }
6069 stc.ipv4_scope = 0;
6070 if (net == NULL) {
6071 to = src;
6072 switch (dst->sa_family) {
6073#ifdef INET
6074 case AF_INET:
6075 {
6076 /* lookup address */
6077 stc.address[0] = src4->sin_addr.s_addr;
6078 stc.address[1] = 0;
6079 stc.address[2] = 0;
6080 stc.address[3] = 0;
6081 stc.addr_type = SCTP_IPV4_ADDRESS;
6082 /* local from address */
6083 stc.laddress[0] = dst4->sin_addr.s_addr;
6084 stc.laddress[1] = 0;
6085 stc.laddress[2] = 0;
6086 stc.laddress[3] = 0;
6087 stc.laddr_type = SCTP_IPV4_ADDRESS;
6088 /* scope_id is only for v6 */
6089 stc.scope_id = 0;
6090 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
6091 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))){
6092 stc.ipv4_scope = 1;
6093 }
6094 /* Must use the address in this case */
6095 if (sctp_is_address_on_local_host(src, vrf_id)) {
6096 stc.loopback_scope = 1;
6097 stc.ipv4_scope = 1;
6098 stc.site_scope = 1;
6099 stc.local_scope = 0;
6100 }
6101 break;
6102 }
6103#endif
6104#ifdef INET6
6105 case AF_INET6:
6106 {
6107 stc.addr_type = SCTP_IPV6_ADDRESS;
6108 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
6109#if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
6110 stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
6111#else
6112 stc.scope_id = 0;
6113#endif
6114 if (sctp_is_address_on_local_host(src, vrf_id)) {
6115 stc.loopback_scope = 1;
6116 stc.local_scope = 0;
6117 stc.site_scope = 1;
6118 stc.ipv4_scope = 1;
6119 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
6120 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
6121 /*
6122 * If the new destination or source is a
6123 * LINK_LOCAL we must have common both site and
6124 * local scope. Don't set local scope though
6125 * since we must depend on the source to be
6126 * added implicitly. We cannot assure just
6127 * because we share one link that all links are
6128 * common.
6129 */
6130#if defined(__APPLE__)
6131 /* Mac OS X currently doesn't have in6_getscope() */
6132 stc.scope_id = src6->sin6_addr.s6_addr16[1];
6133#endif
6134 stc.local_scope = 0;
6135 stc.site_scope = 1;
6136 stc.ipv4_scope = 1;
6137 /*
6138 * we start counting for the private address
6139 * stuff at 1. since the link local we
6140 * source from won't show up in our scoped
6141 * count.
6142 */
6143 cnt_inits_to = 1;
6144 /* pull out the scope_id from incoming pkt */
6145 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
6146 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
6147 /*
6148 * If the new destination or source is
6149 * SITE_LOCAL then we must have site scope in
6150 * common.
6151 */
6152 stc.site_scope = 1;
6153 }
6154 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
6155 stc.laddr_type = SCTP_IPV6_ADDRESS;
6156 break;
6157 }
6158#endif
6159#if defined(__Userspace__)
6160 case AF_CONN:
6161 {
6162 /* lookup address */
6163 stc.address[0] = 0;
6164 stc.address[1] = 0;
6165 stc.address[2] = 0;
6166 stc.address[3] = 0;
6167 memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
6168 stc.addr_type = SCTP_CONN_ADDRESS;
6169 /* local from address */
6170 stc.laddress[0] = 0;
6171 stc.laddress[1] = 0;
6172 stc.laddress[2] = 0;
6173 stc.laddress[3] = 0;
6174 memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
6175 stc.laddr_type = SCTP_CONN_ADDRESS;
6176 /* scope_id is only for v6 */
6177 stc.scope_id = 0;
6178 break;
6179 }
6180#endif
6181 default:
6182 /* TSNH */
6183 goto do_a_abort;
6184 break;
6185 }
6186 } else {
6187 /* set the scope per the existing tcb */
6188
6189#ifdef INET6
6190 struct sctp_nets *lnet;
6191#endif
6192
6193 stc.loopback_scope = asoc->scope.loopback_scope;
6194 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
6195 stc.site_scope = asoc->scope.site_scope;
6196 stc.local_scope = asoc->scope.local_scope;
6197#ifdef INET6
6198 /* Why do we not consider IPv4 LL addresses? */
6199 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
6200 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
6201 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
6202 /*
6203 * if we have a LL address, start
6204 * counting at 1.
6205 */
6206 cnt_inits_to = 1;
6207 }
6208 }
6209 }
6210#endif
6211 /* use the net pointer */
6212 to = (struct sockaddr *)&net->ro._l_addr;
6213 switch (to->sa_family) {
6214#ifdef INET
6215 case AF_INET:
6216 sin = (struct sockaddr_in *)to;
6217 stc.address[0] = sin->sin_addr.s_addr;
6218 stc.address[1] = 0;
6219 stc.address[2] = 0;
6220 stc.address[3] = 0;
6221 stc.addr_type = SCTP_IPV4_ADDRESS;
6222 if (net->src_addr_selected == 0) {
6223 /*
6224 * strange case here, the INIT should have
6225 * did the selection.
6226 */
6227 net->ro._s_addr = sctp_source_address_selection(inp,
6228 stcb, (sctp_route_t *)&net->ro,
6229 net, 0, vrf_id);
6230 if (net->ro._s_addr == NULL)
6231 return;
6232
6233 net->src_addr_selected = 1;
6234
6235 }
6236 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
6237 stc.laddress[1] = 0;
6238 stc.laddress[2] = 0;
6239 stc.laddress[3] = 0;
6240 stc.laddr_type = SCTP_IPV4_ADDRESS;
6241 /* scope_id is only for v6 */
6242 stc.scope_id = 0;
6243 break;
6244#endif
6245#ifdef INET6
6246 case AF_INET6:
6247 sin6 = (struct sockaddr_in6 *)to;
6248 memcpy(&stc.address, &sin6->sin6_addr,
6249 sizeof(struct in6_addr));
6250 stc.addr_type = SCTP_IPV6_ADDRESS;
6251 stc.scope_id = sin6->sin6_scope_id;
6252 if (net->src_addr_selected == 0) {
6253 /*
6254 * strange case here, the INIT should have
6255 * done the selection.
6256 */
6257 net->ro._s_addr = sctp_source_address_selection(inp,
6258 stcb, (sctp_route_t *)&net->ro,
6259 net, 0, vrf_id);
6260 if (net->ro._s_addr == NULL)
6261 return;
6262
6263 net->src_addr_selected = 1;
6264 }
6265 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
6266 sizeof(struct in6_addr));
6267 stc.laddr_type = SCTP_IPV6_ADDRESS;
6268 break;
6269#endif
6270#if defined(__Userspace__)
6271 case AF_CONN:
6272 sconn = (struct sockaddr_conn *)to;
6273 stc.address[0] = 0;
6274 stc.address[1] = 0;
6275 stc.address[2] = 0;
6276 stc.address[3] = 0;
6277 memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
6278 stc.addr_type = SCTP_CONN_ADDRESS;
6279 stc.laddress[0] = 0;
6280 stc.laddress[1] = 0;
6281 stc.laddress[2] = 0;
6282 stc.laddress[3] = 0;
6283 memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
6284 stc.laddr_type = SCTP_CONN_ADDRESS;
6285 stc.scope_id = 0;
6286 break;
6287#endif
6288 }
6289 }
6290 /* Now lets put the SCTP header in place */
6291 initack = mtod(m, struct sctp_init_ack_chunk *);
6292 /* Save it off for quick ref */
6293 stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
6294 /* who are we */
6295 memcpy(stc.identification, SCTP_VERSION_STRING,
6296 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
6297 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
6298 /* now the chunk header */
6299 initack->ch.chunk_type = SCTP_INITIATION_ACK;
6300 initack->ch.chunk_flags = 0;
6301 /* fill in later from mbuf we build */
6302 initack->ch.chunk_length = 0;
6303 /* place in my tag */
6304 if ((asoc != NULL) &&
6305 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
6306 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
6307 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
6308 /* re-use the v-tags and init-seq here */
6309 initack->init.initiate_tag = htonl(asoc->my_vtag);
6310 initack->init.initial_tsn = htonl(asoc->init_seq_number);
6311 } else {
6312 uint32_t vtag, itsn;
6313 if (hold_inp_lock) {
6314 SCTP_INP_INCR_REF(inp);
6315 SCTP_INP_RUNLOCK(inp);
6316 }
6317 if (asoc) {
6318 atomic_add_int(&asoc->refcnt, 1);
6319 SCTP_TCB_UNLOCK(stcb);
6320 new_tag:
6321 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6322 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
6323 /* Got a duplicate vtag on some guy behind a nat
6324 * make sure we don't use it.
6325 */
6326 goto new_tag;
6327 }
6328 initack->init.initiate_tag = htonl(vtag);
6329 /* get a TSN to use too */
6330 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
6331 initack->init.initial_tsn = htonl(itsn);
6332 SCTP_TCB_LOCK(stcb);
6333 atomic_add_int(&asoc->refcnt, -1);
6334 } else {
6335 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
6336 initack->init.initiate_tag = htonl(vtag);
6337 /* get a TSN to use too */
6338 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
6339 }
6340 if (hold_inp_lock) {
6341 SCTP_INP_RLOCK(inp);
6342 SCTP_INP_DECR_REF(inp);
6343 }
6344 }
6345 /* save away my tag to */
6346 stc.my_vtag = initack->init.initiate_tag;
6347
6348 /* set up some of the credits. */
6349 so = inp->sctp_socket;
6350 if (so == NULL) {
6351 /* memory problem */
6352 sctp_m_freem(m);
6353 return;
6354 } else {
6355 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
6356 }
6357 /* set what I want */
6358 his_limit = ntohs(init_chk->init.num_inbound_streams);
6359 /* choose what I want */
6360 if (asoc != NULL) {
6361 if (asoc->streamoutcnt > asoc->pre_open_streams) {
6362 i_want = asoc->streamoutcnt;
6363 } else {
6364 i_want = asoc->pre_open_streams;
6365 }
6366 } else {
6367 i_want = inp->sctp_ep.pre_open_stream_count;
6368 }
6369 if (his_limit < i_want) {
6370 /* I Want more :< */
6371 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
6372 } else {
6373 /* I can have what I want :> */
6374 initack->init.num_outbound_streams = htons(i_want);
6375 }
6376 /* tell him his limit. */
6377 initack->init.num_inbound_streams =
6378 htons(inp->sctp_ep.max_open_streams_intome);
6379
6380 /* adaptation layer indication parameter */
6381 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
6382 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
6383 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
6384 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
6385 ali->ph.param_length = htons(parameter_len);
6386 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
6387 chunk_len += parameter_len;
6388 }
6389
6390 /* ECN parameter */
6391 if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
6392 ((asoc == NULL) && (inp->ecn_supported == 1))) {
6393 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6394 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6395 ph->param_type = htons(SCTP_ECN_CAPABLE);
6396 ph->param_length = htons(parameter_len);
6397 chunk_len += parameter_len;
6398 }
6399
6400 /* PR-SCTP supported parameter */
6401 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6402 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6403 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6404 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6405 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
6406 ph->param_length = htons(parameter_len);
6407 chunk_len += parameter_len;
6408 }
6409
6410 /* Add NAT friendly parameter */
6411 if (nat_friendly) {
6412 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
6413 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
6414 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
6415 ph->param_length = htons(parameter_len);
6416 chunk_len += parameter_len;
6417 }
6418
6419 /* And now tell the peer which extensions we support */
6420 num_ext = 0;
6421 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
6422 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
6423 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
6424 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
6425 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6426 ((asoc == NULL) && (inp->idata_supported == 1))) {
6427 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
6428 }
6429 }
6430 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6431 ((asoc == NULL) && (inp->auth_supported == 1))) {
6432 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
6433 }
6434 if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
6435 ((asoc == NULL) && (inp->asconf_supported == 1))) {
6436 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
6437 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
6438 }
6439 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
6440 ((asoc == NULL) && (inp->reconfig_supported == 1))) {
6441 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
6442 }
6443 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
6444 ((asoc == NULL) && (inp->idata_supported == 1))) {
6445 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
6446 }
6447 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
6448 ((asoc == NULL) && (inp->nrsack_supported == 1))) {
6449 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6450 }
6451 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
6452 ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
6453 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6454 }
6455 if (num_ext > 0) {
6456 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
6457 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6458 pr_supported->ph.param_length = htons(parameter_len);
6459 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6460 chunk_len += parameter_len;
6461 }
6462
6463 /* add authentication parameters */
6464 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6465 ((asoc == NULL) && (inp->auth_supported == 1))) {
6466 struct sctp_auth_random *randp;
6467 struct sctp_auth_hmac_algo *hmacs;
6468 struct sctp_auth_chunk_list *chunks;
6469
6470 if (padding_len > 0) {
6471 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6472 chunk_len += padding_len;
6473 padding_len = 0;
6474 }
6475 /* generate and add RANDOM parameter */
6476 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
6477 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6478 SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6479 randp->ph.param_type = htons(SCTP_RANDOM);
6480 randp->ph.param_length = htons(parameter_len);
6481 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6482 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6483 chunk_len += parameter_len;
6484
6485 if (padding_len > 0) {
6486 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6487 chunk_len += padding_len;
6488 padding_len = 0;
6489 }
6490 /* add HMAC_ALGO parameter */
6491 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
6492 parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6493 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6494 (uint8_t *)hmacs->hmac_ids);
6495 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6496 hmacs->ph.param_length = htons(parameter_len);
6497 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6498 chunk_len += parameter_len;
6499
6500 if (padding_len > 0) {
6501 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6502 chunk_len += padding_len;
6503 padding_len = 0;
6504 }
6505 /* add CHUNKS parameter */
6506 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
6507 parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6508 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6509 chunks->chunk_types);
6510 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6511 chunks->ph.param_length = htons(parameter_len);
6512 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6513 chunk_len += parameter_len;
6514 }
6515 SCTP_BUF_LEN(m) = chunk_len;
6516 m_last = m;
6517 /* now the addresses */
6518 /* To optimize this we could put the scoping stuff
6519 * into a structure and remove the individual uint8's from
6520 * the stc structure. Then we could just sifa in the
6521 * address within the stc.. but for now this is a quick
6522 * hack to get the address stuff teased apart.
6523 */
6524 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6525 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6526#if defined(__Userspace__)
6527 scp.conn_addr_legal = stc.conn_addr_legal;
6528#endif
6529 scp.loopback_scope = stc.loopback_scope;
6530 scp.ipv4_local_scope = stc.ipv4_scope;
6531 scp.local_scope = stc.local_scope;
6532 scp.site_scope = stc.site_scope;
6533 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6534 cnt_inits_to,
6535 &padding_len, &chunk_len);
6536 /* padding_len can only be positive, if no addresses have been added */
6537 if (padding_len > 0) {
6538 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
6539 chunk_len += padding_len;
6540 SCTP_BUF_LEN(m) += padding_len;
6541 padding_len = 0;
6542 }
6543
6544 /* tack on the operational error if present */
6545 if (op_err) {
6546 parameter_len = 0;
6547 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6548 parameter_len += SCTP_BUF_LEN(m_tmp);
6549 }
6550 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6551 SCTP_BUF_NEXT(m_last) = op_err;
6552 while (SCTP_BUF_NEXT(m_last) != NULL) {
6553 m_last = SCTP_BUF_NEXT(m_last);
6554 }
6555 chunk_len += parameter_len;
6556 }
6557 if (padding_len > 0) {
6558 m_last = sctp_add_pad_tombuf(m_last, padding_len);
6559 if (m_last == NULL) {
6560 /* Houston we have a problem, no space */
6561 sctp_m_freem(m);
6562 return;
6563 }
6564 chunk_len += padding_len;
6565 padding_len = 0;
6566 }
6567 /* Now we must build a cookie */
6568 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6569 if (m_cookie == NULL) {
6570 /* memory problem */
6571 sctp_m_freem(m);
6572 return;
6573 }
6574 /* Now append the cookie to the end and update the space/size */
6575 SCTP_BUF_NEXT(m_last) = m_cookie;
6576 parameter_len = 0;
6577 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6578 parameter_len += SCTP_BUF_LEN(m_tmp);
6579 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6580 m_last = m_tmp;
6581 }
6582 }
6583 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6584 chunk_len += parameter_len;
6585
6586 /* Place in the size, but we don't include
6587 * the last pad (if any) in the INIT-ACK.
6588 */
6589 initack->ch.chunk_length = htons(chunk_len);
6590
6591 /* Time to sign the cookie, we don't sign over the cookie
6592 * signature though thus we set trailer.
6593 */
6594 (void)sctp_hmac_m(SCTP_HMAC,
6595 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6596 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6597 (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
6598 /*
6599 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return
6600 * here since the timer will drive a retranmission.
6601 */
6602 if (padding_len > 0) {
6603 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6604 sctp_m_freem(m);
6605 return;
6606 }
6607 }
6608 if (stc.loopback_scope) {
6609 over_addr = (union sctp_sockstore *)dst;
6610 } else {
6611 over_addr = NULL;
6612 }
6613
6614 if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6615 0, 0,
6616 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6617 port, over_addr,
6618#if defined(__FreeBSD__)
6619 mflowtype, mflowid,
6620#endif
6621 SCTP_SO_NOT_LOCKED))) {
6622 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6623 if (error == ENOBUFS) {
6624 if (asoc != NULL) {
6625 asoc->ifp_had_enobuf = 1;
6626 }
6627 SCTP_STAT_INCR(sctps_lowlevelerr);
6628 }
6629 } else {
6630 if (asoc != NULL) {
6631 asoc->ifp_had_enobuf = 0;
6632 }
6633 }
6634 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6635}
6636
6637
6638static void
6639sctp_prune_prsctp(struct sctp_tcb *stcb,
6640 struct sctp_association *asoc,
6641 struct sctp_sndrcvinfo *srcv,
6642 int dataout)
6643{
6644 int freed_spc = 0;
6645 struct sctp_tmit_chunk *chk, *nchk;
6646
6647 SCTP_TCB_LOCK_ASSERT(stcb);
6648 if ((asoc->prsctp_supported) &&
6649 (asoc->sent_queue_cnt_removeable > 0)) {
6650 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6651 /*
6652 * Look for chunks marked with the PR_SCTP flag AND
6653 * the buffer space flag. If the one being sent is
6654 * equal or greater priority then purge the old one
6655 * and free some space.
6656 */
6657 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6658 /*
6659 * This one is PR-SCTP AND buffer space
6660 * limited type
6661 */
6662 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6663 /*
6664 * Lower numbers equates to higher
6665 * priority so if the one we are
6666 * looking at has a larger or equal
6667 * priority we want to drop the data
6668 * and NOT retransmit it.
6669 */
6670 if (chk->data) {
6671 /*
6672 * We release the book_size
6673 * if the mbuf is here
6674 */
6675 int ret_spc;
6676 uint8_t sent;
6677
6678 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6679 sent = 1;
6680 else
6681 sent = 0;
6682 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6683 sent,
6684 SCTP_SO_LOCKED);
6685 freed_spc += ret_spc;
6686 if (freed_spc >= dataout) {
6687 return;
6688 }
6689 } /* if chunk was present */
6690 } /* if of sufficient priority */
6691 } /* if chunk has enabled */
6692 } /* tailqforeach */
6693
6694 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6695 /* Here we must move to the sent queue and mark */
6696 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6697 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6698 if (chk->data) {
6699 /*
6700 * We release the book_size
6701 * if the mbuf is here
6702 */
6703 int ret_spc;
6704
6705 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6706 0, SCTP_SO_LOCKED);
6707
6708 freed_spc += ret_spc;
6709 if (freed_spc >= dataout) {
6710 return;
6711 }
6712 } /* end if chk->data */
6713 } /* end if right class */
6714 } /* end if chk pr-sctp */
6715 } /* tailqforeachsafe (chk) */
6716 } /* if enabled in asoc */
6717}
6718
6719int
6720sctp_get_frag_point(struct sctp_tcb *stcb,
6721 struct sctp_association *asoc)
6722{
6723 int siz, ovh;
6724
6725 /*
6726 * For endpoints that have both v6 and v4 addresses we must reserve
6727 * room for the ipv6 header, for those that are only dealing with V4
6728 * we use a larger frag point.
6729 */
6730 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6731 ovh = SCTP_MIN_OVERHEAD;
6732 } else {
6733 ovh = SCTP_MIN_V4_OVERHEAD;
6734 }
6735 if (stcb->asoc.idata_supported) {
6736 ovh += sizeof(struct sctp_idata_chunk);
6737 } else {
6738 ovh += sizeof(struct sctp_data_chunk);
6739 }
6740 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6741 siz = asoc->smallest_mtu - ovh;
6742 else
6743 siz = (stcb->asoc.sctp_frag_point - ovh);
6744 /*
6745 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6746 */
6747 /* A data chunk MUST fit in a cluster */
6748 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6749 /* } */
6750
6751 /* adjust for an AUTH chunk if DATA requires auth */
6752 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6753 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6754
6755 if (siz % 4) {
6756 /* make it an even word boundary please */
6757 siz -= (siz % 4);
6758 }
6759 return (siz);
6760}
6761
6762static void
6763sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6764{
6765 /*
6766 * We assume that the user wants PR_SCTP_TTL if the user
6767 * provides a positive lifetime but does not specify any
6768 * PR_SCTP policy.
6769 */
6770 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6771 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6772 } else if (sp->timetolive > 0) {
6773 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6774 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6775 } else {
6776 return;
6777 }
6778 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6779 case CHUNK_FLAGS_PR_SCTP_BUF:
6780 /*
6781 * Time to live is a priority stored in tv_sec when
6782 * doing the buffer drop thing.
6783 */
6784 sp->ts.tv_sec = sp->timetolive;
6785 sp->ts.tv_usec = 0;
6786 break;
6787 case CHUNK_FLAGS_PR_SCTP_TTL:
6788 {
6789 struct timeval tv;
6790 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6791 tv.tv_sec = sp->timetolive / 1000;
6792 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6793 /* TODO sctp_constants.h needs alternative time macros when
6794 * _KERNEL is undefined.
6795 */
6796#ifndef __FreeBSD__
6797 timeradd(&sp->ts, &tv, &sp->ts);
6798#else
6799 timevaladd(&sp->ts, &tv);
6800#endif
6801 }
6802 break;
6803 case CHUNK_FLAGS_PR_SCTP_RTX:
6804 /*
6805 * Time to live is a the number or retransmissions
6806 * stored in tv_sec.
6807 */
6808 sp->ts.tv_sec = sp->timetolive;
6809 sp->ts.tv_usec = 0;
6810 break;
6811 default:
6812 SCTPDBG(SCTP_DEBUG_USRREQ1,
6813 "Unknown PR_SCTP policy %u.\n",
6814 PR_SCTP_POLICY(sp->sinfo_flags));
6815 break;
6816 }
6817}
6818
/*
 * Append the user message in mbuf chain "m" to the outbound stream
 * queue selected by srcv->sinfo_stream and notify the stream
 * scheduler.  On success ownership of the chain passes to the queued
 * item; on any error the chain is freed here.  Returns 0 or an errno
 * value (EINVAL, ECONNRESET, ENOMEM).  If hold_stcb_lock is non-zero
 * the caller already holds the TCB send lock; otherwise it is taken
 * around the queue insertion.
 */
static int
sctp_msg_append(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct mbuf *m,
    struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
{
	int error = 0;
	struct mbuf *at;
	struct sctp_stream_queue_pending *sp = NULL;
	struct sctp_stream_out *strm;

	/* Given an mbuf chain, put it
	 * into the association send queue and
	 * place it on the wheel
	 */
	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
		/* Invalid stream number */
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	/*
	 * NOTE(review): another stream appears to hold the association's
	 * "stream lock" (presumably an in-progress multi-part message) —
	 * interleaving a different stream is rejected.  Confirm the
	 * stream_locked semantics against the rest of the stack.
	 */
	if ((stcb->asoc.stream_locked) &&
	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
	/* Now can we send this? */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
		error = ECONNRESET;
		goto out_now;
	}
	/* Allocate the pending-queue element (macro fills in sp). */
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		error = ENOMEM;
		goto out_now;
	}
	/* Copy the send metadata from the caller's sndrcvinfo. */
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->fsn = 0;
	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
		/* Caller pinned a destination; take a reference on it. */
		sp->net = net;
		atomic_add_int(&sp->net->ref_count, 1);
	} else {
		sp->net = NULL;
	}
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
	sp->sid = srcv->sinfo_stream;
	/* The whole message is queued in one shot. */
	sp->msg_is_complete = 1;
	sp->sender_all_done = 1;
	sp->some_taken = 0;
	sp->data = m;
	sp->tail_mbuf = NULL;
	sctp_set_prsctp_policy(sp);
	/* We could in theory (for sendall) sifa the length
	 * in, but we would still have to hunt through the
	 * chain since we need to setup the tail_mbuf
	 */
	sp->length = 0;
	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
		if (SCTP_BUF_NEXT(at) == NULL)
			sp->tail_mbuf = at;
		sp->length += SCTP_BUF_LEN(at);
	}
	if (srcv->sinfo_keynumber_valid) {
		sp->auth_keyid = srcv->sinfo_keynumber;
	} else {
		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
	}
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
		/* DATA must be authenticated: hold the key while queued. */
		sctp_auth_key_acquire(stcb, sp->auth_keyid);
		sp->holds_key_ref = 1;
	}
	if (hold_stcb_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* Charge the send buffer and enqueue under the send lock. */
	sctp_snd_sb_alloc(stcb, sp->length);
	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
	/* Ownership of the chain moved to sp; don't free it below. */
	m = NULL;
	if (hold_stcb_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
out_now:
	if (m) {
		sctp_m_freem(m);
	}
	return (error);
}
6918
6919
/*
 * Append (a copy of) "clonechain" to "outchain", keeping *endofchain
 * pointing at the last mbuf of the combined chain so repeated appends
 * avoid re-walking the list.
 *
 *   clonechain    - source data; linked in directly when can_take_mbuf
 *                   is set, otherwise copied and left untouched.
 *   outchain      - existing chain to append to, may be NULL.
 *   endofchain    - in/out tail cache; must not be NULL.
 *   can_take_mbuf - if set, take ownership of clonechain by reference.
 *   sizeofcpy     - byte count to copy in the small-data flat-copy path.
 *   copy_by_ref   - if set, skip the flat copy and use SCTP_M_COPYM.
 *
 * Returns the head of the combined chain, or NULL on failure (in which
 * case outchain has been freed).
 */
static struct mbuf *
sctp_copy_mbufchain(struct mbuf *clonechain,
    struct mbuf *outchain,
    struct mbuf **endofchain,
    int can_take_mbuf,
    int sizeofcpy,
    uint8_t copy_by_ref)
{
	struct mbuf *m;
	struct mbuf *appendchain;
	caddr_t cp;
	int len;

	if (endofchain == NULL) {
		/* error */
	error_out:
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (can_take_mbuf) {
		/* Caller gives up ownership: link the chain in as-is. */
		appendchain = clonechain;
	} else {
		/*
		 * Small messages (below the mbuf threshold) are copied
		 * flat into the tail mbuf's trailing space instead of
		 * duplicating the whole chain.
		 */
		if (!copy_by_ref &&
#if defined(__Panda__)
		    0
#else
		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
#endif
		    ) {
			/* Its not in a cluster */
			if (*endofchain == NULL) {
				/* lets get a mbuf cluster */
				if (outchain == NULL) {
					/* This is the general case */
				new_mbuf:
					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
					if (outchain == NULL) {
						goto error_out;
					}
					SCTP_BUF_LEN(outchain) = 0;
					*endofchain = outchain;
					/* get the prepend space */
					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
				} else {
					/* We really should not get a NULL in endofchain */
					/* find end */
					m = outchain;
					while (m) {
						if (SCTP_BUF_NEXT(m) == NULL) {
							*endofchain = m;
							break;
						}
						m = SCTP_BUF_NEXT(m);
					}
					/* sanity */
					if (*endofchain == NULL) {
						/* huh, TSNH XXX maybe we should panic */
						sctp_m_freem(outchain);
						goto new_mbuf;
					}
				}
				/* get the new end of length */
				len = (int)M_TRAILINGSPACE(*endofchain);
			} else {
				/* how much is left at the end? */
				len = (int)M_TRAILINGSPACE(*endofchain);
			}
			/* Find the end of the data, for appending */
			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));

			/* Now lets copy it out */
			if (len >= sizeofcpy) {
				/* It all fits, copy it in */
				m_copydata(clonechain, 0, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			} else {
				/* fill up the end of the chain */
				if (len > 0) {
					m_copydata(clonechain, 0, len, cp);
					SCTP_BUF_LEN((*endofchain)) += len;
					/* now we need another one */
					sizeofcpy -= len;
				}
				/* Spill the remainder into a fresh mbuf. */
				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
				if (m == NULL) {
					/* We failed */
					goto error_out;
				}
				SCTP_BUF_NEXT((*endofchain)) = m;
				*endofchain = m;
				cp = mtod((*endofchain), caddr_t);
				m_copydata(clonechain, len, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			}
			return (outchain);
		} else {
			/* copy the old fashion way */
			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
#ifdef SCTP_MBUF_LOGGING
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
				sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
			}
#endif
		}
	}
	if (appendchain == NULL) {
		/* error */
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (outchain) {
		/* tack on to the end */
		if (*endofchain != NULL) {
			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
		} else {
			/* Tail cache empty: walk to the end to link up. */
			m = outchain;
			while (m) {
				if (SCTP_BUF_NEXT(m) == NULL) {
					SCTP_BUF_NEXT(m) = appendchain;
					break;
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		/*
		 * save off the end and update the end-chain
		 * position
		 */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (outchain);
	} else {
		/* save off the end and update the end-chain position */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (appendchain);
	}
}
7072
7073static int
7074sctp_med_chunk_output(struct sctp_inpcb *inp,
7075 struct sctp_tcb *stcb,
7076 struct sctp_association *asoc,
7077 int *num_out,
7078 int *reason_code,
7079 int control_only, int from_where,
7080 struct timeval *now, int *now_filled, int frag_point, int so_locked
7081#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7082 SCTP_UNUSED
7083#endif
7084 );
7085
7086static void
7087sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
7088 uint32_t val SCTP_UNUSED)
7089{
7090 struct sctp_copy_all *ca;
7091 struct mbuf *m;
7092 int ret = 0;
7093 int added_control = 0;
7094 int un_sent, do_chunk_output = 1;
7095 struct sctp_association *asoc;
7096 struct sctp_nets *net;
7097
7098 ca = (struct sctp_copy_all *)ptr;
7099 if (ca->m == NULL) {
7100 return;
7101 }
7102 if (ca->inp != inp) {
7103 /* TSNH */
7104 return;
7105 }
7106 if (ca->sndlen > 0) {
7107 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
7108 if (m == NULL) {
7109 /* can't copy so we are done */
7110 ca->cnt_failed++;
7111 return;
7112 }
7113#ifdef SCTP_MBUF_LOGGING
7114 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7115 sctp_log_mbc(m, SCTP_MBUF_ICOPY);
7116 }
7117#endif
7118 } else {
7119 m = NULL;
7120 }
7121 SCTP_TCB_LOCK_ASSERT(stcb);
7122 if (stcb->asoc.alternate) {
7123 net = stcb->asoc.alternate;
7124 } else {
7125 net = stcb->asoc.primary_destination;
7126 }
7127 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
7128 /* Abort this assoc with m as the user defined reason */
7129 if (m != NULL) {
7130 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
7131 } else {
7132 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
7133 0, M_NOWAIT, 1, MT_DATA);
7134 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
7135 }
7136 if (m != NULL) {
7137 struct sctp_paramhdr *ph;
7138
7139 ph = mtod(m, struct sctp_paramhdr *);
7140 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7141 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
7142 }
7143 /* We add one here to keep the assoc from
7144 * dis-appearing on us.
7145 */
7146 atomic_add_int(&stcb->asoc.refcnt, 1);
7147 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
7148 /* sctp_abort_an_association calls sctp_free_asoc()
7149 * free association will NOT free it since we
7150 * incremented the refcnt .. we do this to prevent
7151 * it being freed and things getting tricky since
7152 * we could end up (from free_asoc) calling inpcb_free
7153 * which would get a recursive lock call to the
7154 * iterator lock.. But as a consequence of that the
7155 * stcb will return to us un-locked.. since free_asoc
7156 * returns with either no TCB or the TCB unlocked, we
7157 * must relock.. to unlock in the iterator timer :-0
7158 */
7159 SCTP_TCB_LOCK(stcb);
7160 atomic_add_int(&stcb->asoc.refcnt, -1);
7161 goto no_chunk_output;
7162 } else {
7163 if (m) {
7164 ret = sctp_msg_append(stcb, net, m,
7165 &ca->sndrcv, 1);
7166 }
7167 asoc = &stcb->asoc;
7168 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
7169 /* shutdown this assoc */
7170 if (TAILQ_EMPTY(&asoc->send_queue) &&
7171 TAILQ_EMPTY(&asoc->sent_queue) &&
7172 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
7173 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7174 goto abort_anyway;
7175 }
7176 /* there is nothing queued to send, so I'm done... */
7177 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
7178 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7179 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7180 /* only send SHUTDOWN the first time through */
7181 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
7182 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7183 }
7184 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
7185 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
7186 sctp_stop_timers_for_shutdown(stcb);
7187 sctp_send_shutdown(stcb, net);
7188 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
7189 net);
7190 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7191 asoc->primary_destination);
7192 added_control = 1;
7193 do_chunk_output = 0;
7194 }
7195 } else {
7196 /*
7197 * we still got (or just got) data to send, so set
7198 * SHUTDOWN_PENDING
7199 */
7200 /*
7201 * XXX sockets draft says that SCTP_EOF should be
7202 * sent with no data. currently, we will allow user
7203 * data to be sent first and move to
7204 * SHUTDOWN-PENDING
7205 */
7206 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
7207 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
7208 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7209 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
7210 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
7211 }
7212 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
7213 if (TAILQ_EMPTY(&asoc->send_queue) &&
7214 TAILQ_EMPTY(&asoc->sent_queue) &&
7215 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
7216 struct mbuf *op_err;
7217 char msg[SCTP_DIAG_INFO_LEN];
7218
7219 abort_anyway:
7220 snprintf(msg, sizeof(msg),
7221 "%s:%d at %s", __FILE__, __LINE__, __func__);
7222 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
7223 msg);
7224 atomic_add_int(&stcb->asoc.refcnt, 1);
7225 sctp_abort_an_association(stcb->sctp_ep, stcb,
7226 op_err, SCTP_SO_NOT_LOCKED);
7227 atomic_add_int(&stcb->asoc.refcnt, -1);
7228 goto no_chunk_output;
7229 }
7230 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
7231 asoc->primary_destination);
7232 }
7233 }
7234
7235 }
7236 }
7237 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7238 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
7239
7240 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
7241 (stcb->asoc.total_flight > 0) &&
7242 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
7243 do_chunk_output = 0;
7244 }
7245 if (do_chunk_output)
7246 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
7247 else if (added_control) {
7248 int num_out, reason, now_filled = 0;
7249 struct timeval now;
7250 int frag_point;
7251
7252 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
7253 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
7254 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
7255 }
7256 no_chunk_output:
7257 if (ret) {
7258 ca->cnt_failed++;
7259 } else {
7260 ca->cnt_sent++;
7261 }
7262}
7263
7264static void
7265sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
7266{
7267 struct sctp_copy_all *ca;
7268
7269 ca = (struct sctp_copy_all *)ptr;
7270 /*
7271 * Do a notify here? Kacheong suggests that the notify be done at
7272 * the send time.. so you would push up a notification if any send
7273 * failed. Don't know if this is feasible since the only failures we
7274 * have is "memory" related and if you cannot get an mbuf to send
7275 * the data you surely can't get an mbuf to send up to notify the
7276 * user you can't send the data :->
7277 */
7278
7279 /* now free everything */
7280 sctp_m_freem(ca->m);
7281 SCTP_FREE(ca, SCTP_M_COPYAL);
7282}
7283
7284static struct mbuf *
7285sctp_copy_out_all(struct uio *uio, int len)
7286{
7287 struct mbuf *ret, *at;
7288 int left, willcpy, cancpy, error;
7289
7290 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
7291 if (ret == NULL) {
7292 /* TSNH */
7293 return (NULL);
7294 }
7295 left = len;
7296 SCTP_BUF_LEN(ret) = 0;
7297 /* save space for the data chunk header */
7298 cancpy = (int)M_TRAILINGSPACE(ret);
7299 willcpy = min(cancpy, left);
7300 at = ret;
7301 while (left > 0) {
7302 /* Align data to the end */
7303 error = uiomove(mtod(at, caddr_t), willcpy, uio);
7304 if (error) {
7305 err_out_now:
7306 sctp_m_freem(at);
7307 return (NULL);
7308 }
7309 SCTP_BUF_LEN(at) = willcpy;
7310 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
7311 left -= willcpy;
7312 if (left > 0) {
7313 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
7314 if (SCTP_BUF_NEXT(at) == NULL) {
7315 goto err_out_now;
7316 }
7317 at = SCTP_BUF_NEXT(at);
7318 SCTP_BUF_LEN(at) = 0;
7319 cancpy = (int)M_TRAILINGSPACE(at);
7320 willcpy = min(cancpy, left);
7321 }
7322 }
7323 return (ret);
7324}
7325
7326static int
7327sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
7328 struct sctp_sndrcvinfo *srcv)
7329{
7330 int ret;
7331 struct sctp_copy_all *ca;
7332
7333 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
7334 SCTP_M_COPYAL);
7335 if (ca == NULL) {
7336 sctp_m_freem(m);
7337 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7338 return (ENOMEM);
7339 }
7340 memset(ca, 0, sizeof(struct sctp_copy_all));
7341
7342 ca->inp = inp;
7343 if (srcv) {
7344 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
7345 }
7346 /*
7347 * take off the sendall flag, it would be bad if we failed to do
7348 * this :-0
7349 */
7350 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
7351 /* get length and mbuf chain */
7352 if (uio) {
7353#if defined(__APPLE__)
7354#if defined(APPLE_LEOPARD)
7355 ca->sndlen = uio->uio_resid;
7356#else
7357 ca->sndlen = uio_resid(uio);
7358#endif
7359#else
7360 ca->sndlen = (int)uio->uio_resid;
7361#endif
7362#if defined(__APPLE__)
7363 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
7364#endif
7365 ca->m = sctp_copy_out_all(uio, ca->sndlen);
7366#if defined(__APPLE__)
7367 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
7368#endif
7369 if (ca->m == NULL) {
7370 SCTP_FREE(ca, SCTP_M_COPYAL);
7371 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7372 return (ENOMEM);
7373 }
7374 } else {
7375 /* Gather the length of the send */
7376 struct mbuf *mat;
7377
7378 ca->sndlen = 0;
7379 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
7380 ca->sndlen += SCTP_BUF_LEN(mat);
7381 }
7382 }
7383 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
7384 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
7385 SCTP_ASOC_ANY_STATE,
7386 (void *)ca, 0,
7387 sctp_sendall_completes, inp, 1);
7388 if (ret) {
7389 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
7390 SCTP_FREE(ca, SCTP_M_COPYAL);
7391 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
7392 return (EFAULT);
7393 }
7394 return (0);
7395}
7396
7397
7398void
7399sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
7400{
7401 struct sctp_tmit_chunk *chk, *nchk;
7402
7403 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7404 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7405 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7406 if (chk->data) {
7407 sctp_m_freem(chk->data);
7408 chk->data = NULL;
7409 }
7410 asoc->ctrl_queue_cnt--;
7411 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7412 }
7413 }
7414}
7415
7416void
7417sctp_toss_old_asconf(struct sctp_tcb *stcb)
7418{
7419 struct sctp_association *asoc;
7420 struct sctp_tmit_chunk *chk, *nchk;
7421 struct sctp_asconf_chunk *acp;
7422
7423 asoc = &stcb->asoc;
7424 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7425 /* find SCTP_ASCONF chunk in queue */
7426 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7427 if (chk->data) {
7428 acp = mtod(chk->data, struct sctp_asconf_chunk *);
7429 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
7430 /* Not Acked yet */
7431 break;
7432 }
7433 }
7434 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
7435 if (chk->data) {
7436 sctp_m_freem(chk->data);
7437 chk->data = NULL;
7438 }
7439 asoc->ctrl_queue_cnt--;
7440 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
7441 }
7442 }
7443}
7444
7445
/*
 * Move the 'bundle_at' chunks in data_list[] from the send queue to the
 * sent queue (kept in TSN order) after they have been bundled into a
 * packet for 'net', updating flight size, peer rwnd and statistics.
 * Caller holds the TCB lock; 'net' is the destination the packet was
 * sent to.
 */
static void
sctp_clean_up_datalist(struct sctp_tcb *stcb,
                       struct sctp_association *asoc,
                       struct sctp_tmit_chunk **data_list,
                       int bundle_at,
                       struct sctp_nets *net)
{
	int i;
	struct sctp_tmit_chunk *tp1;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
		asoc->send_queue_cnt--;
		if (i > 0) {
			/*
			 * Any chunk NOT 0 you zap the time chunk 0 gets
			 * zapped or set based on if a RTO measurment is
			 * needed.
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record time */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
		if (data_list[i]->whoTo == NULL) {
			/* Chunk had no destination yet; pin it to 'net'. */
			data_list[i]->whoTo = net;
			atomic_add_int(&net->ref_count, 1);
		}
		/* on to the sent queue */
		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
			struct sctp_tmit_chunk *tpp;

			/*
			 * Tail has a larger TSN (e.g. after a retransmit);
			 * walk backwards to keep the sent queue TSN-ordered.
			 */
			/* need to move back */
		back_up_more:
			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
			if (tpp == NULL) {
				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
				goto all_done;
			}
			tp1 = tpp;
			if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
				goto back_up_more;
			}
			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
		} else {
			/* Common case: new TSN belongs at the tail. */
			TAILQ_INSERT_TAIL(&asoc->sent_queue,
			                  data_list[i],
			                  sctp_next);
		}
	all_done:
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
			SCTP_STAT_INCR(sctps_windowprobed);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		data_list[i]->rec.data.chunk_was_revoked = 0;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
			               data_list[i]->whoTo->flight_size,
			               data_list[i]->book_size,
			               (uint32_t)(uintptr_t)data_list[i]->whoTo,
			               data_list[i]->rec.data.tsn);
		}
		/* Chunk is now outstanding: grow per-path and total flight. */
		sctp_flight_size_increase(data_list[i]);
		sctp_total_flight_increase(stcb, data_list[i]);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
			              asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
		}
		/* Charge the peer's receive window (plus configured overhead). */
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
		                                    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
		/* Let the congestion-control module see the transmission. */
		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
	}
}
7537
7538static void
7539sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7540#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7541 SCTP_UNUSED
7542#endif
7543)
7544{
7545 struct sctp_tmit_chunk *chk, *nchk;
7546
7547 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7548 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7549 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7550 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7551 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7552 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7553 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7554 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7555 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7556 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7557 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7558 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7559 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7560 /* Stray chunks must be cleaned up */
7561 clean_up_anyway:
7562 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7563 if (chk->data) {
7564 sctp_m_freem(chk->data);
7565 chk->data = NULL;
7566 }
7567 asoc->ctrl_queue_cnt--;
7568 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7569 asoc->fwd_tsn_cnt--;
7570 sctp_free_a_chunk(stcb, chk, so_locked);
7571 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7572 /* special handling, we must look into the param */
7573 if (chk != asoc->str_reset) {
7574 goto clean_up_anyway;
7575 }
7576 }
7577 }
7578}
7579
7580
7581static int
7582sctp_can_we_split_this(struct sctp_tcb *stcb,
7583 uint32_t length,
7584 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7585{
7586 /* Make a decision on if I should split a
7587 * msg into multiple parts. This is only asked of
7588 * incomplete messages.
7589 */
7590 if (eeor_on) {
7591 /* If we are doing EEOR we need to always send
7592 * it if its the entire thing, since it might
7593 * be all the guy is putting in the hopper.
7594 */
7595 if (goal_mtu >= length) {
7596 /*-
7597 * If we have data outstanding,
7598 * we get another chance when the sack
7599 * arrives to transmit - wait for more data
7600 */
7601 if (stcb->asoc.total_flight == 0) {
7602 /* If nothing is in flight, we zero
7603 * the packet counter.
7604 */
7605 return (length);
7606 }
7607 return (0);
7608
7609 } else {
7610 /* You can fill the rest */
7611 return (goal_mtu);
7612 }
7613 }
7614 /*-
7615 * For those strange folk that make the send buffer
7616 * smaller than our fragmentation point, we can't
7617 * get a full msg in so we have to allow splitting.
7618 */
7619 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7620 return (length);
7621 }
7622
7623 if ((length <= goal_mtu) ||
7624 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7625 /* Sub-optimial residual don't split in non-eeor mode. */
7626 return (0);
7627 }
7628 /* If we reach here length is larger
7629 * than the goal_mtu. Do we wish to split
7630 * it for the sake of packet putting together?
7631 */
7632 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7633 /* Its ok to split it */
7634 return (min(goal_mtu, frag_point));
7635 }
7636 /* Nope, can't split */
7637 return (0);
7638
7639}
7640
/*
 * Pull up to goal_mtu bytes of the first pending message on stream
 * 'strq' off its stream queue and append a newly built DATA (or, when
 * negotiated, I-DATA) chunk to asoc->send_queue.
 *
 * Returns the number of payload bytes moved (0 when nothing moved).
 * Out parameters:
 *   *giveup - set to 1 when this stream has nothing usable right now.
 *   *bail   - set to 1 on an allocation failure; caller should stop
 *             filling the packet.
 *
 * Locking: caller holds the TCB lock. The TCB send lock is taken on
 * demand (tracked in send_lock_up) whenever the queue structure or a
 * whole pending message is manipulated, and is always released before
 * returning (see out_of:).
 */
static uint32_t
sctp_move_to_outqueue(struct sctp_tcb *stcb,
                      struct sctp_stream_out *strq,
                      uint32_t goal_mtu,
                      uint32_t frag_point,
                      int *giveup,
                      int eeor_mode,
                      int *bail,
                      int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
                      SCTP_UNUSED
#endif
	)
{
	/* Move from the stream to the send_queue keeping track of the total */
	struct sctp_association *asoc;
	struct sctp_stream_queue_pending *sp;
	struct sctp_tmit_chunk *chk;
	struct sctp_data_chunk *dchkh=NULL;
	struct sctp_idata_chunk *ndchkh=NULL;
	uint32_t to_move, length;
	int leading;
	uint8_t rcv_flags = 0;
	uint8_t some_taken;
	uint8_t send_lock_up = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
one_more_time:
	/*sa_ignore FREED_MEMORY*/
	sp = TAILQ_FIRST(&strq->outqueue);
	if (sp == NULL) {
		/*
		 * Queue looked empty; re-check under the send lock in case
		 * a sender raced a message in.
		 */
		if (send_lock_up == 0) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp) {
			goto one_more_time;
		}
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
		    (stcb->asoc.idata_supported == 0) &&
		    (strq->last_msg_incomplete)) {
			/* Inconsistent bookkeeping: repair and warn. */
			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
			            strq->sid,
			            strq->last_msg_incomplete);
			strq->last_msg_incomplete = 0;
		}
		to_move = 0;
		if (send_lock_up) {
			SCTP_TCB_SEND_UNLOCK(stcb);
			send_lock_up = 0;
		}
		goto out_of;
	}
	if ((sp->msg_is_complete) && (sp->length == 0)) {
		if (sp->sender_all_done) {
			/* We are doing differed cleanup. Last
			 * time through when we took all the data
			 * the sender_all_done was not set.
			 */
			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
				            sp->sender_all_done,
				            sp->length,
				            sp->msg_is_complete,
				            sp->put_last_out,
				            send_lock_up);
			}
			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&strq->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
			if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
			    (strq->chunks_on_queues == 0) &&
			    TAILQ_EMPTY(&strq->outqueue)) {
				/* Stream drained; a pending reset can proceed. */
				stcb->asoc.trigger_reset = 1;
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			if (sp->data) {
				sctp_m_freem(sp->data);
				sp->data = NULL;
			}
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* we can't be locked to it */
			if (send_lock_up) {
				SCTP_TCB_SEND_UNLOCK(stcb);
				send_lock_up = 0;
			}
			/* back to get the next msg */
			goto one_more_time;
		} else {
			/* sender just finished this but
			 * still holds a reference
			 */
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	} else {
		/* is there some to get */
		if (sp->length == 0) {
			/* no */
			*giveup = 1;
			to_move = 0;
			goto out_of;
		} else if (sp->discard_rest) {
			/* Caller asked to drop the remainder of this message. */
			if (send_lock_up == 0) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			/* Whack down the size */
			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
			if ((stcb->sctp_socket != NULL) &&
			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
			}
			if (sp->data) {
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
			}
			sp->length = 0;
			sp->some_taken = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	}
	/* Remember the original state so failures below can restore it. */
	some_taken = sp->some_taken;
re_look:
	length = sp->length;
	if (sp->msg_is_complete) {
		/* The message is complete */
		to_move = min(length, frag_point);
		if (to_move == length) {
			/* All of it fits in the MTU */
			if (sp->some_taken) {
				rcv_flags |= SCTP_DATA_LAST_FRAG;
			} else {
				rcv_flags |= SCTP_DATA_NOT_FRAG;
			}
			sp->put_last_out = 1;
			if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
				rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
			}
		} else {
			/* Not all of it fits, we fragment */
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
			}
			sp->some_taken = 1;
		}
	} else {
		/* Incomplete message: policy decides how much (if any) to take. */
		to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
		if (to_move) {
			/*-
			 * We use a snapshot of length in case it
			 * is expanding during the compare.
			 */
			uint32_t llen;

			llen = length;
			if (to_move >= llen) {
				to_move = llen;
				if (send_lock_up == 0) {
					/*-
					 * We are taking all of an incomplete msg
					 * thus we need a send lock.
					 */
					SCTP_TCB_SEND_LOCK(stcb);
					send_lock_up = 1;
					if (sp->msg_is_complete) {
						/* the sender finished the msg */
						goto re_look;
					}
				}
			}
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
				sp->some_taken = 1;
			}
		} else {
			/* Nothing to take. */
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	}

	/* If we reach here, we can copy out a chunk */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* No chunk memory */
		*giveup = 1;
		to_move = 0;
		goto out_of;
	}
	/* Setup for unordered if needed by looking
	 * at the user sent info flags.
	 */
	if (sp->sinfo_flags & SCTP_UNORDERED) {
		rcv_flags |= SCTP_DATA_UNORDERED;
	}
	if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
	    (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
	}
	/* clear out the chunk before setting up */
	memset(chk, 0, sizeof(*chk));
	chk->rec.data.rcv_flags = rcv_flags;

	if (to_move >= length) {
		/* we think we can steal the whole thing */
		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		if (to_move < sp->length) {
			/* bail, it changed */
			goto dont_do_it;
		}
		/* Steal the mbuf chain instead of copying it. */
		chk->data = sp->data;
		chk->last_mbuf = sp->tail_mbuf;
		/* register the stealing */
		sp->data = sp->tail_mbuf = NULL;
	} else {
		struct mbuf *m;
	dont_do_it:
		/* Partial take: copy the leading to_move bytes. */
		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
		chk->last_mbuf = NULL;
		if (chk->data == NULL) {
			sp->some_taken = some_taken;
			sctp_free_a_chunk(stcb, chk, so_locked);
			*bail = 1;
			to_move = 0;
			goto out_of;
		}
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
		}
#endif
		/* Pull off the data */
		m_adj(sp->data, to_move);
		/* Now lets work our way down and compact it */
		m = sp->data;
		while (m && (SCTP_BUF_LEN(m) == 0)) {
			sp->data = SCTP_BUF_NEXT(m);
			SCTP_BUF_NEXT(m) = NULL;
			if (sp->tail_mbuf == m) {
				/*-
				 * Freeing tail? TSNH since
				 * we supposedly were taking less
				 * than the sp->length.
				 */
#ifdef INVARIANTS
				panic("Huh, freing tail? - TSNH");
#else
				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
				sp->tail_mbuf = sp->data = NULL;
				sp->length = 0;
#endif

			}
			sctp_m_free(m);
			m = sp->data;
		}
	}
	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
		chk->copy_by_ref = 1;
	} else {
		chk->copy_by_ref = 0;
	}
	/* get last_mbuf and counts of mb usage
	 * This is ugly but hopefully its only one mbuf.
	 */
	if (chk->last_mbuf == NULL) {
		chk->last_mbuf = chk->data;
		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
		}
	}

	if (to_move > length) {
		/*- This should not happen either
		 * since we always lower to_move to the size
		 * of sp->length if its larger.
		 */
#ifdef INVARIANTS
		panic("Huh, how can to_move be larger?");
#else
		SCTP_PRINTF("Huh, how can to_move be larger?\n");
		sp->length = 0;
#endif
	} else {
		atomic_subtract_int(&sp->length, to_move);
	}
	/* Room needed in front of the payload for the chunk header. */
	if (stcb->asoc.idata_supported == 0) {
		leading = sizeof(struct sctp_data_chunk);
	} else {
		leading = sizeof(struct sctp_idata_chunk);
	}
	if (M_LEADINGSPACE(chk->data) < leading) {
		/* Not enough room for a chunk header, get some */
		struct mbuf *m;

		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
		if (m == NULL) {
			/*
			 * we're in trouble here. _PREPEND below will free
			 * all the data if there is no leading space, so we
			 * must put the data back and restore.
			 */
			if (send_lock_up == 0) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			if (sp->data == NULL) {
				/* unsteal the data */
				sp->data = chk->data;
				sp->tail_mbuf = chk->last_mbuf;
			} else {
				struct mbuf *m_tmp;
				/* reassemble the data */
				m_tmp = sp->data;
				sp->data = chk->data;
				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
			}
			sp->some_taken = some_taken;
			atomic_add_int(&sp->length, to_move);
			chk->data = NULL;
			*bail = 1;
			sctp_free_a_chunk(stcb, chk, so_locked);
			to_move = 0;
			goto out_of;
		} else {
			SCTP_BUF_LEN(m) = 0;
			SCTP_BUF_NEXT(m) = chk->data;
			chk->data = m;
			M_ALIGN(chk->data, 4);
		}
	}
	if (stcb->asoc.idata_supported == 0) {
		SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
	} else {
		SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_idata_chunk), M_NOWAIT);
	}
	if (chk->data == NULL) {
		/* HELP, TSNH since we assured it would not above? */
#ifdef INVARIANTS
		panic("prepend failes HELP?");
#else
		SCTP_PRINTF("prepend fails HELP?\n");
		sctp_free_a_chunk(stcb, chk, so_locked);
#endif
		*bail = 1;
		to_move = 0;
		goto out_of;
	}
	/* Book the payload plus header against the send buffer. */
	if (stcb->asoc.idata_supported == 0) {
		sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
		chk->book_size = chk->send_size = (uint16_t)(to_move + sizeof(struct sctp_data_chunk));
	} else {
		sctp_snd_sb_alloc(stcb, sizeof(struct sctp_idata_chunk));
		chk->book_size = chk->send_size = (uint16_t)(to_move + sizeof(struct sctp_idata_chunk));
	}
	chk->book_size_scale = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;

	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->pad_inplace = 0;
	chk->no_fr_allowed = 0;
	/* Assign the message identifier (SSN for DATA, MID for I-DATA). */
	if (stcb->asoc.idata_supported == 0) {
		if (rcv_flags & SCTP_DATA_UNORDERED) {
			/* Just use 0. The receiver ignores the values. */
			chk->rec.data.mid = 0;
		} else {
			chk->rec.data.mid = strq->next_mid_ordered;
			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
				strq->next_mid_ordered++;
			}
		}
	} else {
		if (rcv_flags & SCTP_DATA_UNORDERED) {
			chk->rec.data.mid = strq->next_mid_unordered;
			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
				strq->next_mid_unordered++;
			}
		} else {
			chk->rec.data.mid = strq->next_mid_ordered;
			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
				strq->next_mid_ordered++;
			}
		}
	}
	chk->rec.data.sid = sp->sid;
	chk->rec.data.ppid = sp->ppid;
	chk->rec.data.context = sp->context;
	chk->rec.data.doing_fast_retransmit = 0;

	chk->rec.data.timetodrop = sp->ts;
	chk->flags = sp->act_flags;

	if (sp->net) {
		chk->whoTo = sp->net;
		atomic_add_int(&chk->whoTo->ref_count, 1);
	} else
		chk->whoTo = NULL;

	if (sp->holds_key_ref) {
		/* Carry the AUTH key reference over to the chunk. */
		chk->auth_keyid = sp->auth_keyid;
		sctp_auth_key_acquire(stcb, chk->auth_keyid);
		chk->holds_key_ref = 1;
	}
	/* Draw the next TSN (atomically where the platform supports it). */
#if defined(__FreeBSD__) || defined(__Panda__)
	chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
#else
	chk->rec.data.tsn = asoc->sending_seq++;
#endif
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
		               (uint32_t)(uintptr_t)stcb, sp->length,
		               (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
		               chk->rec.data.tsn);
	}
	if (stcb->asoc.idata_supported == 0) {
		dchkh = mtod(chk->data, struct sctp_data_chunk *);
	} else {
		ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
	}
	/*
	 * Put the rest of the things in place now. Size was done
	 * earlier in previous loop prior to padding.
	 */

#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_out_at = 0;
		asoc->tsn_out_wrapped = 1;
	}
	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
	asoc->tsn_out_at++;
#endif
	/* Fill in the wire-format chunk header. */
	if (stcb->asoc.idata_supported == 0) {
		dchkh->ch.chunk_type = SCTP_DATA;
		dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
		dchkh->dp.tsn = htonl(chk->rec.data.tsn);
		dchkh->dp.sid = htons(strq->sid);
		dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
		dchkh->dp.ppid = chk->rec.data.ppid;
		dchkh->ch.chunk_length = htons(chk->send_size);
	} else {
		ndchkh->ch.chunk_type = SCTP_IDATA;
		ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
		ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
		ndchkh->dp.sid = htons(strq->sid);
		ndchkh->dp.reserved = htons(0);
		ndchkh->dp.mid = htonl(chk->rec.data.mid);
		/* First fragment carries the PPID, later ones the FSN. */
		if (sp->fsn == 0)
			ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
		else
			ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
		sp->fsn++;
		ndchkh->ch.chunk_length = htons(chk->send_size);
	}
	/* Now advance the chk->send_size by the actual pad needed. */
	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
		/* need a pad */
		struct mbuf *lm;
		int pads;

		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
		lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
		if (lm != NULL) {
			chk->last_mbuf = lm;
			chk->pad_inplace = 1;
		}
		chk->send_size += pads;
	}
	if (PR_SCTP_ENABLED(chk->flags)) {
		asoc->pr_sctp_cnt++;
	}
	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
		/* All done pull and kill the message */
		if (sp->put_last_out == 0) {
			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
			            sp->sender_all_done,
			            sp->length,
			            sp->msg_is_complete,
			            sp->put_last_out,
			            send_lock_up);
		}
		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
		TAILQ_REMOVE(&strq->outqueue, sp, next);
		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
		if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
		    (strq->chunks_on_queues == 0) &&
		    TAILQ_EMPTY(&strq->outqueue)) {
			stcb->asoc.trigger_reset = 1;
		}
		if (sp->net) {
			sctp_free_remote_addr(sp->net);
			sp->net = NULL;
		}
		if (sp->data) {
			sctp_m_freem(sp->data);
			sp->data = NULL;
		}
		sctp_free_a_strmoq(stcb, sp, so_locked);
	}
	/* Finally queue the finished chunk for transmission. */
	asoc->chunks_on_out_queue++;
	strq->chunks_on_queues++;
	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
	asoc->send_queue_cnt++;
out_of:
	if (send_lock_up) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	return (to_move);
}
8184
8185
8186static void
8187sctp_fill_outqueue(struct sctp_tcb *stcb,
8188 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
8189#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8190 SCTP_UNUSED
8191#endif
8192)
8193{
8194 struct sctp_association *asoc;
8195 struct sctp_stream_out *strq;
8196 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
8197 int giveup;
8198
8199 SCTP_TCB_LOCK_ASSERT(stcb);
8200 asoc = &stcb->asoc;
8201 switch (net->ro._l_addr.sa.sa_family) {
8202#ifdef INET
8203 case AF_INET:
8204 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8205 break;
8206#endif
8207#ifdef INET6
8208 case AF_INET6:
8209 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
8210 break;
8211#endif
8212#if defined(__Userspace__)
8213 case AF_CONN:
8214 goal_mtu = net->mtu - sizeof(struct sctphdr);
8215 break;
8216#endif
8217 default:
8218 /* TSNH */
8219 goal_mtu = net->mtu;
8220 break;
8221 }
8222 /* Need an allowance for the data chunk header too */
8223 if (stcb->asoc.idata_supported == 0) {
8224 goal_mtu -= sizeof(struct sctp_data_chunk);
8225 } else {
8226 goal_mtu -= sizeof(struct sctp_idata_chunk);
8227 }
8228
8229 /* must make even word boundary */
8230 goal_mtu &= 0xfffffffc;
8231 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8232 while ((goal_mtu > 0) && strq) {
8233 giveup = 0;
8234 bail = 0;
8235 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point,
8236 &giveup, eeor_mode, &bail, so_locked);
8237 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
8238
8239 if ((giveup) || bail) {
8240 break;
8241 }
8242 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
8243 if (strq == NULL) {
8244 break;
8245 }
8246 total_moved += moved_how_much;
8247 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
8248 goal_mtu &= 0xfffffffc;
8249 }
8250 if (bail)
8251 *quit_now = 1;
8252
8253 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
8254
8255 if (total_moved == 0) {
8256 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
8257 (net == stcb->asoc.primary_destination)) {
8258 /* ran dry for primary network net */
8259 SCTP_STAT_INCR(sctps_primary_randry);
8260 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
8261 /* ran dry with CMT on */
8262 SCTP_STAT_INCR(sctps_cmt_randry);
8263 }
8264 }
8265}
8266
8267void
8268sctp_fix_ecn_echo(struct sctp_association *asoc)
8269{
8270 struct sctp_tmit_chunk *chk;
8271
8272 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8273 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8274 chk->sent = SCTP_DATAGRAM_UNSENT;
8275 }
8276 }
8277}
8278
8279void
8280sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
8281{
8282 struct sctp_association *asoc;
8283 struct sctp_tmit_chunk *chk;
8284 struct sctp_stream_queue_pending *sp;
8285 unsigned int i;
8286
8287 if (net == NULL) {
8288 return;
8289 }
8290 asoc = &stcb->asoc;
8291 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
8292 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
8293 if (sp->net == net) {
8294 sctp_free_remote_addr(sp->net);
8295 sp->net = NULL;
8296 }
8297 }
8298 }
8299 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8300 if (chk->whoTo == net) {
8301 sctp_free_remote_addr(chk->whoTo);
8302 chk->whoTo = NULL;
8303 }
8304 }
8305}
8306
8307int
8308sctp_med_chunk_output(struct sctp_inpcb *inp,
8309 struct sctp_tcb *stcb,
8310 struct sctp_association *asoc,
8311 int *num_out,
8312 int *reason_code,
8313 int control_only, int from_where,
8314 struct timeval *now, int *now_filled, int frag_point, int so_locked
8315#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8316 SCTP_UNUSED
8317#endif
8318 )
8319{
8320 /**
8321 * Ok this is the generic chunk service queue. we must do the
8322 * following:
8323 * - Service the stream queue that is next, moving any
8324 * message (note I must get a complete message i.e. FIRST/MIDDLE and
8325 * LAST to the out queue in one pass) and assigning TSN's. This
8326 * only applys though if the peer does not support NDATA. For NDATA
8327 * chunks its ok to not send the entire message ;-)
8328 * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and
8329 * fomulate and send the low level chunks. Making sure to combine
8330 * any control in the control chunk queue also.
8331 */
8332 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
8333 struct mbuf *outchain, *endoutchain;
8334 struct sctp_tmit_chunk *chk, *nchk;
8335
8336 /* temp arrays for unlinking */
8337 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8338 int no_fragmentflg, error;
8339 unsigned int max_rwnd_per_dest, max_send_per_dest;
8340 int one_chunk, hbflag, skip_data_for_this_net;
8341 int asconf, cookie, no_out_cnt;
8342 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
8343 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
8344 int tsns_sent = 0;
8345 uint32_t auth_offset = 0;
8346 struct sctp_auth_chunk *auth = NULL;
8347 uint16_t auth_keyid;
8348 int override_ok = 1;
8349 int skip_fill_up = 0;
8350 int data_auth_reqd = 0;
8351 /* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
8352 the destination. */
8353 int quit_now = 0;
8354
8355#if defined(__APPLE__)
8356 if (so_locked) {
8357 sctp_lock_assert(SCTP_INP_SO(inp));
8358 } else {
8359 sctp_unlock_assert(SCTP_INP_SO(inp));
8360 }
8361#endif
8362 *num_out = 0;
8363 *reason_code = 0;
8364 auth_keyid = stcb->asoc.authinfo.active_keyid;
8365 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
8366 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
8367 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
8368 eeor_mode = 1;
8369 } else {
8370 eeor_mode = 0;
8371 }
8372 ctl_cnt = no_out_cnt = asconf = cookie = 0;
8373 /*
8374 * First lets prime the pump. For each destination, if there is room
8375 * in the flight size, attempt to pull an MTU's worth out of the
8376 * stream queues into the general send_queue
8377 */
8378#ifdef SCTP_AUDITING_ENABLED
8379 sctp_audit_log(0xC2, 2);
8380#endif
8381 SCTP_TCB_LOCK_ASSERT(stcb);
8382 hbflag = 0;
8383 if (control_only)
8384 no_data_chunks = 1;
8385 else
8386 no_data_chunks = 0;
8387
8388 /* Nothing to possible to send? */
8389 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
8390 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
8391 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8392 TAILQ_EMPTY(&asoc->send_queue) &&
8393 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
8394 nothing_to_send:
8395 *reason_code = 9;
8396 return (0);
8397 }
8398 if (asoc->peers_rwnd == 0) {
8399 /* No room in peers rwnd */
8400 *reason_code = 1;
8401 if (asoc->total_flight > 0) {
8402 /* we are allowed one chunk in flight */
8403 no_data_chunks = 1;
8404 }
8405 }
8406 if (stcb->asoc.ecn_echo_cnt_onq) {
8407 /* Record where a sack goes, if any */
8408 if (no_data_chunks &&
8409 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
8410 /* Nothing but ECNe to send - we don't do that */
8411 goto nothing_to_send;
8412 }
8413 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8414 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8415 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8416 sack_goes_to = chk->whoTo;
8417 break;
8418 }
8419 }
8420 }
8421 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
8422 if (stcb->sctp_socket)
8423 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
8424 else
8425 max_send_per_dest = 0;
8426 if (no_data_chunks == 0) {
8427 /* How many non-directed chunks are there? */
8428 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
8429 if (chk->whoTo == NULL) {
8430 /* We already have non-directed
8431 * chunks on the queue, no need
8432 * to do a fill-up.
8433 */
8434 skip_fill_up = 1;
8435 break;
8436 }
8437 }
8438
8439 }
8440 if ((no_data_chunks == 0) &&
8441 (skip_fill_up == 0) &&
8442 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
8443 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8444 /*
8445 * This for loop we are in takes in
8446 * each net, if its's got space in cwnd and
8447 * has data sent to it (when CMT is off) then it
8448 * calls sctp_fill_outqueue for the net. This gets
8449 * data on the send queue for that network.
8450 *
8451 * In sctp_fill_outqueue TSN's are assigned and
8452 * data is copied out of the stream buffers. Note
8453 * mostly copy by reference (we hope).
8454 */
8455 net->window_probe = 0;
8456 if ((net != stcb->asoc.alternate) &&
8457 ((net->dest_state & SCTP_ADDR_PF) ||
8458 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8459 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8460 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8461 sctp_log_cwnd(stcb, net, 1,
8462 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8463 }
8464 continue;
8465 }
8466 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8467 (net->flight_size == 0)) {
8468 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
8469 }
8470 if (net->flight_size >= net->cwnd) {
8471 /* skip this network, no room - can't fill */
8472 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8473 sctp_log_cwnd(stcb, net, 3,
8474 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8475 }
8476 continue;
8477 }
8478 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8479 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8480 }
8481 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8482 if (quit_now) {
8483 /* memory alloc failure */
8484 no_data_chunks = 1;
8485 break;
8486 }
8487 }
8488 }
8489 /* now service each destination and send out what we can for it */
8490 /* Nothing to send? */
8491 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8492 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8493 TAILQ_EMPTY(&asoc->send_queue)) {
8494 *reason_code = 8;
8495 return (0);
8496 }
8497
8498 if (asoc->sctp_cmt_on_off > 0) {
8499 /* get the last start point */
8500 start_at = asoc->last_net_cmt_send_started;
8501 if (start_at == NULL) {
8502 /* null so to beginning */
8503 start_at = TAILQ_FIRST(&asoc->nets);
8504 } else {
8505 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8506 if (start_at == NULL) {
8507 start_at = TAILQ_FIRST(&asoc->nets);
8508 }
8509 }
8510 asoc->last_net_cmt_send_started = start_at;
8511 } else {
8512 start_at = TAILQ_FIRST(&asoc->nets);
8513 }
8514 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8515 if (chk->whoTo == NULL) {
8516 if (asoc->alternate) {
8517 chk->whoTo = asoc->alternate;
8518 } else {
8519 chk->whoTo = asoc->primary_destination;
8520 }
8521 atomic_add_int(&chk->whoTo->ref_count, 1);
8522 }
8523 }
8524 old_start_at = NULL;
8525again_one_more_time:
8526 for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8527 /* how much can we send? */
8528 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8529 if (old_start_at && (old_start_at == net)) {
8530 /* through list ocmpletely. */
8531 break;
8532 }
8533 tsns_sent = 0xa;
8534 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8535 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8536 (net->flight_size >= net->cwnd)) {
8537 /* Nothing on control or asconf and flight is full, we can skip
8538 * even in the CMT case.
8539 */
8540 continue;
8541 }
8542 bundle_at = 0;
8543 endoutchain = outchain = NULL;
8544 no_fragmentflg = 1;
8545 one_chunk = 0;
8546 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8547 skip_data_for_this_net = 1;
8548 } else {
8549 skip_data_for_this_net = 0;
8550 }
8551 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8552#ifdef INET
8553 case AF_INET:
8554 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8555 break;
8556#endif
8557#ifdef INET6
8558 case AF_INET6:
8559 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8560 break;
8561#endif
8562#if defined(__Userspace__)
8563 case AF_CONN:
8564 mtu = net->mtu - sizeof(struct sctphdr);
8565 break;
8566#endif
8567 default:
8568 /* TSNH */
8569 mtu = net->mtu;
8570 break;
8571 }
8572 mx_mtu = mtu;
8573 to_out = 0;
8574 if (mtu > asoc->peers_rwnd) {
8575 if (asoc->total_flight > 0) {
8576 /* We have a packet in flight somewhere */
8577 r_mtu = asoc->peers_rwnd;
8578 } else {
8579 /* We are always allowed to send one MTU out */
8580 one_chunk = 1;
8581 r_mtu = mtu;
8582 }
8583 } else {
8584 r_mtu = mtu;
8585 }
8586 error = 0;
8587 /************************/
8588 /* ASCONF transmission */
8589 /************************/
8590 /* Now first lets go through the asconf queue */
8591 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8592 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8593 continue;
8594 }
8595 if (chk->whoTo == NULL) {
8596 if (asoc->alternate == NULL) {
8597 if (asoc->primary_destination != net) {
8598 break;
8599 }
8600 } else {
8601 if (asoc->alternate != net) {
8602 break;
8603 }
8604 }
8605 } else {
8606 if (chk->whoTo != net) {
8607 break;
8608 }
8609 }
8610 if (chk->data == NULL) {
8611 break;
8612 }
8613 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8614 chk->sent != SCTP_DATAGRAM_RESEND) {
8615 break;
8616 }
8617 /*
8618 * if no AUTH is yet included and this chunk
8619 * requires it, make sure to account for it. We
8620 * don't apply the size until the AUTH chunk is
8621 * actually added below in case there is no room for
8622 * this chunk. NOTE: we overload the use of "omtu"
8623 * here
8624 */
8625 if ((auth == NULL) &&
8626 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8627 stcb->asoc.peer_auth_chunks)) {
8628 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8629 } else
8630 omtu = 0;
8631 /* Here we do NOT factor the r_mtu */
8632 if ((chk->send_size < (int)(mtu - omtu)) ||
8633 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8634 /*
8635 * We probably should glom the mbuf chain
8636 * from the chk->data for control but the
8637 * problem is it becomes yet one more level
8638 * of tracking to do if for some reason
8639 * output fails. Then I have got to
8640 * reconstruct the merged control chain.. el
8641 * yucko.. for now we take the easy way and
8642 * do the copy
8643 */
8644 /*
8645 * Add an AUTH chunk, if chunk requires it
8646 * save the offset into the chain for AUTH
8647 */
8648 if ((auth == NULL) &&
8649 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8650 stcb->asoc.peer_auth_chunks))) {
8651 outchain = sctp_add_auth_chunk(outchain,
8652 &endoutchain,
8653 &auth,
8654 &auth_offset,
8655 stcb,
8656 chk->rec.chunk_id.id);
8657 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8658 }
8659 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8660 (int)chk->rec.chunk_id.can_take_data,
8661 chk->send_size, chk->copy_by_ref);
8662 if (outchain == NULL) {
8663 *reason_code = 8;
8664 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8665 return (ENOMEM);
8666 }
8667 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8668 /* update our MTU size */
8669 if (mtu > (chk->send_size + omtu))
8670 mtu -= (chk->send_size + omtu);
8671 else
8672 mtu = 0;
8673 to_out += (chk->send_size + omtu);
8674 /* Do clear IP_DF ? */
8675 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8676 no_fragmentflg = 0;
8677 }
8678 if (chk->rec.chunk_id.can_take_data)
8679 chk->data = NULL;
8680 /*
8681 * set hb flag since we can
8682 * use these for RTO
8683 */
8684 hbflag = 1;
8685 asconf = 1;
8686 /*
8687 * should sysctl this: don't
8688 * bundle data with ASCONF
8689 * since it requires AUTH
8690 */
8691 no_data_chunks = 1;
8692 chk->sent = SCTP_DATAGRAM_SENT;
8693 if (chk->whoTo == NULL) {
8694 chk->whoTo = net;
8695 atomic_add_int(&net->ref_count, 1);
8696 }
8697 chk->snd_count++;
8698 if (mtu == 0) {
8699 /*
8700 * Ok we are out of room but we can
8701 * output without effecting the
8702 * flight size since this little guy
8703 * is a control only packet.
8704 */
8705 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8706 /*
8707 * do NOT clear the asconf
8708 * flag as it is used to do
8709 * appropriate source address
8710 * selection.
8711 */
8712 if (*now_filled == 0) {
8713 (void)SCTP_GETTIME_TIMEVAL(now);
8714 *now_filled = 1;
8715 }
8716 net->last_sent_time = *now;
8717 hbflag = 0;
8718 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8719 (struct sockaddr *)&net->ro._l_addr,
8720 outchain, auth_offset, auth,
8721 stcb->asoc.authinfo.active_keyid,
8722 no_fragmentflg, 0, asconf,
8723 inp->sctp_lport, stcb->rport,
8724 htonl(stcb->asoc.peer_vtag),
8725 net->port, NULL,
8726#if defined(__FreeBSD__)
8727 0, 0,
8728#endif
8729 so_locked))) {
8730 /* error, we could not output */
8731 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8732 if (from_where == 0) {
8733 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8734 }
8735 if (error == ENOBUFS) {
8736 asoc->ifp_had_enobuf = 1;
8737 SCTP_STAT_INCR(sctps_lowlevelerr);
8738 }
8739 /* error, could not output */
8740 if (error == EHOSTUNREACH) {
8741 /*
8742 * Destination went
8743 * unreachable
8744 * during this send
8745 */
8746 sctp_move_chunks_from_net(stcb, net);
8747 }
8748 *reason_code = 7;
8749 break;
8750 } else {
8751 asoc->ifp_had_enobuf = 0;
8752 }
8753 /*
8754 * increase the number we sent, if a
8755 * cookie is sent we don't tell them
8756 * any was sent out.
8757 */
8758 outchain = endoutchain = NULL;
8759 auth = NULL;
8760 auth_offset = 0;
8761 if (!no_out_cnt)
8762 *num_out += ctl_cnt;
8763 /* recalc a clean slate and setup */
8764 switch (net->ro._l_addr.sa.sa_family) {
8765#ifdef INET
8766 case AF_INET:
8767 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8768 break;
8769#endif
8770#ifdef INET6
8771 case AF_INET6:
8772 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8773 break;
8774#endif
8775#if defined(__Userspace__)
8776 case AF_CONN:
8777 mtu = net->mtu - sizeof(struct sctphdr);
8778 break;
8779#endif
8780 default:
8781 /* TSNH */
8782 mtu = net->mtu;
8783 break;
8784 }
8785 to_out = 0;
8786 no_fragmentflg = 1;
8787 }
8788 }
8789 }
8790 if (error != 0) {
8791 /* try next net */
8792 continue;
8793 }
8794 /************************/
8795 /* Control transmission */
8796 /************************/
8797 /* Now first lets go through the control queue */
8798 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8799 if ((sack_goes_to) &&
8800 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8801 (chk->whoTo != sack_goes_to)) {
8802 /*
8803 * if we have a sack in queue, and we are looking at an
8804 * ecn echo that is NOT queued to where the sack is going..
8805 */
8806 if (chk->whoTo == net) {
8807 /* Don't transmit it to where its going (current net) */
8808 continue;
8809 } else if (sack_goes_to == net) {
8810 /* But do transmit it to this address */
8811 goto skip_net_check;
8812 }
8813 }
8814 if (chk->whoTo == NULL) {
8815 if (asoc->alternate == NULL) {
8816 if (asoc->primary_destination != net) {
8817 continue;
8818 }
8819 } else {
8820 if (asoc->alternate != net) {
8821 continue;
8822 }
8823 }
8824 } else {
8825 if (chk->whoTo != net) {
8826 continue;
8827 }
8828 }
8829 skip_net_check:
8830 if (chk->data == NULL) {
8831 continue;
8832 }
8833 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8834 /*
8835 * It must be unsent. Cookies and ASCONF's
8836 * hang around but there timers will force
8837 * when marked for resend.
8838 */
8839 continue;
8840 }
8841 /*
8842 * if no AUTH is yet included and this chunk
8843 * requires it, make sure to account for it. We
8844 * don't apply the size until the AUTH chunk is
8845 * actually added below in case there is no room for
8846 * this chunk. NOTE: we overload the use of "omtu"
8847 * here
8848 */
8849 if ((auth == NULL) &&
8850 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8851 stcb->asoc.peer_auth_chunks)) {
8852 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8853 } else
8854 omtu = 0;
8855 /* Here we do NOT factor the r_mtu */
8856 if ((chk->send_size <= (int)(mtu - omtu)) ||
8857 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8858 /*
8859 * We probably should glom the mbuf chain
8860 * from the chk->data for control but the
8861 * problem is it becomes yet one more level
8862 * of tracking to do if for some reason
8863 * output fails. Then I have got to
8864 * reconstruct the merged control chain.. el
8865 * yucko.. for now we take the easy way and
8866 * do the copy
8867 */
8868 /*
8869 * Add an AUTH chunk, if chunk requires it
8870 * save the offset into the chain for AUTH
8871 */
8872 if ((auth == NULL) &&
8873 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8874 stcb->asoc.peer_auth_chunks))) {
8875 outchain = sctp_add_auth_chunk(outchain,
8876 &endoutchain,
8877 &auth,
8878 &auth_offset,
8879 stcb,
8880 chk->rec.chunk_id.id);
8881 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8882 }
8883 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8884 (int)chk->rec.chunk_id.can_take_data,
8885 chk->send_size, chk->copy_by_ref);
8886 if (outchain == NULL) {
8887 *reason_code = 8;
8888 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8889 return (ENOMEM);
8890 }
8891 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8892 /* update our MTU size */
8893 if (mtu > (chk->send_size + omtu))
8894 mtu -= (chk->send_size + omtu);
8895 else
8896 mtu = 0;
8897 to_out += (chk->send_size + omtu);
8898 /* Do clear IP_DF ? */
8899 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8900 no_fragmentflg = 0;
8901 }
8902 if (chk->rec.chunk_id.can_take_data)
8903 chk->data = NULL;
8904 /* Mark things to be removed, if needed */
8905 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8906 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8907 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8908 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8909 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8910 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8911 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8912 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8913 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8914 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8915 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8916 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8917 hbflag = 1;
8918 }
8919 /* remove these chunks at the end */
8920 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8921 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8922 /* turn off the timer */
8923 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8924 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8925 inp, stcb, net,
8926 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8927 }
8928 }
8929 ctl_cnt++;
8930 } else {
8931 /*
8932 * Other chunks, since they have
8933 * timers running (i.e. COOKIE)
8934 * we just "trust" that it
8935 * gets sent or retransmitted.
8936 */
8937 ctl_cnt++;
8938 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8939 cookie = 1;
8940 no_out_cnt = 1;
8941 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8942 /*
8943 * Increment ecne send count here
8944 * this means we may be over-zealous in
8945 * our counting if the send fails, but its
8946 * the best place to do it (we used to do
8947 * it in the queue of the chunk, but that did
8948 * not tell how many times it was sent.
8949 */
8950 SCTP_STAT_INCR(sctps_sendecne);
8951 }
8952 chk->sent = SCTP_DATAGRAM_SENT;
8953 if (chk->whoTo == NULL) {
8954 chk->whoTo = net;
8955 atomic_add_int(&net->ref_count, 1);
8956 }
8957 chk->snd_count++;
8958 }
8959 if (mtu == 0) {
8960 /*
8961 * Ok we are out of room but we can
8962 * output without effecting the
8963 * flight size since this little guy
8964 * is a control only packet.
8965 */
8966 if (asconf) {
8967 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8968 /*
8969 * do NOT clear the asconf
8970 * flag as it is used to do
8971 * appropriate source address
8972 * selection.
8973 */
8974 }
8975 if (cookie) {
8976 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8977 cookie = 0;
8978 }
8979 /* Only HB or ASCONF advances time */
8980 if (hbflag) {
8981 if (*now_filled == 0) {
8982 (void)SCTP_GETTIME_TIMEVAL(now);
8983 *now_filled = 1;
8984 }
8985 net->last_sent_time = *now;
8986 hbflag = 0;
8987 }
8988 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8989 (struct sockaddr *)&net->ro._l_addr,
8990 outchain,
8991 auth_offset, auth,
8992 stcb->asoc.authinfo.active_keyid,
8993 no_fragmentflg, 0, asconf,
8994 inp->sctp_lport, stcb->rport,
8995 htonl(stcb->asoc.peer_vtag),
8996 net->port, NULL,
8997#if defined(__FreeBSD__)
8998 0, 0,
8999#endif
9000 so_locked))) {
9001 /* error, we could not output */
9002 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9003 if (from_where == 0) {
9004 SCTP_STAT_INCR(sctps_lowlevelerrusr);
9005 }
9006 if (error == ENOBUFS) {
9007 asoc->ifp_had_enobuf = 1;
9008 SCTP_STAT_INCR(sctps_lowlevelerr);
9009 }
9010 if (error == EHOSTUNREACH) {
9011 /*
9012 * Destination went
9013 * unreachable
9014 * during this send
9015 */
9016 sctp_move_chunks_from_net(stcb, net);
9017 }
9018 *reason_code = 7;
9019 break;
9020 } else {
9021 asoc->ifp_had_enobuf = 0;
9022 }
9023 /*
9024 * increase the number we sent, if a
9025 * cookie is sent we don't tell them
9026 * any was sent out.
9027 */
9028 outchain = endoutchain = NULL;
9029 auth = NULL;
9030 auth_offset = 0;
9031 if (!no_out_cnt)
9032 *num_out += ctl_cnt;
9033 /* recalc a clean slate and setup */
9034 switch (net->ro._l_addr.sa.sa_family) {
9035#ifdef INET
9036 case AF_INET:
9037 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9038 break;
9039#endif
9040#ifdef INET6
9041 case AF_INET6:
9042 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9043 break;
9044#endif
9045#if defined(__Userspace__)
9046 case AF_CONN:
9047 mtu = net->mtu - sizeof(struct sctphdr);
9048 break;
9049#endif
9050 default:
9051 /* TSNH */
9052 mtu = net->mtu;
9053 break;
9054 }
9055 to_out = 0;
9056 no_fragmentflg = 1;
9057 }
9058 }
9059 }
9060 if (error != 0) {
9061 /* try next net */
9062 continue;
9063 }
9064 /* JRI: if dest is in PF state, do not send data to it */
9065 if ((asoc->sctp_cmt_on_off > 0) &&
9066 (net != stcb->asoc.alternate) &&
9067 (net->dest_state & SCTP_ADDR_PF)) {
9068 goto no_data_fill;
9069 }
9070 if (net->flight_size >= net->cwnd) {
9071 goto no_data_fill;
9072 }
9073 if ((asoc->sctp_cmt_on_off > 0) &&
9074 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
9075 (net->flight_size > max_rwnd_per_dest)) {
9076 goto no_data_fill;
9077 }
9078 /*
9079 * We need a specific accounting for the usage of the
9080 * send buffer. We also need to check the number of messages
9081 * per net. For now, this is better than nothing and it
9082 * disabled by default...
9083 */
9084 if ((asoc->sctp_cmt_on_off > 0) &&
9085 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
9086 (max_send_per_dest > 0) &&
9087 (net->flight_size > max_send_per_dest)) {
9088 goto no_data_fill;
9089 }
9090 /*********************/
9091 /* Data transmission */
9092 /*********************/
9093 /*
9094 * if AUTH for DATA is required and no AUTH has been added
9095 * yet, account for this in the mtu now... if no data can be
9096 * bundled, this adjustment won't matter anyways since the
9097 * packet will be going out...
9098 */
9099 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
9100 stcb->asoc.peer_auth_chunks);
9101 if (data_auth_reqd && (auth == NULL)) {
9102 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9103 }
9104 /* now lets add any data within the MTU constraints */
9105 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
9106#ifdef INET
9107 case AF_INET:
9108 if (net->mtu > SCTP_MIN_V4_OVERHEAD)
9109 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9110 else
9111 omtu = 0;
9112 break;
9113#endif
9114#ifdef INET6
9115 case AF_INET6:
9116 if (net->mtu > SCTP_MIN_OVERHEAD)
9117 omtu = net->mtu - SCTP_MIN_OVERHEAD;
9118 else
9119 omtu = 0;
9120 break;
9121#endif
9122#if defined(__Userspace__)
9123 case AF_CONN:
9124 if (net->mtu > sizeof(struct sctphdr)) {
9125 omtu = net->mtu - sizeof(struct sctphdr);
9126 } else {
9127 omtu = 0;
9128 }
9129 break;
9130#endif
9131 default:
9132 /* TSNH */
9133 omtu = 0;
9134 break;
9135 }
9136 if ((((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
9137 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
9138 (skip_data_for_this_net == 0)) ||
9139 (cookie)) {
9140 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
9141 if (no_data_chunks) {
9142 /* let only control go out */
9143 *reason_code = 1;
9144 break;
9145 }
9146 if (net->flight_size >= net->cwnd) {
9147 /* skip this net, no room for data */
9148 *reason_code = 2;
9149 break;
9150 }
9151 if ((chk->whoTo != NULL) &&
9152 (chk->whoTo != net)) {
9153 /* Don't send the chunk on this net */
9154 continue;
9155 }
9156
9157 if (asoc->sctp_cmt_on_off == 0) {
9158 if ((asoc->alternate) &&
9159 (asoc->alternate != net) &&
9160 (chk->whoTo == NULL)) {
9161 continue;
9162 } else if ((net != asoc->primary_destination) &&
9163 (asoc->alternate == NULL) &&
9164 (chk->whoTo == NULL)) {
9165 continue;
9166 }
9167 }
9168 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
9169 /*-
9170 * strange, we have a chunk that is
9171 * to big for its destination and
9172 * yet no fragment ok flag.
9173 * Something went wrong when the
9174 * PMTU changed...we did not mark
9175 * this chunk for some reason?? I
9176 * will fix it here by letting IP
9177 * fragment it for now and printing
9178 * a warning. This really should not
9179 * happen ...
9180 */
9181 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
9182 chk->send_size, mtu);
9183 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
9184 }
9185 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
9186 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
9187 struct sctp_data_chunk *dchkh;
9188
9189 dchkh = mtod(chk->data, struct sctp_data_chunk *);
9190 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
9191 }
9192 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
9193 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
9194 /* ok we will add this one */
9195
9196 /*
9197 * Add an AUTH chunk, if chunk
9198 * requires it, save the offset into
9199 * the chain for AUTH
9200 */
9201 if (data_auth_reqd) {
9202 if (auth == NULL) {
9203 outchain = sctp_add_auth_chunk(outchain,
9204 &endoutchain,
9205 &auth,
9206 &auth_offset,
9207 stcb,
9208 SCTP_DATA);
9209 auth_keyid = chk->auth_keyid;
9210 override_ok = 0;
9211 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9212 } else if (override_ok) {
9213 /* use this data's keyid */
9214 auth_keyid = chk->auth_keyid;
9215 override_ok = 0;
9216 } else if (auth_keyid != chk->auth_keyid) {
9217 /* different keyid, so done bundling */
9218 break;
9219 }
9220 }
9221 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
9222 chk->send_size, chk->copy_by_ref);
9223 if (outchain == NULL) {
9224 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
9225 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9226 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9227 }
9228 *reason_code = 3;
9229 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9230 return (ENOMEM);
9231 }
9232 /* upate our MTU size */
9233 /* Do clear IP_DF ? */
9234 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9235 no_fragmentflg = 0;
9236 }
9237 /* unsigned subtraction of mtu */
9238 if (mtu > chk->send_size)
9239 mtu -= chk->send_size;
9240 else
9241 mtu = 0;
9242 /* unsigned subtraction of r_mtu */
9243 if (r_mtu > chk->send_size)
9244 r_mtu -= chk->send_size;
9245 else
9246 r_mtu = 0;
9247
9248 to_out += chk->send_size;
9249 if ((to_out > mx_mtu) && no_fragmentflg) {
9250#ifdef INVARIANTS
9251 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
9252#else
9253 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
9254 mx_mtu, to_out);
9255#endif
9256 }
9257 chk->window_probe = 0;
9258 data_list[bundle_at++] = chk;
9259 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9260 break;
9261 }
9262 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
9263 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
9264 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
9265 } else {
9266 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
9267 }
9268 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
9269 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
9270 /* Count number of user msg's that were fragmented
9271 * we do this by counting when we see a LAST fragment
9272 * only.
9273 */
9274 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
9275 }
9276 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
9277 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
9278 data_list[0]->window_probe = 1;
9279 net->window_probe = 1;
9280 }
9281 break;
9282 }
9283 } else {
9284 /*
9285 * Must be sent in order of the
9286 * TSN's (on a network)
9287 */
9288 break;
9289 }
9290 } /* for (chunk gather loop for this net) */
9291 } /* if asoc.state OPEN */
9292 no_data_fill:
9293 /* Is there something to send for this destination? */
9294 if (outchain) {
9295 /* We may need to start a control timer or two */
9296 if (asconf) {
9297 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
9298 stcb, net);
9299 /*
9300 * do NOT clear the asconf flag as it is used
9301 * to do appropriate source address selection.
9302 */
9303 }
9304 if (cookie) {
9305 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
9306 cookie = 0;
9307 }
9308 /* must start a send timer if data is being sent */
9309 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
9310 /*
9311 * no timer running on this destination
9312 * restart it.
9313 */
9314 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9315 }
9316 if (bundle_at || hbflag) {
9317 /* For data/asconf and hb set time */
9318 if (*now_filled == 0) {
9319 (void)SCTP_GETTIME_TIMEVAL(now);
9320 *now_filled = 1;
9321 }
9322 net->last_sent_time = *now;
9323 }
9324 /* Now send it, if there is anything to send :> */
9325 if ((error = sctp_lowlevel_chunk_output(inp,
9326 stcb,
9327 net,
9328 (struct sockaddr *)&net->ro._l_addr,
9329 outchain,
9330 auth_offset,
9331 auth,
9332 auth_keyid,
9333 no_fragmentflg,
9334 bundle_at,
9335 asconf,
9336 inp->sctp_lport, stcb->rport,
9337 htonl(stcb->asoc.peer_vtag),
9338 net->port, NULL,
9339#if defined(__FreeBSD__)
9340 0, 0,
9341#endif
9342 so_locked))) {
9343 /* error, we could not output */
9344 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9345 if (from_where == 0) {
9346 SCTP_STAT_INCR(sctps_lowlevelerrusr);
9347 }
9348 if (error == ENOBUFS) {
9349 asoc->ifp_had_enobuf = 1;
9350 SCTP_STAT_INCR(sctps_lowlevelerr);
9351 }
9352 if (error == EHOSTUNREACH) {
9353 /*
9354 * Destination went unreachable
9355 * during this send
9356 */
9357 sctp_move_chunks_from_net(stcb, net);
9358 }
9359 *reason_code = 6;
9360 /*-
9361 * I add this line to be paranoid. As far as
9362 * I can tell the continue, takes us back to
9363 * the top of the for, but just to make sure
9364 * I will reset these again here.
9365 */
9366 ctl_cnt = bundle_at = 0;
9367 continue; /* This takes us back to the for() for the nets. */
9368 } else {
9369 asoc->ifp_had_enobuf = 0;
9370 }
9371 endoutchain = NULL;
9372 auth = NULL;
9373 auth_offset = 0;
9374 if (!no_out_cnt) {
9375 *num_out += (ctl_cnt + bundle_at);
9376 }
9377 if (bundle_at) {
9378 /* setup for a RTO measurement */
9379 tsns_sent = data_list[0]->rec.data.tsn;
9380 /* fill time if not already filled */
9381 if (*now_filled == 0) {
9382 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9383 *now_filled = 1;
9384 *now = asoc->time_last_sent;
9385 } else {
9386 asoc->time_last_sent = *now;
9387 }
9388 if (net->rto_needed) {
9389 data_list[0]->do_rtt = 1;
9390 net->rto_needed = 0;
9391 }
9392 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
9393 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
9394 }
9395 if (one_chunk) {
9396 break;
9397 }
9398 }
9399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9400 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
9401 }
9402 }
9403 if (old_start_at == NULL) {
9404 old_start_at = start_at;
9405 start_at = TAILQ_FIRST(&asoc->nets);
9406 if (old_start_at)
9407 goto again_one_more_time;
9408 }
9409
9410 /*
9411 * At the end there should be no NON timed chunks hanging on this
9412 * queue.
9413 */
9414 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9415 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
9416 }
9417 if ((*num_out == 0) && (*reason_code == 0)) {
9418 *reason_code = 4;
9419 } else {
9420 *reason_code = 5;
9421 }
9422 sctp_clean_up_ctl(stcb, asoc, so_locked);
9423 return (0);
9424}
9425
9426void
9427sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
9428{
9429 /*-
9430 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
9431 * the control chunk queue.
9432 */
9433 struct sctp_chunkhdr *hdr;
9434 struct sctp_tmit_chunk *chk;
9435 struct mbuf *mat, *last_mbuf;
9436 uint32_t chunk_length;
9437 uint16_t padding_length;
9438
9439 SCTP_TCB_LOCK_ASSERT(stcb);
9440 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
9441 if (op_err == NULL) {
9442 return;
9443 }
9444 last_mbuf = NULL;
9445 chunk_length = 0;
9446 for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
9447 chunk_length += SCTP_BUF_LEN(mat);
9448 if (SCTP_BUF_NEXT(mat) == NULL) {
9449 last_mbuf = mat;
9450 }
9451 }
9452 if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
9453 sctp_m_freem(op_err);
9454 return;
9455 }
9456 padding_length = chunk_length % 4;
9457 if (padding_length != 0) {
9458 padding_length = 4 - padding_length;
9459 }
9460 if (padding_length != 0) {
9461 if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
9462 sctp_m_freem(op_err);
9463 return;
9464 }
9465 }
9466 sctp_alloc_a_chunk(stcb, chk);
9467 if (chk == NULL) {
9468 /* no memory */
9469 sctp_m_freem(op_err);
9470 return;
9471 }
9472 chk->copy_by_ref = 0;
9473 chk->send_size = (uint16_t)chunk_length;
9474 chk->sent = SCTP_DATAGRAM_UNSENT;
9475 chk->snd_count = 0;
9476 chk->asoc = &stcb->asoc;
9477 chk->data = op_err;
9478 chk->whoTo = NULL;
9479 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
9480 chk->rec.chunk_id.can_take_data = 0;
9481 hdr = mtod(op_err, struct sctp_chunkhdr *);
9482 hdr->chunk_type = SCTP_OPERATION_ERROR;
9483 hdr->chunk_flags = 0;
9484 hdr->chunk_length = htons(chk->send_size);
9485 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9486 chk->asoc->ctrl_queue_cnt++;
9487}
9488
int
sctp_send_cookie_echo(struct mbuf *m,
    int offset,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*-
	 * Pull the STATE-COOKIE parameter out of a received INIT-ACK (in
	 * mbuf chain 'm' starting at 'offset'), rewrite it as a
	 * COOKIE-ECHO chunk, and put it at the FRONT of the control chunk
	 * queue addressed to 'net'.
	 *
	 * Returns 0 on success, -3 if no STATE-COOKIE parameter is found,
	 * -2 if copying the cookie fails, -5 if chunk allocation fails.
	 */
	int at;
	struct mbuf *cookie;
	struct sctp_paramhdr parm, *phdr;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	uint16_t ptype, plen;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* First find the cookie in the param area */
	cookie = NULL;
	/* parameters start right after the fixed INIT chunk header */
	at = offset + sizeof(struct sctp_init_chunk);
	for (;;) {
		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
		if (phdr == NULL) {
			/* ran off the end of the parameters: no cookie */
			return (-3);
		}
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (ptype == SCTP_STATE_COOKIE) {
			int pad;

			/* found the cookie */
			/*
			 * Round plen up to a 4-byte boundary so the copy
			 * includes the parameter padding.  NOTE(review):
			 * send_size below therefore includes the pad bytes
			 * as well — presumably intentional, since the
			 * chunk on the wire must be 4-byte aligned.
			 */
			if ((pad = (plen % 4))) {
				plen += 4 - pad;
			}
			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
			if (cookie == NULL) {
				/* No memory */
				return (-2);
			}
#ifdef SCTP_MBUF_LOGGING
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
				sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
			}
#endif
			break;
		}
		/* advance past this parameter (length rounded up to 4) */
		at += SCTP_SIZE32(plen);
	}
	/* ok, we got the cookie lets change it into a cookie echo chunk */
	/* first the change from param to cookie */
	/*
	 * The parameter header and the chunk header have the same layout
	 * (16-bit type/length), so rewriting in place is sufficient.
	 */
	hdr = mtod(cookie, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ECHO;
	hdr->chunk_flags = 0;
	/* get the chunk stuff now and place it in the FRONT of the queue */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie);
		return (-5);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
	chk->send_size = plen;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie;
	chk->whoTo = net;
	/* take a reference on the destination for the queued chunk */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* COOKIE-ECHO must go out before any other queued control chunk */
	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}
9565
9566void
9567sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9568 struct mbuf *m,
9569 int offset,
9570 int chk_length,
9571 struct sctp_nets *net)
9572{
9573 /*
9574 * take a HB request and make it into a HB ack and send it.
9575 */
9576 struct mbuf *outchain;
9577 struct sctp_chunkhdr *chdr;
9578 struct sctp_tmit_chunk *chk;
9579
9580
9581 if (net == NULL)
9582 /* must have a net pointer */
9583 return;
9584
9585 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9586 if (outchain == NULL) {
9587 /* gak out of memory */
9588 return;
9589 }
9590#ifdef SCTP_MBUF_LOGGING
9591 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9592 sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9593 }
9594#endif
9595 chdr = mtod(outchain, struct sctp_chunkhdr *);
9596 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9597 chdr->chunk_flags = 0;
9598 if (chk_length % 4) {
9599 /* need pad */
9600 uint32_t cpthis = 0;
9601 int padlen;
9602
9603 padlen = 4 - (chk_length % 4);
9604 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
9605 }
9606 sctp_alloc_a_chunk(stcb, chk);
9607 if (chk == NULL) {
9608 /* no memory */
9609 sctp_m_freem(outchain);
9610 return;
9611 }
9612 chk->copy_by_ref = 0;
9613 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9614 chk->rec.chunk_id.can_take_data = 1;
9615 chk->flags = 0;
9616 chk->send_size = chk_length;
9617 chk->sent = SCTP_DATAGRAM_UNSENT;
9618 chk->snd_count = 0;
9619 chk->asoc = &stcb->asoc;
9620 chk->data = outchain;
9621 chk->whoTo = net;
9622 atomic_add_int(&chk->whoTo->ref_count, 1);
9623 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9624 chk->asoc->ctrl_queue_cnt++;
9625}
9626
9627void
9628sctp_send_cookie_ack(struct sctp_tcb *stcb)
9629{
9630 /* formulate and queue a cookie-ack back to sender */
9631 struct mbuf *cookie_ack;
9632 struct sctp_chunkhdr *hdr;
9633 struct sctp_tmit_chunk *chk;
9634
9635 SCTP_TCB_LOCK_ASSERT(stcb);
9636
9637 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9638 if (cookie_ack == NULL) {
9639 /* no mbuf's */
9640 return;
9641 }
9642 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9643 sctp_alloc_a_chunk(stcb, chk);
9644 if (chk == NULL) {
9645 /* no memory */
9646 sctp_m_freem(cookie_ack);
9647 return;
9648 }
9649 chk->copy_by_ref = 0;
9650 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9651 chk->rec.chunk_id.can_take_data = 1;
9652 chk->flags = 0;
9653 chk->send_size = sizeof(struct sctp_chunkhdr);
9654 chk->sent = SCTP_DATAGRAM_UNSENT;
9655 chk->snd_count = 0;
9656 chk->asoc = &stcb->asoc;
9657 chk->data = cookie_ack;
9658 if (chk->asoc->last_control_chunk_from != NULL) {
9659 chk->whoTo = chk->asoc->last_control_chunk_from;
9660 atomic_add_int(&chk->whoTo->ref_count, 1);
9661 } else {
9662 chk->whoTo = NULL;
9663 }
9664 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9665 hdr->chunk_type = SCTP_COOKIE_ACK;
9666 hdr->chunk_flags = 0;
9667 hdr->chunk_length = htons(chk->send_size);
9668 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9669 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9670 chk->asoc->ctrl_queue_cnt++;
9671 return;
9672}
9673
9674
9675void
9676sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9677{
9678 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9679 struct mbuf *m_shutdown_ack;
9680 struct sctp_shutdown_ack_chunk *ack_cp;
9681 struct sctp_tmit_chunk *chk;
9682
9683 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9684 if (m_shutdown_ack == NULL) {
9685 /* no mbuf's */
9686 return;
9687 }
9688 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9689 sctp_alloc_a_chunk(stcb, chk);
9690 if (chk == NULL) {
9691 /* no memory */
9692 sctp_m_freem(m_shutdown_ack);
9693 return;
9694 }
9695 chk->copy_by_ref = 0;
9696 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9697 chk->rec.chunk_id.can_take_data = 1;
9698 chk->flags = 0;
9699 chk->send_size = sizeof(struct sctp_chunkhdr);
9700 chk->sent = SCTP_DATAGRAM_UNSENT;
9701 chk->snd_count = 0;
9702 chk->flags = 0;
9703 chk->asoc = &stcb->asoc;
9704 chk->data = m_shutdown_ack;
9705 chk->whoTo = net;
9706 if (chk->whoTo) {
9707 atomic_add_int(&chk->whoTo->ref_count, 1);
9708 }
9709 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9710 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9711 ack_cp->ch.chunk_flags = 0;
9712 ack_cp->ch.chunk_length = htons(chk->send_size);
9713 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9714 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9715 chk->asoc->ctrl_queue_cnt++;
9716 return;
9717}
9718
9719void
9720sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9721{
9722 /* formulate and queue a SHUTDOWN to the sender */
9723 struct mbuf *m_shutdown;
9724 struct sctp_shutdown_chunk *shutdown_cp;
9725 struct sctp_tmit_chunk *chk;
9726
9727 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9728 if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9729 /* We already have a SHUTDOWN queued. Reuse it. */
9730 if (chk->whoTo) {
9731 sctp_free_remote_addr(chk->whoTo);
9732 chk->whoTo = NULL;
9733 }
9734 break;
9735 }
9736 }
9737 if (chk == NULL) {
9738 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9739 if (m_shutdown == NULL) {
9740 /* no mbuf's */
9741 return;
9742 }
9743 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9744 sctp_alloc_a_chunk(stcb, chk);
9745 if (chk == NULL) {
9746 /* no memory */
9747 sctp_m_freem(m_shutdown);
9748 return;
9749 }
9750 chk->copy_by_ref = 0;
9751 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9752 chk->rec.chunk_id.can_take_data = 1;
9753 chk->flags = 0;
9754 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9755 chk->sent = SCTP_DATAGRAM_UNSENT;
9756 chk->snd_count = 0;
9757 chk->flags = 0;
9758 chk->asoc = &stcb->asoc;
9759 chk->data = m_shutdown;
9760 chk->whoTo = net;
9761 if (chk->whoTo) {
9762 atomic_add_int(&chk->whoTo->ref_count, 1);
9763 }
9764 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9765 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9766 shutdown_cp->ch.chunk_flags = 0;
9767 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9768 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9769 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9770 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9771 chk->asoc->ctrl_queue_cnt++;
9772 } else {
9773 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9774 chk->whoTo = net;
9775 if (chk->whoTo) {
9776 atomic_add_int(&chk->whoTo->ref_count, 1);
9777 }
9778 shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9779 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9780 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9781 }
9782 return;
9783}
9784
9785void
9786sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9787{
9788 /*
9789 * formulate and queue an ASCONF to the peer.
9790 * ASCONF parameters should be queued on the assoc queue.
9791 */
9792 struct sctp_tmit_chunk *chk;
9793 struct mbuf *m_asconf;
9794 int len;
9795
9796 SCTP_TCB_LOCK_ASSERT(stcb);
9797
9798 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9799 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9800 /* can't send a new one if there is one in flight already */
9801 return;
9802 }
9803
9804 /* compose an ASCONF chunk, maximum length is PMTU */
9805 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9806 if (m_asconf == NULL) {
9807 return;
9808 }
9809
9810 sctp_alloc_a_chunk(stcb, chk);
9811 if (chk == NULL) {
9812 /* no memory */
9813 sctp_m_freem(m_asconf);
9814 return;
9815 }
9816
9817 chk->copy_by_ref = 0;
9818 chk->rec.chunk_id.id = SCTP_ASCONF;
9819 chk->rec.chunk_id.can_take_data = 0;
9820 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9821 chk->data = m_asconf;
9822 chk->send_size = len;
9823 chk->sent = SCTP_DATAGRAM_UNSENT;
9824 chk->snd_count = 0;
9825 chk->asoc = &stcb->asoc;
9826 chk->whoTo = net;
9827 if (chk->whoTo) {
9828 atomic_add_int(&chk->whoTo->ref_count, 1);
9829 }
9830 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9831 chk->asoc->ctrl_queue_cnt++;
9832 return;
9833}
9834
9835void
9836sctp_send_asconf_ack(struct sctp_tcb *stcb)
9837{
9838 /*
9839 * formulate and queue a asconf-ack back to sender.
9840 * the asconf-ack must be stored in the tcb.
9841 */
9842 struct sctp_tmit_chunk *chk;
9843 struct sctp_asconf_ack *ack, *latest_ack;
9844 struct mbuf *m_ack;
9845 struct sctp_nets *net = NULL;
9846
9847 SCTP_TCB_LOCK_ASSERT(stcb);
9848 /* Get the latest ASCONF-ACK */
9849 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9850 if (latest_ack == NULL) {
9851 return;
9852 }
9853 if (latest_ack->last_sent_to != NULL &&
9854 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9855 /* we're doing a retransmission */
9856 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9857 if (net == NULL) {
9858 /* no alternate */
9859 if (stcb->asoc.last_control_chunk_from == NULL) {
9860 if (stcb->asoc.alternate) {
9861 net = stcb->asoc.alternate;
9862 } else {
9863 net = stcb->asoc.primary_destination;
9864 }
9865 } else {
9866 net = stcb->asoc.last_control_chunk_from;
9867 }
9868 }
9869 } else {
9870 /* normal case */
9871 if (stcb->asoc.last_control_chunk_from == NULL) {
9872 if (stcb->asoc.alternate) {
9873 net = stcb->asoc.alternate;
9874 } else {
9875 net = stcb->asoc.primary_destination;
9876 }
9877 } else {
9878 net = stcb->asoc.last_control_chunk_from;
9879 }
9880 }
9881 latest_ack->last_sent_to = net;
9882
9883 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9884 if (ack->data == NULL) {
9885 continue;
9886 }
9887
9888 /* copy the asconf_ack */
9889 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9890 if (m_ack == NULL) {
9891 /* couldn't copy it */
9892 return;
9893 }
9894#ifdef SCTP_MBUF_LOGGING
9895 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9896 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9897 }
9898#endif
9899
9900 sctp_alloc_a_chunk(stcb, chk);
9901 if (chk == NULL) {
9902 /* no memory */
9903 if (m_ack)
9904 sctp_m_freem(m_ack);
9905 return;
9906 }
9907 chk->copy_by_ref = 0;
9908 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9909 chk->rec.chunk_id.can_take_data = 1;
9910 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9911 chk->whoTo = net;
9912 if (chk->whoTo) {
9913 atomic_add_int(&chk->whoTo->ref_count, 1);
9914 }
9915 chk->data = m_ack;
9916 chk->send_size = ack->len;
9917 chk->sent = SCTP_DATAGRAM_UNSENT;
9918 chk->snd_count = 0;
9919 chk->asoc = &stcb->asoc;
9920
9921 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9922 chk->asoc->ctrl_queue_cnt++;
9923 }
9924 return;
9925}
9926
9927
9928static int
9929sctp_chunk_retransmission(struct sctp_inpcb *inp,
9930 struct sctp_tcb *stcb,
9931 struct sctp_association *asoc,
9932 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9933#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9934 SCTP_UNUSED
9935#endif
9936 )
9937{
9938 /*-
9939 * send out one MTU of retransmission. If fast_retransmit is
9940 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9941 * rwnd. For a Cookie or Asconf in the control chunk queue we
9942 * retransmit them by themselves.
9943 *
9944 * For data chunks we will pick out the lowest TSN's in the sent_queue
9945 * marked for resend and bundle them all together (up to a MTU of
9946 * destination). The address to send to should have been
9947 * selected/changed where the retransmission was marked (i.e. in FR
9948 * or t3-timeout routines).
9949 */
9950 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9951 struct sctp_tmit_chunk *chk, *fwd;
9952 struct mbuf *m, *endofchain;
9953 struct sctp_nets *net = NULL;
9954 uint32_t tsns_sent = 0;
9955 int no_fragmentflg, bundle_at, cnt_thru;
9956 unsigned int mtu;
9957 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9958 struct sctp_auth_chunk *auth = NULL;
9959 uint32_t auth_offset = 0;
9960 uint16_t auth_keyid;
9961 int override_ok = 1;
9962 int data_auth_reqd = 0;
9963 uint32_t dmtu = 0;
9964
9965#if defined(__APPLE__)
9966 if (so_locked) {
9967 sctp_lock_assert(SCTP_INP_SO(inp));
9968 } else {
9969 sctp_unlock_assert(SCTP_INP_SO(inp));
9970 }
9971#endif
9972 SCTP_TCB_LOCK_ASSERT(stcb);
9973 tmr_started = ctl_cnt = bundle_at = error = 0;
9974 no_fragmentflg = 1;
9975 fwd_tsn = 0;
9976 *cnt_out = 0;
9977 fwd = NULL;
9978 endofchain = m = NULL;
9979 auth_keyid = stcb->asoc.authinfo.active_keyid;
9980#ifdef SCTP_AUDITING_ENABLED
9981 sctp_audit_log(0xC3, 1);
9982#endif
9983 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9984 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9985 SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
9986 asoc->sent_queue_retran_cnt);
9987 asoc->sent_queue_cnt = 0;
9988 asoc->sent_queue_cnt_removeable = 0;
9989 /* send back 0/0 so we enter normal transmission */
9990 *cnt_out = 0;
9991 return (0);
9992 }
9993 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9994 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9995 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9996 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9997 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9998 continue;
9999 }
10000 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
10001 if (chk != asoc->str_reset) {
10002 /*
10003 * not eligible for retran if its
10004 * not ours
10005 */
10006 continue;
10007 }
10008 }
10009 ctl_cnt++;
10010 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10011 fwd_tsn = 1;
10012 }
10013 /*
10014 * Add an AUTH chunk, if chunk requires it save the
10015 * offset into the chain for AUTH
10016 */
10017 if ((auth == NULL) &&
10018 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
10019 stcb->asoc.peer_auth_chunks))) {
10020 m = sctp_add_auth_chunk(m, &endofchain,
10021 &auth, &auth_offset,
10022 stcb,
10023 chk->rec.chunk_id.id);
10024 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10025 }
10026 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10027 break;
10028 }
10029 }
10030 one_chunk = 0;
10031 cnt_thru = 0;
10032 /* do we have control chunks to retransmit? */
10033 if (m != NULL) {
10034 /* Start a timer no matter if we succeed or fail */
10035 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
10036 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
10037 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
10038 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
10039 chk->snd_count++; /* update our count */
10040 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
10041 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
10042 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
10043 no_fragmentflg, 0, 0,
10044 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10045 chk->whoTo->port, NULL,
10046#if defined(__FreeBSD__)
10047 0, 0,
10048#endif
10049 so_locked))) {
10050 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10051 if (error == ENOBUFS) {
10052 asoc->ifp_had_enobuf = 1;
10053 SCTP_STAT_INCR(sctps_lowlevelerr);
10054 }
10055 return (error);
10056 } else {
10057 asoc->ifp_had_enobuf = 0;
10058 }
10059 endofchain = NULL;
10060 auth = NULL;
10061 auth_offset = 0;
10062 /*
10063 * We don't want to mark the net->sent time here since this
10064 * we use this for HB and retrans cannot measure RTT
10065 */
10066 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
10067 *cnt_out += 1;
10068 chk->sent = SCTP_DATAGRAM_SENT;
10069 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
10070 if (fwd_tsn == 0) {
10071 return (0);
10072 } else {
10073 /* Clean up the fwd-tsn list */
10074 sctp_clean_up_ctl(stcb, asoc, so_locked);
10075 return (0);
10076 }
10077 }
10078 /*
10079 * Ok, it is just data retransmission we need to do or that and a
10080 * fwd-tsn with it all.
10081 */
10082 if (TAILQ_EMPTY(&asoc->sent_queue)) {
10083 return (SCTP_RETRAN_DONE);
10084 }
10085 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
10086 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
10087 /* not yet open, resend the cookie and that is it */
10088 return (1);
10089 }
10090#ifdef SCTP_AUDITING_ENABLED
10091 sctp_auditing(20, inp, stcb, NULL);
10092#endif
10093 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
10094 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
10095 if (chk->sent != SCTP_DATAGRAM_RESEND) {
10096 /* No, not sent to this net or not ready for rtx */
10097 continue;
10098 }
10099 if (chk->data == NULL) {
10100 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
10101 chk->rec.data.tsn, chk->snd_count, chk->sent);
10102 continue;
10103 }
10104 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
10105 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
10106 struct mbuf *op_err;
10107 char msg[SCTP_DIAG_INFO_LEN];
10108
10109 snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
10110 chk->rec.data.tsn, chk->snd_count);
10111 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
10112 msg);
10113 atomic_add_int(&stcb->asoc.refcnt, 1);
10114 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
10115 so_locked);
10116 SCTP_TCB_LOCK(stcb);
10117 atomic_subtract_int(&stcb->asoc.refcnt, 1);
10118 return (SCTP_RETRAN_EXIT);
10119 }
10120 /* pick up the net */
10121 net = chk->whoTo;
10122 switch (net->ro._l_addr.sa.sa_family) {
10123#ifdef INET
10124 case AF_INET:
10125 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
10126 break;
10127#endif
10128#ifdef INET6
10129 case AF_INET6:
10130 mtu = net->mtu - SCTP_MIN_OVERHEAD;
10131 break;
10132#endif
10133#if defined(__Userspace__)
10134 case AF_CONN:
10135 mtu = net->mtu - sizeof(struct sctphdr);
10136 break;
10137#endif
10138 default:
10139 /* TSNH */
10140 mtu = net->mtu;
10141 break;
10142 }
10143
10144 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
10145 /* No room in peers rwnd */
10146 uint32_t tsn;
10147
10148 tsn = asoc->last_acked_seq + 1;
10149 if (tsn == chk->rec.data.tsn) {
10150 /*
10151 * we make a special exception for this
10152 * case. The peer has no rwnd but is missing
10153 * the lowest chunk.. which is probably what
10154 * is holding up the rwnd.
10155 */
10156 goto one_chunk_around;
10157 }
10158 return (1);
10159 }
10160 one_chunk_around:
10161 if (asoc->peers_rwnd < mtu) {
10162 one_chunk = 1;
10163 if ((asoc->peers_rwnd == 0) &&
10164 (asoc->total_flight == 0)) {
10165 chk->window_probe = 1;
10166 chk->whoTo->window_probe = 1;
10167 }
10168 }
10169#ifdef SCTP_AUDITING_ENABLED
10170 sctp_audit_log(0xC3, 2);
10171#endif
10172 bundle_at = 0;
10173 m = NULL;
10174 net->fast_retran_ip = 0;
10175 if (chk->rec.data.doing_fast_retransmit == 0) {
10176 /*
10177 * if no FR in progress skip destination that have
10178 * flight_size > cwnd.
10179 */
10180 if (net->flight_size >= net->cwnd) {
10181 continue;
10182 }
10183 } else {
10184 /*
10185 * Mark the destination net to have FR recovery
10186 * limits put on it.
10187 */
10188 *fr_done = 1;
10189 net->fast_retran_ip = 1;
10190 }
10191
10192 /*
10193 * if no AUTH is yet included and this chunk requires it,
10194 * make sure to account for it. We don't apply the size
10195 * until the AUTH chunk is actually added below in case
10196 * there is no room for this chunk.
10197 */
10198 if (data_auth_reqd && (auth == NULL)) {
10199 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10200 } else
10201 dmtu = 0;
10202
10203 if ((chk->send_size <= (mtu - dmtu)) ||
10204 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
10205 /* ok we will add this one */
10206 if (data_auth_reqd) {
10207 if (auth == NULL) {
10208 m = sctp_add_auth_chunk(m,
10209 &endofchain,
10210 &auth,
10211 &auth_offset,
10212 stcb,
10213 SCTP_DATA);
10214 auth_keyid = chk->auth_keyid;
10215 override_ok = 0;
10216 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10217 } else if (override_ok) {
10218 auth_keyid = chk->auth_keyid;
10219 override_ok = 0;
10220 } else if (chk->auth_keyid != auth_keyid) {
10221 /* different keyid, so done bundling */
10222 break;
10223 }
10224 }
10225 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
10226 if (m == NULL) {
10227 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10228 return (ENOMEM);
10229 }
10230 /* Do clear IP_DF ? */
10231 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10232 no_fragmentflg = 0;
10233 }
10234 /* upate our MTU size */
10235 if (mtu > (chk->send_size + dmtu))
10236 mtu -= (chk->send_size + dmtu);
10237 else
10238 mtu = 0;
10239 data_list[bundle_at++] = chk;
10240 if (one_chunk && (asoc->total_flight <= 0)) {
10241 SCTP_STAT_INCR(sctps_windowprobed);
10242 }
10243 }
10244 if (one_chunk == 0) {
10245 /*
10246 * now are there anymore forward from chk to pick
10247 * up?
10248 */
10249 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
10250 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
10251 /* Nope, not for retran */
10252 continue;
10253 }
10254 if (fwd->whoTo != net) {
10255 /* Nope, not the net in question */
10256 continue;
10257 }
10258 if (data_auth_reqd && (auth == NULL)) {
10259 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
10260 } else
10261 dmtu = 0;
10262 if (fwd->send_size <= (mtu - dmtu)) {
10263 if (data_auth_reqd) {
10264 if (auth == NULL) {
10265 m = sctp_add_auth_chunk(m,
10266 &endofchain,
10267 &auth,
10268 &auth_offset,
10269 stcb,
10270 SCTP_DATA);
10271 auth_keyid = fwd->auth_keyid;
10272 override_ok = 0;
10273 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10274 } else if (override_ok) {
10275 auth_keyid = fwd->auth_keyid;
10276 override_ok = 0;
10277 } else if (fwd->auth_keyid != auth_keyid) {
10278 /* different keyid, so done bundling */
10279 break;
10280 }
10281 }
10282 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
10283 if (m == NULL) {
10284 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
10285 return (ENOMEM);
10286 }
10287 /* Do clear IP_DF ? */
10288 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
10289 no_fragmentflg = 0;
10290 }
10291 /* upate our MTU size */
10292 if (mtu > (fwd->send_size + dmtu))
10293 mtu -= (fwd->send_size + dmtu);
10294 else
10295 mtu = 0;
10296 data_list[bundle_at++] = fwd;
10297 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
10298 break;
10299 }
10300 } else {
10301 /* can't fit so we are done */
10302 break;
10303 }
10304 }
10305 }
10306 /* Is there something to send for this destination? */
10307 if (m) {
10308 /*
10309 * No matter if we fail/or succeed we should start a
10310 * timer. A failure is like a lost IP packet :-)
10311 */
10312 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10313 /*
10314 * no timer running on this destination
10315 * restart it.
10316 */
10317 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10318 tmr_started = 1;
10319 }
10320 /* Now lets send it, if there is anything to send :> */
10321 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
10322 (struct sockaddr *)&net->ro._l_addr, m,
10323 auth_offset, auth, auth_keyid,
10324 no_fragmentflg, 0, 0,
10325 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10326 net->port, NULL,
10327#if defined(__FreeBSD__)
10328 0, 0,
10329#endif
10330 so_locked))) {
10331 /* error, we could not output */
10332 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10333 if (error == ENOBUFS) {
10334 asoc->ifp_had_enobuf = 1;
10335 SCTP_STAT_INCR(sctps_lowlevelerr);
10336 }
10337 return (error);
10338 } else {
10339 asoc->ifp_had_enobuf = 0;
10340 }
10341 endofchain = NULL;
10342 auth = NULL;
10343 auth_offset = 0;
10344 /* For HB's */
10345 /*
10346 * We don't want to mark the net->sent time here
10347 * since this we use this for HB and retrans cannot
10348 * measure RTT
10349 */
10350 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
10351
10352 /* For auto-close */
10353 cnt_thru++;
10354 if (*now_filled == 0) {
10355 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
10356 *now = asoc->time_last_sent;
10357 *now_filled = 1;
10358 } else {
10359 asoc->time_last_sent = *now;
10360 }
10361 *cnt_out += bundle_at;
10362#ifdef SCTP_AUDITING_ENABLED
10363 sctp_audit_log(0xC4, bundle_at);
10364#endif
10365 if (bundle_at) {
10366 tsns_sent = data_list[0]->rec.data.tsn;
10367 }
10368 for (i = 0; i < bundle_at; i++) {
10369 SCTP_STAT_INCR(sctps_sendretransdata);
10370 data_list[i]->sent = SCTP_DATAGRAM_SENT;
10371 /*
10372 * When we have a revoked data, and we
10373 * retransmit it, then we clear the revoked
10374 * flag since this flag dictates if we
10375 * subtracted from the fs
10376 */
10377 if (data_list[i]->rec.data.chunk_was_revoked) {
10378 /* Deflate the cwnd */
10379 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
10380 data_list[i]->rec.data.chunk_was_revoked = 0;
10381 }
10382 data_list[i]->snd_count++;
10383 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
10384 /* record the time */
10385 data_list[i]->sent_rcv_time = asoc->time_last_sent;
10386 if (data_list[i]->book_size_scale) {
10387 /*
10388 * need to double the book size on
10389 * this one
10390 */
10391 data_list[i]->book_size_scale = 0;
10392 /* Since we double the booksize, we must
10393 * also double the output queue size, since this
10394 * get shrunk when we free by this amount.
10395 */
10396 atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
10397 data_list[i]->book_size *= 2;
10398
10399
10400 } else {
10401 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
10402 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
10403 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
10404 }
10405 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
10406 (uint32_t) (data_list[i]->send_size +
10407 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
10408 }
10409 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
10410 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
10411 data_list[i]->whoTo->flight_size,
10412 data_list[i]->book_size,
10413 (uint32_t)(uintptr_t)data_list[i]->whoTo,
10414 data_list[i]->rec.data.tsn);
10415 }
10416 sctp_flight_size_increase(data_list[i]);
10417 sctp_total_flight_increase(stcb, data_list[i]);
10418 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
10419 /* SWS sender side engages */
10420 asoc->peers_rwnd = 0;
10421 }
10422 if ((i == 0) &&
10423 (data_list[i]->rec.data.doing_fast_retransmit)) {
10424 SCTP_STAT_INCR(sctps_sendfastretrans);
10425 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
10426 (tmr_started == 0)) {
10427 /*-
10428 * ok we just fast-retrans'd
10429 * the lowest TSN, i.e the
10430 * first on the list. In
10431 * this case we want to give
10432 * some more time to get a
10433 * SACK back without a
10434 * t3-expiring.
10435 */
10436 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
10437 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
10438 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
10439 }
10440 }
10441 }
10442 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10443 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
10444 }
10445#ifdef SCTP_AUDITING_ENABLED
10446 sctp_auditing(21, inp, stcb, NULL);
10447#endif
10448 } else {
10449 /* None will fit */
10450 return (1);
10451 }
10452 if (asoc->sent_queue_retran_cnt <= 0) {
10453 /* all done we have no more to retran */
10454 asoc->sent_queue_retran_cnt = 0;
10455 break;
10456 }
10457 if (one_chunk) {
10458 /* No more room in rwnd */
10459 return (1);
10460 }
10461 /* stop the for loop here. we sent out a packet */
10462 break;
10463 }
10464 return (0);
10465}
10466
10467static void
10468sctp_timer_validation(struct sctp_inpcb *inp,
10469 struct sctp_tcb *stcb,
10470 struct sctp_association *asoc)
10471{
10472 struct sctp_nets *net;
10473
10474 /* Validate that a timer is running somewhere */
10475 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10476 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
10477 /* Here is a timer */
10478 return;
10479 }
10480 }
10481 SCTP_TCB_LOCK_ASSERT(stcb);
10482 /* Gak, we did not have a timer somewhere */
10483 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
10484 if (asoc->alternate) {
10485 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10486 } else {
10487 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
10488 }
10489 return;
10490}
10491
void
sctp_chunk_output (struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int from_where,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    )
{
	/*-
	 * Generic chunk service-queue routine. In order:
	 * - If retransmissions are pending, service those first (only
	 *   ONE packet per pass through the retran code).
	 * - Service the stream queues, moving complete messages (i.e.
	 *   FIRST/MIDDLE/LAST fragments in one pass) to the out queue
	 *   and assigning TSNs.
	 * - If cwnd/rwnd allow output, formulate and send the low-level
	 *   chunks, bundling queued control chunks as well.
	 *
	 * 'from_where' (an SCTP_OUTPUT_FROM_* value) identifies the
	 * caller and restricts what may be sent (e.g. no retransmission
	 * data from a HB timer, only one burst out of a T3 timeout);
	 * 'so_locked' tells lower layers whether the socket lock is
	 * already held by the caller.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net;
	int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
	unsigned int burst_cnt = 0;
	struct timeval now;
	int now_filled = 0;
	int nagle_on;
	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
	int un_sent = 0;
	int fr_done;
	unsigned int tot_frs = 0;

#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	asoc = &stcb->asoc;
do_it_again:
	/* The Nagle algorithm is only applied when handling a send call. */
	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
			nagle_on = 0;
		} else {
			nagle_on = 1;
		}
	} else {
		nagle_on = 0;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);

	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	if ((un_sent <= 0) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (asoc->trigger_reset == 0)) {
		/* Nothing to do unless there is something left to be sent. */
		return;
	}
	/*
	 * If we have something to send (data or control) AND a delayed-ack
	 * (SACK) timer is running, piggy-back the SACK now and cancel the
	 * timer rather than letting it fire separately.
	 */
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		sctp_send_sack(stcb, so_locked);
		(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
	}
	while (asoc->sent_queue_retran_cnt) {
		/*-
		 * Ok, it is retransmission time only, we send out only ONE
		 * packet with a single call off to the retran code.
		 */
		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
			/*-
			 * Special hook for handling cookies discarded
			 * by peer that carried data. Send cookie-ack only
			 * and then the next call will get the retran's.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			                            from_where,
			                            &now, &now_filled, frag_point, so_locked);
			return;
		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
			/* if its not from a HB then do it */
			fr_done = 0;
			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
			if (fr_done) {
				/* Count fast retransmits toward fr_max_burst below. */
				tot_frs++;
			}
		} else {
			/*
			 * its from any other place, we don't allow retran
			 * output (only control)
			 */
			ret = 1;
		}
		if (ret > 0) {
			/* Can't send anymore */
			/*-
			 * now lets push out control by calling med-level
			 * output once. this assures that we WILL send HB's
			 * if queued too.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			                            from_where,
			                            &now, &now_filled, frag_point, so_locked);
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(8, inp, stcb, NULL);
#endif
			sctp_timer_validation(inp, stcb, asoc);
			return;
		}
		if (ret < 0) {
			/*-
			 * The count was off.. retran is not happening so do
			 * the normal retransmission.
			 */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(9, inp, stcb, NULL);
#endif
			if (ret == SCTP_RETRAN_EXIT) {
				return;
			}
			break;
		}
		if (from_where == SCTP_OUTPUT_FROM_T3) {
			/* Only one transmission allowed out of a timeout */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(10, inp, stcb, NULL);
#endif
			/* Push out any control */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
			                            &now, &now_filled, frag_point, so_locked);
			return;
		}
		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
			/* Hit FR burst limit */
			return;
		}
		if ((num_out == 0) && (ret == 0)) {
			/* No more retrans to send */
			break;
		}
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(12, inp, stcb, NULL);
#endif
	/* Check for bad destinations, if they exist move chunks around. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
			/*-
			 * if possible move things off of this address we
			 * still may send below due to the dormant state but
			 * we try to find an alternate address to send to
			 * and if we have one we move all queued data on the
			 * out wheel to this alternate address.
			 */
			if (net->ref_count > 1)
				sctp_move_chunks_from_net(stcb, net);
		} else {
			/*-
			 * if ((asoc->sat_network) || (net->addr_is_local))
			 * { burst_limit = asoc->max_burst *
			 * SCTP_SAT_NETWORK_BURST_INCR; }
			 */
			if (asoc->max_burst > 0) {
				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
						/* JRS - Use the congestion control given in the congestion control module */
						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
						}
						SCTP_STAT_INCR(sctps_maxburstqueued);
					}
					net->fast_retran_ip = 0;
				} else {
					if (net->flight_size == 0) {
						/* Should be decaying the cwnd here */
						;
					}
				}
			}
		}

	}
	burst_cnt = 0;
	/* Main send loop: keep calling mid-level output until nothing moves
	 * or the (non-cwnd-based) burst limit is reached.
	 */
	do {
		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
		                              &reason_code, 0, from_where,
		                              &now, &now_filled, frag_point, so_locked);
		if (error) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
			}
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
			}
			break;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);

		tot_out += num_out;
		burst_cnt++;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
			if (num_out == 0) {
				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
			}
		}
		if (nagle_on) {
			/*
			 * When the Nagle algorithm is used, look at how much
			 * is unsent, then if its smaller than an MTU and we
			 * have data in flight we stop, except if we are
			 * handling a fragmented user message.
			 */
			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
			           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
			    (stcb->asoc.total_flight > 0)) {
/* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
				break;
			}
		}
		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
		    TAILQ_EMPTY(&asoc->send_queue) &&
		    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
			/* Nothing left to send */
			break;
		}
		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
			/* Nothing left to send */
			break;
		}
	} while (num_out &&
	         ((asoc->max_burst == 0) ||
	          SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
	          (burst_cnt < asoc->max_burst)));

	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
			SCTP_STAT_INCR(sctps_maxburstqueued);
			asoc->burst_limit_applied = 1;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
			}
		} else {
			asoc->burst_limit_applied = 0;
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
	        tot_out);

	/*-
	 * Now we need to clean up the control chunk chain if a ECNE is on
	 * it. It must be marked as UNSENT again so next call will continue
	 * to send it until such time that we get a CWR, to remove it.
	 */
	if (stcb->asoc.ecn_echo_cnt_onq)
		sctp_fix_ecn_echo(asoc);

	/* A pending stream reset that could now go out restarts the whole
	 * service pass (re-evaluates Nagle, queues, and burst state).
	 */
	if (stcb->asoc.trigger_reset) {
		if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
			goto do_it_again;
		}
	}
	return;
}
10772
10773
10774int
10775sctp_output(
10776 struct sctp_inpcb *inp,
10777#if defined(__Panda__)
10778 pakhandle_type m,
10779#else
10780 struct mbuf *m,
10781#endif
10782 struct sockaddr *addr,
10783#if defined(__Panda__)
10784 pakhandle_type control,
10785#else
10786 struct mbuf *control,
10787#endif
10788#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
10789 struct thread *p,
10790#elif defined(__Windows__)
10791 PKTHREAD p,
10792#else
10793#if defined(__APPLE__)
10794 struct proc *p SCTP_UNUSED,
10795#else
10796 struct proc *p,
10797#endif
10798#endif
10799 int flags)
10800{
10801 if (inp == NULL) {
10802 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10803 return (EINVAL);
10804 }
10805
10806 if (inp->sctp_socket == NULL) {
10807 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10808 return (EINVAL);
10809 }
10810 return (sctp_sosend(inp->sctp_socket,
10811 addr,
10812 (struct uio *)NULL,
10813 m,
10814 control,
10815#if defined(__APPLE__) || defined(__Panda__)
10816 flags
10817#else
10818 flags, p
10819#endif
10820 ));
10821}
10822
/*
 * Queue (or refresh) a FORWARD-TSN / I-FORWARD-TSN chunk on the control
 * send queue, advancing the peer's cumulative ack point past chunks we
 * have abandoned (PR-SCTP). If a forward-tsn chunk is already queued it
 * is reused and marked unsent; otherwise a new one is allocated. The
 * chunk is trimmed to fit one MTU by lowering the advance point if the
 * full skipped list does not fit.
 */
void
send_forward_tsn(struct sctp_tcb *stcb,
                 struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *at, *tp1, *last;
	struct sctp_forward_tsn_chunk *fwdtsn;
	struct sctp_strseq *strseq;
	struct sctp_strseq_mid *strseq_m;
	uint32_t advance_peer_ack_point;
	unsigned int cnt_of_space, i, ovh;
	unsigned int space_needed;
	unsigned int cnt_of_skipped = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
			/* mark it to unsent */
			chk->sent = SCTP_DATAGRAM_UNSENT;
			chk->snd_count = 0;
			/* Drop any stale destination so it gets re-selected. */
			if (chk->whoTo) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = NULL;
			}
			goto sctp_fill_in_rest;
		}
	}
	/* Ok if we reach here we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	asoc->fwd_tsn_cnt++;
	chk->copy_by_ref = 0;
	/*
	 * We don't do the old thing here since
	 * this is used not for on-wire but to
	 * tell if we are sending a fwd-tsn by
	 * the stack during output. And if its
	 * a IFORWARD or a FORWARD it is a fwd-tsn.
	 */
	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = asoc;
	chk->whoTo = NULL;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
sctp_fill_in_rest:
	/*-
	 * Here we go through and fill out the part that deals with
	 * stream/seq of the ones we skip.
	 */
	SCTP_BUF_LEN(chk->data) = 0;
	/* First pass: count how many skipped chunks we would report. */
	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
		if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
		    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
			/* no more to look at */
			break;
		}
		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
			/* We don't report these */
			continue;
		}
		cnt_of_skipped++;
	}
	/* I-FORWARD-TSN uses the larger strseq_mid entries. */
	if (asoc->idata_supported) {
		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
		                (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
	} else {
		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
		                (cnt_of_skipped * sizeof(struct sctp_strseq)));
	}
	cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MIN_OVERHEAD;
	} else {
		ovh = SCTP_MIN_V4_OVERHEAD;
	}
	if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
		/* trim to a mtu size */
		cnt_of_space = asoc->smallest_mtu - ovh;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
		sctp_misc_ints(SCTP_FWD_TSN_CHECK,
		               0xff, 0, cnt_of_skipped,
		               asoc->advanced_peer_ack_point);
	}
	advance_peer_ack_point = asoc->advanced_peer_ack_point;
	if (cnt_of_space < space_needed) {
		/*-
		 * ok we must trim down the chunk by lowering the
		 * advance peer ack point.
		 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
			               0xff, 0xff, cnt_of_space,
			               space_needed);
		}
		/* Recompute how many entries actually fit in the space. */
		cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
		if (asoc->idata_supported) {
			cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
		} else {
			cnt_of_skipped /= sizeof(struct sctp_strseq);
		}
		/*-
		 * Go through and find the TSN that will be the one
		 * we report.
		 */
		at = TAILQ_FIRST(&asoc->sent_queue);
		if (at != NULL) {
			for (i = 0; i < cnt_of_skipped; i++) {
				tp1 = TAILQ_NEXT(at, sctp_next);
				if (tp1 == NULL) {
					break;
				}
				at = tp1;
			}
		}
		if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
			               0xff, cnt_of_skipped, at->rec.data.tsn,
			               asoc->advanced_peer_ack_point);
		}
		last = at;
		/*-
		 * last now points to last one I can report, update
		 * peer ack point
		 */
		if (last) {
			advance_peer_ack_point = last->rec.data.tsn;
		}
		if (asoc->idata_supported) {
			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
			               cnt_of_skipped * sizeof(struct sctp_strseq_mid);
		} else {
			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
			               cnt_of_skipped * sizeof(struct sctp_strseq);
		}
	}
	chk->send_size = space_needed;
	/* Setup the chunk header in the mbuf. */
	fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
	fwdtsn->ch.chunk_length = htons(chk->send_size);
	fwdtsn->ch.chunk_flags = 0;
	if (asoc->idata_supported) {
		fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
	} else {
		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
	}
	fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	fwdtsn++;
	/*-
	 * Move pointer to after the fwdtsn and transfer to the
	 * strseq pointer (only one of strseq/strseq_m is used,
	 * depending on I-DATA support).
	 */
	if (asoc->idata_supported) {
		strseq_m = (struct sctp_strseq_mid *)fwdtsn;
		strseq = NULL;
	} else {
		strseq = (struct sctp_strseq *)fwdtsn;
		strseq_m = NULL;
	}
	/*-
	 * Now populate the strseq list. This is done blindly
	 * without pulling out duplicate stream info. This is
	 * inefficient but won't harm the process since the peer will
	 * look at these in sequence and will thus release anything.
	 * It could mean we exceed the PMTU and chop off some that
	 * we could have included.. but this is unlikely (aka 1432/4
	 * would mean 300+ stream seq's would have to be reported in
	 * one FWD-TSN. With a bit of work we can later FIX this to
	 * optimize and pull out duplicates.. but it does add more
	 * overhead. So for now... not!
	 */
	i = 0;
	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
		if (i >= cnt_of_skipped) {
			break;
		}
		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
			/* We don't report these */
			continue;
		}
		if (at->rec.data.tsn == advance_peer_ack_point) {
			at->rec.data.fwd_tsn_cnt = 0;
		}
		if (asoc->idata_supported) {
			strseq_m->sid = htons(at->rec.data.sid);
			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
				strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
			} else {
				strseq_m->flags = 0;
			}
			strseq_m->mid = htonl(at->rec.data.mid);
			strseq_m++;
		} else {
			strseq->sid = htons(at->rec.data.sid);
			strseq->ssn = htons((uint16_t)at->rec.data.mid);
			strseq++;
		}
		i++;
	}
	return;
}
11038
11039void
11040sctp_send_sack(struct sctp_tcb *stcb, int so_locked
11041#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11042 SCTP_UNUSED
11043#endif
11044)
11045{
11046 /*-
11047 * Queue up a SACK or NR-SACK in the control queue.
11048 * We must first check to see if a SACK or NR-SACK is
11049 * somehow on the control queue.
11050 * If so, we will take and and remove the old one.
11051 */
11052 struct sctp_association *asoc;
11053 struct sctp_tmit_chunk *chk, *a_chk;
11054 struct sctp_sack_chunk *sack;
11055 struct sctp_nr_sack_chunk *nr_sack;
11056 struct sctp_gap_ack_block *gap_descriptor;
11057 const struct sack_track *selector;
11058 int mergeable = 0;
11059 int offset;
11060 caddr_t limit;
11061 uint32_t *dup;
11062 int limit_reached = 0;
11063 unsigned int i, siz, j;
11064 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
11065 int num_dups = 0;
11066 int space_req;
11067 uint32_t highest_tsn;
11068 uint8_t flags;
11069 uint8_t type;
11070 uint8_t tsn_map;
11071
11072 if (stcb->asoc.nrsack_supported == 1) {
11073 type = SCTP_NR_SELECTIVE_ACK;
11074 } else {
11075 type = SCTP_SELECTIVE_ACK;
11076 }
11077 a_chk = NULL;
11078 asoc = &stcb->asoc;
11079 SCTP_TCB_LOCK_ASSERT(stcb);
11080 if (asoc->last_data_chunk_from == NULL) {
11081 /* Hmm we never received anything */
11082 return;
11083 }
11084 sctp_slide_mapping_arrays(stcb);
11085 sctp_set_rwnd(stcb, asoc);
11086 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11087 if (chk->rec.chunk_id.id == type) {
11088 /* Hmm, found a sack already on queue, remove it */
11089 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
11090 asoc->ctrl_queue_cnt--;
11091 a_chk = chk;
11092 if (a_chk->data) {
11093 sctp_m_freem(a_chk->data);
11094 a_chk->data = NULL;
11095 }
11096 if (a_chk->whoTo) {
11097 sctp_free_remote_addr(a_chk->whoTo);
11098 a_chk->whoTo = NULL;
11099 }
11100 break;
11101 }
11102 }
11103 if (a_chk == NULL) {
11104 sctp_alloc_a_chunk(stcb, a_chk);
11105 if (a_chk == NULL) {
11106 /* No memory so we drop the idea, and set a timer */
11107 if (stcb->asoc.delayed_ack) {
11108 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11109 stcb->sctp_ep, stcb, NULL,
11110 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
11111 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11112 stcb->sctp_ep, stcb, NULL);
11113 } else {
11114 stcb->asoc.send_sack = 1;
11115 }
11116 return;
11117 }
11118 a_chk->copy_by_ref = 0;
11119 a_chk->rec.chunk_id.id = type;
11120 a_chk->rec.chunk_id.can_take_data = 1;
11121 }
11122 /* Clear our pkt counts */
11123 asoc->data_pkts_seen = 0;
11124
11125 a_chk->flags = 0;
11126 a_chk->asoc = asoc;
11127 a_chk->snd_count = 0;
11128 a_chk->send_size = 0; /* fill in later */
11129 a_chk->sent = SCTP_DATAGRAM_UNSENT;
11130 a_chk->whoTo = NULL;
11131
11132 if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
11133 /*-
11134 * Ok, the destination for the SACK is unreachable, lets see if
11135 * we can select an alternate to asoc->last_data_chunk_from
11136 */
11137 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
11138 if (a_chk->whoTo == NULL) {
11139 /* Nope, no alternate */
11140 a_chk->whoTo = asoc->last_data_chunk_from;
11141 }
11142 } else {
11143 a_chk->whoTo = asoc->last_data_chunk_from;
11144 }
11145 if (a_chk->whoTo) {
11146 atomic_add_int(&a_chk->whoTo->ref_count, 1);
11147 }
11148 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
11149 highest_tsn = asoc->highest_tsn_inside_map;
11150 } else {
11151 highest_tsn = asoc->highest_tsn_inside_nr_map;
11152 }
11153 if (highest_tsn == asoc->cumulative_tsn) {
11154 /* no gaps */
11155 if (type == SCTP_SELECTIVE_ACK) {
11156 space_req = sizeof(struct sctp_sack_chunk);
11157 } else {
11158 space_req = sizeof(struct sctp_nr_sack_chunk);
11159 }
11160 } else {
11161 /* gaps get a cluster */
11162 space_req = MCLBYTES;
11163 }
11164 /* Ok now lets formulate a MBUF with our sack */
11165 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
11166 if ((a_chk->data == NULL) ||
11167 (a_chk->whoTo == NULL)) {
11168 /* rats, no mbuf memory */
11169 if (a_chk->data) {
11170 /* was a problem with the destination */
11171 sctp_m_freem(a_chk->data);
11172 a_chk->data = NULL;
11173 }
11174 sctp_free_a_chunk(stcb, a_chk, so_locked);
11175 /* sa_ignore NO_NULL_CHK */
11176 if (stcb->asoc.delayed_ack) {
11177 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
11178 stcb->sctp_ep, stcb, NULL,
11179 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
11180 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
11181 stcb->sctp_ep, stcb, NULL);
11182 } else {
11183 stcb->asoc.send_sack = 1;
11184 }
11185 return;
11186 }
11187 /* ok, lets go through and fill it in */
11188 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
11189 space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
11190 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
11191 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
11192 }
11193 limit = mtod(a_chk->data, caddr_t);
11194 limit += space;
11195
11196 flags = 0;
11197
11198 if ((asoc->sctp_cmt_on_off > 0) &&
11199 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
11200 /*-
11201 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
11202 * received, then set high bit to 1, else 0. Reset
11203 * pkts_rcvd.
11204 */
11205 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
11206 asoc->cmt_dac_pkts_rcvd = 0;
11207 }
11208#ifdef SCTP_ASOCLOG_OF_TSNS
11209 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
11210 stcb->asoc.cumack_log_atsnt++;
11211 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
11212 stcb->asoc.cumack_log_atsnt = 0;
11213 }
11214#endif
11215 /* reset the readers interpretation */
11216 stcb->freed_by_sorcv_sincelast = 0;
11217
11218 if (type == SCTP_SELECTIVE_ACK) {
11219 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
11220 nr_sack = NULL;
11221 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
11222 if (highest_tsn > asoc->mapping_array_base_tsn) {
11223 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11224 } else {
11225 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
11226 }
11227 } else {
11228 sack = NULL;
11229 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
11230 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
11231 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
11232 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11233 } else {
11234 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
11235 }
11236 }
11237
11238 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11239 offset = 1;
11240 } else {
11241 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11242 }
11243 if (((type == SCTP_SELECTIVE_ACK) &&
11244 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
11245 ((type == SCTP_NR_SELECTIVE_ACK) &&
11246 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
11247 /* we have a gap .. maybe */
11248 for (i = 0; i < siz; i++) {
11249 tsn_map = asoc->mapping_array[i];
11250 if (type == SCTP_SELECTIVE_ACK) {
11251 tsn_map |= asoc->nr_mapping_array[i];
11252 }
11253 if (i == 0) {
11254 /*
11255 * Clear all bits corresponding to TSNs
11256 * smaller or equal to the cumulative TSN.
11257 */
11258 tsn_map &= (~0U << (1 - offset));
11259 }
11260 selector = &sack_array[tsn_map];
11261 if (mergeable && selector->right_edge) {
11262 /*
11263 * Backup, left and right edges were ok to
11264 * merge.
11265 */
11266 num_gap_blocks--;
11267 gap_descriptor--;
11268 }
11269 if (selector->num_entries == 0)
11270 mergeable = 0;
11271 else {
11272 for (j = 0; j < selector->num_entries; j++) {
11273 if (mergeable && selector->right_edge) {
11274 /*
11275 * do a merge by NOT setting
11276 * the left side
11277 */
11278 mergeable = 0;
11279 } else {
11280 /*
11281 * no merge, set the left
11282 * side
11283 */
11284 mergeable = 0;
11285 gap_descriptor->start = htons((selector->gaps[j].start + offset));
11286 }
11287 gap_descriptor->end = htons((selector->gaps[j].end + offset));
11288 num_gap_blocks++;
11289 gap_descriptor++;
11290 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11291 /* no more room */
11292 limit_reached = 1;
11293 break;
11294 }
11295 }
11296 if (selector->left_edge) {
11297 mergeable = 1;
11298 }
11299 }
11300 if (limit_reached) {
11301 /* Reached the limit stop */
11302 break;
11303 }
11304 offset += 8;
11305 }
11306 }
11307 if ((type == SCTP_NR_SELECTIVE_ACK) &&
11308 (limit_reached == 0)) {
11309
11310 mergeable = 0;
11311
11312 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
11313 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
11314 } else {
11315 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
11316 }
11317
11318 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
11319 offset = 1;
11320 } else {
11321 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
11322 }
11323 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
11324 /* we have a gap .. maybe */
11325 for (i = 0; i < siz; i++) {
11326 tsn_map = asoc->nr_mapping_array[i];
11327 if (i == 0) {
11328 /*
11329 * Clear all bits corresponding to TSNs
11330 * smaller or equal to the cumulative TSN.
11331 */
11332 tsn_map &= (~0U << (1 - offset));
11333 }
11334 selector = &sack_array[tsn_map];
11335 if (mergeable && selector->right_edge) {
11336 /*
11337 * Backup, left and right edges were ok to
11338 * merge.
11339 */
11340 num_nr_gap_blocks--;
11341 gap_descriptor--;
11342 }
11343 if (selector->num_entries == 0)
11344 mergeable = 0;
11345 else {
11346 for (j = 0; j < selector->num_entries; j++) {
11347 if (mergeable && selector->right_edge) {
11348 /*
11349 * do a merge by NOT setting
11350 * the left side
11351 */
11352 mergeable = 0;
11353 } else {
11354 /*
11355 * no merge, set the left
11356 * side
11357 */
11358 mergeable = 0;
11359 gap_descriptor->start = htons((selector->gaps[j].start + offset));
11360 }
11361 gap_descriptor->end = htons((selector->gaps[j].end + offset));
11362 num_nr_gap_blocks++;
11363 gap_descriptor++;
11364 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
11365 /* no more room */
11366 limit_reached = 1;
11367 break;
11368 }
11369 }
11370 if (selector->left_edge) {
11371 mergeable = 1;
11372 }
11373 }
11374 if (limit_reached) {
11375 /* Reached the limit stop */
11376 break;
11377 }
11378 offset += 8;
11379 }
11380 }
11381 }
11382 /* now we must add any dups we are going to report. */
11383 if ((limit_reached == 0) && (asoc->numduptsns)) {
11384 dup = (uint32_t *) gap_descriptor;
11385 for (i = 0; i < asoc->numduptsns; i++) {
11386 *dup = htonl(asoc->dup_tsns[i]);
11387 dup++;
11388 num_dups++;
11389 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
11390 /* no more room */
11391 break;
11392 }
11393 }
11394 asoc->numduptsns = 0;
11395 }
11396 /*
11397 * now that the chunk is prepared queue it to the control chunk
11398 * queue.
11399 */
11400 if (type == SCTP_SELECTIVE_ACK) {
11401 a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
11402 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11403 num_dups * sizeof(int32_t));
11404 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11405 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11406 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
11407 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
11408 sack->sack.num_dup_tsns = htons(num_dups);
11409 sack->ch.chunk_type = type;
11410 sack->ch.chunk_flags = flags;
11411 sack->ch.chunk_length = htons(a_chk->send_size);
11412 } else {
11413 a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
11414 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
11415 num_dups * sizeof(int32_t));
11416 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
11417 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
11418 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
11419 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
11420 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
11421 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
11422 nr_sack->nr_sack.reserved = 0;
11423 nr_sack->ch.chunk_type = type;
11424 nr_sack->ch.chunk_flags = flags;
11425 nr_sack->ch.chunk_length = htons(a_chk->send_size);
11426 }
11427 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
11428 asoc->my_last_reported_rwnd = asoc->my_rwnd;
11429 asoc->ctrl_queue_cnt++;
11430 asoc->send_sack = 0;
11431 SCTP_STAT_INCR(sctps_sendsacks);
11432 return;
11433}
11434
/*
 * Build and immediately transmit an ABORT chunk for an existing association.
 *
 * stcb      - the association being aborted (TCB lock must be held).
 * operr     - optional mbuf chain of error causes to append to the ABORT;
 *             ownership is taken (freed on failure, linked in on success).
 * so_locked - socket-lock state, only meaningful on Apple / lock-testing builds.
 *
 * The chunk is not queued; it is handed straight to
 * sctp_lowlevel_chunk_output(), since the association is going away.
 */
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
	)
{
	struct mbuf *m_abort, *m, *m_last;
	struct mbuf *m_out, *m_end = NULL;
	struct sctp_abort_chunk *abort;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_nets *net;
	uint32_t vtag;
	uint32_t auth_offset = 0;
	int error;
	uint16_t cause_len, chunk_len, padding_len;

#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	SCTP_TCB_LOCK_ASSERT(stcb);
	/*-
	 * Add an AUTH chunk, if chunk requires it and save the offset into
	 * the chain for AUTH
	 */
	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
	                                stcb->asoc.peer_auth_chunks)) {
		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
		                            stcb, SCTP_ABORT_ASSOCIATION);
		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	} else {
		m_out = NULL;
	}
	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_abort == NULL) {
		/* Allocation failed: release the AUTH chain and the caller's
		 * error causes, since we own both at this point. */
		if (m_out) {
			sctp_m_freem(m_out);
		}
		if (operr) {
			sctp_m_freem(operr);
		}
		return;
	}
	/* link in any error */
	SCTP_BUF_NEXT(m_abort) = operr;
	/* Walk the cause chain to total its length and find the last mbuf
	 * (needed later as the padding target). */
	cause_len = 0;
	m_last = NULL;
	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
		cause_len += (uint16_t)SCTP_BUF_LEN(m);
		if (SCTP_BUF_NEXT(m) == NULL) {
			m_last = m;
		}
	}
	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
	/* Chunks are padded to a 4-byte boundary on the wire. */
	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
	if (m_out == NULL) {
		/* NO Auth chunk prepended, so reserve space in front */
		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
		m_out = m_abort;
	} else {
		/* Put AUTH chunk at the front of the chain */
		SCTP_BUF_NEXT(m_end) = m_abort;
	}
	/* Prefer the alternate path if one is set, else the primary. */
	if (stcb->asoc.alternate) {
		net = stcb->asoc.alternate;
	} else {
		net = stcb->asoc.primary_destination;
	}
	/* Fill in the ABORT chunk header. */
	abort = mtod(m_abort, struct sctp_abort_chunk *);
	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
	if (stcb->asoc.peer_vtag == 0) {
		/* This happens iff the assoc is in COOKIE-WAIT state. */
		vtag = stcb->asoc.my_vtag;
		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
	} else {
		vtag = stcb->asoc.peer_vtag;
		abort->ch.chunk_flags = 0;
	}
	abort->ch.chunk_length = htons(chunk_len);
	/* Add padding, if necessary. */
	if (padding_len > 0) {
		if ((m_last == NULL) ||
		    (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
			sctp_m_freem(m_out);
			return;
		}
	}
	/* NOTE(review): the UDP-encapsulation port comes from
	 * primary_destination even when net is the alternate path —
	 * looks inconsistent with the address selection above; confirm
	 * against upstream before changing. */
	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
	    stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
	    stcb->asoc.primary_destination->port, NULL,
#if defined(__FreeBSD__)
	    0, 0,
#endif
	    so_locked))) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
		if (error == ENOBUFS) {
			stcb->asoc.ifp_had_enobuf = 1;
			SCTP_STAT_INCR(sctps_lowlevelerr);
		}
	} else {
		stcb->asoc.ifp_had_enobuf = 0;
	}
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
11547
11548void
11549sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11550 struct sctp_nets *net,
11551 int reflect_vtag)
11552{
11553 /* formulate and SEND a SHUTDOWN-COMPLETE */
11554 struct mbuf *m_shutdown_comp;
11555 struct sctp_shutdown_complete_chunk *shutdown_complete;
11556 uint32_t vtag;
11557 int error;
11558 uint8_t flags;
11559
11560 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11561 if (m_shutdown_comp == NULL) {
11562 /* no mbuf's */
11563 return;
11564 }
11565 if (reflect_vtag) {
11566 flags = SCTP_HAD_NO_TCB;
11567 vtag = stcb->asoc.my_vtag;
11568 } else {
11569 flags = 0;
11570 vtag = stcb->asoc.peer_vtag;
11571 }
11572 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11573 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11574 shutdown_complete->ch.chunk_flags = flags;
11575 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11576 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11577 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11578 (struct sockaddr *)&net->ro._l_addr,
11579 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11580 stcb->sctp_ep->sctp_lport, stcb->rport,
11581 htonl(vtag),
11582 net->port, NULL,
11583#if defined(__FreeBSD__)
11584 0, 0,
11585#endif
11586 SCTP_SO_NOT_LOCKED))) {
11587 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11588 if (error == ENOBUFS) {
11589 stcb->asoc.ifp_had_enobuf = 1;
11590 SCTP_STAT_INCR(sctps_lowlevelerr);
11591 }
11592 } else {
11593 stcb->asoc.ifp_had_enobuf = 0;
11594 }
11595 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11596 return;
11597}
11598
/*
 * Build and transmit a stateless response packet (e.g. ABORT or
 * SHUTDOWN-COMPLETE of type 'type') addressed back to the sender of a
 * received packet.
 *
 * src/dst  - the addresses of the RECEIVED packet; they are deliberately
 *            swapped when filling in the outgoing IP/IPv6 header below.
 * sh       - the received SCTP common header (ports/v_tag are mirrored).
 * vtag     - verification tag to use; 0 means reflect sh->v_tag and set
 *            the SCTP_HAD_NO_TCB chunk flag.
 * cause    - optional mbuf chain of error causes; ownership is taken.
 * port     - remote UDP encapsulation port; non-zero inserts a UDP header.
 */
#if defined(__FreeBSD__)
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
                   struct sctphdr *sh, uint32_t vtag,
                   uint8_t type, struct mbuf *cause,
                   uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
                   uint32_t vrf_id, uint16_t port)
#else
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
                   struct sctphdr *sh, uint32_t vtag,
                   uint8_t type, struct mbuf *cause,
                   uint32_t vrf_id SCTP_UNUSED, uint16_t port)
#endif
{
#ifdef __Panda__
	pakhandle_type o_pak;
#else
	struct mbuf *o_pak;
#endif
	struct mbuf *mout;
	struct sctphdr *shout;
	struct sctp_chunkhdr *ch;
#if defined(INET) || defined(INET6)
	struct udphdr *udp;
	int ret;
#endif
	int len, cause_len, padding_len;
#ifdef INET
#if defined(__APPLE__) || defined(__Panda__)
	sctp_route_t ro;
#endif
	struct sockaddr_in *src_sin, *dst_sin;
	struct ip *ip;
#endif
#ifdef INET6
	struct sockaddr_in6 *src_sin6, *dst_sin6;
	struct ip6_hdr *ip6;
#endif

	/* Compute the length of the cause and add final padding. */
	cause_len = 0;
	if (cause != NULL) {
		struct mbuf *m_at, *m_last = NULL;

		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL)
				m_last = m_at;
			cause_len += SCTP_BUF_LEN(m_at);
		}
		/* Pad the cause chain to a 4-byte boundary. */
		padding_len = cause_len % 4;
		if (padding_len != 0) {
			padding_len = 4 - padding_len;
		}
		if (padding_len != 0) {
			if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
				sctp_m_freem(cause);
				return;
			}
		}
	} else {
		padding_len = 0;
	}
	/* Get an mbuf for the header. */
	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		len += sizeof(struct ip);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		len += sizeof(struct ip6_hdr);
		break;
#endif
	default:
		break;
	}
#if defined(INET) || defined(INET6)
	if (port) {
		/* UDP encapsulation requested: account for the UDP header. */
		len += sizeof(struct udphdr);
	}
#endif
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#else
	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
#endif
#else
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#endif
	if (mout == NULL) {
		/* Allocation failed: we own 'cause', so release it. */
		if (cause) {
			sctp_m_freem(cause);
		}
		return;
	}
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
#else
	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
#endif
#else
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
#endif
	SCTP_BUF_LEN(mout) = len;
	SCTP_BUF_NEXT(mout) = cause;
#if defined(__FreeBSD__)
	/* Keep the reply in the same FIB / flow as the packet it answers. */
	M_SETFIB(mout, fibnum);
	mout->m_pkthdr.flowid = mflowid;
	M_HASHTYPE_SET(mout, mflowtype);
#endif
#ifdef INET
	ip = NULL;
#endif
#ifdef INET6
	ip6 = NULL;
#endif
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		src_sin = (struct sockaddr_in *)src;
		dst_sin = (struct sockaddr_in *)dst;
		ip = mtod(mout, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = (sizeof(struct ip) >> 2);
		ip->ip_tos = 0;
		ip->ip_off = 0;
#if defined(__FreeBSD__)
		ip_fillid(ip);
#elif defined(__APPLE__)
#if RANDOM_IP_ID
		ip->ip_id = ip_randomid();
#else
		ip->ip_id = htons(ip_id++);
#endif
#elif defined(__Userspace__)
		ip->ip_id = htons(ip_id++);
#else
		ip->ip_id = ip_id++;
#endif
		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
		if (port) {
			ip->ip_p = IPPROTO_UDP;
		} else {
			ip->ip_p = IPPROTO_SCTP;
		}
		/* Reply goes back to the sender: swap src and dst. */
		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
		ip->ip_sum = 0;
		len = sizeof(struct ip);
		shout = (struct sctphdr *)((caddr_t)ip + len);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		src_sin6 = (struct sockaddr_in6 *)src;
		dst_sin6 = (struct sockaddr_in6 *)dst;
		ip6 = mtod(mout, struct ip6_hdr *);
		ip6->ip6_flow = htonl(0x60000000);
#if defined(__FreeBSD__)
		if (V_ip6_auto_flowlabel) {
			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
		}
#endif
#if defined(__Userspace__)
		ip6->ip6_hlim = IPv6_HOP_LIMIT;
#else
		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
#endif
		if (port) {
			ip6->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6->ip6_nxt = IPPROTO_SCTP;
		}
		/* Reply goes back to the sender: swap src and dst. */
		ip6->ip6_src = dst_sin6->sin6_addr;
		ip6->ip6_dst = src_sin6->sin6_addr;
		len = sizeof(struct ip6_hdr);
		shout = (struct sctphdr *)((caddr_t)ip6 + len);
		break;
#endif
	default:
		len = 0;
		shout = mtod(mout, struct sctphdr *);
		break;
	}
#if defined(INET) || defined(INET6)
	if (port) {
		/* htons() is a no-op for the zero comparison: this just checks
		 * whether UDP tunneling is disabled via sysctl. */
		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
			sctp_m_freem(mout);
			return;
		}
		udp = (struct udphdr *)shout;
		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
		udp->uh_dport = port;
		udp->uh_sum = 0;
		udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
		                                sizeof(struct sctphdr) +
		                                sizeof(struct sctp_chunkhdr) +
		                                cause_len + padding_len));
		len += sizeof(struct udphdr);
		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
	} else {
		udp = NULL;
	}
#endif
	/* Mirror the received common header's ports. */
	shout->src_port = sh->dest_port;
	shout->dest_port = sh->src_port;
	shout->checksum = 0;
	if (vtag) {
		shout->v_tag = htonl(vtag);
	} else {
		/* No tag supplied: reflect the sender's tag. */
		shout->v_tag = sh->v_tag;
	}
	len += sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
	ch->chunk_type = type;
	if (vtag) {
		ch->chunk_flags = 0;
	} else {
		ch->chunk_flags = SCTP_HAD_NO_TCB;
	}
	ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
	len += sizeof(struct sctp_chunkhdr);
	len += cause_len + padding_len;

	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		sctp_m_freem(mout);
		return;
	}
	SCTP_ATTACH_CHAIN(o_pak, mout, len);
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#if defined(__APPLE__) || defined(__Panda__)
		/* zap the stack pointer to the route */
		bzero(&ro, sizeof(sctp_route_t));
#if defined(__Panda__)
		ro._l_addr.sa.sa_family = AF_INET;
#endif
#endif
		if (port) {
#if !defined(__Windows__) && !defined(__Userspace__)
#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
			if (V_udp_cksum) {
				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
			} else {
				udp->uh_sum = 0;
			}
#else
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
#endif
#else
			udp->uh_sum = 0;
#endif
		}
#if defined(__FreeBSD__)
#if __FreeBSD_version >= 1000000
		ip->ip_len = htons(len);
#else
		ip->ip_len = len;
#endif
#elif defined(__APPLE__) || defined(__Userspace__)
		ip->ip_len = len;
#else
		ip->ip_len = htons(len);
#endif
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* UDP encapsulated: CRC32c must be computed in software
			 * over the inner SCTP packet. */
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#if !defined(__Windows__) && !defined(__Userspace__)
#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
			if (V_udp_cksum) {
				SCTP_ENABLE_UDP_CSUM(o_pak);
			}
#else
			SCTP_ENABLE_UDP_CSUM(o_pak);
#endif
#endif
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
			/* Offload the CRC32c to the NIC where supported. */
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
			SCTP_STAT_INCR(sctps_sendhwcrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#endif
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(o_pak);
		}
#endif
#if defined(__APPLE__) || defined(__Panda__)
		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
		/* Free the route if we got one back */
		if (ro.ro_rt) {
			RTFREE(ro.ro_rt);
			ro.ro_rt = NULL;
		}
#else
		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/* NOTE(review): ip6_plen is stored without htons() here.
		 * FreeBSD's ip6_output() recomputes the field, but verify
		 * the byte order on the non-FreeBSD output paths. */
		ip6->ip6_plen = (uint16_t)(len - sizeof(struct ip6_hdr));
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#if defined(__Windows__)
			udp->uh_sum = 0;
#elif !defined(__Userspace__)
			/* A computed checksum of 0 must be sent as 0xffff (RFC 768). */
			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
				udp->uh_sum = 0xffff;
			}
#endif
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 900000
#if __FreeBSD_version > 901000
			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
#else
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
#endif
			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
			SCTP_STAT_INCR(sctps_sendhwcrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#endif
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(o_pak);
		}
#endif
		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		char *buffer;
		struct sockaddr_conn *sconn;

		sconn = (struct sockaddr_conn *)src;
#if defined(SCTP_WITH_NO_CSUM)
		SCTP_STAT_INCR(sctps_sendnocrc);
#else
		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
			shout->checksum = sctp_calculate_cksum(mout, 0);
			SCTP_STAT_INCR(sctps_sendswcrc);
		} else {
			SCTP_STAT_INCR(sctps_sendhwcrc);
		}
#endif
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(mout);
		}
#endif
		/* Don't alloc/free for each packet */
		if ((buffer = malloc(len)) != NULL) {
			/* Flatten the chain into one buffer for the user callback. */
			m_copydata(mout, 0, len, buffer);
			SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
			free(buffer);
		}
		sctp_m_freem(mout);
		break;
	}
#endif
	default:
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
		        dst->sa_family);
		sctp_m_freem(mout);
		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return;
	}
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;
}
12003
/*
 * Stateless SHUTDOWN-COMPLETE: reply to a received packet (src/dst/sh)
 * for which no TCB exists. Thin wrapper around sctp_send_resp_msg() with
 * vtag 0 (reflect the sender's tag, SCTP_HAD_NO_TCB set) and no cause.
 */
void
sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
                             struct sctphdr *sh,
#if defined(__FreeBSD__)
                             uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
#endif
                             uint32_t vrf_id, uint16_t port)
{
	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
#if defined(__FreeBSD__)
	                   mflowtype, mflowid, fibnum,
#endif
	                   vrf_id, port);
}
12018
/*
 * Build a HEARTBEAT request for destination 'net' and queue it on the
 * association's control-chunk send queue (it is not transmitted here).
 *
 * stcb      - the association (TCB lock must be held).
 * net       - destination address to probe; NULL or an unsupported
 *             address family is a silent no-op.
 * so_locked - socket-lock state, forwarded to sctp_free_a_chunk() on
 *             failure paths.
 */
void
sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net,int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_heartbeat_chunk *hb;
	struct timeval now;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (net == NULL) {
		return;
	}
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* Only families compiled into this build can be heartbeated. */
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		break;
#endif
#ifdef INET6
	case AF_INET6:
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		break;
#endif
	default:
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
		return;
	}

	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
	chk->rec.chunk_id.can_take_data = 1;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_heartbeat_chunk);

	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, so_locked);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	/* The chunk holds a reference on its destination. */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* Now we have a mbuf that we can fill in with the details */
	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
	/* fill out chunk header */
	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
	hb->ch.chunk_flags = 0;
	hb->ch.chunk_length = htons(chk->send_size);
	/* Fill out hb parameter */
	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
	/* Timestamp so the echoed HEARTBEAT-ACK can be used for RTT. */
	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
	/* Did our user request this one, put it in */
	hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
#ifdef HAVE_SA_LEN
	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
#else
	/* No sa_len member on this platform: derive the length by family. */
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
		break;
#endif
	default:
		hb->heartbeat.hb_info.addr_len = 0;
		break;
	}
#endif
	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
		/*
		 * we only take from the entropy pool if the address is not
		 * confirmed.
		 */
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
	} else {
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
	}
	/* Copy the destination address into the heartbeat info parameter. */
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(hb->heartbeat.hb_info.address,
		       &net->ro._l_addr.sin.sin_addr,
		       sizeof(net->ro._l_addr.sin.sin_addr));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		memcpy(hb->heartbeat.hb_info.address,
		       &net->ro._l_addr.sin6.sin6_addr,
		       sizeof(net->ro._l_addr.sin6.sin6_addr));
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		memcpy(hb->heartbeat.hb_info.address,
		       &net->ro._l_addr.sconn.sconn_addr,
		       sizeof(net->ro._l_addr.sconn.sconn_addr));
		break;
#endif
	default:
		/* Should be unreachable: family was validated above. */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		return;
		break;	/* NOTREACHED */
	}
	net->hb_responded = 0;
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	SCTP_STAT_INCR(sctps_sendheartbeat);
	return;
}
12161
12162void
12163sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
12164 uint32_t high_tsn)
12165{
12166 struct sctp_association *asoc;
12167 struct sctp_ecne_chunk *ecne;
12168 struct sctp_tmit_chunk *chk;
12169
12170 if (net == NULL) {
12171 return;
12172 }
12173 asoc = &stcb->asoc;
12174 SCTP_TCB_LOCK_ASSERT(stcb);
12175 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12176 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
12177 /* found a previous ECN_ECHO update it if needed */
12178 uint32_t cnt, ctsn;
12179 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12180 ctsn = ntohl(ecne->tsn);
12181 if (SCTP_TSN_GT(high_tsn, ctsn)) {
12182 ecne->tsn = htonl(high_tsn);
12183 SCTP_STAT_INCR(sctps_queue_upd_ecne);
12184 }
12185 cnt = ntohl(ecne->num_pkts_since_cwr);
12186 cnt++;
12187 ecne->num_pkts_since_cwr = htonl(cnt);
12188 return;
12189 }
12190 }
12191 /* nope could not find one to update so we must build one */
12192 sctp_alloc_a_chunk(stcb, chk);
12193 if (chk == NULL) {
12194 return;
12195 }
12196 SCTP_STAT_INCR(sctps_queue_upd_ecne);
12197 chk->copy_by_ref = 0;
12198 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
12199 chk->rec.chunk_id.can_take_data = 0;
12200 chk->flags = 0;
12201 chk->asoc = &stcb->asoc;
12202 chk->send_size = sizeof(struct sctp_ecne_chunk);
12203 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12204 if (chk->data == NULL) {
12205 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12206 return;
12207 }
12208 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12209 SCTP_BUF_LEN(chk->data) = chk->send_size;
12210 chk->sent = SCTP_DATAGRAM_UNSENT;
12211 chk->snd_count = 0;
12212 chk->whoTo = net;
12213 atomic_add_int(&chk->whoTo->ref_count, 1);
12214
12215 stcb->asoc.ecn_echo_cnt_onq++;
12216 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
12217 ecne->ch.chunk_type = SCTP_ECN_ECHO;
12218 ecne->ch.chunk_flags = 0;
12219 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
12220 ecne->tsn = htonl(high_tsn);
12221 ecne->num_pkts_since_cwr = htonl(1);
12222 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
12223 asoc->ctrl_queue_cnt++;
12224}
12225
/*
 * Queue a PACKET-DROPPED report for a received packet we had to discard.
 *
 * stcb    - association (TCB lock must be held); NULL is a no-op.
 * net     - destination to send the report to (may be NULL).
 * m       - the dropped packet, starting at its IP header.
 * len     - total length of the dropped packet including the IP header.
 * iphlen  - length of the IP header, stripped before copying the payload.
 * bad_crc - non-zero if the packet was dropped due to a CRC failure.
 *
 * Silently returns unless the peer advertised PKTDROP support. The
 * chunk is queued on the control send queue, not transmitted here.
 */
void
sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
    struct mbuf *m, int len, int iphlen, int bad_crc)
{
	struct sctp_association *asoc;
	struct sctp_pktdrop_chunk *drp;
	struct sctp_tmit_chunk *chk;
	uint8_t *datap;
	int was_trunc = 0;
	int fullsz = 0;
	long spc;
	int offset;
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	if (!stcb) {
		return;
	}
	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->pktdrop_supported == 0) {
		/*-
		 * peer must declare support before I send one.
		 */
		return;
	}
	if (stcb->sctp_socket == NULL) {
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
	chk->rec.chunk_id.can_take_data = 1;
	chk->flags = 0;
	/* From here on, len counts only the SCTP portion of the packet. */
	len -= iphlen;
	chk->send_size = len;
	/* Validate that we do not have an ABORT in here. */
	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_PACKET_DROPPED:
		case SCTP_ABORT_ASSOCIATION:
		case SCTP_INITIATION_ACK:
			/**
			 * We don't respond with an PKT-DROP to an ABORT
			 * or PKT-DROP. We also do not respond to an
			 * INIT-ACK, because we can't know if the initiation
			 * tag is correct or not.
			 */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}

	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
		/* only send 1 mtu worth, trim off the
		 * excess on the end.
		 */
		fullsz = len;
		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
		was_trunc = 1;
	}
	chk->asoc = &stcb->asoc;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* Shared failure exit: release the chunk and bail. */
jump_out:
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
	if (drp == NULL) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
		goto jump_out;
	}
	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
	chk->book_size_scale = 0;
	if (was_trunc) {
		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
		/* Report the original (untruncated) length to the peer. */
		drp->trunc_len = htons(fullsz);
		/* Len is already adjusted to size minus overhead above
		 * take out the pkt_drop chunk itself from it.
		 */
		chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
		len = chk->send_size;
	} else {
		/* no truncation needed */
		drp->ch.chunk_flags = 0;
		drp->trunc_len = htons(0);
	}
	if (bad_crc) {
		drp->ch.chunk_flags |= SCTP_BADCRC;
	}
	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (net) {
		/* we should hit here */
		chk->whoTo = net;
		/* The chunk holds a reference on its destination. */
		atomic_add_int(&chk->whoTo->ref_count, 1);
	} else {
		chk->whoTo = NULL;
	}
	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
	drp->ch.chunk_length = htons(chk->send_size);
	/* Advertise the receive-buffer limit as the "bottleneck bandwidth". */
	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
	if (spc < 0) {
		spc = 0;
	}
	drp->bottle_bw = htonl(spc);
	if (asoc->my_rwnd) {
		drp->current_onq = htonl(asoc->size_on_reasm_queue +
		    asoc->size_on_all_streams +
		    asoc->my_rwnd_control_len +
		    stcb->sctp_socket->so_rcv.sb_cc);
	} else {
		/*-
		 * If my rwnd is 0, possibly from mbuf depletion as well as
		 * space used, tell the peer there is NO space aka onq == bw
		 */
		drp->current_onq = htonl(spc);
	}
	drp->reserved = 0;
	datap = drp->data;
	/* Copy (up to one MTU of) the dropped packet after the header. */
	m_copydata(m, iphlen, len, (caddr_t)datap);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}
12373
12374void
12375sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
12376{
12377 struct sctp_association *asoc;
12378 struct sctp_cwr_chunk *cwr;
12379 struct sctp_tmit_chunk *chk;
12380
12381 SCTP_TCB_LOCK_ASSERT(stcb);
12382 if (net == NULL) {
12383 return;
12384 }
12385 asoc = &stcb->asoc;
12386 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
12387 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
12388 /* found a previous CWR queued to same destination update it if needed */
12389 uint32_t ctsn;
12390 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12391 ctsn = ntohl(cwr->tsn);
12392 if (SCTP_TSN_GT(high_tsn, ctsn)) {
12393 cwr->tsn = htonl(high_tsn);
12394 }
12395 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
12396 /* Make sure override is carried */
12397 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
12398 }
12399 return;
12400 }
12401 }
12402 sctp_alloc_a_chunk(stcb, chk);
12403 if (chk == NULL) {
12404 return;
12405 }
12406 chk->copy_by_ref = 0;
12407 chk->rec.chunk_id.id = SCTP_ECN_CWR;
12408 chk->rec.chunk_id.can_take_data = 1;
12409 chk->flags = 0;
12410 chk->asoc = &stcb->asoc;
12411 chk->send_size = sizeof(struct sctp_cwr_chunk);
12412 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
12413 if (chk->data == NULL) {
12414 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
12415 return;
12416 }
12417 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12418 SCTP_BUF_LEN(chk->data) = chk->send_size;
12419 chk->sent = SCTP_DATAGRAM_UNSENT;
12420 chk->snd_count = 0;
12421 chk->whoTo = net;
12422 atomic_add_int(&chk->whoTo->ref_count, 1);
12423 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
12424 cwr->ch.chunk_type = SCTP_ECN_CWR;
12425 cwr->ch.chunk_flags = override;
12426 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
12427 cwr->tsn = htonl(high_tsn);
12428 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
12429 asoc->ctrl_queue_cnt++;
12430}
12431
/*
 * Append an OUTGOING-SSN-RESET request parameter to the stream-reset
 * chunk in "chk", covering every output stream that is pending reset
 * and fully drained (no chunks booked or queued on it).
 *
 * Returns 1 if a parameter was added, 0 if no stream was eligible.
 * seq is the request sequence number, resp_seq the last sequence we
 * responded to, and last_sent the highest TSN already sent.
 */
static int
sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
	uint16_t len, old_len, i;
	struct sctp_stream_reset_out_request *req_out;
	struct sctp_chunkhdr *ch;
	int at;
	int number_entries=0;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
	/* get to new offset for the param. */
	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	/*
	 * First pass: count the eligible streams (pending reset, nothing
	 * in flight, nothing queued).
	 */
	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
		if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
		    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
		    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
			number_entries++;
		}
	}
	if (number_entries == 0) {
		/* No stream is ready to be reset; add nothing. */
		return (0);
	}
	if (number_entries == stcb->asoc.streamoutcnt) {
		/*
		 * All streams are being reset: encode that as an empty
		 * list, which on the wire means "reset every stream".
		 */
		number_entries = 0;
	}
	if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
		number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
	}
	len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
	req_out->ph.param_length = htons(len);
	req_out->request_seq = htonl(seq);
	req_out->response_seq = htonl(resp_seq);
	req_out->send_reset_at_tsn = htonl(last_sent);
	at = 0;
	if (number_entries) {
		/*
		 * Second pass: emit the (capped) explicit stream list and
		 * mark each listed stream in-flight.
		 */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
			    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
			    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
				req_out->list_of_streams[at] = htons(i);
				at++;
				stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
				if (at >= number_entries) {
					break;
				}
			}
		}
	} else {
		/* Empty list == all streams: mark every one in-flight. */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_out->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return (1);
}
12504
12505static void
12506sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
12507 int number_entries, uint16_t *list,
12508 uint32_t seq)
12509{
12510 uint16_t len, old_len, i;
12511 struct sctp_stream_reset_in_request *req_in;
12512 struct sctp_chunkhdr *ch;
12513
12514 ch = mtod(chk->data, struct sctp_chunkhdr *);
12515 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12516
12517 /* get to new offset for the param. */
12518 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
12519 /* now how long will this param be? */
12520 len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
12521 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
12522 req_in->ph.param_length = htons(len);
12523 req_in->request_seq = htonl(seq);
12524 if (number_entries) {
12525 for (i = 0; i < number_entries; i++) {
12526 req_in->list_of_streams[i] = htons(list[i]);
12527 }
12528 }
12529 if (SCTP_SIZE32(len) > len) {
12530 /*-
12531 * Need to worry about the pad we may end up adding to the
12532 * end. This is easy since the struct is either aligned to 4
12533 * bytes or 2 bytes off.
12534 */
12535 req_in->list_of_streams[number_entries] = 0;
12536 }
12537 /* now fix the chunk length */
12538 ch->chunk_length = htons(len + old_len);
12539 chk->book_size = len + old_len;
12540 chk->book_size_scale = 0;
12541 chk->send_size = SCTP_SIZE32(chk->book_size);
12542 SCTP_BUF_LEN(chk->data) = chk->send_size;
12543 return;
12544}
12545
12546static void
12547sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
12548 uint32_t seq)
12549{
12550 uint16_t len, old_len;
12551 struct sctp_stream_reset_tsn_request *req_tsn;
12552 struct sctp_chunkhdr *ch;
12553
12554 ch = mtod(chk->data, struct sctp_chunkhdr *);
12555 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12556
12557 /* get to new offset for the param. */
12558 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
12559 /* now how long will this param be? */
12560 len = sizeof(struct sctp_stream_reset_tsn_request);
12561 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
12562 req_tsn->ph.param_length = htons(len);
12563 req_tsn->request_seq = htonl(seq);
12564
12565 /* now fix the chunk length */
12566 ch->chunk_length = htons(len + old_len);
12567 chk->send_size = len + old_len;
12568 chk->book_size = SCTP_SIZE32(chk->send_size);
12569 chk->book_size_scale = 0;
12570 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12571 return;
12572}
12573
12574void
12575sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
12576 uint32_t resp_seq, uint32_t result)
12577{
12578 uint16_t len, old_len;
12579 struct sctp_stream_reset_response *resp;
12580 struct sctp_chunkhdr *ch;
12581
12582 ch = mtod(chk->data, struct sctp_chunkhdr *);
12583 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12584
12585 /* get to new offset for the param. */
12586 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
12587 /* now how long will this param be? */
12588 len = sizeof(struct sctp_stream_reset_response);
12589 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12590 resp->ph.param_length = htons(len);
12591 resp->response_seq = htonl(resp_seq);
12592 resp->result = ntohl(result);
12593
12594 /* now fix the chunk length */
12595 ch->chunk_length = htons(len + old_len);
12596 chk->book_size = len + old_len;
12597 chk->book_size_scale = 0;
12598 chk->send_size = SCTP_SIZE32(chk->book_size);
12599 SCTP_BUF_LEN(chk->data) = chk->send_size;
12600 return;
12601}
12602
/*
 * Queue a deferred stream-reset response chunk carrying "response" for
 * the request recorded in "ent".  Called when a response could not be
 * sent at the time the request was processed.
 */
void
sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
    struct sctp_stream_reset_list *ent,
    int response)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;

	asoc = &stcb->asoc;

	/*
	 * Reset our last reset action to the new one IP -> response
	 * (PERFORMED probably). This assures that if we fail to send, a
	 * retran from the peer will get the new response.
	 */
	asoc->last_reset_action[0] = response;
	if (asoc->stream_reset_outstanding) {
		/* A request of our own is in flight; do not respond now. */
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return;
	}
	/* Build a bare RE-CONFIG chunk; the result param is added below. */
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* NOTE(review): assumes caller holds the socket lock
		 * (SCTP_SO_LOCKED) — confirm against call sites. */
		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (stcb->asoc.alternate) {
		chk->whoTo = stcb->asoc.alternate;
	} else {
		chk->whoTo = stcb->asoc.primary_destination;
	}
	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	/* Hold a reference on the destination for the queued chunk. */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	/* Append the response parameter for the deferred request. */
	sctp_add_stream_reset_result(chk, ent->seq, response);
	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	asoc->ctrl_queue_cnt++;
}
12664
12665void
12666sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
12667 uint32_t resp_seq, uint32_t result,
12668 uint32_t send_una, uint32_t recv_next)
12669{
12670 uint16_t len, old_len;
12671 struct sctp_stream_reset_response_tsn *resp;
12672 struct sctp_chunkhdr *ch;
12673
12674 ch = mtod(chk->data, struct sctp_chunkhdr *);
12675 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12676
12677 /* get to new offset for the param. */
12678 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
12679 /* now how long will this param be? */
12680 len = sizeof(struct sctp_stream_reset_response_tsn);
12681 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
12682 resp->ph.param_length = htons(len);
12683 resp->response_seq = htonl(resp_seq);
12684 resp->result = htonl(result);
12685 resp->senders_next_tsn = htonl(send_una);
12686 resp->receivers_next_tsn = htonl(recv_next);
12687
12688 /* now fix the chunk length */
12689 ch->chunk_length = htons(len + old_len);
12690 chk->book_size = len + old_len;
12691 chk->send_size = SCTP_SIZE32(chk->book_size);
12692 chk->book_size_scale = 0;
12693 SCTP_BUF_LEN(chk->data) = chk->send_size;
12694 return;
12695}
12696
12697static void
12698sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
12699 uint32_t seq,
12700 uint16_t adding)
12701{
12702 uint16_t len, old_len;
12703 struct sctp_chunkhdr *ch;
12704 struct sctp_stream_reset_add_strm *addstr;
12705
12706 ch = mtod(chk->data, struct sctp_chunkhdr *);
12707 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12708
12709 /* get to new offset for the param. */
12710 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12711 /* now how long will this param be? */
12712 len = sizeof(struct sctp_stream_reset_add_strm);
12713
12714 /* Fill it out. */
12715 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
12716 addstr->ph.param_length = htons(len);
12717 addstr->request_seq = htonl(seq);
12718 addstr->number_of_streams = htons(adding);
12719 addstr->reserved = 0;
12720
12721 /* now fix the chunk length */
12722 ch->chunk_length = htons(len + old_len);
12723 chk->send_size = len + old_len;
12724 chk->book_size = SCTP_SIZE32(chk->send_size);
12725 chk->book_size_scale = 0;
12726 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12727 return;
12728}
12729
12730static void
12731sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12732 uint32_t seq,
12733 uint16_t adding)
12734{
12735 uint16_t len, old_len;
12736 struct sctp_chunkhdr *ch;
12737 struct sctp_stream_reset_add_strm *addstr;
12738
12739 ch = mtod(chk->data, struct sctp_chunkhdr *);
12740 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12741
12742 /* get to new offset for the param. */
12743 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12744 /* now how long will this param be? */
12745 len = sizeof(struct sctp_stream_reset_add_strm);
12746 /* Fill it out. */
12747 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12748 addstr->ph.param_length = htons(len);
12749 addstr->request_seq = htonl(seq);
12750 addstr->number_of_streams = htons(adding);
12751 addstr->reserved = 0;
12752
12753 /* now fix the chunk length */
12754 ch->chunk_length = htons(len + old_len);
12755 chk->send_size = len + old_len;
12756 chk->book_size = SCTP_SIZE32(chk->send_size);
12757 chk->book_size_scale = 0;
12758 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12759 return;
12760}
12761
12762int
12763sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12764{
12765 struct sctp_association *asoc;
12766 struct sctp_tmit_chunk *chk;
12767 struct sctp_chunkhdr *ch;
12768 uint32_t seq;
12769
12770 asoc = &stcb->asoc;
12771 asoc->trigger_reset = 0;
12772 if (asoc->stream_reset_outstanding) {
12773 return (EALREADY);
12774 }
12775 sctp_alloc_a_chunk(stcb, chk);
12776 if (chk == NULL) {
12777 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12778 return (ENOMEM);
12779 }
12780 chk->copy_by_ref = 0;
12781 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12782 chk->rec.chunk_id.can_take_data = 0;
12783 chk->flags = 0;
12784 chk->asoc = &stcb->asoc;
12785 chk->book_size = sizeof(struct sctp_chunkhdr);
12786 chk->send_size = SCTP_SIZE32(chk->book_size);
12787 chk->book_size_scale = 0;
12788 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12789 if (chk->data == NULL) {
12790 sctp_free_a_chunk(stcb, chk, so_locked);
12791 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12792 return (ENOMEM);
12793 }
12794 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12795
12796 /* setup chunk parameters */
12797 chk->sent = SCTP_DATAGRAM_UNSENT;
12798 chk->snd_count = 0;
12799 if (stcb->asoc.alternate) {
12800 chk->whoTo = stcb->asoc.alternate;
12801 } else {
12802 chk->whoTo = stcb->asoc.primary_destination;
12803 }
12804 ch = mtod(chk->data, struct sctp_chunkhdr *);
12805 ch->chunk_type = SCTP_STREAM_RESET;
12806 ch->chunk_flags = 0;
12807 ch->chunk_length = htons(chk->book_size);
12808 atomic_add_int(&chk->whoTo->ref_count, 1);
12809 SCTP_BUF_LEN(chk->data) = chk->send_size;
12810 seq = stcb->asoc.str_reset_seq_out;
12811 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12812 seq++;
12813 asoc->stream_reset_outstanding++;
12814 } else {
12815 m_freem(chk->data);
12816 chk->data = NULL;
12817 sctp_free_a_chunk(stcb, chk, so_locked);
12818 return (ENOENT);
12819 }
12820 asoc->str_reset = chk;
12821 /* insert the chunk for sending */
12822 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12823 chk,
12824 sctp_next);
12825 asoc->ctrl_queue_cnt++;
12826
12827 if (stcb->asoc.send_sack) {
12828 sctp_send_sack(stcb, so_locked);
12829 }
12830 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12831 return (0);
12832}
12833
/*
 * Build and queue a RE-CONFIG (stream reset) chunk carrying any
 * combination of: an outgoing-SSN-reset request (implied by
 * send_in_req), an incoming-SSN-reset request, an SSN/TSN-reset
 * request, and add-outgoing/add-incoming stream requests (selected by
 * bits 1 and 2 of add_stream, with counts adding_o / adding_i).
 *
 * Returns 0 on success; EBUSY if a reset is already outstanding;
 * EINVAL for an empty or conflicting request; ENOMEM on allocation
 * failure or an oversized stream list.
 */
int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
    uint16_t number_entries, uint16_t *list,
    uint8_t send_in_req,
    uint8_t send_tsn_req,
    uint8_t add_stream,
    uint16_t adding_o,
    uint16_t adding_i, uint8_t peer_asked)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	int can_send_out_req=0;
	uint32_t seq;

	asoc = &stcb->asoc;
	if (asoc->stream_reset_outstanding) {
		/*-
		 * Already one pending, must get ACK back to clear the flag.
		 */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
		return (EBUSY);
	}
	if ((send_in_req == 0) && (send_tsn_req == 0) &&
	    (add_stream == 0)) {
		/* nothing to do */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	if (send_tsn_req && send_in_req) {
		/* error, can't do that */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	} else if (send_in_req) {
		/* An in-reset also triggers our matching out-reset. */
		can_send_out_req = 1;
	}
	/* Reject lists that cannot fit in a single MCLBYTES chunk. */
	if (number_entries > (MCLBYTES -
	    SCTP_MIN_OVERHEAD -
	    sizeof(struct sctp_chunkhdr) -
	    sizeof(struct sctp_stream_reset_out_request)) /
	    sizeof(uint16_t)) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	/* Start with a bare RE-CONFIG chunk; params are appended below. */
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;

	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (stcb->asoc.alternate) {
		chk->whoTo = stcb->asoc.alternate;
	} else {
		chk->whoTo = stcb->asoc.primary_destination;
	}
	atomic_add_int(&chk->whoTo->ref_count, 1);
	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	/* Each appended request consumes one sequence number. */
	seq = stcb->asoc.str_reset_seq_out;
	if (can_send_out_req) {
		int ret;
		ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
		if (ret) {
			seq++;
			asoc->stream_reset_outstanding++;
		}
	}
	if ((add_stream & 1) &&
	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
		/* Need to allocate more */
		struct sctp_stream_out *oldstream;
		struct sctp_stream_queue_pending *sp, *nsp;
		int i;
#if defined(SCTP_DETAILED_STR_STATS)
		int j;
#endif

		oldstream = stcb->asoc.strmout;
		/* get some more */
		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
		    (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
		    SCTP_M_STRMO);
		if (stcb->asoc.strmout == NULL) {
			uint8_t x;
			/* Allocation failed: keep the old array and drop
			 * the add-outgoing request from this chunk. */
			stcb->asoc.strmout = oldstream;
			/* Turn off the bit */
			x = add_stream & 0xfe;
			add_stream = x;
			goto skip_stuff;
		}
		/* Ok now we proceed with copying the old out stuff and
		 * initializing the new stuff.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
			stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
			stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
			stcb->asoc.strmout[i].sid = i;
			stcb->asoc.strmout[i].state = oldstream[i].state;
			/* FIX ME FIX ME */
			/* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */
			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
			/* now anything on those queues? */
			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
			}

		}
		/* now the new streams */
		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
			stcb->asoc.strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
				stcb->asoc.strmout[i].abandoned_sent[j] = 0;
				stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
			}
#else
			stcb->asoc.strmout[i].abandoned_sent[0] = 0;
			stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
#endif
			stcb->asoc.strmout[i].next_mid_ordered = 0;
			stcb->asoc.strmout[i].next_mid_unordered = 0;
			stcb->asoc.strmout[i].sid = i;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
			stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
		}
		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
		SCTP_FREE(oldstream, SCTP_M_STRMO);
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
skip_stuff:
	if ((add_stream & 1) && (adding_o > 0)) {
		asoc->strm_pending_add_size = adding_o;
		asoc->peer_req_out = peer_asked;
		sctp_add_an_out_stream(chk, seq, adding_o);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if ((add_stream & 2) && (adding_i > 0)) {
		sctp_add_an_in_stream(chk, seq, adding_i);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk, number_entries, list, seq);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	asoc->str_reset = chk;
	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	asoc->ctrl_queue_cnt++;
	if (stcb->asoc.send_sack) {
		sctp_send_sack(stcb, SCTP_SO_LOCKED);
	}
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}
13029
13030void
13031sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
13032 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13033#if defined(__FreeBSD__)
13034 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13035#endif
13036 uint32_t vrf_id, uint16_t port)
13037{
13038 /* Don't respond to an ABORT with an ABORT. */
13039 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
13040 if (cause)
13041 sctp_m_freem(cause);
13042 return;
13043 }
13044 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
13045#if defined(__FreeBSD__)
13046 mflowtype, mflowid, fibnum,
13047#endif
13048 vrf_id, port);
13049 return;
13050}
13051
13052void
13053sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
13054 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
13055#if defined(__FreeBSD__)
13056 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
13057#endif
13058 uint32_t vrf_id, uint16_t port)
13059{
13060 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
13061#if defined(__FreeBSD__)
13062 mflowtype, mflowid, fibnum,
13063#endif
13064 vrf_id, port);
13065 return;
13066}
13067
/*
 * Copy up to max_send_len bytes of user data from "uio" into a fresh
 * mbuf chain.  On success returns the chain head, stores the number of
 * bytes copied in *sndout and the last mbuf in *new_tail; on failure
 * returns NULL with *error set (ENOBUFS or EFAULT).
 *
 * Three implementations: Panda and newer FreeBSD use the platform's
 * m_uiotombuf(); all other platforms hand-roll the uiomove() loop.
 */
static struct mbuf *
sctp_copy_resume(struct uio *uio,
    int max_send_len,
#if defined(__FreeBSD__) && __FreeBSD_version > 602000
    int user_marks_eor,
#endif
    int *error,
    uint32_t *sndout,
    struct mbuf **new_tail)
{
#if defined(__Panda__)
	struct mbuf *m;

	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
	    (user_marks_eor ? M_EOR : 0));
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		*error = ENOBUFS;
	} else {
		*sndout = m_length(m, NULL);
		*new_tail = m_last(m);
	}
	return (m);
#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
	struct mbuf *m;

	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
	    (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		*error = ENOBUFS;
	} else {
		*sndout = m_length(m, NULL);
		*new_tail = m_last(m);
	}
	return (m);
#else
	/* Generic path: allocate mbufs and uiomove() into each in turn. */
	int left, cancpy, willcpy;
	struct mbuf *m, *head;

#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
	left = (int)min(uio->uio_resid, max_send_len);
#else
	left = (int)min(uio_resid(uio), max_send_len);
#endif
#else
	left = (int)min(uio->uio_resid, max_send_len);
#endif
	/* Always get a header just in case */
	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
	if (head == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		*error = ENOBUFS;
		return (NULL);
	}
	cancpy = (int)M_TRAILINGSPACE(head);
	willcpy = min(cancpy, left);
	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
	if (*error) {
		sctp_m_freem(head);
		return (NULL);
	}
	*sndout += willcpy;
	left -= willcpy;
	SCTP_BUF_LEN(head) = willcpy;
	m = head;
	*new_tail = head;
	while (left > 0) {
		/* move in user data */
		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
		if (SCTP_BUF_NEXT(m) == NULL) {
			/* Allocation failed mid-chain: drop the whole chain. */
			sctp_m_freem(head);
			*new_tail = NULL;
			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
			*error = ENOBUFS;
			return (NULL);
		}
		m = SCTP_BUF_NEXT(m);
		cancpy = (int)M_TRAILINGSPACE(m);
		willcpy = min(cancpy, left);
		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (*error) {
			/* Copy-in fault: free the chain, report EFAULT. */
			sctp_m_freem(head);
			*new_tail = NULL;
			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
			*error = EFAULT;
			return (NULL);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		*sndout += willcpy;
		*new_tail = m;
		if (left == 0) {
			SCTP_BUF_NEXT(m) = NULL;
		}
	}
	return (head);
#endif
}
13168
/*
 * Copy sp->length bytes of user data from "uio" into a new mbuf chain,
 * reserving resv_upfront bytes of leading space in the first mbuf (for
 * the DATA chunk header).  On success stores the chain in sp->data,
 * the final mbuf in sp->tail_mbuf, updates sp->length to the bytes
 * actually copied, and returns 0; otherwise returns an errno
 * (ENOBUFS, or the uiomove() error).
 *
 * Panda and newer FreeBSD delegate to m_uiotombuf(); other platforms
 * use the hand-rolled uiomove() loop.
 */
static int
sctp_copy_one(struct sctp_stream_queue_pending *sp,
    struct uio *uio,
    int resv_upfront)
{
#if defined(__Panda__)
	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
	    resv_upfront, 0);
	if (sp->data == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		return (ENOBUFS);
	}

	sp->tail_mbuf = m_last(sp->data);
	return (0);
#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
	    resv_upfront, 0);
	if (sp->data == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		return (ENOBUFS);
	}

	sp->tail_mbuf = m_last(sp->data);
	return (0);
#else
	/* Generic path. */
	int left;
	int cancpy, willcpy, error;
	struct mbuf *m, *head;
	int cpsz = 0;

	/* First one gets a header */
	left = sp->length;
	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		return (ENOBUFS);
	}
	/*-
	 * Add this one for m in now, that way if the alloc fails we won't
	 * have a bad cnt.
	 */
	SCTP_BUF_RESV_UF(m, resv_upfront);
	cancpy = (int)M_TRAILINGSPACE(m);
	willcpy = min(cancpy, left);
	while (left > 0) {
		/* move in user data */
		error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (error) {
			sctp_m_freem(head);
			return (error);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		cpsz += willcpy;
		if (left > 0) {
			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
			if (SCTP_BUF_NEXT(m) == NULL) {
				/*
				 * the head goes back to caller, he can free
				 * the rest
				 */
				sctp_m_freem(head);
				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
				return (ENOBUFS);
			}
			m = SCTP_BUF_NEXT(m);
			cancpy = (int)M_TRAILINGSPACE(m);
			willcpy = min(cancpy, left);
		} else {
			/* Last mbuf: record the tail and terminate the chain. */
			sp->tail_mbuf = m;
			SCTP_BUF_NEXT(m) = NULL;
		}
	}
	sp->data = head;
	sp->length = cpsz;
	return (0);
#endif
}
13248
13249
13250
/*
 * Build one stream-queue-pending entry (sp) from user data described by
 * 'uio', copying at most 'max_send_len' bytes, and return it to the caller
 * for queueing on the stream's outqueue.  On failure NULL is returned and
 * *error is set (ECONNRESET while shutting down, ENOMEM on allocation
 * failure, or the error from the data copy); on success *error is 0.
 *
 * Note: the returned sp is NOT yet linked onto any queue; the caller owns
 * it and is responsible for insertion (see sctp_lower_sosend).
 */
static struct sctp_stream_queue_pending *
sctp_copy_it_in(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_sndrcvinfo *srcv,
    struct uio *uio,
    struct sctp_nets *net,
    int max_send_len,
    int user_marks_eor,
    int *error)

{
	/*-
	 * This routine must be very careful in its work. Protocol
	 * processing is up and running so care must be taken to spl...()
	 * when you need to do something that may effect the stcb/asoc. The
	 * sb is locked however. When data is copied the protocol processing
	 * should be enabled since this is a slower operation...
	 */
	struct sctp_stream_queue_pending *sp = NULL;
	int resv_in_first;

	*error = 0;
	/* Now can we send this? */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
		*error = ECONNRESET;
		goto out_now;
	}
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		*error = ENOMEM;
		goto out_now;
	}
	/* Seed the pending entry from the caller's send-info. */
	sp->act_flags = 0;
	sp->sender_all_done = 0;
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->fsn = 0;
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);

	sp->sid = srcv->sinfo_stream;
	/*
	 * Clamp this chunk of the message to what the caller allows;
	 * uio_resid()/uio->uio_resid is the total user data remaining
	 * (accessor spelling differs per Apple release).
	 */
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
#else
	sp->length = (uint32_t)min(uio_resid(uio), max_send_len);
#endif
#else
	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
#endif
	/*
	 * The message is complete if we are taking the entire remaining
	 * user data AND either explicit-EOR mode is off, or the user
	 * flagged EOF/EOR for this send.
	 */
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
	if ((sp->length == (uint32_t)uio->uio_resid) &&
#else
	if ((sp->length == (uint32_t)uio_resid(uio)) &&
#endif
#else
	if ((sp->length == (uint32_t)uio->uio_resid) &&
#endif
	    ((user_marks_eor == 0) ||
	     (srcv->sinfo_flags & SCTP_EOF) ||
	     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
		sp->msg_is_complete = 1;
	} else {
		sp->msg_is_complete = 0;
	}
	sp->sender_all_done = 0;
	sp->some_taken = 0;
	sp->put_last_out = 0;
	/* Reserve room up front in the first mbuf for the DATA chunk header. */
	resv_in_first = sizeof(struct sctp_data_chunk);
	sp->data = sp->tail_mbuf = NULL;
	if (sp->length == 0) {
		/* Zero-length send (e.g. EOF-only): nothing to copy. */
		*error = 0;
		goto skip_copy;
	}
	/* Pick the auth key: caller-specified key number, else the active one. */
	if (srcv->sinfo_keynumber_valid) {
		sp->auth_keyid = srcv->sinfo_keynumber;
	} else {
		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
	}
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
		/* Hold a reference on the key for the lifetime of this sp. */
		sctp_auth_key_acquire(stcb, sp->auth_keyid);
		sp->holds_key_ref = 1;
	}
	/*
	 * Drop the (Apple) socket lock across the user-space copy: the
	 * copy may sleep/fault and must not hold the lock meanwhile.
	 */
#if defined(__APPLE__)
	SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
#endif
	*error = sctp_copy_one(sp, uio, resv_in_first);
#if defined(__APPLE__)
	SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
#endif
 skip_copy:
	if (*error) {
		/* Copy failed: release sp (drops the auth key ref too). */
		sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
		sp = NULL;
	} else {
		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
			/* Caller pinned a destination; keep a net refcount. */
			sp->net = net;
			atomic_add_int(&sp->net->ref_count, 1);
		} else {
			sp->net = NULL;
		}
		sctp_set_prsctp_policy(sp);
	}
out_now:
	return (sp);
}
13365
13366
/*
 * Socket-layer send entry point for SCTP.  Thin front end that:
 *  1) (Apple) takes the socket lock around the whole operation,
 *  2) extracts an SCTP_SNDRCV cmsg from 'control' if present, so the
 *     per-send info can be handed to sctp_lower_sosend(),
 *  3) rewrites an IPv4-mapped IPv6 destination into a plain sockaddr_in,
 *  4) delegates the real work to sctp_lower_sosend().
 *
 * Returns 0 on success or an errno from sctp_lower_sosend().
 * Parameter spelling varies by platform: Panda passes pak handles instead
 * of mbufs, and the thread/proc argument exists only where the platform
 * needs it.
 */
int
sctp_sosend(struct socket *so,
            struct sockaddr *addr,
            struct uio *uio,
#ifdef __Panda__
            pakhandle_type top,
            pakhandle_type icontrol,
#else
            struct mbuf *top,
            struct mbuf *control,
#endif
#if defined(__APPLE__) || defined(__Panda__)
            int flags
#else
            int flags,
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
            struct thread *p
#elif defined(__Windows__)
            PKTHREAD p
#else
#if defined(__Userspace__)
            /*
             * proc is a dummy in __Userspace__ and will not be passed
             * to sctp_lower_sosend
             */
#endif
            struct proc *p
#endif
#endif
)
{
#ifdef __Panda__
	struct mbuf *control = NULL;
#endif
#if defined(__APPLE__)
	struct proc *p = current_proc();
#endif
	int error, use_sndinfo = 0;
	struct sctp_sndrcvinfo sndrcvninfo;
	struct sockaddr *addr_to_use;
#if defined(INET) && defined(INET6)
	/* Local storage for a v4-mapped address converted to AF_INET. */
	struct sockaddr_in sin;
#endif

#if defined(__APPLE__)
	SCTP_SOCKET_LOCK(so, 1);
#endif
#ifdef __Panda__
	/* Panda hands us a pak handle; get at the underlying mbuf chain. */
	control = SCTP_HEADER_TO_CHAIN(icontrol);
#endif
	if (control) {
		/* process cmsg snd/rcv info (maybe a assoc-id) */
		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
		                   sizeof(sndrcvninfo))) {
			/* got one */
			use_sndinfo = 1;
		}
	}
	addr_to_use = addr;
#if defined(INET) && defined(INET6)
	/* Unmap a v4-mapped IPv6 destination so the core sees AF_INET. */
	if ((addr) && (addr->sa_family == AF_INET6)) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)addr;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			in6_sin6_2_sin(&sin, sin6);
			addr_to_use = (struct sockaddr *)&sin;
		}
	}
#endif
	error = sctp_lower_sosend(so, addr_to_use, uio, top,
#ifdef __Panda__
	                          icontrol,
#else
	                          control,
#endif
	                          flags,
	                          use_sndinfo ? &sndrcvninfo: NULL
#if !(defined(__Panda__) || defined(__Userspace__))
	                          , p
#endif
	    );
#if defined(__APPLE__)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return (error);
}
13454
13455
13456int
13457sctp_lower_sosend(struct socket *so,
13458 struct sockaddr *addr,
13459 struct uio *uio,
13460#ifdef __Panda__
13461 pakhandle_type i_pak,
13462 pakhandle_type i_control,
13463#else
13464 struct mbuf *i_pak,
13465 struct mbuf *control,
13466#endif
13467 int flags,
13468 struct sctp_sndrcvinfo *srcv
13469#if !(defined( __Panda__) || defined(__Userspace__))
13470 ,
13471#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13472 struct thread *p
13473#elif defined(__Windows__)
13474 PKTHREAD p
13475#else
13476 struct proc *p
13477#endif
13478#endif
13479 )
13480{
13481 unsigned int sndlen = 0, max_len;
13482 int error, len;
13483 struct mbuf *top = NULL;
13484#ifdef __Panda__
13485 struct mbuf *control = NULL;
13486#endif
13487 int queue_only = 0, queue_only_for_init = 0;
13488 int free_cnt_applied = 0;
13489 int un_sent;
13490 int now_filled = 0;
13491 unsigned int inqueue_bytes = 0;
13492 struct sctp_block_entry be;
13493 struct sctp_inpcb *inp;
13494 struct sctp_tcb *stcb = NULL;
13495 struct timeval now;
13496 struct sctp_nets *net;
13497 struct sctp_association *asoc;
13498 struct sctp_inpcb *t_inp;
13499 int user_marks_eor;
13500 int create_lock_applied = 0;
13501 int nagle_applies = 0;
13502 int some_on_control = 0;
13503 int got_all_of_the_send = 0;
13504 int hold_tcblock = 0;
13505 int non_blocking = 0;
13506 uint32_t local_add_more, local_soresv = 0;
13507 uint16_t port;
13508 uint16_t sinfo_flags;
13509 sctp_assoc_t sinfo_assoc_id;
13510
13511 error = 0;
13512 net = NULL;
13513 stcb = NULL;
13514 asoc = NULL;
13515
13516#if defined(__APPLE__)
13517 sctp_lock_assert(so);
13518#endif
13519 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
13520 if (inp == NULL) {
13521 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13522 error = EINVAL;
13523 if (i_pak) {
13524 SCTP_RELEASE_PKT(i_pak);
13525 }
13526 return (error);
13527 }
13528 if ((uio == NULL) && (i_pak == NULL)) {
13529 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13530 return (EINVAL);
13531 }
13532 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
13533 atomic_add_int(&inp->total_sends, 1);
13534 if (uio) {
13535#if defined(__APPLE__)
13536#if defined(APPLE_LEOPARD)
13537 if (uio->uio_resid < 0) {
13538#else
13539 if (uio_resid(uio) < 0) {
13540#endif
13541#else
13542 if (uio->uio_resid < 0) {
13543#endif
13544 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13545 return (EINVAL);
13546 }
13547#if defined(__APPLE__)
13548#if defined(APPLE_LEOPARD)
13549 sndlen = (unsigned int)uio->uio_resid;
13550#else
13551 sndlen = (unsigned int)uio_resid(uio);
13552#endif
13553#else
13554 sndlen = (unsigned int)uio->uio_resid;
13555#endif
13556 } else {
13557 top = SCTP_HEADER_TO_CHAIN(i_pak);
13558#ifdef __Panda__
13559 /*-
13560 * app len indicates the datalen, dgsize for cases
13561 * of SCTP_EOF/ABORT will not have the right len
13562 */
13563 sndlen = SCTP_APP_DATA_LEN(i_pak);
13564 /*-
13565 * Set the particle len also to zero to match
13566 * up with app len. We only have one particle
13567 * if app len is zero for Panda. This is ensured
13568 * in the socket lib
13569 */
13570 if (sndlen == 0) {
13571 SCTP_BUF_LEN(top) = 0;
13572 }
13573 /*-
13574 * We delink the chain from header, but keep
13575 * the header around as we will need it in
13576 * EAGAIN case
13577 */
13578 SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
13579#else
13580 sndlen = SCTP_HEADER_LEN(i_pak);
13581#endif
13582 }
13583 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
13584 (void *)addr,
13585 sndlen);
13586#ifdef __Panda__
13587 if (i_control) {
13588 control = SCTP_HEADER_TO_CHAIN(i_control);
13589 }
13590#endif
13591 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
13592 (inp->sctp_socket->so_qlimit)) {
13593 /* The listener can NOT send */
13594 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13595 error = ENOTCONN;
13596 goto out_unlocked;
13597 }
13598 /**
13599 * Pre-screen address, if one is given the sin-len
13600 * must be set correctly!
13601 */
13602 if (addr) {
13603 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
13604 switch (raddr->sa.sa_family) {
13605#ifdef INET
13606 case AF_INET:
13607#ifdef HAVE_SIN_LEN
13608 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
13609 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13610 error = EINVAL;
13611 goto out_unlocked;
13612 }
13613#endif
13614 port = raddr->sin.sin_port;
13615 break;
13616#endif
13617#ifdef INET6
13618 case AF_INET6:
13619#ifdef HAVE_SIN6_LEN
13620 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
13621 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13622 error = EINVAL;
13623 goto out_unlocked;
13624 }
13625#endif
13626 port = raddr->sin6.sin6_port;
13627 break;
13628#endif
13629#if defined(__Userspace__)
13630 case AF_CONN:
13631#ifdef HAVE_SCONN_LEN
13632 if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
13633 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13634 error = EINVAL;
13635 goto out_unlocked;
13636 }
13637#endif
13638 port = raddr->sconn.sconn_port;
13639 break;
13640#endif
13641 default:
13642 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
13643 error = EAFNOSUPPORT;
13644 goto out_unlocked;
13645 }
13646 } else
13647 port = 0;
13648
13649 if (srcv) {
13650 sinfo_flags = srcv->sinfo_flags;
13651 sinfo_assoc_id = srcv->sinfo_assoc_id;
13652 if (INVALID_SINFO_FLAG(sinfo_flags) ||
13653 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
13654 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13655 error = EINVAL;
13656 goto out_unlocked;
13657 }
13658 if (srcv->sinfo_flags)
13659 SCTP_STAT_INCR(sctps_sends_with_flags);
13660 } else {
13661 sinfo_flags = inp->def_send.sinfo_flags;
13662 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
13663 }
13664 if (sinfo_flags & SCTP_SENDALL) {
13665 /* its a sendall */
13666 error = sctp_sendall(inp, uio, top, srcv);
13667 top = NULL;
13668 goto out_unlocked;
13669 }
13670 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
13671 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13672 error = EINVAL;
13673 goto out_unlocked;
13674 }
13675 /* now we must find the assoc */
13676 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
13677 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
13678 SCTP_INP_RLOCK(inp);
13679 stcb = LIST_FIRST(&inp->sctp_asoc_list);
13680 if (stcb) {
13681 SCTP_TCB_LOCK(stcb);
13682 hold_tcblock = 1;
13683 }
13684 SCTP_INP_RUNLOCK(inp);
13685 } else if (sinfo_assoc_id) {
13686 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
13687 if (stcb != NULL) {
13688 hold_tcblock = 1;
13689 }
13690 } else if (addr) {
13691 /*-
13692 * Since we did not use findep we must
13693 * increment it, and if we don't find a tcb
13694 * decrement it.
13695 */
13696 SCTP_INP_WLOCK(inp);
13697 SCTP_INP_INCR_REF(inp);
13698 SCTP_INP_WUNLOCK(inp);
13699 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13700 if (stcb == NULL) {
13701 SCTP_INP_WLOCK(inp);
13702 SCTP_INP_DECR_REF(inp);
13703 SCTP_INP_WUNLOCK(inp);
13704 } else {
13705 hold_tcblock = 1;
13706 }
13707 }
13708 if ((stcb == NULL) && (addr)) {
13709 /* Possible implicit send? */
13710 SCTP_ASOC_CREATE_LOCK(inp);
13711 create_lock_applied = 1;
13712 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
13713 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
13714 /* Should I really unlock ? */
13715 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13716 error = EINVAL;
13717 goto out_unlocked;
13718
13719 }
13720 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
13721 (addr->sa_family == AF_INET6)) {
13722 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13723 error = EINVAL;
13724 goto out_unlocked;
13725 }
13726 SCTP_INP_WLOCK(inp);
13727 SCTP_INP_INCR_REF(inp);
13728 SCTP_INP_WUNLOCK(inp);
13729 /* With the lock applied look again */
13730 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
13731 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
13732 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
13733 }
13734 if (stcb == NULL) {
13735 SCTP_INP_WLOCK(inp);
13736 SCTP_INP_DECR_REF(inp);
13737 SCTP_INP_WUNLOCK(inp);
13738 } else {
13739 hold_tcblock = 1;
13740 }
13741 if (error) {
13742 goto out_unlocked;
13743 }
13744 if (t_inp != inp) {
13745 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
13746 error = ENOTCONN;
13747 goto out_unlocked;
13748 }
13749 }
13750 if (stcb == NULL) {
13751 if (addr == NULL) {
13752 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13753 error = ENOENT;
13754 goto out_unlocked;
13755 } else {
13756 /* We must go ahead and start the INIT process */
13757 uint32_t vrf_id;
13758
13759 if ((sinfo_flags & SCTP_ABORT) ||
13760 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
13761 /*-
13762 * User asks to abort a non-existant assoc,
13763 * or EOF a non-existant assoc with no data
13764 */
13765 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
13766 error = ENOENT;
13767 goto out_unlocked;
13768 }
13769 /* get an asoc/stcb struct */
13770 vrf_id = inp->def_vrf_id;
13771#ifdef INVARIANTS
13772 if (create_lock_applied == 0) {
13773 panic("Error, should hold create lock and I don't?");
13774 }
13775#endif
13776 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
13777 inp->sctp_ep.pre_open_stream_count,
13778 inp->sctp_ep.port,
13779#if !(defined( __Panda__) || defined(__Userspace__))
13780 p);
13781#else
13782 (struct proc *)NULL);
13783#endif
13784 if (stcb == NULL) {
13785 /* Error is setup for us in the call */
13786 goto out_unlocked;
13787 }
13788 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
13789 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
13790 /* Set the connected flag so we can queue data */
13791 soisconnecting(so);
13792 }
13793 hold_tcblock = 1;
13794 if (create_lock_applied) {
13795 SCTP_ASOC_CREATE_UNLOCK(inp);
13796 create_lock_applied = 0;
13797 } else {
13798 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
13799 }
13800 /* Turn on queue only flag to prevent data from being sent */
13801 queue_only = 1;
13802 asoc = &stcb->asoc;
13803 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13804 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
13805
13806 /* initialize authentication params for the assoc */
13807 sctp_initialize_auth_params(inp, stcb);
13808
13809 if (control) {
13810 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
13811 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
13812 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
13813 hold_tcblock = 0;
13814 stcb = NULL;
13815 goto out_unlocked;
13816 }
13817 }
13818 /* out with the INIT */
13819 queue_only_for_init = 1;
13820 /*-
13821 * we may want to dig in after this call and adjust the MTU
13822 * value. It defaulted to 1500 (constant) but the ro
13823 * structure may now have an update and thus we may need to
13824 * change it BEFORE we append the message.
13825 */
13826 }
13827 } else
13828 asoc = &stcb->asoc;
13829 if (srcv == NULL)
13830 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
13831 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
13832 if (addr)
13833 net = sctp_findnet(stcb, addr);
13834 else
13835 net = NULL;
13836 if ((net == NULL) ||
13837 ((port != 0) && (port != stcb->rport))) {
13838 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13839 error = EINVAL;
13840 goto out_unlocked;
13841 }
13842 } else {
13843 if (stcb->asoc.alternate) {
13844 net = stcb->asoc.alternate;
13845 } else {
13846 net = stcb->asoc.primary_destination;
13847 }
13848 }
13849 atomic_add_int(&stcb->total_sends, 1);
13850 /* Keep the stcb from being freed under our feet */
13851 atomic_add_int(&asoc->refcnt, 1);
13852 free_cnt_applied = 1;
13853
13854 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
13855 if (sndlen > asoc->smallest_mtu) {
13856 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13857 error = EMSGSIZE;
13858 goto out_unlocked;
13859 }
13860 }
13861#if defined(__Userspace__)
13862 if (inp->recv_callback) {
13863 non_blocking = 1;
13864 }
13865#endif
13866 if (SCTP_SO_IS_NBIO(so)
13867#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
13868 || (flags & MSG_NBIO)
13869#endif
13870 ) {
13871 non_blocking = 1;
13872 }
13873 /* would we block? */
13874 if (non_blocking) {
13875 if (hold_tcblock == 0) {
13876 SCTP_TCB_LOCK(stcb);
13877 hold_tcblock = 1;
13878 }
13879 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13880 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
13881 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13882 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
13883 if (sndlen > SCTP_SB_LIMIT_SND(so))
13884 error = EMSGSIZE;
13885 else
13886 error = EWOULDBLOCK;
13887 goto out_unlocked;
13888 }
13889 stcb->asoc.sb_send_resv += sndlen;
13890 SCTP_TCB_UNLOCK(stcb);
13891 hold_tcblock = 0;
13892 } else {
13893 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
13894 }
13895 local_soresv = sndlen;
13896 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13897 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13898 error = ECONNRESET;
13899 goto out_unlocked;
13900 }
13901 if (create_lock_applied) {
13902 SCTP_ASOC_CREATE_UNLOCK(inp);
13903 create_lock_applied = 0;
13904 }
13905 /* Is the stream no. valid? */
13906 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13907 /* Invalid stream number */
13908 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13909 error = EINVAL;
13910 goto out_unlocked;
13911 }
13912 if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
13913 (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
13914 /*
13915 * Can't queue any data while stream reset is underway.
13916 */
13917 if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
13918 error = EAGAIN;
13919 } else {
13920 error = EINVAL;
13921 }
13922 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
13923 goto out_unlocked;
13924 }
13925 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13926 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13927 queue_only = 1;
13928 }
13929 /* we are now done with all control */
13930 if (control) {
13931 sctp_m_freem(control);
13932 control = NULL;
13933 }
13934 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
13935 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13936 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13937 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13938 if (srcv->sinfo_flags & SCTP_ABORT) {
13939 ;
13940 } else {
13941 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13942 error = ECONNRESET;
13943 goto out_unlocked;
13944 }
13945 }
13946 /* Ok, we will attempt a msgsnd :> */
13947#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
13948 if (p) {
13949#if defined(__FreeBSD__) && __FreeBSD_version >= 603000
13950 p->td_ru.ru_msgsnd++;
13951#elif defined(__FreeBSD__) && __FreeBSD_version >= 500000
13952 p->td_proc->p_stats->p_ru.ru_msgsnd++;
13953#else
13954 p->p_stats->p_ru.ru_msgsnd++;
13955#endif
13956 }
13957#endif
13958 /* Are we aborting? */
13959 if (srcv->sinfo_flags & SCTP_ABORT) {
13960 struct mbuf *mm;
13961 int tot_demand, tot_out = 0, max_out;
13962
13963 SCTP_STAT_INCR(sctps_sends_with_abort);
13964 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13965 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13966 /* It has to be up before we abort */
13967 /* how big is the user initiated abort? */
13968 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13969 error = EINVAL;
13970 goto out;
13971 }
13972 if (hold_tcblock) {
13973 SCTP_TCB_UNLOCK(stcb);
13974 hold_tcblock = 0;
13975 }
13976 if (top) {
13977 struct mbuf *cntm = NULL;
13978
13979 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
13980 if (sndlen != 0) {
13981 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13982 tot_out += SCTP_BUF_LEN(cntm);
13983 }
13984 }
13985 } else {
13986 /* Must fit in a MTU */
13987 tot_out = sndlen;
13988 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13989 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
13990 /* To big */
13991 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13992 error = EMSGSIZE;
13993 goto out;
13994 }
13995 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
13996 }
13997 if (mm == NULL) {
13998 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13999 error = ENOMEM;
14000 goto out;
14001 }
14002 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
14003 max_out -= sizeof(struct sctp_abort_msg);
14004 if (tot_out > max_out) {
14005 tot_out = max_out;
14006 }
14007 if (mm) {
14008 struct sctp_paramhdr *ph;
14009
14010 /* now move forward the data pointer */
14011 ph = mtod(mm, struct sctp_paramhdr *);
14012 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
14013 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
14014 ph++;
14015 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
14016 if (top == NULL) {
14017#if defined(__APPLE__)
14018 SCTP_SOCKET_UNLOCK(so, 0);
14019#endif
14020 error = uiomove((caddr_t)ph, (int)tot_out, uio);
14021#if defined(__APPLE__)
14022 SCTP_SOCKET_LOCK(so, 0);
14023#endif
14024 if (error) {
14025 /*-
14026 * Here if we can't get his data we
14027 * still abort we just don't get to
14028 * send the users note :-0
14029 */
14030 sctp_m_freem(mm);
14031 mm = NULL;
14032 }
14033 } else {
14034 if (sndlen != 0) {
14035 SCTP_BUF_NEXT(mm) = top;
14036 }
14037 }
14038 }
14039 if (hold_tcblock == 0) {
14040 SCTP_TCB_LOCK(stcb);
14041 }
14042 atomic_add_int(&stcb->asoc.refcnt, -1);
14043 free_cnt_applied = 0;
14044 /* release this lock, otherwise we hang on ourselves */
14045 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
14046 /* now relock the stcb so everything is sane */
14047 hold_tcblock = 0;
14048 stcb = NULL;
14049 /* In this case top is already chained to mm
14050 * avoid double free, since we free it below if
14051 * top != NULL and driver would free it after sending
14052 * the packet out
14053 */
14054 if (sndlen != 0) {
14055 top = NULL;
14056 }
14057 goto out_unlocked;
14058 }
14059 /* Calculate the maximum we can send */
14060 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
14061 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
14062 if (non_blocking) {
14063 /* we already checked for non-blocking above. */
14064 max_len = sndlen;
14065 } else {
14066 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14067 }
14068 } else {
14069 max_len = 0;
14070 }
14071 if (hold_tcblock) {
14072 SCTP_TCB_UNLOCK(stcb);
14073 hold_tcblock = 0;
14074 }
14075 if (asoc->strmout == NULL) {
14076 /* huh? software error */
14077 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
14078 error = EFAULT;
14079 goto out_unlocked;
14080 }
14081
14082 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
14083 if ((user_marks_eor == 0) &&
14084 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
14085 /* It will NEVER fit */
14086 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
14087 error = EMSGSIZE;
14088 goto out_unlocked;
14089 }
14090 if ((uio == NULL) && user_marks_eor) {
14091 /*-
14092 * We do not support eeor mode for
14093 * sending with mbuf chains (like sendfile).
14094 */
14095 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14096 error = EINVAL;
14097 goto out_unlocked;
14098 }
14099
14100 if (user_marks_eor) {
14101 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
14102 } else {
14103 /*-
14104 * For non-eeor the whole message must fit in
14105 * the socket send buffer.
14106 */
14107 local_add_more = sndlen;
14108 }
14109 len = 0;
14110 if (non_blocking) {
14111 goto skip_preblock;
14112 }
14113 if (((max_len <= local_add_more) &&
14114 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
14115 (max_len == 0) ||
14116 ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14117 /* No room right now ! */
14118 SOCKBUF_LOCK(&so->so_snd);
14119 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
14120 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
14121 ((stcb->asoc.stream_queue_cnt+stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
14122 SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
14123 (unsigned int)SCTP_SB_LIMIT_SND(so),
14124 inqueue_bytes,
14125 local_add_more,
14126 stcb->asoc.stream_queue_cnt,
14127 stcb->asoc.chunks_on_out_queue,
14128 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
14129 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14130 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
14131 }
14132 be.error = 0;
14133#if !defined(__Panda__) && !defined(__Windows__)
14134 stcb->block_entry = &be;
14135#endif
14136 error = sbwait(&so->so_snd);
14137 stcb->block_entry = NULL;
14138 if (error || so->so_error || be.error) {
14139 if (error == 0) {
14140 if (so->so_error)
14141 error = so->so_error;
14142 if (be.error) {
14143 error = be.error;
14144 }
14145 }
14146 SOCKBUF_UNLOCK(&so->so_snd);
14147 goto out_unlocked;
14148 }
14149 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14150 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14151 asoc, stcb->asoc.total_output_queue_size);
14152 }
14153 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14154 SOCKBUF_UNLOCK(&so->so_snd);
14155 goto out_unlocked;
14156 }
14157 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
14158 }
14159 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
14160 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14161 } else {
14162 max_len = 0;
14163 }
14164 SOCKBUF_UNLOCK(&so->so_snd);
14165 }
14166
14167skip_preblock:
14168 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14169 goto out_unlocked;
14170 }
14171#if defined(__APPLE__)
14172 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14173#endif
14174 /* sndlen covers for mbuf case
14175 * uio_resid covers for the non-mbuf case
14176 * NOTE: uio will be null when top/mbuf is passed
14177 */
14178 if (sndlen == 0) {
14179 if (srcv->sinfo_flags & SCTP_EOF) {
14180 got_all_of_the_send = 1;
14181 goto dataless_eof;
14182 } else {
14183 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14184 error = EINVAL;
14185 goto out;
14186 }
14187 }
14188 if (top == NULL) {
14189 struct sctp_stream_queue_pending *sp;
14190 struct sctp_stream_out *strm;
14191 uint32_t sndout;
14192
14193 SCTP_TCB_SEND_LOCK(stcb);
14194 if ((asoc->stream_locked) &&
14195 (asoc->stream_locked_on != srcv->sinfo_stream)) {
14196 SCTP_TCB_SEND_UNLOCK(stcb);
14197 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
14198 error = EINVAL;
14199 goto out;
14200 }
14201 SCTP_TCB_SEND_UNLOCK(stcb);
14202
14203 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
14204 if (strm->last_msg_incomplete == 0) {
14205 do_a_copy_in:
14206 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
14207 if ((sp == NULL) || (error)) {
14208 goto out;
14209 }
14210 SCTP_TCB_SEND_LOCK(stcb);
14211 if (sp->msg_is_complete) {
14212 strm->last_msg_incomplete = 0;
14213 asoc->stream_locked = 0;
14214 } else {
14215 /* Just got locked to this guy in
14216 * case of an interrupt.
14217 */
14218 strm->last_msg_incomplete = 1;
14219 if (stcb->asoc.idata_supported == 0) {
14220 asoc->stream_locked = 1;
14221 asoc->stream_locked_on = srcv->sinfo_stream;
14222 }
14223 sp->sender_all_done = 0;
14224 }
14225 sctp_snd_sb_alloc(stcb, sp->length);
14226 atomic_add_int(&asoc->stream_queue_cnt, 1);
14227 if (srcv->sinfo_flags & SCTP_UNORDERED) {
14228 SCTP_STAT_INCR(sctps_sends_with_unord);
14229 }
14230 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
14231 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
14232 SCTP_TCB_SEND_UNLOCK(stcb);
14233 } else {
14234 SCTP_TCB_SEND_LOCK(stcb);
14235 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
14236 SCTP_TCB_SEND_UNLOCK(stcb);
14237 if (sp == NULL) {
14238 /* ???? Huh ??? last msg is gone */
14239#ifdef INVARIANTS
14240 panic("Warning: Last msg marked incomplete, yet nothing left?");
14241#else
14242 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
14243 strm->last_msg_incomplete = 0;
14244#endif
14245 goto do_a_copy_in;
14246
14247 }
14248 }
14249#if defined(__APPLE__)
14250#if defined(APPLE_LEOPARD)
14251 while (uio->uio_resid > 0) {
14252#else
14253 while (uio_resid(uio) > 0) {
14254#endif
14255#else
14256 while (uio->uio_resid > 0) {
14257#endif
14258 /* How much room do we have? */
14259 struct mbuf *new_tail, *mm;
14260
14261 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
14262 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
14263 else
14264 max_len = 0;
14265
14266 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
14267 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
14268#if defined(__APPLE__)
14269#if defined(APPLE_LEOPARD)
14270 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
14271#else
14272 (uio_resid(uio) && (uio_resid(uio) <= (int)max_len))) {
14273#endif
14274#else
14275 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
14276#endif
14277 sndout = 0;
14278 new_tail = NULL;
14279 if (hold_tcblock) {
14280 SCTP_TCB_UNLOCK(stcb);
14281 hold_tcblock = 0;
14282 }
14283#if defined(__APPLE__)
14284 SCTP_SOCKET_UNLOCK(so, 0);
14285#endif
14286#if defined(__FreeBSD__) && __FreeBSD_version > 602000
14287 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
14288#else
14289 mm = sctp_copy_resume(uio, max_len, &error, &sndout, &new_tail);
14290#endif
14291#if defined(__APPLE__)
14292 SCTP_SOCKET_LOCK(so, 0);
14293#endif
14294 if ((mm == NULL) || error) {
14295 if (mm) {
14296 sctp_m_freem(mm);
14297 }
14298 goto out;
14299 }
14300 /* Update the mbuf and count */
14301 SCTP_TCB_SEND_LOCK(stcb);
14302 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14303 /* we need to get out.
14304 * Peer probably aborted.
14305 */
14306 sctp_m_freem(mm);
14307 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
14308 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
14309 error = ECONNRESET;
14310 }
14311 SCTP_TCB_SEND_UNLOCK(stcb);
14312 goto out;
14313 }
14314 if (sp->tail_mbuf) {
14315 /* tack it to the end */
14316 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
14317 sp->tail_mbuf = new_tail;
14318 } else {
14319 /* A stolen mbuf */
14320 sp->data = mm;
14321 sp->tail_mbuf = new_tail;
14322 }
14323 sctp_snd_sb_alloc(stcb, sndout);
14324 atomic_add_int(&sp->length, sndout);
14325 len += sndout;
14326 if (srcv->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
14327 sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
14328 }
14329
14330 /* Did we reach EOR? */
14331#if defined(__APPLE__)
14332#if defined(APPLE_LEOPARD)
14333 if ((uio->uio_resid == 0) &&
14334#else
14335 if ((uio_resid(uio) == 0) &&
14336#endif
14337#else
14338 if ((uio->uio_resid == 0) &&
14339#endif
14340 ((user_marks_eor == 0) ||
14341 (srcv->sinfo_flags & SCTP_EOF) ||
14342 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
14343 sp->msg_is_complete = 1;
14344 } else {
14345 sp->msg_is_complete = 0;
14346 }
14347 SCTP_TCB_SEND_UNLOCK(stcb);
14348 }
14349#if defined(__APPLE__)
14350#if defined(APPLE_LEOPARD)
14351 if (uio->uio_resid == 0) {
14352#else
14353 if (uio_resid(uio) == 0) {
14354#endif
14355#else
14356 if (uio->uio_resid == 0) {
14357#endif
14358 /* got it all? */
14359 continue;
14360 }
14361 /* PR-SCTP? */
14362 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
14363 /* This is ugly but we must assure locking order */
14364 if (hold_tcblock == 0) {
14365 SCTP_TCB_LOCK(stcb);
14366 hold_tcblock = 1;
14367 }
14368 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
14369 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
14370 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
14371 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
14372 else
14373 max_len = 0;
14374 if (max_len > 0) {
14375 continue;
14376 }
14377 SCTP_TCB_UNLOCK(stcb);
14378 hold_tcblock = 0;
14379 }
14380 /* wait for space now */
14381 if (non_blocking) {
14382 /* Non-blocking io in place out */
14383 goto skip_out_eof;
14384 }
14385 /* What about the INIT, send it maybe */
14386 if (queue_only_for_init) {
14387 if (hold_tcblock == 0) {
14388 SCTP_TCB_LOCK(stcb);
14389 hold_tcblock = 1;
14390 }
14391 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
14392 /* a collision took us forward? */
14393 queue_only = 0;
14394 } else {
14395 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14396 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
14397 queue_only = 1;
14398 }
14399 }
14400 if ((net->flight_size > net->cwnd) &&
14401 (asoc->sctp_cmt_on_off == 0)) {
14402 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14403 queue_only = 1;
14404 } else if (asoc->ifp_had_enobuf) {
14405 SCTP_STAT_INCR(sctps_ifnomemqueued);
14406 if (net->flight_size > (2 * net->mtu)) {
14407 queue_only = 1;
14408 }
14409 asoc->ifp_had_enobuf = 0;
14410 }
14411 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
14412 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
14413 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14414 (stcb->asoc.total_flight > 0) &&
14415 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14416 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14417
14418 /*-
14419 * Ok, Nagle is set on and we have data outstanding.
14420 * Don't send anything and let SACKs drive out the
14421 * data unless we have a "full" segment to send.
14422 */
14423 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14424 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14425 }
14426 SCTP_STAT_INCR(sctps_naglequeued);
14427 nagle_applies = 1;
14428 } else {
14429 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14430 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14431 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14432 }
14433 SCTP_STAT_INCR(sctps_naglesent);
14434 nagle_applies = 0;
14435 }
14436 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14437
14438 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14439 nagle_applies, un_sent);
14440 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14441 stcb->asoc.total_flight,
14442 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14443 }
14444 if (queue_only_for_init)
14445 queue_only_for_init = 0;
14446 if ((queue_only == 0) && (nagle_applies == 0)) {
14447 /*-
14448 * need to start chunk output
14449 * before blocking.. note that if
14450 * a lock is already applied, then
14451 * the input via the net is happening
14452 * and I don't need to start output :-D
14453 */
14454 if (hold_tcblock == 0) {
14455 if (SCTP_TCB_TRYLOCK(stcb)) {
14456 hold_tcblock = 1;
14457 sctp_chunk_output(inp,
14458 stcb,
14459 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14460 }
14461 } else {
14462 sctp_chunk_output(inp,
14463 stcb,
14464 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14465 }
14466 if (hold_tcblock == 1) {
14467 SCTP_TCB_UNLOCK(stcb);
14468 hold_tcblock = 0;
14469 }
14470 }
14471 SOCKBUF_LOCK(&so->so_snd);
14472 /*-
14473 * This is a bit strange, but I think it will
14474 * work. The total_output_queue_size is locked and
14475 * protected by the TCB_LOCK, which we just released.
14476 * There is a race that can occur between releasing it
14477 * above, and me getting the socket lock, where sacks
14478 * come in but we have not put the SB_WAIT on the
14479 * so_snd buffer to get the wakeup. After the LOCK
14480 * is applied the sack_processing will also need to
14481 * LOCK the so->so_snd to do the actual sowwakeup(). So
14482 * once we have the socket buffer lock if we recheck the
14483 * size we KNOW we will get to sleep safely with the
14484 * wakeup flag in place.
14485 */
14486 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
14487 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
14488 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14489#if defined(__APPLE__)
14490#if defined(APPLE_LEOPARD)
14491 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14492 asoc, uio->uio_resid);
14493#else
14494 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14495 asoc, uio_resid(uio));
14496#endif
14497#else
14498 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
14499 asoc, (size_t)uio->uio_resid);
14500#endif
14501 }
14502 be.error = 0;
14503#if !defined(__Panda__) && !defined(__Windows__)
14504 stcb->block_entry = &be;
14505#endif
14506#if defined(__APPLE__)
14507 sbunlock(&so->so_snd, 1);
14508#endif
14509 error = sbwait(&so->so_snd);
14510 stcb->block_entry = NULL;
14511
14512 if (error || so->so_error || be.error) {
14513 if (error == 0) {
14514 if (so->so_error)
14515 error = so->so_error;
14516 if (be.error) {
14517 error = be.error;
14518 }
14519 }
14520 SOCKBUF_UNLOCK(&so->so_snd);
14521 goto out_unlocked;
14522 }
14523
14524#if defined(__APPLE__)
14525 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
14526#endif
14527 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14528 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
14529 asoc, stcb->asoc.total_output_queue_size);
14530 }
14531 }
14532 SOCKBUF_UNLOCK(&so->so_snd);
14533 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14534 goto out_unlocked;
14535 }
14536 }
14537 SCTP_TCB_SEND_LOCK(stcb);
14538 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
14539 SCTP_TCB_SEND_UNLOCK(stcb);
14540 goto out_unlocked;
14541 }
14542 if (sp) {
14543 if (sp->msg_is_complete == 0) {
14544 strm->last_msg_incomplete = 1;
14545 if (stcb->asoc.idata_supported == 0) {
14546 asoc->stream_locked = 1;
14547 asoc->stream_locked_on = srcv->sinfo_stream;
14548 }
14549 } else {
14550 sp->sender_all_done = 1;
14551 strm->last_msg_incomplete = 0;
14552 asoc->stream_locked = 0;
14553 }
14554 } else {
14555 SCTP_PRINTF("Huh no sp TSNH?\n");
14556 strm->last_msg_incomplete = 0;
14557 asoc->stream_locked = 0;
14558 }
14559 SCTP_TCB_SEND_UNLOCK(stcb);
14560#if defined(__APPLE__)
14561#if defined(APPLE_LEOPARD)
14562 if (uio->uio_resid == 0) {
14563#else
14564 if (uio_resid(uio) == 0) {
14565#endif
14566#else
14567 if (uio->uio_resid == 0) {
14568#endif
14569 got_all_of_the_send = 1;
14570 }
14571 } else {
14572 /* We send in a 0, since we do NOT have any locks */
14573 error = sctp_msg_append(stcb, net, top, srcv, 0);
14574 top = NULL;
14575 if (srcv->sinfo_flags & SCTP_EOF) {
14576 /*
14577 * This should only happen for Panda for the mbuf
14578 * send case, which does NOT yet support EEOR mode.
14579 * Thus, we can just set this flag to do the proper
14580 * EOF handling.
14581 */
14582 got_all_of_the_send = 1;
14583 }
14584 }
14585 if (error) {
14586 goto out;
14587 }
14588dataless_eof:
14589 /* EOF thing ? */
14590 if ((srcv->sinfo_flags & SCTP_EOF) &&
14591 (got_all_of_the_send == 1)) {
14592 SCTP_STAT_INCR(sctps_sends_with_eof);
14593 error = 0;
14594 if (hold_tcblock == 0) {
14595 SCTP_TCB_LOCK(stcb);
14596 hold_tcblock = 1;
14597 }
14598 if (TAILQ_EMPTY(&asoc->send_queue) &&
14599 TAILQ_EMPTY(&asoc->sent_queue) &&
14600 sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
14601 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14602 goto abort_anyway;
14603 }
14604 /* there is nothing queued to send, so I'm done... */
14605 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
14606 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14607 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14608 struct sctp_nets *netp;
14609
14610 /* only send SHUTDOWN the first time through */
14611 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
14612 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
14613 }
14614 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
14615 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
14616 sctp_stop_timers_for_shutdown(stcb);
14617 if (stcb->asoc.alternate) {
14618 netp = stcb->asoc.alternate;
14619 } else {
14620 netp = stcb->asoc.primary_destination;
14621 }
14622 sctp_send_shutdown(stcb, netp);
14623 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
14624 netp);
14625 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14626 asoc->primary_destination);
14627 }
14628 } else {
14629 /*-
14630 * we still got (or just got) data to send, so set
14631 * SHUTDOWN_PENDING
14632 */
14633 /*-
14634 * XXX sockets draft says that SCTP_EOF should be
14635 * sent with no data. currently, we will allow user
14636 * data to be sent first and move to
14637 * SHUTDOWN-PENDING
14638 */
14639 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
14640 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
14641 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
14642 if (hold_tcblock == 0) {
14643 SCTP_TCB_LOCK(stcb);
14644 hold_tcblock = 1;
14645 }
14646 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
14647 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
14648 }
14649 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
14650 if (TAILQ_EMPTY(&asoc->send_queue) &&
14651 TAILQ_EMPTY(&asoc->sent_queue) &&
14652 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
14653 struct mbuf *op_err;
14654 char msg[SCTP_DIAG_INFO_LEN];
14655
14656 abort_anyway:
14657 if (free_cnt_applied) {
14658 atomic_add_int(&stcb->asoc.refcnt, -1);
14659 free_cnt_applied = 0;
14660 }
14661 snprintf(msg, sizeof(msg),
14662 "%s:%d at %s", __FILE__, __LINE__, __func__);
14663 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
14664 msg);
14665 sctp_abort_an_association(stcb->sctp_ep, stcb,
14666 op_err, SCTP_SO_LOCKED);
14667 /* now relock the stcb so everything is sane */
14668 hold_tcblock = 0;
14669 stcb = NULL;
14670 goto out;
14671 }
14672 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
14673 asoc->primary_destination);
14674 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
14675 }
14676 }
14677 }
14678skip_out_eof:
14679 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
14680 some_on_control = 1;
14681 }
14682 if (queue_only_for_init) {
14683 if (hold_tcblock == 0) {
14684 SCTP_TCB_LOCK(stcb);
14685 hold_tcblock = 1;
14686 }
14687 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
14688 /* a collision took us forward? */
14689 queue_only = 0;
14690 } else {
14691 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
14692 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
14693 queue_only = 1;
14694 }
14695 }
14696 if ((net->flight_size > net->cwnd) &&
14697 (stcb->asoc.sctp_cmt_on_off == 0)) {
14698 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
14699 queue_only = 1;
14700 } else if (asoc->ifp_had_enobuf) {
14701 SCTP_STAT_INCR(sctps_ifnomemqueued);
14702 if (net->flight_size > (2 * net->mtu)) {
14703 queue_only = 1;
14704 }
14705 asoc->ifp_had_enobuf = 0;
14706 }
14707 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
14708 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
14709 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
14710 (stcb->asoc.total_flight > 0) &&
14711 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
14712 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
14713 /*-
14714 * Ok, Nagle is set on and we have data outstanding.
14715 * Don't send anything and let SACKs drive out the
14716 * data unless wen have a "full" segment to send.
14717 */
14718 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14719 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
14720 }
14721 SCTP_STAT_INCR(sctps_naglequeued);
14722 nagle_applies = 1;
14723 } else {
14724 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
14725 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
14726 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
14727 }
14728 SCTP_STAT_INCR(sctps_naglesent);
14729 nagle_applies = 0;
14730 }
14731 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
14732 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
14733 nagle_applies, un_sent);
14734 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
14735 stcb->asoc.total_flight,
14736 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
14737 }
14738 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
14739 /* we can attempt to send too. */
14740 if (hold_tcblock == 0) {
14741 /* If there is activity recv'ing sacks no need to send */
14742 if (SCTP_TCB_TRYLOCK(stcb)) {
14743 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14744 hold_tcblock = 1;
14745 }
14746 } else {
14747 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14748 }
14749 } else if ((queue_only == 0) &&
14750 (stcb->asoc.peers_rwnd == 0) &&
14751 (stcb->asoc.total_flight == 0)) {
14752 /* We get to have a probe outstanding */
14753 if (hold_tcblock == 0) {
14754 hold_tcblock = 1;
14755 SCTP_TCB_LOCK(stcb);
14756 }
14757 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
14758 } else if (some_on_control) {
14759 int num_out, reason, frag_point;
14760
14761 /* Here we do control only */
14762 if (hold_tcblock == 0) {
14763 hold_tcblock = 1;
14764 SCTP_TCB_LOCK(stcb);
14765 }
14766 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
14767 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
14768 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
14769 }
14770 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
14771 queue_only, stcb->asoc.peers_rwnd, un_sent,
14772 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
14773 stcb->asoc.total_output_queue_size, error);
14774
14775out:
14776#if defined(__APPLE__)
14777 sbunlock(&so->so_snd, 1);
14778#endif
14779out_unlocked:
14780
14781 if (local_soresv && stcb) {
14782 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
14783 }
14784 if (create_lock_applied) {
14785 SCTP_ASOC_CREATE_UNLOCK(inp);
14786 }
14787 if ((stcb) && hold_tcblock) {
14788 SCTP_TCB_UNLOCK(stcb);
14789 }
14790 if (stcb && free_cnt_applied) {
14791 atomic_add_int(&stcb->asoc.refcnt, -1);
14792 }
14793#ifdef INVARIANTS
14794#if defined(__FreeBSD__)
14795 if (stcb) {
14796 if (mtx_owned(&stcb->tcb_mtx)) {
14797 panic("Leaving with tcb mtx owned?");
14798 }
14799 if (mtx_owned(&stcb->tcb_send_mtx)) {
14800 panic("Leaving with tcb send mtx owned?");
14801 }
14802 }
14803#endif
14804#endif
14805#ifdef __Panda__
14806 /*
14807 * Handle the EAGAIN/ENOMEM cases to reattach the pak header
14808 * to particle when pak is passed in, so that caller
14809 * can try again with this pak
14810 *
14811 * NOTE: For other cases, including success case,
14812 * we simply want to return the header back to free
14813 * pool
14814 */
14815 if (top) {
14816 if ((error == EAGAIN) || (error == ENOMEM)) {
14817 SCTP_ATTACH_CHAIN(i_pak, top, sndlen);
14818 top = NULL;
14819 } else {
14820 (void)SCTP_RELEASE_HEADER(i_pak);
14821 }
14822 } else {
14823 /* This is to handle cases when top has
14824 * been reset to NULL but pak might not
14825 * be freed
14826 */
14827 if (i_pak) {
14828 (void)SCTP_RELEASE_HEADER(i_pak);
14829 }
14830 }
14831#endif
14832 if (top) {
14833 sctp_m_freem(top);
14834 }
14835 if (control) {
14836 sctp_m_freem(control);
14837 }
14838 return (error);
14839}
14840
14841
14842/*
14843 * generate an AUTHentication chunk, if required
14844 */
14845struct mbuf *
14846sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
14847 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
14848 struct sctp_tcb *stcb, uint8_t chunk)
14849{
14850 struct mbuf *m_auth;
14851 struct sctp_auth_chunk *auth;
14852 int chunk_len;
14853 struct mbuf *cn;
14854
14855 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
14856 (stcb == NULL))
14857 return (m);
14858
14859 if (stcb->asoc.auth_supported == 0) {
14860 return (m);
14861 }
14862 /* does the requested chunk require auth? */
14863 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
14864 return (m);
14865 }
14866 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
14867 if (m_auth == NULL) {
14868 /* no mbuf's */
14869 return (m);
14870 }
14871 /* reserve some space if this will be the first mbuf */
14872 if (m == NULL)
14873 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
14874 /* fill in the AUTH chunk details */
14875 auth = mtod(m_auth, struct sctp_auth_chunk *);
14876 bzero(auth, sizeof(*auth));
14877 auth->ch.chunk_type = SCTP_AUTHENTICATION;
14878 auth->ch.chunk_flags = 0;
14879 chunk_len = sizeof(*auth) +
14880 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
14881 auth->ch.chunk_length = htons(chunk_len);
14882 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
14883 /* key id and hmac digest will be computed and filled in upon send */
14884
14885 /* save the offset where the auth was inserted into the chain */
14886 *offset = 0;
14887 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
14888 *offset += SCTP_BUF_LEN(cn);
14889 }
14890
14891 /* update length and return pointer to the auth chunk */
14892 SCTP_BUF_LEN(m_auth) = chunk_len;
14893 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
14894 if (auth_ret != NULL)
14895 *auth_ret = auth;
14896
14897 return (m);
14898}
14899
14900#if defined(__FreeBSD__) || defined(__APPLE__)
14901#ifdef INET6
/*
 * Check whether the IPv6 source address src6 is consistent with the
 * next hop of route 'ro': src6 must fall within an on-link neighbor
 * discovery prefix, and the route's gateway must be one of the routers
 * that advertised that prefix.
 *
 * Returns 1 when the route's gateway is an advertising router for the
 * prefix covering src6, 0 otherwise (including when ro has no route or
 * src6 is not AF_INET6).  On FreeBSD the ND prefix list is walked under
 * ND6_RLOCK; note the lock is released on every exit path below.
 */
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
{
	struct nd_prefix *pfx = NULL;
	struct nd_pfxrouter *pfxrtr = NULL;
	struct sockaddr_in6 gw6;

	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
		return (0);

	/* get prefix entry of address */
#if defined(__FreeBSD__)
	ND6_RLOCK();
#endif
	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
		/* skip prefixes no longer attached to an interface */
		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
			continue;
		/* does this prefix (under its own mask) cover src6? */
		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
		    &src6->sin6_addr, &pfx->ndpr_mask))
			break;
	}
	/* no prefix entry in the prefix list */
	if (pfx == NULL) {
#if defined(__FreeBSD__)
		ND6_RUNLOCK();
#endif
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
		return (0);
	}

	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

	/* search installed gateway from prefix entry */
	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
		/* build a sockaddr_in6 for this advertising router's address */
		memset(&gw6, 0, sizeof(struct sockaddr_in6));
		gw6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
		gw6.sin6_len = sizeof(struct sockaddr_in6);
#endif
		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
		       sizeof(struct in6_addr));
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
		/* match: the route's gateway advertised the covering prefix */
		if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
#if defined(__FreeBSD__)
			ND6_RUNLOCK();
#endif
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
			return (1);
		}
	}
#if defined(__FreeBSD__)
	ND6_RUNLOCK();
#endif
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
	return (0);
}
14963#endif
14964
14965int
14966sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
14967{
14968#ifdef INET
14969 struct sockaddr_in *sin, *mask;
14970 struct ifaddr *ifa;
14971 struct in_addr srcnetaddr, gwnetaddr;
14972
14973 if (ro == NULL || ro->ro_rt == NULL ||
14974 sifa->address.sa.sa_family != AF_INET) {
14975 return (0);
14976 }
14977 ifa = (struct ifaddr *)sifa->ifa;
14978 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
14979 sin = &sifa->address.sin;
14980 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14981 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
14982 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
14983 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
14984
14985 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
14986 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
14987 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
14988 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
14989 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
14990 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
14991 return (1);
14992 }
14993#endif
14994 return (0);
14995}
14996#elif defined(__Userspace__)
14997/* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
/*
 * Userspace stack stub: no ND prefix list is available here, so the
 * source/next-hop relationship cannot be verified; always report
 * "no match".
 */
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
{
	return (0);
}
/*
 * Userspace stack stub: interface netmask/gateway information is not
 * available here, so always report "no match".
 */
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
{
	return (0);
}
15008
15009#endif