Home | History | Annotate | Download | only in netinet
      1 /*-
      2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
      3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
      4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions are met:
      8  *
      9  * a) Redistributions of source code must retain the above copyright notice,
     10  *    this list of conditions and the following disclaimer.
     11  *
     12  * b) Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in
     14  *    the documentation and/or other materials provided with the distribution.
     15  *
     16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
     17  *    contributors may be used to endorse or promote products derived
     18  *    from this software without specific prior written permission.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     30  * THE POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #ifdef __FreeBSD__
     34 #include <sys/cdefs.h>
     35 __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 264017 2014-04-01 18:38:04Z tuexen $");
     36 #endif
     37 
     38 #include <netinet/sctp_os.h>
     39 #ifdef __FreeBSD__
     40 #include <sys/proc.h>
     41 #endif
     42 #include <netinet/sctp_var.h>
     43 #include <netinet/sctp_sysctl.h>
     44 #include <netinet/sctp_header.h>
     45 #include <netinet/sctp_pcb.h>
     46 #include <netinet/sctputil.h>
     47 #include <netinet/sctp_output.h>
     48 #include <netinet/sctp_uio.h>
     49 #include <netinet/sctputil.h>
     50 #include <netinet/sctp_auth.h>
     51 #include <netinet/sctp_timer.h>
     52 #include <netinet/sctp_asconf.h>
     53 #include <netinet/sctp_indata.h>
     54 #include <netinet/sctp_bsd_addr.h>
     55 #include <netinet/sctp_input.h>
     56 #include <netinet/sctp_crc32.h>
     57 #if defined(__Userspace_os_Linux)
     58 #define __FAVOR_BSD    /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */
     59 #endif
     60 #if !defined(__Userspace_os_Windows)
     61 #include <netinet/udp.h>
     62 #endif
     63 #if defined(__APPLE__)
     64 #include <netinet/in.h>
     65 #endif
     66 #if defined(__FreeBSD__)
     67 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
     68 #include <netinet/udp_var.h>
     69 #endif
     70 #include <machine/in_cksum.h>
     71 #endif
     72 #if defined(__Userspace__) && defined(INET6)
     73 #include <netinet6/sctp6_var.h>
     74 #endif
     75 
     76 #if defined(__APPLE__)
     77 #define APPLE_FILE_NO 3
     78 #endif
     79 
     80 #if defined(__APPLE__)
     81 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
     82 #define SCTP_MAX_LINKHDR 16
     83 #endif
     84 #endif
     85 
     86 #define SCTP_MAX_GAPS_INARRAY 4
     87 struct sack_track {
     88 	uint8_t right_edge;	/* mergable on the right edge */
     89 	uint8_t left_edge;	/* mergable on the left edge */
     90 	uint8_t num_entries;
     91 	uint8_t spare;
     92 	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
     93 };
     94 
     95 struct sack_track sack_array[256] = {
     96 	{0, 0, 0, 0,		/* 0x00 */
     97 		{{0, 0},
     98 		{0, 0},
     99 		{0, 0},
    100 		{0, 0}
    101 		}
    102 	},
    103 	{1, 0, 1, 0,		/* 0x01 */
    104 		{{0, 0},
    105 		{0, 0},
    106 		{0, 0},
    107 		{0, 0}
    108 		}
    109 	},
    110 	{0, 0, 1, 0,		/* 0x02 */
    111 		{{1, 1},
    112 		{0, 0},
    113 		{0, 0},
    114 		{0, 0}
    115 		}
    116 	},
    117 	{1, 0, 1, 0,		/* 0x03 */
    118 		{{0, 1},
    119 		{0, 0},
    120 		{0, 0},
    121 		{0, 0}
    122 		}
    123 	},
    124 	{0, 0, 1, 0,		/* 0x04 */
    125 		{{2, 2},
    126 		{0, 0},
    127 		{0, 0},
    128 		{0, 0}
    129 		}
    130 	},
    131 	{1, 0, 2, 0,		/* 0x05 */
    132 		{{0, 0},
    133 		{2, 2},
    134 		{0, 0},
    135 		{0, 0}
    136 		}
    137 	},
    138 	{0, 0, 1, 0,		/* 0x06 */
    139 		{{1, 2},
    140 		{0, 0},
    141 		{0, 0},
    142 		{0, 0}
    143 		}
    144 	},
    145 	{1, 0, 1, 0,		/* 0x07 */
    146 		{{0, 2},
    147 		{0, 0},
    148 		{0, 0},
    149 		{0, 0}
    150 		}
    151 	},
    152 	{0, 0, 1, 0,		/* 0x08 */
    153 		{{3, 3},
    154 		{0, 0},
    155 		{0, 0},
    156 		{0, 0}
    157 		}
    158 	},
    159 	{1, 0, 2, 0,		/* 0x09 */
    160 		{{0, 0},
    161 		{3, 3},
    162 		{0, 0},
    163 		{0, 0}
    164 		}
    165 	},
    166 	{0, 0, 2, 0,		/* 0x0a */
    167 		{{1, 1},
    168 		{3, 3},
    169 		{0, 0},
    170 		{0, 0}
    171 		}
    172 	},
    173 	{1, 0, 2, 0,		/* 0x0b */
    174 		{{0, 1},
    175 		{3, 3},
    176 		{0, 0},
    177 		{0, 0}
    178 		}
    179 	},
    180 	{0, 0, 1, 0,		/* 0x0c */
    181 		{{2, 3},
    182 		{0, 0},
    183 		{0, 0},
    184 		{0, 0}
    185 		}
    186 	},
    187 	{1, 0, 2, 0,		/* 0x0d */
    188 		{{0, 0},
    189 		{2, 3},
    190 		{0, 0},
    191 		{0, 0}
    192 		}
    193 	},
    194 	{0, 0, 1, 0,		/* 0x0e */
    195 		{{1, 3},
    196 		{0, 0},
    197 		{0, 0},
    198 		{0, 0}
    199 		}
    200 	},
    201 	{1, 0, 1, 0,		/* 0x0f */
    202 		{{0, 3},
    203 		{0, 0},
    204 		{0, 0},
    205 		{0, 0}
    206 		}
    207 	},
    208 	{0, 0, 1, 0,		/* 0x10 */
    209 		{{4, 4},
    210 		{0, 0},
    211 		{0, 0},
    212 		{0, 0}
    213 		}
    214 	},
    215 	{1, 0, 2, 0,		/* 0x11 */
    216 		{{0, 0},
    217 		{4, 4},
    218 		{0, 0},
    219 		{0, 0}
    220 		}
    221 	},
    222 	{0, 0, 2, 0,		/* 0x12 */
    223 		{{1, 1},
    224 		{4, 4},
    225 		{0, 0},
    226 		{0, 0}
    227 		}
    228 	},
    229 	{1, 0, 2, 0,		/* 0x13 */
    230 		{{0, 1},
    231 		{4, 4},
    232 		{0, 0},
    233 		{0, 0}
    234 		}
    235 	},
    236 	{0, 0, 2, 0,		/* 0x14 */
    237 		{{2, 2},
    238 		{4, 4},
    239 		{0, 0},
    240 		{0, 0}
    241 		}
    242 	},
    243 	{1, 0, 3, 0,		/* 0x15 */
    244 		{{0, 0},
    245 		{2, 2},
    246 		{4, 4},
    247 		{0, 0}
    248 		}
    249 	},
    250 	{0, 0, 2, 0,		/* 0x16 */
    251 		{{1, 2},
    252 		{4, 4},
    253 		{0, 0},
    254 		{0, 0}
    255 		}
    256 	},
    257 	{1, 0, 2, 0,		/* 0x17 */
    258 		{{0, 2},
    259 		{4, 4},
    260 		{0, 0},
    261 		{0, 0}
    262 		}
    263 	},
    264 	{0, 0, 1, 0,		/* 0x18 */
    265 		{{3, 4},
    266 		{0, 0},
    267 		{0, 0},
    268 		{0, 0}
    269 		}
    270 	},
    271 	{1, 0, 2, 0,		/* 0x19 */
    272 		{{0, 0},
    273 		{3, 4},
    274 		{0, 0},
    275 		{0, 0}
    276 		}
    277 	},
    278 	{0, 0, 2, 0,		/* 0x1a */
    279 		{{1, 1},
    280 		{3, 4},
    281 		{0, 0},
    282 		{0, 0}
    283 		}
    284 	},
    285 	{1, 0, 2, 0,		/* 0x1b */
    286 		{{0, 1},
    287 		{3, 4},
    288 		{0, 0},
    289 		{0, 0}
    290 		}
    291 	},
    292 	{0, 0, 1, 0,		/* 0x1c */
    293 		{{2, 4},
    294 		{0, 0},
    295 		{0, 0},
    296 		{0, 0}
    297 		}
    298 	},
    299 	{1, 0, 2, 0,		/* 0x1d */
    300 		{{0, 0},
    301 		{2, 4},
    302 		{0, 0},
    303 		{0, 0}
    304 		}
    305 	},
    306 	{0, 0, 1, 0,		/* 0x1e */
    307 		{{1, 4},
    308 		{0, 0},
    309 		{0, 0},
    310 		{0, 0}
    311 		}
    312 	},
    313 	{1, 0, 1, 0,		/* 0x1f */
    314 		{{0, 4},
    315 		{0, 0},
    316 		{0, 0},
    317 		{0, 0}
    318 		}
    319 	},
    320 	{0, 0, 1, 0,		/* 0x20 */
    321 		{{5, 5},
    322 		{0, 0},
    323 		{0, 0},
    324 		{0, 0}
    325 		}
    326 	},
    327 	{1, 0, 2, 0,		/* 0x21 */
    328 		{{0, 0},
    329 		{5, 5},
    330 		{0, 0},
    331 		{0, 0}
    332 		}
    333 	},
    334 	{0, 0, 2, 0,		/* 0x22 */
    335 		{{1, 1},
    336 		{5, 5},
    337 		{0, 0},
    338 		{0, 0}
    339 		}
    340 	},
    341 	{1, 0, 2, 0,		/* 0x23 */
    342 		{{0, 1},
    343 		{5, 5},
    344 		{0, 0},
    345 		{0, 0}
    346 		}
    347 	},
    348 	{0, 0, 2, 0,		/* 0x24 */
    349 		{{2, 2},
    350 		{5, 5},
    351 		{0, 0},
    352 		{0, 0}
    353 		}
    354 	},
    355 	{1, 0, 3, 0,		/* 0x25 */
    356 		{{0, 0},
    357 		{2, 2},
    358 		{5, 5},
    359 		{0, 0}
    360 		}
    361 	},
    362 	{0, 0, 2, 0,		/* 0x26 */
    363 		{{1, 2},
    364 		{5, 5},
    365 		{0, 0},
    366 		{0, 0}
    367 		}
    368 	},
    369 	{1, 0, 2, 0,		/* 0x27 */
    370 		{{0, 2},
    371 		{5, 5},
    372 		{0, 0},
    373 		{0, 0}
    374 		}
    375 	},
    376 	{0, 0, 2, 0,		/* 0x28 */
    377 		{{3, 3},
    378 		{5, 5},
    379 		{0, 0},
    380 		{0, 0}
    381 		}
    382 	},
    383 	{1, 0, 3, 0,		/* 0x29 */
    384 		{{0, 0},
    385 		{3, 3},
    386 		{5, 5},
    387 		{0, 0}
    388 		}
    389 	},
    390 	{0, 0, 3, 0,		/* 0x2a */
    391 		{{1, 1},
    392 		{3, 3},
    393 		{5, 5},
    394 		{0, 0}
    395 		}
    396 	},
    397 	{1, 0, 3, 0,		/* 0x2b */
    398 		{{0, 1},
    399 		{3, 3},
    400 		{5, 5},
    401 		{0, 0}
    402 		}
    403 	},
    404 	{0, 0, 2, 0,		/* 0x2c */
    405 		{{2, 3},
    406 		{5, 5},
    407 		{0, 0},
    408 		{0, 0}
    409 		}
    410 	},
    411 	{1, 0, 3, 0,		/* 0x2d */
    412 		{{0, 0},
    413 		{2, 3},
    414 		{5, 5},
    415 		{0, 0}
    416 		}
    417 	},
    418 	{0, 0, 2, 0,		/* 0x2e */
    419 		{{1, 3},
    420 		{5, 5},
    421 		{0, 0},
    422 		{0, 0}
    423 		}
    424 	},
    425 	{1, 0, 2, 0,		/* 0x2f */
    426 		{{0, 3},
    427 		{5, 5},
    428 		{0, 0},
    429 		{0, 0}
    430 		}
    431 	},
    432 	{0, 0, 1, 0,		/* 0x30 */
    433 		{{4, 5},
    434 		{0, 0},
    435 		{0, 0},
    436 		{0, 0}
    437 		}
    438 	},
    439 	{1, 0, 2, 0,		/* 0x31 */
    440 		{{0, 0},
    441 		{4, 5},
    442 		{0, 0},
    443 		{0, 0}
    444 		}
    445 	},
    446 	{0, 0, 2, 0,		/* 0x32 */
    447 		{{1, 1},
    448 		{4, 5},
    449 		{0, 0},
    450 		{0, 0}
    451 		}
    452 	},
    453 	{1, 0, 2, 0,		/* 0x33 */
    454 		{{0, 1},
    455 		{4, 5},
    456 		{0, 0},
    457 		{0, 0}
    458 		}
    459 	},
    460 	{0, 0, 2, 0,		/* 0x34 */
    461 		{{2, 2},
    462 		{4, 5},
    463 		{0, 0},
    464 		{0, 0}
    465 		}
    466 	},
    467 	{1, 0, 3, 0,		/* 0x35 */
    468 		{{0, 0},
    469 		{2, 2},
    470 		{4, 5},
    471 		{0, 0}
    472 		}
    473 	},
    474 	{0, 0, 2, 0,		/* 0x36 */
    475 		{{1, 2},
    476 		{4, 5},
    477 		{0, 0},
    478 		{0, 0}
    479 		}
    480 	},
    481 	{1, 0, 2, 0,		/* 0x37 */
    482 		{{0, 2},
    483 		{4, 5},
    484 		{0, 0},
    485 		{0, 0}
    486 		}
    487 	},
    488 	{0, 0, 1, 0,		/* 0x38 */
    489 		{{3, 5},
    490 		{0, 0},
    491 		{0, 0},
    492 		{0, 0}
    493 		}
    494 	},
    495 	{1, 0, 2, 0,		/* 0x39 */
    496 		{{0, 0},
    497 		{3, 5},
    498 		{0, 0},
    499 		{0, 0}
    500 		}
    501 	},
    502 	{0, 0, 2, 0,		/* 0x3a */
    503 		{{1, 1},
    504 		{3, 5},
    505 		{0, 0},
    506 		{0, 0}
    507 		}
    508 	},
    509 	{1, 0, 2, 0,		/* 0x3b */
    510 		{{0, 1},
    511 		{3, 5},
    512 		{0, 0},
    513 		{0, 0}
    514 		}
    515 	},
    516 	{0, 0, 1, 0,		/* 0x3c */
    517 		{{2, 5},
    518 		{0, 0},
    519 		{0, 0},
    520 		{0, 0}
    521 		}
    522 	},
    523 	{1, 0, 2, 0,		/* 0x3d */
    524 		{{0, 0},
    525 		{2, 5},
    526 		{0, 0},
    527 		{0, 0}
    528 		}
    529 	},
    530 	{0, 0, 1, 0,		/* 0x3e */
    531 		{{1, 5},
    532 		{0, 0},
    533 		{0, 0},
    534 		{0, 0}
    535 		}
    536 	},
    537 	{1, 0, 1, 0,		/* 0x3f */
    538 		{{0, 5},
    539 		{0, 0},
    540 		{0, 0},
    541 		{0, 0}
    542 		}
    543 	},
    544 	{0, 0, 1, 0,		/* 0x40 */
    545 		{{6, 6},
    546 		{0, 0},
    547 		{0, 0},
    548 		{0, 0}
    549 		}
    550 	},
    551 	{1, 0, 2, 0,		/* 0x41 */
    552 		{{0, 0},
    553 		{6, 6},
    554 		{0, 0},
    555 		{0, 0}
    556 		}
    557 	},
    558 	{0, 0, 2, 0,		/* 0x42 */
    559 		{{1, 1},
    560 		{6, 6},
    561 		{0, 0},
    562 		{0, 0}
    563 		}
    564 	},
    565 	{1, 0, 2, 0,		/* 0x43 */
    566 		{{0, 1},
    567 		{6, 6},
    568 		{0, 0},
    569 		{0, 0}
    570 		}
    571 	},
    572 	{0, 0, 2, 0,		/* 0x44 */
    573 		{{2, 2},
    574 		{6, 6},
    575 		{0, 0},
    576 		{0, 0}
    577 		}
    578 	},
    579 	{1, 0, 3, 0,		/* 0x45 */
    580 		{{0, 0},
    581 		{2, 2},
    582 		{6, 6},
    583 		{0, 0}
    584 		}
    585 	},
    586 	{0, 0, 2, 0,		/* 0x46 */
    587 		{{1, 2},
    588 		{6, 6},
    589 		{0, 0},
    590 		{0, 0}
    591 		}
    592 	},
    593 	{1, 0, 2, 0,		/* 0x47 */
    594 		{{0, 2},
    595 		{6, 6},
    596 		{0, 0},
    597 		{0, 0}
    598 		}
    599 	},
    600 	{0, 0, 2, 0,		/* 0x48 */
    601 		{{3, 3},
    602 		{6, 6},
    603 		{0, 0},
    604 		{0, 0}
    605 		}
    606 	},
    607 	{1, 0, 3, 0,		/* 0x49 */
    608 		{{0, 0},
    609 		{3, 3},
    610 		{6, 6},
    611 		{0, 0}
    612 		}
    613 	},
    614 	{0, 0, 3, 0,		/* 0x4a */
    615 		{{1, 1},
    616 		{3, 3},
    617 		{6, 6},
    618 		{0, 0}
    619 		}
    620 	},
    621 	{1, 0, 3, 0,		/* 0x4b */
    622 		{{0, 1},
    623 		{3, 3},
    624 		{6, 6},
    625 		{0, 0}
    626 		}
    627 	},
    628 	{0, 0, 2, 0,		/* 0x4c */
    629 		{{2, 3},
    630 		{6, 6},
    631 		{0, 0},
    632 		{0, 0}
    633 		}
    634 	},
    635 	{1, 0, 3, 0,		/* 0x4d */
    636 		{{0, 0},
    637 		{2, 3},
    638 		{6, 6},
    639 		{0, 0}
    640 		}
    641 	},
    642 	{0, 0, 2, 0,		/* 0x4e */
    643 		{{1, 3},
    644 		{6, 6},
    645 		{0, 0},
    646 		{0, 0}
    647 		}
    648 	},
    649 	{1, 0, 2, 0,		/* 0x4f */
    650 		{{0, 3},
    651 		{6, 6},
    652 		{0, 0},
    653 		{0, 0}
    654 		}
    655 	},
    656 	{0, 0, 2, 0,		/* 0x50 */
    657 		{{4, 4},
    658 		{6, 6},
    659 		{0, 0},
    660 		{0, 0}
    661 		}
    662 	},
    663 	{1, 0, 3, 0,		/* 0x51 */
    664 		{{0, 0},
    665 		{4, 4},
    666 		{6, 6},
    667 		{0, 0}
    668 		}
    669 	},
    670 	{0, 0, 3, 0,		/* 0x52 */
    671 		{{1, 1},
    672 		{4, 4},
    673 		{6, 6},
    674 		{0, 0}
    675 		}
    676 	},
    677 	{1, 0, 3, 0,		/* 0x53 */
    678 		{{0, 1},
    679 		{4, 4},
    680 		{6, 6},
    681 		{0, 0}
    682 		}
    683 	},
    684 	{0, 0, 3, 0,		/* 0x54 */
    685 		{{2, 2},
    686 		{4, 4},
    687 		{6, 6},
    688 		{0, 0}
    689 		}
    690 	},
    691 	{1, 0, 4, 0,		/* 0x55 */
    692 		{{0, 0},
    693 		{2, 2},
    694 		{4, 4},
    695 		{6, 6}
    696 		}
    697 	},
    698 	{0, 0, 3, 0,		/* 0x56 */
    699 		{{1, 2},
    700 		{4, 4},
    701 		{6, 6},
    702 		{0, 0}
    703 		}
    704 	},
    705 	{1, 0, 3, 0,		/* 0x57 */
    706 		{{0, 2},
    707 		{4, 4},
    708 		{6, 6},
    709 		{0, 0}
    710 		}
    711 	},
    712 	{0, 0, 2, 0,		/* 0x58 */
    713 		{{3, 4},
    714 		{6, 6},
    715 		{0, 0},
    716 		{0, 0}
    717 		}
    718 	},
    719 	{1, 0, 3, 0,		/* 0x59 */
    720 		{{0, 0},
    721 		{3, 4},
    722 		{6, 6},
    723 		{0, 0}
    724 		}
    725 	},
    726 	{0, 0, 3, 0,		/* 0x5a */
    727 		{{1, 1},
    728 		{3, 4},
    729 		{6, 6},
    730 		{0, 0}
    731 		}
    732 	},
    733 	{1, 0, 3, 0,		/* 0x5b */
    734 		{{0, 1},
    735 		{3, 4},
    736 		{6, 6},
    737 		{0, 0}
    738 		}
    739 	},
    740 	{0, 0, 2, 0,		/* 0x5c */
    741 		{{2, 4},
    742 		{6, 6},
    743 		{0, 0},
    744 		{0, 0}
    745 		}
    746 	},
    747 	{1, 0, 3, 0,		/* 0x5d */
    748 		{{0, 0},
    749 		{2, 4},
    750 		{6, 6},
    751 		{0, 0}
    752 		}
    753 	},
    754 	{0, 0, 2, 0,		/* 0x5e */
    755 		{{1, 4},
    756 		{6, 6},
    757 		{0, 0},
    758 		{0, 0}
    759 		}
    760 	},
    761 	{1, 0, 2, 0,		/* 0x5f */
    762 		{{0, 4},
    763 		{6, 6},
    764 		{0, 0},
    765 		{0, 0}
    766 		}
    767 	},
    768 	{0, 0, 1, 0,		/* 0x60 */
    769 		{{5, 6},
    770 		{0, 0},
    771 		{0, 0},
    772 		{0, 0}
    773 		}
    774 	},
    775 	{1, 0, 2, 0,		/* 0x61 */
    776 		{{0, 0},
    777 		{5, 6},
    778 		{0, 0},
    779 		{0, 0}
    780 		}
    781 	},
    782 	{0, 0, 2, 0,		/* 0x62 */
    783 		{{1, 1},
    784 		{5, 6},
    785 		{0, 0},
    786 		{0, 0}
    787 		}
    788 	},
    789 	{1, 0, 2, 0,		/* 0x63 */
    790 		{{0, 1},
    791 		{5, 6},
    792 		{0, 0},
    793 		{0, 0}
    794 		}
    795 	},
    796 	{0, 0, 2, 0,		/* 0x64 */
    797 		{{2, 2},
    798 		{5, 6},
    799 		{0, 0},
    800 		{0, 0}
    801 		}
    802 	},
    803 	{1, 0, 3, 0,		/* 0x65 */
    804 		{{0, 0},
    805 		{2, 2},
    806 		{5, 6},
    807 		{0, 0}
    808 		}
    809 	},
    810 	{0, 0, 2, 0,		/* 0x66 */
    811 		{{1, 2},
    812 		{5, 6},
    813 		{0, 0},
    814 		{0, 0}
    815 		}
    816 	},
    817 	{1, 0, 2, 0,		/* 0x67 */
    818 		{{0, 2},
    819 		{5, 6},
    820 		{0, 0},
    821 		{0, 0}
    822 		}
    823 	},
    824 	{0, 0, 2, 0,		/* 0x68 */
    825 		{{3, 3},
    826 		{5, 6},
    827 		{0, 0},
    828 		{0, 0}
    829 		}
    830 	},
    831 	{1, 0, 3, 0,		/* 0x69 */
    832 		{{0, 0},
    833 		{3, 3},
    834 		{5, 6},
    835 		{0, 0}
    836 		}
    837 	},
    838 	{0, 0, 3, 0,		/* 0x6a */
    839 		{{1, 1},
    840 		{3, 3},
    841 		{5, 6},
    842 		{0, 0}
    843 		}
    844 	},
    845 	{1, 0, 3, 0,		/* 0x6b */
    846 		{{0, 1},
    847 		{3, 3},
    848 		{5, 6},
    849 		{0, 0}
    850 		}
    851 	},
    852 	{0, 0, 2, 0,		/* 0x6c */
    853 		{{2, 3},
    854 		{5, 6},
    855 		{0, 0},
    856 		{0, 0}
    857 		}
    858 	},
    859 	{1, 0, 3, 0,		/* 0x6d */
    860 		{{0, 0},
    861 		{2, 3},
    862 		{5, 6},
    863 		{0, 0}
    864 		}
    865 	},
    866 	{0, 0, 2, 0,		/* 0x6e */
    867 		{{1, 3},
    868 		{5, 6},
    869 		{0, 0},
    870 		{0, 0}
    871 		}
    872 	},
    873 	{1, 0, 2, 0,		/* 0x6f */
    874 		{{0, 3},
    875 		{5, 6},
    876 		{0, 0},
    877 		{0, 0}
    878 		}
    879 	},
    880 	{0, 0, 1, 0,		/* 0x70 */
    881 		{{4, 6},
    882 		{0, 0},
    883 		{0, 0},
    884 		{0, 0}
    885 		}
    886 	},
    887 	{1, 0, 2, 0,		/* 0x71 */
    888 		{{0, 0},
    889 		{4, 6},
    890 		{0, 0},
    891 		{0, 0}
    892 		}
    893 	},
    894 	{0, 0, 2, 0,		/* 0x72 */
    895 		{{1, 1},
    896 		{4, 6},
    897 		{0, 0},
    898 		{0, 0}
    899 		}
    900 	},
    901 	{1, 0, 2, 0,		/* 0x73 */
    902 		{{0, 1},
    903 		{4, 6},
    904 		{0, 0},
    905 		{0, 0}
    906 		}
    907 	},
    908 	{0, 0, 2, 0,		/* 0x74 */
    909 		{{2, 2},
    910 		{4, 6},
    911 		{0, 0},
    912 		{0, 0}
    913 		}
    914 	},
    915 	{1, 0, 3, 0,		/* 0x75 */
    916 		{{0, 0},
    917 		{2, 2},
    918 		{4, 6},
    919 		{0, 0}
    920 		}
    921 	},
    922 	{0, 0, 2, 0,		/* 0x76 */
    923 		{{1, 2},
    924 		{4, 6},
    925 		{0, 0},
    926 		{0, 0}
    927 		}
    928 	},
    929 	{1, 0, 2, 0,		/* 0x77 */
    930 		{{0, 2},
    931 		{4, 6},
    932 		{0, 0},
    933 		{0, 0}
    934 		}
    935 	},
    936 	{0, 0, 1, 0,		/* 0x78 */
    937 		{{3, 6},
    938 		{0, 0},
    939 		{0, 0},
    940 		{0, 0}
    941 		}
    942 	},
    943 	{1, 0, 2, 0,		/* 0x79 */
    944 		{{0, 0},
    945 		{3, 6},
    946 		{0, 0},
    947 		{0, 0}
    948 		}
    949 	},
    950 	{0, 0, 2, 0,		/* 0x7a */
    951 		{{1, 1},
    952 		{3, 6},
    953 		{0, 0},
    954 		{0, 0}
    955 		}
    956 	},
    957 	{1, 0, 2, 0,		/* 0x7b */
    958 		{{0, 1},
    959 		{3, 6},
    960 		{0, 0},
    961 		{0, 0}
    962 		}
    963 	},
    964 	{0, 0, 1, 0,		/* 0x7c */
    965 		{{2, 6},
    966 		{0, 0},
    967 		{0, 0},
    968 		{0, 0}
    969 		}
    970 	},
    971 	{1, 0, 2, 0,		/* 0x7d */
    972 		{{0, 0},
    973 		{2, 6},
    974 		{0, 0},
    975 		{0, 0}
    976 		}
    977 	},
    978 	{0, 0, 1, 0,		/* 0x7e */
    979 		{{1, 6},
    980 		{0, 0},
    981 		{0, 0},
    982 		{0, 0}
    983 		}
    984 	},
    985 	{1, 0, 1, 0,		/* 0x7f */
    986 		{{0, 6},
    987 		{0, 0},
    988 		{0, 0},
    989 		{0, 0}
    990 		}
    991 	},
    992 	{0, 1, 1, 0,		/* 0x80 */
    993 		{{7, 7},
    994 		{0, 0},
    995 		{0, 0},
    996 		{0, 0}
    997 		}
    998 	},
    999 	{1, 1, 2, 0,		/* 0x81 */
   1000 		{{0, 0},
   1001 		{7, 7},
   1002 		{0, 0},
   1003 		{0, 0}
   1004 		}
   1005 	},
   1006 	{0, 1, 2, 0,		/* 0x82 */
   1007 		{{1, 1},
   1008 		{7, 7},
   1009 		{0, 0},
   1010 		{0, 0}
   1011 		}
   1012 	},
   1013 	{1, 1, 2, 0,		/* 0x83 */
   1014 		{{0, 1},
   1015 		{7, 7},
   1016 		{0, 0},
   1017 		{0, 0}
   1018 		}
   1019 	},
   1020 	{0, 1, 2, 0,		/* 0x84 */
   1021 		{{2, 2},
   1022 		{7, 7},
   1023 		{0, 0},
   1024 		{0, 0}
   1025 		}
   1026 	},
   1027 	{1, 1, 3, 0,		/* 0x85 */
   1028 		{{0, 0},
   1029 		{2, 2},
   1030 		{7, 7},
   1031 		{0, 0}
   1032 		}
   1033 	},
   1034 	{0, 1, 2, 0,		/* 0x86 */
   1035 		{{1, 2},
   1036 		{7, 7},
   1037 		{0, 0},
   1038 		{0, 0}
   1039 		}
   1040 	},
   1041 	{1, 1, 2, 0,		/* 0x87 */
   1042 		{{0, 2},
   1043 		{7, 7},
   1044 		{0, 0},
   1045 		{0, 0}
   1046 		}
   1047 	},
   1048 	{0, 1, 2, 0,		/* 0x88 */
   1049 		{{3, 3},
   1050 		{7, 7},
   1051 		{0, 0},
   1052 		{0, 0}
   1053 		}
   1054 	},
   1055 	{1, 1, 3, 0,		/* 0x89 */
   1056 		{{0, 0},
   1057 		{3, 3},
   1058 		{7, 7},
   1059 		{0, 0}
   1060 		}
   1061 	},
   1062 	{0, 1, 3, 0,		/* 0x8a */
   1063 		{{1, 1},
   1064 		{3, 3},
   1065 		{7, 7},
   1066 		{0, 0}
   1067 		}
   1068 	},
   1069 	{1, 1, 3, 0,		/* 0x8b */
   1070 		{{0, 1},
   1071 		{3, 3},
   1072 		{7, 7},
   1073 		{0, 0}
   1074 		}
   1075 	},
   1076 	{0, 1, 2, 0,		/* 0x8c */
   1077 		{{2, 3},
   1078 		{7, 7},
   1079 		{0, 0},
   1080 		{0, 0}
   1081 		}
   1082 	},
   1083 	{1, 1, 3, 0,		/* 0x8d */
   1084 		{{0, 0},
   1085 		{2, 3},
   1086 		{7, 7},
   1087 		{0, 0}
   1088 		}
   1089 	},
   1090 	{0, 1, 2, 0,		/* 0x8e */
   1091 		{{1, 3},
   1092 		{7, 7},
   1093 		{0, 0},
   1094 		{0, 0}
   1095 		}
   1096 	},
   1097 	{1, 1, 2, 0,		/* 0x8f */
   1098 		{{0, 3},
   1099 		{7, 7},
   1100 		{0, 0},
   1101 		{0, 0}
   1102 		}
   1103 	},
   1104 	{0, 1, 2, 0,		/* 0x90 */
   1105 		{{4, 4},
   1106 		{7, 7},
   1107 		{0, 0},
   1108 		{0, 0}
   1109 		}
   1110 	},
   1111 	{1, 1, 3, 0,		/* 0x91 */
   1112 		{{0, 0},
   1113 		{4, 4},
   1114 		{7, 7},
   1115 		{0, 0}
   1116 		}
   1117 	},
   1118 	{0, 1, 3, 0,		/* 0x92 */
   1119 		{{1, 1},
   1120 		{4, 4},
   1121 		{7, 7},
   1122 		{0, 0}
   1123 		}
   1124 	},
   1125 	{1, 1, 3, 0,		/* 0x93 */
   1126 		{{0, 1},
   1127 		{4, 4},
   1128 		{7, 7},
   1129 		{0, 0}
   1130 		}
   1131 	},
   1132 	{0, 1, 3, 0,		/* 0x94 */
   1133 		{{2, 2},
   1134 		{4, 4},
   1135 		{7, 7},
   1136 		{0, 0}
   1137 		}
   1138 	},
   1139 	{1, 1, 4, 0,		/* 0x95 */
   1140 		{{0, 0},
   1141 		{2, 2},
   1142 		{4, 4},
   1143 		{7, 7}
   1144 		}
   1145 	},
   1146 	{0, 1, 3, 0,		/* 0x96 */
   1147 		{{1, 2},
   1148 		{4, 4},
   1149 		{7, 7},
   1150 		{0, 0}
   1151 		}
   1152 	},
   1153 	{1, 1, 3, 0,		/* 0x97 */
   1154 		{{0, 2},
   1155 		{4, 4},
   1156 		{7, 7},
   1157 		{0, 0}
   1158 		}
   1159 	},
   1160 	{0, 1, 2, 0,		/* 0x98 */
   1161 		{{3, 4},
   1162 		{7, 7},
   1163 		{0, 0},
   1164 		{0, 0}
   1165 		}
   1166 	},
   1167 	{1, 1, 3, 0,		/* 0x99 */
   1168 		{{0, 0},
   1169 		{3, 4},
   1170 		{7, 7},
   1171 		{0, 0}
   1172 		}
   1173 	},
   1174 	{0, 1, 3, 0,		/* 0x9a */
   1175 		{{1, 1},
   1176 		{3, 4},
   1177 		{7, 7},
   1178 		{0, 0}
   1179 		}
   1180 	},
   1181 	{1, 1, 3, 0,		/* 0x9b */
   1182 		{{0, 1},
   1183 		{3, 4},
   1184 		{7, 7},
   1185 		{0, 0}
   1186 		}
   1187 	},
   1188 	{0, 1, 2, 0,		/* 0x9c */
   1189 		{{2, 4},
   1190 		{7, 7},
   1191 		{0, 0},
   1192 		{0, 0}
   1193 		}
   1194 	},
   1195 	{1, 1, 3, 0,		/* 0x9d */
   1196 		{{0, 0},
   1197 		{2, 4},
   1198 		{7, 7},
   1199 		{0, 0}
   1200 		}
   1201 	},
   1202 	{0, 1, 2, 0,		/* 0x9e */
   1203 		{{1, 4},
   1204 		{7, 7},
   1205 		{0, 0},
   1206 		{0, 0}
   1207 		}
   1208 	},
   1209 	{1, 1, 2, 0,		/* 0x9f */
   1210 		{{0, 4},
   1211 		{7, 7},
   1212 		{0, 0},
   1213 		{0, 0}
   1214 		}
   1215 	},
   1216 	{0, 1, 2, 0,		/* 0xa0 */
   1217 		{{5, 5},
   1218 		{7, 7},
   1219 		{0, 0},
   1220 		{0, 0}
   1221 		}
   1222 	},
   1223 	{1, 1, 3, 0,		/* 0xa1 */
   1224 		{{0, 0},
   1225 		{5, 5},
   1226 		{7, 7},
   1227 		{0, 0}
   1228 		}
   1229 	},
   1230 	{0, 1, 3, 0,		/* 0xa2 */
   1231 		{{1, 1},
   1232 		{5, 5},
   1233 		{7, 7},
   1234 		{0, 0}
   1235 		}
   1236 	},
   1237 	{1, 1, 3, 0,		/* 0xa3 */
   1238 		{{0, 1},
   1239 		{5, 5},
   1240 		{7, 7},
   1241 		{0, 0}
   1242 		}
   1243 	},
   1244 	{0, 1, 3, 0,		/* 0xa4 */
   1245 		{{2, 2},
   1246 		{5, 5},
   1247 		{7, 7},
   1248 		{0, 0}
   1249 		}
   1250 	},
   1251 	{1, 1, 4, 0,		/* 0xa5 */
   1252 		{{0, 0},
   1253 		{2, 2},
   1254 		{5, 5},
   1255 		{7, 7}
   1256 		}
   1257 	},
   1258 	{0, 1, 3, 0,		/* 0xa6 */
   1259 		{{1, 2},
   1260 		{5, 5},
   1261 		{7, 7},
   1262 		{0, 0}
   1263 		}
   1264 	},
   1265 	{1, 1, 3, 0,		/* 0xa7 */
   1266 		{{0, 2},
   1267 		{5, 5},
   1268 		{7, 7},
   1269 		{0, 0}
   1270 		}
   1271 	},
   1272 	{0, 1, 3, 0,		/* 0xa8 */
   1273 		{{3, 3},
   1274 		{5, 5},
   1275 		{7, 7},
   1276 		{0, 0}
   1277 		}
   1278 	},
   1279 	{1, 1, 4, 0,		/* 0xa9 */
   1280 		{{0, 0},
   1281 		{3, 3},
   1282 		{5, 5},
   1283 		{7, 7}
   1284 		}
   1285 	},
   1286 	{0, 1, 4, 0,		/* 0xaa */
   1287 		{{1, 1},
   1288 		{3, 3},
   1289 		{5, 5},
   1290 		{7, 7}
   1291 		}
   1292 	},
   1293 	{1, 1, 4, 0,		/* 0xab */
   1294 		{{0, 1},
   1295 		{3, 3},
   1296 		{5, 5},
   1297 		{7, 7}
   1298 		}
   1299 	},
   1300 	{0, 1, 3, 0,		/* 0xac */
   1301 		{{2, 3},
   1302 		{5, 5},
   1303 		{7, 7},
   1304 		{0, 0}
   1305 		}
   1306 	},
   1307 	{1, 1, 4, 0,		/* 0xad */
   1308 		{{0, 0},
   1309 		{2, 3},
   1310 		{5, 5},
   1311 		{7, 7}
   1312 		}
   1313 	},
   1314 	{0, 1, 3, 0,		/* 0xae */
   1315 		{{1, 3},
   1316 		{5, 5},
   1317 		{7, 7},
   1318 		{0, 0}
   1319 		}
   1320 	},
   1321 	{1, 1, 3, 0,		/* 0xaf */
   1322 		{{0, 3},
   1323 		{5, 5},
   1324 		{7, 7},
   1325 		{0, 0}
   1326 		}
   1327 	},
   1328 	{0, 1, 2, 0,		/* 0xb0 */
   1329 		{{4, 5},
   1330 		{7, 7},
   1331 		{0, 0},
   1332 		{0, 0}
   1333 		}
   1334 	},
   1335 	{1, 1, 3, 0,		/* 0xb1 */
   1336 		{{0, 0},
   1337 		{4, 5},
   1338 		{7, 7},
   1339 		{0, 0}
   1340 		}
   1341 	},
   1342 	{0, 1, 3, 0,		/* 0xb2 */
   1343 		{{1, 1},
   1344 		{4, 5},
   1345 		{7, 7},
   1346 		{0, 0}
   1347 		}
   1348 	},
   1349 	{1, 1, 3, 0,		/* 0xb3 */
   1350 		{{0, 1},
   1351 		{4, 5},
   1352 		{7, 7},
   1353 		{0, 0}
   1354 		}
   1355 	},
   1356 	{0, 1, 3, 0,		/* 0xb4 */
   1357 		{{2, 2},
   1358 		{4, 5},
   1359 		{7, 7},
   1360 		{0, 0}
   1361 		}
   1362 	},
   1363 	{1, 1, 4, 0,		/* 0xb5 */
   1364 		{{0, 0},
   1365 		{2, 2},
   1366 		{4, 5},
   1367 		{7, 7}
   1368 		}
   1369 	},
   1370 	{0, 1, 3, 0,		/* 0xb6 */
   1371 		{{1, 2},
   1372 		{4, 5},
   1373 		{7, 7},
   1374 		{0, 0}
   1375 		}
   1376 	},
   1377 	{1, 1, 3, 0,		/* 0xb7 */
   1378 		{{0, 2},
   1379 		{4, 5},
   1380 		{7, 7},
   1381 		{0, 0}
   1382 		}
   1383 	},
   1384 	{0, 1, 2, 0,		/* 0xb8 */
   1385 		{{3, 5},
   1386 		{7, 7},
   1387 		{0, 0},
   1388 		{0, 0}
   1389 		}
   1390 	},
   1391 	{1, 1, 3, 0,		/* 0xb9 */
   1392 		{{0, 0},
   1393 		{3, 5},
   1394 		{7, 7},
   1395 		{0, 0}
   1396 		}
   1397 	},
   1398 	{0, 1, 3, 0,		/* 0xba */
   1399 		{{1, 1},
   1400 		{3, 5},
   1401 		{7, 7},
   1402 		{0, 0}
   1403 		}
   1404 	},
   1405 	{1, 1, 3, 0,		/* 0xbb */
   1406 		{{0, 1},
   1407 		{3, 5},
   1408 		{7, 7},
   1409 		{0, 0}
   1410 		}
   1411 	},
   1412 	{0, 1, 2, 0,		/* 0xbc */
   1413 		{{2, 5},
   1414 		{7, 7},
   1415 		{0, 0},
   1416 		{0, 0}
   1417 		}
   1418 	},
   1419 	{1, 1, 3, 0,		/* 0xbd */
   1420 		{{0, 0},
   1421 		{2, 5},
   1422 		{7, 7},
   1423 		{0, 0}
   1424 		}
   1425 	},
   1426 	{0, 1, 2, 0,		/* 0xbe */
   1427 		{{1, 5},
   1428 		{7, 7},
   1429 		{0, 0},
   1430 		{0, 0}
   1431 		}
   1432 	},
   1433 	{1, 1, 2, 0,		/* 0xbf */
   1434 		{{0, 5},
   1435 		{7, 7},
   1436 		{0, 0},
   1437 		{0, 0}
   1438 		}
   1439 	},
   1440 	{0, 1, 1, 0,		/* 0xc0 */
   1441 		{{6, 7},
   1442 		{0, 0},
   1443 		{0, 0},
   1444 		{0, 0}
   1445 		}
   1446 	},
   1447 	{1, 1, 2, 0,		/* 0xc1 */
   1448 		{{0, 0},
   1449 		{6, 7},
   1450 		{0, 0},
   1451 		{0, 0}
   1452 		}
   1453 	},
   1454 	{0, 1, 2, 0,		/* 0xc2 */
   1455 		{{1, 1},
   1456 		{6, 7},
   1457 		{0, 0},
   1458 		{0, 0}
   1459 		}
   1460 	},
   1461 	{1, 1, 2, 0,		/* 0xc3 */
   1462 		{{0, 1},
   1463 		{6, 7},
   1464 		{0, 0},
   1465 		{0, 0}
   1466 		}
   1467 	},
   1468 	{0, 1, 2, 0,		/* 0xc4 */
   1469 		{{2, 2},
   1470 		{6, 7},
   1471 		{0, 0},
   1472 		{0, 0}
   1473 		}
   1474 	},
   1475 	{1, 1, 3, 0,		/* 0xc5 */
   1476 		{{0, 0},
   1477 		{2, 2},
   1478 		{6, 7},
   1479 		{0, 0}
   1480 		}
   1481 	},
   1482 	{0, 1, 2, 0,		/* 0xc6 */
   1483 		{{1, 2},
   1484 		{6, 7},
   1485 		{0, 0},
   1486 		{0, 0}
   1487 		}
   1488 	},
   1489 	{1, 1, 2, 0,		/* 0xc7 */
   1490 		{{0, 2},
   1491 		{6, 7},
   1492 		{0, 0},
   1493 		{0, 0}
   1494 		}
   1495 	},
   1496 	{0, 1, 2, 0,		/* 0xc8 */
   1497 		{{3, 3},
   1498 		{6, 7},
   1499 		{0, 0},
   1500 		{0, 0}
   1501 		}
   1502 	},
   1503 	{1, 1, 3, 0,		/* 0xc9 */
   1504 		{{0, 0},
   1505 		{3, 3},
   1506 		{6, 7},
   1507 		{0, 0}
   1508 		}
   1509 	},
   1510 	{0, 1, 3, 0,		/* 0xca */
   1511 		{{1, 1},
   1512 		{3, 3},
   1513 		{6, 7},
   1514 		{0, 0}
   1515 		}
   1516 	},
   1517 	{1, 1, 3, 0,		/* 0xcb */
   1518 		{{0, 1},
   1519 		{3, 3},
   1520 		{6, 7},
   1521 		{0, 0}
   1522 		}
   1523 	},
   1524 	{0, 1, 2, 0,		/* 0xcc */
   1525 		{{2, 3},
   1526 		{6, 7},
   1527 		{0, 0},
   1528 		{0, 0}
   1529 		}
   1530 	},
   1531 	{1, 1, 3, 0,		/* 0xcd */
   1532 		{{0, 0},
   1533 		{2, 3},
   1534 		{6, 7},
   1535 		{0, 0}
   1536 		}
   1537 	},
   1538 	{0, 1, 2, 0,		/* 0xce */
   1539 		{{1, 3},
   1540 		{6, 7},
   1541 		{0, 0},
   1542 		{0, 0}
   1543 		}
   1544 	},
   1545 	{1, 1, 2, 0,		/* 0xcf */
   1546 		{{0, 3},
   1547 		{6, 7},
   1548 		{0, 0},
   1549 		{0, 0}
   1550 		}
   1551 	},
   1552 	{0, 1, 2, 0,		/* 0xd0 */
   1553 		{{4, 4},
   1554 		{6, 7},
   1555 		{0, 0},
   1556 		{0, 0}
   1557 		}
   1558 	},
   1559 	{1, 1, 3, 0,		/* 0xd1 */
   1560 		{{0, 0},
   1561 		{4, 4},
   1562 		{6, 7},
   1563 		{0, 0}
   1564 		}
   1565 	},
   1566 	{0, 1, 3, 0,		/* 0xd2 */
   1567 		{{1, 1},
   1568 		{4, 4},
   1569 		{6, 7},
   1570 		{0, 0}
   1571 		}
   1572 	},
   1573 	{1, 1, 3, 0,		/* 0xd3 */
   1574 		{{0, 1},
   1575 		{4, 4},
   1576 		{6, 7},
   1577 		{0, 0}
   1578 		}
   1579 	},
   1580 	{0, 1, 3, 0,		/* 0xd4 */
   1581 		{{2, 2},
   1582 		{4, 4},
   1583 		{6, 7},
   1584 		{0, 0}
   1585 		}
   1586 	},
   1587 	{1, 1, 4, 0,		/* 0xd5 */
   1588 		{{0, 0},
   1589 		{2, 2},
   1590 		{4, 4},
   1591 		{6, 7}
   1592 		}
   1593 	},
   1594 	{0, 1, 3, 0,		/* 0xd6 */
   1595 		{{1, 2},
   1596 		{4, 4},
   1597 		{6, 7},
   1598 		{0, 0}
   1599 		}
   1600 	},
   1601 	{1, 1, 3, 0,		/* 0xd7 */
   1602 		{{0, 2},
   1603 		{4, 4},
   1604 		{6, 7},
   1605 		{0, 0}
   1606 		}
   1607 	},
   1608 	{0, 1, 2, 0,		/* 0xd8 */
   1609 		{{3, 4},
   1610 		{6, 7},
   1611 		{0, 0},
   1612 		{0, 0}
   1613 		}
   1614 	},
   1615 	{1, 1, 3, 0,		/* 0xd9 */
   1616 		{{0, 0},
   1617 		{3, 4},
   1618 		{6, 7},
   1619 		{0, 0}
   1620 		}
   1621 	},
   1622 	{0, 1, 3, 0,		/* 0xda */
   1623 		{{1, 1},
   1624 		{3, 4},
   1625 		{6, 7},
   1626 		{0, 0}
   1627 		}
   1628 	},
   1629 	{1, 1, 3, 0,		/* 0xdb */
   1630 		{{0, 1},
   1631 		{3, 4},
   1632 		{6, 7},
   1633 		{0, 0}
   1634 		}
   1635 	},
   1636 	{0, 1, 2, 0,		/* 0xdc */
   1637 		{{2, 4},
   1638 		{6, 7},
   1639 		{0, 0},
   1640 		{0, 0}
   1641 		}
   1642 	},
   1643 	{1, 1, 3, 0,		/* 0xdd */
   1644 		{{0, 0},
   1645 		{2, 4},
   1646 		{6, 7},
   1647 		{0, 0}
   1648 		}
   1649 	},
   1650 	{0, 1, 2, 0,		/* 0xde */
   1651 		{{1, 4},
   1652 		{6, 7},
   1653 		{0, 0},
   1654 		{0, 0}
   1655 		}
   1656 	},
   1657 	{1, 1, 2, 0,		/* 0xdf */
   1658 		{{0, 4},
   1659 		{6, 7},
   1660 		{0, 0},
   1661 		{0, 0}
   1662 		}
   1663 	},
   1664 	{0, 1, 1, 0,		/* 0xe0 */
   1665 		{{5, 7},
   1666 		{0, 0},
   1667 		{0, 0},
   1668 		{0, 0}
   1669 		}
   1670 	},
   1671 	{1, 1, 2, 0,		/* 0xe1 */
   1672 		{{0, 0},
   1673 		{5, 7},
   1674 		{0, 0},
   1675 		{0, 0}
   1676 		}
   1677 	},
   1678 	{0, 1, 2, 0,		/* 0xe2 */
   1679 		{{1, 1},
   1680 		{5, 7},
   1681 		{0, 0},
   1682 		{0, 0}
   1683 		}
   1684 	},
   1685 	{1, 1, 2, 0,		/* 0xe3 */
   1686 		{{0, 1},
   1687 		{5, 7},
   1688 		{0, 0},
   1689 		{0, 0}
   1690 		}
   1691 	},
   1692 	{0, 1, 2, 0,		/* 0xe4 */
   1693 		{{2, 2},
   1694 		{5, 7},
   1695 		{0, 0},
   1696 		{0, 0}
   1697 		}
   1698 	},
   1699 	{1, 1, 3, 0,		/* 0xe5 */
   1700 		{{0, 0},
   1701 		{2, 2},
   1702 		{5, 7},
   1703 		{0, 0}
   1704 		}
   1705 	},
   1706 	{0, 1, 2, 0,		/* 0xe6 */
   1707 		{{1, 2},
   1708 		{5, 7},
   1709 		{0, 0},
   1710 		{0, 0}
   1711 		}
   1712 	},
   1713 	{1, 1, 2, 0,		/* 0xe7 */
   1714 		{{0, 2},
   1715 		{5, 7},
   1716 		{0, 0},
   1717 		{0, 0}
   1718 		}
   1719 	},
   1720 	{0, 1, 2, 0,		/* 0xe8 */
   1721 		{{3, 3},
   1722 		{5, 7},
   1723 		{0, 0},
   1724 		{0, 0}
   1725 		}
   1726 	},
   1727 	{1, 1, 3, 0,		/* 0xe9 */
   1728 		{{0, 0},
   1729 		{3, 3},
   1730 		{5, 7},
   1731 		{0, 0}
   1732 		}
   1733 	},
   1734 	{0, 1, 3, 0,		/* 0xea */
   1735 		{{1, 1},
   1736 		{3, 3},
   1737 		{5, 7},
   1738 		{0, 0}
   1739 		}
   1740 	},
   1741 	{1, 1, 3, 0,		/* 0xeb */
   1742 		{{0, 1},
   1743 		{3, 3},
   1744 		{5, 7},
   1745 		{0, 0}
   1746 		}
   1747 	},
   1748 	{0, 1, 2, 0,		/* 0xec */
   1749 		{{2, 3},
   1750 		{5, 7},
   1751 		{0, 0},
   1752 		{0, 0}
   1753 		}
   1754 	},
   1755 	{1, 1, 3, 0,		/* 0xed */
   1756 		{{0, 0},
   1757 		{2, 3},
   1758 		{5, 7},
   1759 		{0, 0}
   1760 		}
   1761 	},
   1762 	{0, 1, 2, 0,		/* 0xee */
   1763 		{{1, 3},
   1764 		{5, 7},
   1765 		{0, 0},
   1766 		{0, 0}
   1767 		}
   1768 	},
   1769 	{1, 1, 2, 0,		/* 0xef */
   1770 		{{0, 3},
   1771 		{5, 7},
   1772 		{0, 0},
   1773 		{0, 0}
   1774 		}
   1775 	},
   1776 	{0, 1, 1, 0,		/* 0xf0 */
   1777 		{{4, 7},
   1778 		{0, 0},
   1779 		{0, 0},
   1780 		{0, 0}
   1781 		}
   1782 	},
   1783 	{1, 1, 2, 0,		/* 0xf1 */
   1784 		{{0, 0},
   1785 		{4, 7},
   1786 		{0, 0},
   1787 		{0, 0}
   1788 		}
   1789 	},
   1790 	{0, 1, 2, 0,		/* 0xf2 */
   1791 		{{1, 1},
   1792 		{4, 7},
   1793 		{0, 0},
   1794 		{0, 0}
   1795 		}
   1796 	},
   1797 	{1, 1, 2, 0,		/* 0xf3 */
   1798 		{{0, 1},
   1799 		{4, 7},
   1800 		{0, 0},
   1801 		{0, 0}
   1802 		}
   1803 	},
   1804 	{0, 1, 2, 0,		/* 0xf4 */
   1805 		{{2, 2},
   1806 		{4, 7},
   1807 		{0, 0},
   1808 		{0, 0}
   1809 		}
   1810 	},
   1811 	{1, 1, 3, 0,		/* 0xf5 */
   1812 		{{0, 0},
   1813 		{2, 2},
   1814 		{4, 7},
   1815 		{0, 0}
   1816 		}
   1817 	},
   1818 	{0, 1, 2, 0,		/* 0xf6 */
   1819 		{{1, 2},
   1820 		{4, 7},
   1821 		{0, 0},
   1822 		{0, 0}
   1823 		}
   1824 	},
   1825 	{1, 1, 2, 0,		/* 0xf7 */
   1826 		{{0, 2},
   1827 		{4, 7},
   1828 		{0, 0},
   1829 		{0, 0}
   1830 		}
   1831 	},
   1832 	{0, 1, 1, 0,		/* 0xf8 */
   1833 		{{3, 7},
   1834 		{0, 0},
   1835 		{0, 0},
   1836 		{0, 0}
   1837 		}
   1838 	},
   1839 	{1, 1, 2, 0,		/* 0xf9 */
   1840 		{{0, 0},
   1841 		{3, 7},
   1842 		{0, 0},
   1843 		{0, 0}
   1844 		}
   1845 	},
   1846 	{0, 1, 2, 0,		/* 0xfa */
   1847 		{{1, 1},
   1848 		{3, 7},
   1849 		{0, 0},
   1850 		{0, 0}
   1851 		}
   1852 	},
   1853 	{1, 1, 2, 0,		/* 0xfb */
   1854 		{{0, 1},
   1855 		{3, 7},
   1856 		{0, 0},
   1857 		{0, 0}
   1858 		}
   1859 	},
   1860 	{0, 1, 1, 0,		/* 0xfc */
   1861 		{{2, 7},
   1862 		{0, 0},
   1863 		{0, 0},
   1864 		{0, 0}
   1865 		}
   1866 	},
   1867 	{1, 1, 2, 0,		/* 0xfd */
   1868 		{{0, 0},
   1869 		{2, 7},
   1870 		{0, 0},
   1871 		{0, 0}
   1872 		}
   1873 	},
   1874 	{0, 1, 1, 0,		/* 0xfe */
   1875 		{{1, 7},
   1876 		{0, 0},
   1877 		{0, 0},
   1878 		{0, 0}
   1879 		}
   1880 	},
   1881 	{1, 1, 1, 0,		/* 0xff */
   1882 		{{0, 7},
   1883 		{0, 0},
   1884 		{0, 0},
   1885 		{0, 0}
   1886 		}
   1887 	}
   1888 };
   1889 
   1890 
   1891 int
   1892 sctp_is_address_in_scope(struct sctp_ifa *ifa,
   1893                          struct sctp_scoping *scope,
   1894                          int do_update)
   1895 {
   1896 	if ((scope->loopback_scope == 0) &&
   1897 	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
   1898 		/*
   1899 		 * skip loopback if not in scope *
   1900 		 */
   1901 		return (0);
   1902 	}
   1903 	switch (ifa->address.sa.sa_family) {
   1904 #ifdef INET
   1905 	case AF_INET:
   1906 		if (scope->ipv4_addr_legal) {
   1907 			struct sockaddr_in *sin;
   1908 
   1909 			sin = (struct sockaddr_in *)&ifa->address.sin;
   1910 			if (sin->sin_addr.s_addr == 0) {
   1911 				/* not in scope , unspecified */
   1912 				return (0);
   1913 			}
   1914 			if ((scope->ipv4_local_scope == 0) &&
   1915 			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
   1916 				/* private address not in scope */
   1917 				return (0);
   1918 			}
   1919 		} else {
   1920 			return (0);
   1921 		}
   1922 		break;
   1923 #endif
   1924 #ifdef INET6
   1925 	case AF_INET6:
   1926 		if (scope->ipv6_addr_legal) {
   1927 			struct sockaddr_in6 *sin6;
   1928 
   1929 #if !defined(__Panda__)
   1930 			/* Must update the flags,  bummer, which
   1931 			 * means any IFA locks must now be applied HERE <->
   1932 			 */
   1933 			if (do_update) {
   1934 				sctp_gather_internal_ifa_flags(ifa);
   1935 			}
   1936 #endif
   1937 			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
   1938 				return (0);
   1939 			}
   1940 			/* ok to use deprecated addresses? */
   1941 			sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
   1942 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
   1943 				/* skip unspecifed addresses */
   1944 				return (0);
   1945 			}
   1946 			if (		/* (local_scope == 0) && */
   1947 			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
   1948 				return (0);
   1949 			}
   1950 			if ((scope->site_scope == 0) &&
   1951 			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
   1952 				return (0);
   1953 			}
   1954 		} else {
   1955 			return (0);
   1956 		}
   1957 		break;
   1958 #endif
   1959 #if defined(__Userspace__)
   1960 	case AF_CONN:
   1961 		if (!scope->conn_addr_legal) {
   1962 			return (0);
   1963 		}
   1964 		break;
   1965 #endif
   1966 	default:
   1967 		return (0);
   1968 	}
   1969 	return (1);
   1970 }
   1971 
   1972 static struct mbuf *
   1973 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
   1974 {
   1975 #if defined(INET) || defined(INET6)
   1976 	struct sctp_paramhdr *parmh;
   1977 	struct mbuf *mret;
   1978 	uint16_t plen;
   1979 #endif
   1980 
   1981 	switch (ifa->address.sa.sa_family) {
   1982 #ifdef INET
   1983 	case AF_INET:
   1984 		plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
   1985 		break;
   1986 #endif
   1987 #ifdef INET6
   1988 	case AF_INET6:
   1989 		plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
   1990 		break;
   1991 #endif
   1992 	default:
   1993 		return (m);
   1994 	}
   1995 #if defined(INET) || defined(INET6)
   1996 	if (M_TRAILINGSPACE(m) >= plen) {
   1997 		/* easy side we just drop it on the end */
   1998 		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
   1999 		mret = m;
   2000 	} else {
   2001 		/* Need more space */
   2002 		mret = m;
   2003 		while (SCTP_BUF_NEXT(mret) != NULL) {
   2004 			mret = SCTP_BUF_NEXT(mret);
   2005 		}
   2006 		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
   2007 		if (SCTP_BUF_NEXT(mret) == NULL) {
   2008 			/* We are hosed, can't add more addresses */
   2009 			return (m);
   2010 		}
   2011 		mret = SCTP_BUF_NEXT(mret);
   2012 		parmh = mtod(mret, struct sctp_paramhdr *);
   2013 	}
   2014 	/* now add the parameter */
   2015 	switch (ifa->address.sa.sa_family) {
   2016 #ifdef INET
   2017 	case AF_INET:
   2018 	{
   2019 		struct sctp_ipv4addr_param *ipv4p;
   2020 		struct sockaddr_in *sin;
   2021 
   2022 		sin = (struct sockaddr_in *)&ifa->address.sin;
   2023 		ipv4p = (struct sctp_ipv4addr_param *)parmh;
   2024 		parmh->param_type = htons(SCTP_IPV4_ADDRESS);
   2025 		parmh->param_length = htons(plen);
   2026 		ipv4p->addr = sin->sin_addr.s_addr;
   2027 		SCTP_BUF_LEN(mret) += plen;
   2028 		break;
   2029 	}
   2030 #endif
   2031 #ifdef INET6
   2032 	case AF_INET6:
   2033 	{
   2034 		struct sctp_ipv6addr_param *ipv6p;
   2035 		struct sockaddr_in6 *sin6;
   2036 
   2037 		sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
   2038 		ipv6p = (struct sctp_ipv6addr_param *)parmh;
   2039 		parmh->param_type = htons(SCTP_IPV6_ADDRESS);
   2040 		parmh->param_length = htons(plen);
   2041 		memcpy(ipv6p->addr, &sin6->sin6_addr,
   2042 		    sizeof(ipv6p->addr));
   2043 #if defined(SCTP_EMBEDDED_V6_SCOPE)
   2044 		/* clear embedded scope in the address */
   2045 		in6_clearscope((struct in6_addr *)ipv6p->addr);
   2046 #endif
   2047 		SCTP_BUF_LEN(mret) += plen;
   2048 		break;
   2049 	}
   2050 #endif
   2051 	default:
   2052 		return (m);
   2053 	}
   2054 	if (len != NULL) {
   2055 		*len += plen;
   2056 	}
   2057 	return (mret);
   2058 #endif
   2059 }
   2060 
   2061 
   2062 struct mbuf *
   2063 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
   2064                            struct sctp_scoping *scope,
   2065 			   struct mbuf *m_at, int cnt_inits_to,
   2066 			   uint16_t *padding_len, uint16_t *chunk_len)
   2067 {
   2068 	struct sctp_vrf *vrf = NULL;
   2069 	int cnt, limit_out = 0, total_count;
   2070 	uint32_t vrf_id;
   2071 
   2072 	vrf_id = inp->def_vrf_id;
   2073 	SCTP_IPI_ADDR_RLOCK();
   2074 	vrf = sctp_find_vrf(vrf_id);
   2075 	if (vrf == NULL) {
   2076 		SCTP_IPI_ADDR_RUNLOCK();
   2077 		return (m_at);
   2078 	}
   2079 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
   2080 		struct sctp_ifa *sctp_ifap;
   2081 		struct sctp_ifn *sctp_ifnp;
   2082 
   2083 		cnt = cnt_inits_to;
   2084 		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
   2085 			limit_out = 1;
   2086 			cnt = SCTP_ADDRESS_LIMIT;
   2087 			goto skip_count;
   2088 		}
   2089 		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
   2090 			if ((scope->loopback_scope == 0) &&
   2091 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
   2092 				/*
   2093 				 * Skip loopback devices if loopback_scope
   2094 				 * not set
   2095 				 */
   2096 				continue;
   2097 			}
   2098 			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
   2099 				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
   2100 					continue;
   2101 				}
   2102 #if defined(__Userspace__)
   2103 				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
   2104 					continue;
   2105 				}
   2106 #endif
   2107 				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
   2108 					continue;
   2109 				}
   2110 				cnt++;
   2111 				if (cnt > SCTP_ADDRESS_LIMIT) {
   2112 					break;
   2113 				}
   2114 			}
   2115 			if (cnt > SCTP_ADDRESS_LIMIT) {
   2116 				break;
   2117 			}
   2118 		}
   2119 	skip_count:
   2120 		if (cnt > 1) {
   2121 			total_count = 0;
   2122 			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
   2123 				cnt = 0;
   2124 				if ((scope->loopback_scope == 0) &&
   2125 				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
   2126 					/*
   2127 					 * Skip loopback devices if
   2128 					 * loopback_scope not set
   2129 					 */
   2130 					continue;
   2131 				}
   2132 				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
   2133 					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
   2134 						continue;
   2135 					}
   2136 #if defined(__Userspace__)
   2137 					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
   2138 						continue;
   2139 					}
   2140 #endif
   2141 					if (sctp_is_address_in_scope(sctp_ifap,
   2142 								     scope, 0) == 0) {
   2143 						continue;
   2144 					}
   2145 					if ((chunk_len != NULL) &&
   2146 					    (padding_len != NULL) &&
   2147 					    (*padding_len > 0)) {
   2148 						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
   2149 						SCTP_BUF_LEN(m_at) += *padding_len;
   2150 						*chunk_len += *padding_len;
   2151 						*padding_len = 0;
   2152 					}
   2153 					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
   2154 					if (limit_out) {
   2155 						cnt++;
   2156 						total_count++;
   2157 						if (cnt >= 2) {
   2158 							/* two from each address */
   2159 							break;
   2160 						}
   2161 						if (total_count > SCTP_ADDRESS_LIMIT) {
   2162 							/* No more addresses */
   2163 							break;
   2164 						}
   2165 					}
   2166 				}
   2167 			}
   2168 		}
   2169 	} else {
   2170 		struct sctp_laddr *laddr;
   2171 
   2172 		cnt = cnt_inits_to;
   2173 		/* First, how many ? */
   2174 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
   2175 			if (laddr->ifa == NULL) {
   2176 				continue;
   2177 			}
   2178 			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
   2179                                 /* Address being deleted by the system, dont
   2180 				 * list.
   2181 				 */
   2182 				continue;
   2183 			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
   2184 				/* Address being deleted on this ep
   2185 				 * don't list.
   2186 				 */
   2187 				continue;
   2188 			}
   2189 #if defined(__Userspace__)
   2190 			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
   2191 				continue;
   2192 			}
   2193 #endif
   2194 			if (sctp_is_address_in_scope(laddr->ifa,
   2195 						     scope, 1) == 0) {
   2196 				continue;
   2197 			}
   2198 			cnt++;
   2199 		}
   2200 		/*
   2201 		 * To get through a NAT we only list addresses if we have
   2202 		 * more than one. That way if you just bind a single address
   2203 		 * we let the source of the init dictate our address.
   2204 		 */
   2205 		if (cnt > 1) {
   2206 			cnt = cnt_inits_to;
   2207 			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
   2208 				if (laddr->ifa == NULL) {
   2209 					continue;
   2210 				}
   2211 				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
   2212 					continue;
   2213 				}
   2214 #if defined(__Userspace__)
   2215 				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
   2216 					continue;
   2217 				}
   2218 #endif
   2219 				if (sctp_is_address_in_scope(laddr->ifa,
   2220 							     scope, 0) == 0) {
   2221 					continue;
   2222 				}
   2223 				if ((chunk_len != NULL) &&
   2224 				    (padding_len != NULL) &&
   2225 				    (*padding_len > 0)) {
   2226 					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
   2227 					SCTP_BUF_LEN(m_at) += *padding_len;
   2228 					*chunk_len += *padding_len;
   2229 					*padding_len = 0;
   2230 				}
   2231 				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
   2232 				cnt++;
   2233 				if (cnt >= SCTP_ADDRESS_LIMIT) {
   2234 					break;
   2235 				}
   2236 			}
   2237 		}
   2238 	}
   2239 	SCTP_IPI_ADDR_RUNLOCK();
   2240 	return (m_at);
   2241 }
   2242 
   2243 static struct sctp_ifa *
   2244 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
   2245 			   uint8_t dest_is_loop,
   2246 			   uint8_t dest_is_priv,
   2247 			   sa_family_t fam)
   2248 {
   2249 	uint8_t dest_is_global = 0;
   2250 	/* dest_is_priv is true if destination is a private address */
   2251 	/* dest_is_loop is true if destination is a loopback addresses */
   2252 
   2253 	/**
   2254 	 * Here we determine if its a preferred address. A preferred address
   2255 	 * means it is the same scope or higher scope then the destination.
   2256 	 * L = loopback, P = private, G = global
   2257 	 * -----------------------------------------
   2258 	 *    src    |  dest | result
   2259 	 *  ----------------------------------------
   2260 	 *     L     |    L  |    yes
   2261 	 *  -----------------------------------------
   2262 	 *     P     |    L  |    yes-v4 no-v6
   2263 	 *  -----------------------------------------
   2264 	 *     G     |    L  |    yes-v4 no-v6
   2265 	 *  -----------------------------------------
   2266 	 *     L     |    P  |    no
   2267 	 *  -----------------------------------------
   2268 	 *     P     |    P  |    yes
   2269 	 *  -----------------------------------------
   2270 	 *     G     |    P  |    no
   2271 	 *   -----------------------------------------
   2272 	 *     L     |    G  |    no
   2273 	 *   -----------------------------------------
   2274 	 *     P     |    G  |    no
   2275 	 *    -----------------------------------------
   2276 	 *     G     |    G  |    yes
   2277 	 *    -----------------------------------------
   2278 	 */
   2279 
   2280 	if (ifa->address.sa.sa_family != fam) {
   2281 		/* forget mis-matched family */
   2282 		return (NULL);
   2283 	}
   2284 	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
   2285 		dest_is_global = 1;
   2286 	}
   2287 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
   2288 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
   2289 	/* Ok the address may be ok */
   2290 #ifdef INET6
   2291 	if (fam == AF_INET6) {
   2292 		/* ok to use deprecated addresses? no lets not! */
   2293 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
   2294 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
   2295 			return (NULL);
   2296 		}
   2297 		if (ifa->src_is_priv && !ifa->src_is_loop) {
   2298 			if (dest_is_loop) {
   2299 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
   2300 				return (NULL);
   2301 			}
   2302 		}
   2303 		if (ifa->src_is_glob) {
   2304 			if (dest_is_loop) {
   2305 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
   2306 				return (NULL);
   2307 			}
   2308 		}
   2309 	}
   2310 #endif
   2311 	/* Now that we know what is what, implement or table
   2312 	 * this could in theory be done slicker (it used to be), but this
   2313 	 * is straightforward and easier to validate :-)
   2314 	 */
   2315 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
   2316 		ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
   2317 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
   2318 		dest_is_loop, dest_is_priv, dest_is_global);
   2319 
   2320 	if ((ifa->src_is_loop) && (dest_is_priv)) {
   2321 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
   2322 		return (NULL);
   2323 	}
   2324 	if ((ifa->src_is_glob) && (dest_is_priv)) {
   2325 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
   2326 		return (NULL);
   2327 	}
   2328 	if ((ifa->src_is_loop) && (dest_is_global)) {
   2329 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
   2330 		return (NULL);
   2331 	}
   2332 	if ((ifa->src_is_priv) && (dest_is_global)) {
   2333 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
   2334 		return (NULL);
   2335 	}
   2336 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
   2337 	/* its a preferred address */
   2338 	return (ifa);
   2339 }
   2340 
   2341 static struct sctp_ifa *
   2342 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
   2343 			    uint8_t dest_is_loop,
   2344 			    uint8_t dest_is_priv,
   2345 			    sa_family_t fam)
   2346 {
   2347 	uint8_t dest_is_global = 0;
   2348 
   2349 	/**
   2350 	 * Here we determine if its a acceptable address. A acceptable
   2351 	 * address means it is the same scope or higher scope but we can
   2352 	 * allow for NAT which means its ok to have a global dest and a
   2353 	 * private src.
   2354 	 *
   2355 	 * L = loopback, P = private, G = global
   2356 	 * -----------------------------------------
   2357 	 *  src    |  dest | result
   2358 	 * -----------------------------------------
   2359 	 *   L     |   L   |    yes
   2360 	 *  -----------------------------------------
   2361 	 *   P     |   L   |    yes-v4 no-v6
   2362 	 *  -----------------------------------------
   2363 	 *   G     |   L   |    yes
   2364 	 * -----------------------------------------
   2365 	 *   L     |   P   |    no
   2366 	 * -----------------------------------------
   2367 	 *   P     |   P   |    yes
   2368 	 * -----------------------------------------
   2369 	 *   G     |   P   |    yes - May not work
   2370 	 * -----------------------------------------
   2371 	 *   L     |   G   |    no
   2372 	 * -----------------------------------------
   2373 	 *   P     |   G   |    yes - May not work
   2374 	 * -----------------------------------------
   2375 	 *   G     |   G   |    yes
   2376 	 * -----------------------------------------
   2377 	 */
   2378 
   2379 	if (ifa->address.sa.sa_family != fam) {
   2380 		/* forget non matching family */
   2381 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
   2382 			ifa->address.sa.sa_family, fam);
   2383 		return (NULL);
   2384 	}
   2385 	/* Ok the address may be ok */
   2386 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
   2387 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
   2388 		dest_is_loop, dest_is_priv);
   2389 	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
   2390 		dest_is_global = 1;
   2391 	}
   2392 #ifdef INET6
   2393 	if (fam == AF_INET6) {
   2394 		/* ok to use deprecated addresses? */
   2395 		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
   2396 			return (NULL);
   2397 		}
   2398 		if (ifa->src_is_priv) {
   2399 			/* Special case, linklocal to loop */
   2400 			if (dest_is_loop)
   2401 				return (NULL);
   2402 		}
   2403 	}
   2404 #endif
   2405 	/*
   2406 	 * Now that we know what is what, implement our table.
   2407 	 * This could in theory be done slicker (it used to be), but this
   2408 	 * is straightforward and easier to validate :-)
   2409 	 */
   2410 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
   2411 		ifa->src_is_loop,
   2412 		dest_is_priv);
   2413 	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
   2414 		return (NULL);
   2415 	}
   2416 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
   2417 		ifa->src_is_loop,
   2418 		dest_is_global);
   2419 	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
   2420 		return (NULL);
   2421 	}
   2422 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
   2423 	/* its an acceptable address */
   2424 	return (ifa);
   2425 }
   2426 
   2427 int
   2428 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
   2429 {
   2430 	struct sctp_laddr *laddr;
   2431 
   2432 	if (stcb == NULL) {
   2433 		/* There are no restrictions, no TCB :-) */
   2434 		return (0);
   2435 	}
   2436 	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
   2437 		if (laddr->ifa == NULL) {
   2438 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
   2439 				__FUNCTION__);
   2440 			continue;
   2441 		}
   2442 		if (laddr->ifa == ifa) {
   2443 			/* Yes it is on the list */
   2444 			return (1);
   2445 		}
   2446 	}
   2447 	return (0);
   2448 }
   2449 
   2450 
   2451 int
   2452 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
   2453 {
   2454 	struct sctp_laddr *laddr;
   2455 
   2456 	if (ifa == NULL)
   2457 		return (0);
   2458 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
   2459 		if (laddr->ifa == NULL) {
   2460 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
   2461 				__FUNCTION__);
   2462 			continue;
   2463 		}
   2464 		if ((laddr->ifa == ifa) && laddr->action == 0)
   2465 			/* same pointer */
   2466 			return (1);
   2467 	}
   2468 	return (0);
   2469 }
   2470 
   2471 
   2472 
   2473 static struct sctp_ifa *
   2474 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
   2475 			      sctp_route_t *ro,
   2476 			      uint32_t vrf_id,
   2477 			      int non_asoc_addr_ok,
   2478 			      uint8_t dest_is_priv,
   2479 			      uint8_t dest_is_loop,
   2480 			      sa_family_t fam)
   2481 {
   2482 	struct sctp_laddr *laddr, *starting_point;
   2483 	void *ifn;
   2484 	int resettotop = 0;
   2485 	struct sctp_ifn *sctp_ifn;
   2486 	struct sctp_ifa *sctp_ifa, *sifa;
   2487 	struct sctp_vrf *vrf;
   2488 	uint32_t ifn_index;
   2489 
   2490 	vrf = sctp_find_vrf(vrf_id);
   2491 	if (vrf == NULL)
   2492 		return (NULL);
   2493 
   2494 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
   2495 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
   2496 	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
   2497 	/*
   2498 	 * first question, is the ifn we will emit on in our list, if so, we
   2499 	 * want such an address. Note that we first looked for a
   2500 	 * preferred address.
   2501 	 */
   2502 	if (sctp_ifn) {
   2503 		/* is a preferred one on the interface we route out? */
   2504 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
   2505 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
   2506 			    (non_asoc_addr_ok == 0))
   2507 				continue;
   2508 			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
   2509 							  dest_is_loop,
   2510 							  dest_is_priv, fam);
   2511 			if (sifa == NULL)
   2512 				continue;
   2513 			if (sctp_is_addr_in_ep(inp, sifa)) {
   2514 				atomic_add_int(&sifa->refcount, 1);
   2515 				return (sifa);
   2516 			}
   2517 		}
   2518 	}
   2519 	/*
   2520 	 * ok, now we now need to find one on the list of the addresses.
   2521 	 * We can't get one on the emitting interface so let's find first
   2522 	 * a preferred one. If not that an acceptable one otherwise...
   2523 	 * we return NULL.
   2524 	 */
   2525 	starting_point = inp->next_addr_touse;
   2526  once_again:
   2527 	if (inp->next_addr_touse == NULL) {
   2528 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
   2529 		resettotop = 1;
   2530 	}
   2531 	for (laddr = inp->next_addr_touse; laddr;
   2532 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
   2533 		if (laddr->ifa == NULL) {
   2534 			/* address has been removed */
   2535 			continue;
   2536 		}
   2537 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
   2538 			/* address is being deleted */
   2539 			continue;
   2540 		}
   2541 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
   2542 						  dest_is_priv, fam);
   2543 		if (sifa == NULL)
   2544 			continue;
   2545 		atomic_add_int(&sifa->refcount, 1);
   2546 		return (sifa);
   2547 	}
   2548 	if (resettotop == 0) {
   2549 		inp->next_addr_touse = NULL;
   2550 		goto once_again;
   2551 	}
   2552 
   2553 	inp->next_addr_touse = starting_point;
   2554 	resettotop = 0;
   2555  once_again_too:
   2556 	if (inp->next_addr_touse == NULL) {
   2557 		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
   2558 		resettotop = 1;
   2559 	}
   2560 
   2561 	/* ok, what about an acceptable address in the inp */
   2562 	for (laddr = inp->next_addr_touse; laddr;
   2563 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
   2564 		if (laddr->ifa == NULL) {
   2565 			/* address has been removed */
   2566 			continue;
   2567 		}
   2568 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
   2569 			/* address is being deleted */
   2570 			continue;
   2571 		}
   2572 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
   2573 						   dest_is_priv, fam);
   2574 		if (sifa == NULL)
   2575 			continue;
   2576 		atomic_add_int(&sifa->refcount, 1);
   2577 		return (sifa);
   2578 	}
   2579 	if (resettotop == 0) {
   2580 		inp->next_addr_touse = NULL;
   2581 		goto once_again_too;
   2582 	}
   2583 
   2584 	/*
   2585 	 * no address bound can be a source for the destination we are in
   2586 	 * trouble
   2587 	 */
   2588 	return (NULL);
   2589 }
   2590 
   2591 
   2592 
   2593 static struct sctp_ifa *
   2594 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
   2595 			       struct sctp_tcb *stcb,
   2596 			       sctp_route_t *ro,
   2597 			       uint32_t vrf_id,
   2598 			       uint8_t dest_is_priv,
   2599 			       uint8_t dest_is_loop,
   2600 			       int non_asoc_addr_ok,
   2601 			       sa_family_t fam)
   2602 {
   2603 	struct sctp_laddr *laddr, *starting_point;
   2604 	void *ifn;
   2605 	struct sctp_ifn *sctp_ifn;
   2606 	struct sctp_ifa *sctp_ifa, *sifa;
   2607 	uint8_t start_at_beginning = 0;
   2608 	struct sctp_vrf *vrf;
   2609 	uint32_t ifn_index;
   2610 
   2611 	/*
   2612 	 * first question, is the ifn we will emit on in our list, if so, we
   2613 	 * want that one.
   2614 	 */
   2615 	vrf = sctp_find_vrf(vrf_id);
   2616 	if (vrf == NULL)
   2617 		return (NULL);
   2618 
   2619 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
   2620 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
   2621 	sctp_ifn = sctp_find_ifn( ifn, ifn_index);
   2622 
   2623 	/*
   2624  	 * first question, is the ifn we will emit on in our list?  If so,
   2625 	 * we want that one. First we look for a preferred. Second, we go
   2626 	 * for an acceptable.
   2627 	 */
   2628 	if (sctp_ifn) {
   2629 		/* first try for a preferred address on the ep */
   2630 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
   2631 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
   2632 				continue;
   2633 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
   2634 				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
   2635 				if (sifa == NULL)
   2636 					continue;
   2637 				if (((non_asoc_addr_ok == 0) &&
   2638 				     (sctp_is_addr_restricted(stcb, sifa))) ||
   2639 				    (non_asoc_addr_ok &&
   2640 				     (sctp_is_addr_restricted(stcb, sifa)) &&
   2641 				     (!sctp_is_addr_pending(stcb, sifa)))) {
   2642 					/* on the no-no list */
   2643 					continue;
   2644 				}
   2645 				atomic_add_int(&sifa->refcount, 1);
   2646 				return (sifa);
   2647 			}
   2648 		}
   2649 		/* next try for an acceptable address on the ep */
   2650 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
   2651 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
   2652 				continue;
   2653 			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
   2654 				sifa= sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv,fam);
   2655 				if (sifa == NULL)
   2656 					continue;
   2657 				if (((non_asoc_addr_ok == 0) &&
   2658 				     (sctp_is_addr_restricted(stcb, sifa))) ||
   2659 				    (non_asoc_addr_ok &&
   2660 				     (sctp_is_addr_restricted(stcb, sifa)) &&
   2661 				     (!sctp_is_addr_pending(stcb, sifa)))) {
   2662 					/* on the no-no list */
   2663 					continue;
   2664 				}
   2665 				atomic_add_int(&sifa->refcount, 1);
   2666 				return (sifa);
   2667 			}
   2668 		}
   2669 
   2670 	}
   2671 	/*
   2672 	 * if we can't find one like that then we must look at all
   2673 	 * addresses bound to pick one at first preferable then
   2674 	 * secondly acceptable.
   2675 	 */
   2676 	starting_point = stcb->asoc.last_used_address;
   2677  sctp_from_the_top:
   2678 	if (stcb->asoc.last_used_address == NULL) {
   2679 		start_at_beginning = 1;
   2680 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
   2681 	}
   2682 	/* search beginning with the last used address */
   2683 	for (laddr = stcb->asoc.last_used_address; laddr;
   2684 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
   2685 		if (laddr->ifa == NULL) {
   2686 			/* address has been removed */
   2687 			continue;
   2688 		}
   2689 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
   2690 			/* address is being deleted */
   2691 			continue;
   2692 		}
   2693 		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
   2694 		if (sifa == NULL)
   2695 			continue;
   2696 		if (((non_asoc_addr_ok == 0) &&
   2697 		     (sctp_is_addr_restricted(stcb, sifa))) ||
   2698 		    (non_asoc_addr_ok &&
   2699 		     (sctp_is_addr_restricted(stcb, sifa)) &&
   2700 		     (!sctp_is_addr_pending(stcb, sifa)))) {
   2701 			/* on the no-no list */
   2702 			continue;
   2703 		}
   2704 		stcb->asoc.last_used_address = laddr;
   2705 		atomic_add_int(&sifa->refcount, 1);
   2706 		return (sifa);
   2707 	}
   2708 	if (start_at_beginning == 0) {
   2709 		stcb->asoc.last_used_address = NULL;
   2710 		goto sctp_from_the_top;
   2711 	}
   2712 	/* now try for any higher scope than the destination */
   2713 	stcb->asoc.last_used_address = starting_point;
   2714 	start_at_beginning = 0;
   2715  sctp_from_the_top2:
   2716 	if (stcb->asoc.last_used_address == NULL) {
   2717 		start_at_beginning = 1;
   2718 		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
   2719 	}
   2720 	/* search beginning with the last used address */
   2721 	for (laddr = stcb->asoc.last_used_address; laddr;
   2722 	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
   2723 		if (laddr->ifa == NULL) {
   2724 			/* address has been removed */
   2725 			continue;
   2726 		}
   2727 		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
   2728 			/* address is being deleted */
   2729 			continue;
   2730 		}
   2731 		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
   2732 						   dest_is_priv, fam);
   2733 		if (sifa == NULL)
   2734 			continue;
   2735 		if (((non_asoc_addr_ok == 0) &&
   2736 		     (sctp_is_addr_restricted(stcb, sifa))) ||
   2737 		    (non_asoc_addr_ok &&
   2738 		     (sctp_is_addr_restricted(stcb, sifa)) &&
   2739 		     (!sctp_is_addr_pending(stcb, sifa)))) {
   2740 			/* on the no-no list */
   2741 			continue;
   2742 		}
   2743 		stcb->asoc.last_used_address = laddr;
   2744 		atomic_add_int(&sifa->refcount, 1);
   2745 		return (sifa);
   2746 	}
   2747 	if (start_at_beginning == 0) {
   2748 		stcb->asoc.last_used_address = NULL;
   2749 		goto sctp_from_the_top2;
   2750 	}
   2751 	return (NULL);
   2752 }
   2753 
   2754 static struct sctp_ifa *
   2755 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
   2756 						 struct sctp_tcb *stcb,
   2757 						 int non_asoc_addr_ok,
   2758 						 uint8_t dest_is_loop,
   2759 						 uint8_t dest_is_priv,
   2760 						 int addr_wanted,
   2761 						 sa_family_t fam,
   2762 						 sctp_route_t *ro
   2763 						 )
   2764 {
   2765 	struct sctp_ifa *ifa, *sifa;
   2766 	int num_eligible_addr = 0;
   2767 #ifdef INET6
   2768 #ifdef SCTP_EMBEDDED_V6_SCOPE
   2769 	struct sockaddr_in6 sin6, lsa6;
   2770 
   2771 	if (fam == AF_INET6) {
   2772 		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
   2773 #ifdef SCTP_KAME
   2774 		(void)sa6_recoverscope(&sin6);
   2775 #else
   2776 		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
   2777 #endif  /* SCTP_KAME */
   2778 	}
   2779 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
   2780 #endif	/* INET6 */
   2781 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
   2782 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
   2783 		    (non_asoc_addr_ok == 0))
   2784 			continue;
   2785 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
   2786 						  dest_is_priv, fam);
   2787 		if (sifa == NULL)
   2788 			continue;
   2789 #ifdef INET6
   2790 		if (fam == AF_INET6 &&
   2791 		    dest_is_loop &&
   2792 		    sifa->src_is_loop && sifa->src_is_priv) {
   2793 			/* don't allow fe80::1 to be a src on loop ::1, we don't list it
   2794 			 * to the peer so we will get an abort.
   2795 			 */
   2796 			continue;
   2797 		}
   2798 #ifdef SCTP_EMBEDDED_V6_SCOPE
   2799 		if (fam == AF_INET6 &&
   2800 		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
   2801 		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
   2802 			/* link-local <-> link-local must belong to the same scope. */
   2803 			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
   2804 #ifdef SCTP_KAME
   2805 			(void)sa6_recoverscope(&lsa6);
   2806 #else
   2807 			(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
   2808 #endif  /* SCTP_KAME */
   2809 			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
   2810 				continue;
   2811 			}
   2812 		}
   2813 #endif  /* SCTP_EMBEDDED_V6_SCOPE */
   2814 #endif	/* INET6 */
   2815 
   2816 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
   2817 		/* Check if the IPv6 address matches to next-hop.
   2818 		   In the mobile case, old IPv6 address may be not deleted
   2819 		   from the interface. Then, the interface has previous and
   2820 		   new addresses.  We should use one corresponding to the
   2821 		   next-hop.  (by micchie)
   2822 		 */
   2823 #ifdef INET6
   2824 		if (stcb && fam == AF_INET6 &&
   2825 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
   2826 			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
   2827 			    == 0) {
   2828 				continue;
   2829 			}
   2830 		}
   2831 #endif
   2832 #ifdef INET
   2833 		/* Avoid topologically incorrect IPv4 address */
   2834 		if (stcb && fam == AF_INET &&
   2835 		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
   2836 			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
   2837 				continue;
   2838 			}
   2839 		}
   2840 #endif
   2841 #endif
   2842 		if (stcb) {
   2843 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
   2844 				continue;
   2845 			}
   2846 			if (((non_asoc_addr_ok == 0) &&
   2847 			     (sctp_is_addr_restricted(stcb, sifa))) ||
   2848 			    (non_asoc_addr_ok &&
   2849 			     (sctp_is_addr_restricted(stcb, sifa)) &&
   2850 			     (!sctp_is_addr_pending(stcb, sifa)))) {
   2851 				/*
   2852 				 * It is restricted for some reason..
   2853 				 * probably not yet added.
   2854 				 */
   2855 				continue;
   2856 			}
   2857 		}
   2858 		if (num_eligible_addr >= addr_wanted) {
   2859 			return (sifa);
   2860 		}
   2861 		num_eligible_addr++;
   2862 	}
   2863 	return (NULL);
   2864 }
   2865 
   2866 
   2867 static int
   2868 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
   2869 				  struct sctp_tcb *stcb,
   2870 				  int non_asoc_addr_ok,
   2871 				  uint8_t dest_is_loop,
   2872 				  uint8_t dest_is_priv,
   2873 				  sa_family_t fam)
   2874 {
   2875 	struct sctp_ifa *ifa, *sifa;
   2876 	int num_eligible_addr = 0;
   2877 
   2878 	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
   2879 		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
   2880 		    (non_asoc_addr_ok == 0)) {
   2881 			continue;
   2882 		}
   2883 		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
   2884 						  dest_is_priv, fam);
   2885 		if (sifa == NULL) {
   2886 			continue;
   2887 		}
   2888 		if (stcb) {
   2889 			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
   2890 				continue;
   2891 			}
   2892 			if (((non_asoc_addr_ok == 0) &&
   2893 			     (sctp_is_addr_restricted(stcb, sifa))) ||
   2894 			    (non_asoc_addr_ok &&
   2895 			     (sctp_is_addr_restricted(stcb, sifa)) &&
   2896 			     (!sctp_is_addr_pending(stcb, sifa)))) {
   2897 				/*
   2898 				 * It is restricted for some reason..
   2899 				 * probably not yet added.
   2900 				 */
   2901 				continue;
   2902 			}
   2903 		}
   2904 		num_eligible_addr++;
   2905 	}
   2906 	return (num_eligible_addr);
   2907 }
   2908 
   2909 static struct sctp_ifa *
   2910 sctp_choose_boundall(struct sctp_tcb *stcb,
   2911 		     struct sctp_nets *net,
   2912 		     sctp_route_t *ro,
   2913 		     uint32_t vrf_id,
   2914 		     uint8_t dest_is_priv,
   2915 		     uint8_t dest_is_loop,
   2916 		     int non_asoc_addr_ok,
   2917 		     sa_family_t fam)
   2918 {
   2919 	int cur_addr_num = 0, num_preferred = 0;
   2920 	void *ifn;
   2921 	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
   2922 	struct sctp_ifa *sctp_ifa, *sifa;
   2923 	uint32_t ifn_index;
   2924 	struct sctp_vrf *vrf;
   2925 #ifdef INET
   2926 	int retried = 0;
   2927 #endif
   2928 
   2929 	/*-
   2930 	 * For boundall we can use any address in the association.
   2931 	 * If non_asoc_addr_ok is set we can use any address (at least in
   2932 	 * theory). So we look for preferred addresses first. If we find one,
   2933 	 * we use it. Otherwise we next try to get an address on the
   2934 	 * interface, which we should be able to do (unless non_asoc_addr_ok
   2935 	 * is false and we are routed out that way). In these cases where we
   2936 	 * can't use the address of the interface we go through all the
   2937 	 * ifn's looking for an address we can use and fill that in. Punting
   2938 	 * means we send back address 0, which will probably cause problems
   2939 	 * actually since then IP will fill in the address of the route ifn,
   2940 	 * which means we probably already rejected it.. i.e. here comes an
   2941 	 * abort :-<.
   2942 	 */
   2943 	vrf = sctp_find_vrf(vrf_id);
   2944 	if (vrf == NULL)
   2945 		return (NULL);
   2946 
   2947 	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
   2948 	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
   2949 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
   2950 	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
   2951 	if (sctp_ifn == NULL) {
   2952 		/* ?? We don't have this guy ?? */
   2953 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
   2954 		goto bound_all_plan_b;
   2955 	}
   2956 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
   2957 		ifn_index, sctp_ifn->ifn_name);
   2958 
   2959 	if (net) {
   2960 		cur_addr_num = net->indx_of_eligible_next_to_use;
   2961 	}
   2962 	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
   2963 							  stcb,
   2964 							  non_asoc_addr_ok,
   2965 							  dest_is_loop,
   2966 							  dest_is_priv, fam);
   2967 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
   2968 		num_preferred, sctp_ifn->ifn_name);
   2969 	if (num_preferred == 0) {
   2970 		/*
   2971 		 * no eligible addresses, we must use some other interface
   2972 		 * address if we can find one.
   2973 		 */
   2974 		goto bound_all_plan_b;
   2975 	}
   2976 	/*
   2977 	 * Ok we have num_eligible_addr set with how many we can use, this
   2978 	 * may vary from call to call due to addresses being deprecated
   2979 	 * etc..
   2980 	 */
   2981 	if (cur_addr_num >= num_preferred) {
   2982 		cur_addr_num = 0;
   2983 	}
   2984 	/*
   2985 	 * select the nth address from the list (where cur_addr_num is the
   2986 	 * nth) and 0 is the first one, 1 is the second one etc...
   2987 	 */
   2988 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
   2989 
   2990 	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
   2991                                                                     dest_is_priv, cur_addr_num, fam, ro);
   2992 
   2993 	/* if sctp_ifa is NULL something changed??, fall to plan b. */
   2994 	if (sctp_ifa) {
   2995 		atomic_add_int(&sctp_ifa->refcount, 1);
   2996 		if (net) {
   2997 			/* save off where the next one we will want */
   2998 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
   2999 		}
   3000 		return (sctp_ifa);
   3001 	}
   3002 	/*
   3003 	 * plan_b: Look at all interfaces and find a preferred address. If
   3004 	 * no preferred fall through to plan_c.
   3005 	 */
   3006  bound_all_plan_b:
   3007 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
   3008 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
   3009 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
   3010 			sctp_ifn->ifn_name);
   3011 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
   3012 			/* wrong base scope */
   3013 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
   3014 			continue;
   3015 		}
   3016 		if ((sctp_ifn == looked_at) && looked_at) {
   3017 			/* already looked at this guy */
   3018 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
   3019 			continue;
   3020 		}
   3021 		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
   3022                                                                   dest_is_loop, dest_is_priv, fam);
   3023 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
   3024 			"Found ifn:%p %d preferred source addresses\n",
   3025 			ifn, num_preferred);
   3026 		if (num_preferred == 0) {
   3027 			/* None on this interface. */
   3028 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n");
   3029 			continue;
   3030 		}
   3031 		SCTPDBG(SCTP_DEBUG_OUTPUT2,
   3032 			"num preferred:%d on interface:%p cur_addr_num:%d\n",
   3033 			num_preferred, (void *)sctp_ifn, cur_addr_num);
   3034 
   3035 		/*
   3036 		 * Ok we have num_eligible_addr set with how many we can
   3037 		 * use, this may vary from call to call due to addresses
   3038 		 * being deprecated etc..
   3039 		 */
   3040 		if (cur_addr_num >= num_preferred) {
   3041 			cur_addr_num = 0;
   3042 		}
   3043 		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
   3044                                                                         dest_is_priv, cur_addr_num, fam, ro);
   3045 		if (sifa == NULL)
   3046 			continue;
   3047 		if (net) {
   3048 			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
   3049 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
   3050 				cur_addr_num);
   3051 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
   3052 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
   3053 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
   3054 			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
   3055 		}
   3056 		atomic_add_int(&sifa->refcount, 1);
   3057 		return (sifa);
   3058 	}
   3059 #ifdef INET
   3060 again_with_private_addresses_allowed:
   3061 #endif
   3062 	/* plan_c: do we have an acceptable address on the emit interface */
   3063 	sifa = NULL;
   3064 	SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
   3065 	if (emit_ifn == NULL) {
   3066 		SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
   3067 		goto plan_d;
   3068 	}
   3069 	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
   3070 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
   3071 		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
   3072 		    (non_asoc_addr_ok == 0)) {
   3073 			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
   3074 			continue;
   3075 		}
   3076 		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
   3077 						   dest_is_priv, fam);
   3078 		if (sifa == NULL) {
   3079 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
   3080 			continue;
   3081 		}
   3082 		if (stcb) {
   3083 			if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
   3084 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
   3085 				sifa = NULL;
   3086 				continue;
   3087 			}
   3088 			if (((non_asoc_addr_ok == 0) &&
   3089 			     (sctp_is_addr_restricted(stcb, sifa))) ||
   3090 			    (non_asoc_addr_ok &&
   3091 			     (sctp_is_addr_restricted(stcb, sifa)) &&
   3092 			     (!sctp_is_addr_pending(stcb, sifa)))) {
   3093 				/*
   3094 				 * It is restricted for some
   3095 				 * reason.. probably not yet added.
   3096 				 */
   3097 				SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its resticted\n");
   3098 				sifa = NULL;
   3099 				continue;
   3100 			}
   3101 		} else {
   3102 			SCTP_PRINTF("Stcb is null - no print\n");
   3103 		}
   3104 		atomic_add_int(&sifa->refcount, 1);
   3105 		goto out;
   3106 	}
   3107  plan_d:
   3108 	/*
   3109 	 * plan_d: We are in trouble. No preferred address on the emit
   3110 	 * interface. And not even a preferred address on all interfaces.
   3111 	 * Go out and see if we can find an acceptable address somewhere
   3112 	 * amongst all interfaces.
   3113 	 */
   3114 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
   3115 	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
   3116 		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
   3117 			/* wrong base scope */
   3118 			continue;
   3119 		}
   3120 		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
   3121 			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
   3122 			    (non_asoc_addr_ok == 0))
   3123 				continue;
   3124 			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
   3125 							   dest_is_loop,
   3126 							   dest_is_priv, fam);
   3127 			if (sifa == NULL)
   3128 				continue;
   3129 			if (stcb) {
   3130 				if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
   3131 					sifa = NULL;
   3132 					continue;
   3133 				}
   3134 				if (((non_asoc_addr_ok == 0) &&
   3135 				     (sctp_is_addr_restricted(stcb, sifa))) ||
   3136 				    (non_asoc_addr_ok &&
   3137 				     (sctp_is_addr_restricted(stcb, sifa)) &&
   3138 				     (!sctp_is_addr_pending(stcb, sifa)))) {
   3139 					/*
   3140 					 * It is restricted for some
   3141 					 * reason.. probably not yet added.
   3142 					 */
   3143 					sifa = NULL;
   3144 					continue;
   3145 				}
   3146 			}
   3147 			goto out;
   3148 		}
   3149 	}
   3150 #ifdef INET
   3151 	if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
   3152 		stcb->asoc.scope.ipv4_local_scope = 1;
   3153 		retried = 1;
   3154 		goto again_with_private_addresses_allowed;
   3155 	} else if (retried == 1) {
   3156 		stcb->asoc.scope.ipv4_local_scope = 0;
   3157 	}
   3158 #endif
   3159 out:
   3160 #ifdef INET
   3161 	if (sifa) {
   3162 		if (retried == 1) {
   3163 			LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
   3164 				if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
   3165 					/* wrong base scope */
   3166 					continue;
   3167 				}
   3168 				LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
   3169 					struct sctp_ifa *tmp_sifa;
   3170 
   3171 					if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
   3172 					    (non_asoc_addr_ok == 0))
   3173 						continue;
   3174 					tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
   3175 					                                       dest_is_loop,
   3176 					                                       dest_is_priv, fam);
   3177 					if (tmp_sifa == NULL) {
   3178 						continue;
   3179 					}
   3180 					if (tmp_sifa == sifa) {
   3181 						continue;
   3182 					}
   3183 					if (stcb) {
   3184 						if (sctp_is_address_in_scope(tmp_sifa,
   3185 						                             &stcb->asoc.scope, 0) == 0) {
   3186 							continue;
   3187 						}
   3188 						if (((non_asoc_addr_ok == 0) &&
   3189 						     (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
   3190 						    (non_asoc_addr_ok &&
   3191 						     (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
   3192 						     (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
   3193 							/*
   3194 							 * It is restricted for some
   3195 							 * reason.. probably not yet added.
   3196 							 */
   3197 							continue;
   3198 						}
   3199 					}
   3200 					if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
   3201 					    (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
   3202 						sctp_add_local_addr_restricted(stcb, tmp_sifa);
   3203 					}
   3204 				}
   3205 			}
   3206 		}
   3207 		atomic_add_int(&sifa->refcount, 1);
   3208 	}
   3209 #endif
   3210 	return (sifa);
   3211 }
   3212 
   3213 
   3214 
   3215 /* tcb may be NULL */
   3216 struct sctp_ifa *
   3217 sctp_source_address_selection(struct sctp_inpcb *inp,
   3218 			      struct sctp_tcb *stcb,
   3219 			      sctp_route_t *ro,
   3220 			      struct sctp_nets *net,
   3221 			      int non_asoc_addr_ok, uint32_t vrf_id)
   3222 {
   3223 	struct sctp_ifa *answer;
   3224 	uint8_t dest_is_priv, dest_is_loop;
   3225 	sa_family_t fam;
   3226 #ifdef INET
   3227 	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
   3228 #endif
   3229 #ifdef INET6
   3230 	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
   3231 #endif
   3232 
   3233 	/**
   3234 	 * Rules: - Find the route if needed, cache if I can. - Look at
   3235 	 * interface address in route, Is it in the bound list. If so we
   3236 	 * have the best source. - If not we must rotate amongst the
   3237 	 * addresses.
   3238 	 *
   3239 	 * Cavets and issues
   3240 	 *
   3241 	 * Do we need to pay attention to scope. We can have a private address
   3242 	 * or a global address we are sourcing or sending to. So if we draw
   3243 	 * it out
   3244 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
   3245 	 * For V4
   3246 	 * ------------------------------------------
   3247 	 *      source     *      dest  *  result
   3248 	 * -----------------------------------------
   3249 	 * <a>  Private    *    Global  *  NAT
   3250 	 * -----------------------------------------
   3251 	 * <b>  Private    *    Private *  No problem
   3252 	 * -----------------------------------------
   3253 	 * <c>  Global     *    Private *  Huh, How will this work?
   3254 	 * -----------------------------------------
   3255 	 * <d>  Global     *    Global  *  No Problem
   3256 	 *------------------------------------------
   3257 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
   3258 	 * For V6
   3259 	 *------------------------------------------
   3260 	 *      source     *      dest  *  result
   3261 	 * -----------------------------------------
   3262 	 * <a>  Linklocal  *    Global  *
   3263 	 * -----------------------------------------
   3264 	 * <b>  Linklocal  * Linklocal  *  No problem
   3265 	 * -----------------------------------------
   3266 	 * <c>  Global     * Linklocal  *  Huh, How will this work?
   3267 	 * -----------------------------------------
   3268 	 * <d>  Global     *    Global  *  No Problem
   3269 	 *------------------------------------------
   3270 	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
   3271 	 *
   3272 	 * And then we add to that what happens if there are multiple addresses
   3273 	 * assigned to an interface. Remember the ifa on a ifn is a linked
   3274 	 * list of addresses. So one interface can have more than one IP
   3275 	 * address. What happens if we have both a private and a global
   3276 	 * address? Do we then use context of destination to sort out which
   3277 	 * one is best? And what about NAT's sending P->G may get you a NAT
   3278 	 * translation, or should you select the G thats on the interface in
   3279 	 * preference.
   3280 	 *
   3281 	 * Decisions:
   3282 	 *
   3283 	 * - count the number of addresses on the interface.
   3284 	 * - if it is one, no problem except case <c>.
   3285 	 *   For <a> we will assume a NAT out there.
   3286 	 * - if there are more than one, then we need to worry about scope P
   3287 	 *   or G. We should prefer G -> G and P -> P if possible.
   3288 	 *   Then as a secondary fall back to mixed types G->P being a last
   3289 	 *   ditch one.
   3290 	 * - The above all works for bound all, but bound specific we need to
   3291 	 *   use the same concept but instead only consider the bound
   3292 	 *   addresses. If the bound set is NOT assigned to the interface then
   3293 	 *   we must use rotation amongst the bound addresses..
   3294 	 */
   3295 	if (ro->ro_rt == NULL) {
   3296 		/*
   3297 		 * Need a route to cache.
   3298 		 */
   3299 		SCTP_RTALLOC(ro, vrf_id);
   3300 	}
   3301 	if (ro->ro_rt == NULL) {
   3302 		return (NULL);
   3303 	}
   3304 	fam = ro->ro_dst.sa_family;
   3305 	dest_is_priv = dest_is_loop = 0;
   3306 	/* Setup our scopes for the destination */
   3307 	switch (fam) {
   3308 #ifdef INET
   3309 	case AF_INET:
   3310 		/* Scope based on outbound address */
   3311 		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
   3312 			dest_is_loop = 1;
   3313 			if (net != NULL) {
   3314 				/* mark it as local */
   3315 				net->addr_is_local = 1;
   3316 			}
   3317 		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
   3318 			dest_is_priv = 1;
   3319 		}
   3320 		break;
   3321 #endif
   3322 #ifdef INET6
   3323 	case AF_INET6:
   3324 		/* Scope based on outbound address */
   3325 #if defined(__Userspace_os_Windows)
   3326 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
   3327 #else
   3328 		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
   3329 		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
   3330 #endif
   3331 			/*
   3332 			 * If the address is a loopback address, which
   3333 			 * consists of "::1" OR "fe80::1%lo0", we are loopback
   3334 			 * scope. But we don't use dest_is_priv (link local
   3335 			 * addresses).
   3336 			 */
   3337 			dest_is_loop = 1;
   3338 			if (net != NULL) {
   3339 				/* mark it as local */
   3340 				net->addr_is_local = 1;
   3341 			}
   3342 		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
   3343 			dest_is_priv = 1;
   3344 		}
   3345 		break;
   3346 #endif
   3347 	}
   3348 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
   3349 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
   3350 	SCTP_IPI_ADDR_RLOCK();
   3351 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
   3352 		/*
   3353 		 * Bound all case
   3354 		 */
   3355 		answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
   3356 					      dest_is_priv, dest_is_loop,
   3357 					      non_asoc_addr_ok, fam);
   3358 		SCTP_IPI_ADDR_RUNLOCK();
   3359 		return (answer);
   3360 	}
   3361 	/*
   3362 	 * Subset bound case
   3363 	 */
   3364 	if (stcb) {
   3365 		answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
   3366 							vrf_id,	dest_is_priv,
   3367 							dest_is_loop,
   3368 							non_asoc_addr_ok, fam);
   3369 	} else {
   3370 		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
   3371 						       non_asoc_addr_ok,
   3372 						       dest_is_priv,
   3373 						       dest_is_loop, fam);
   3374 	}
   3375 	SCTP_IPI_ADDR_RUNLOCK();
   3376 	return (answer);
   3377 }
   3378 
   3379 static int
   3380 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
   3381 {
   3382 #if defined(__Userspace_os_Windows)
   3383 	WSACMSGHDR cmh;
   3384 #else
   3385 	struct cmsghdr cmh;
   3386 #endif
   3387 	int tlen, at, found;
   3388 	struct sctp_sndinfo sndinfo;
   3389 	struct sctp_prinfo prinfo;
   3390 	struct sctp_authinfo authinfo;
   3391 
   3392 	tlen = SCTP_BUF_LEN(control);
   3393 	at = 0;
   3394 	found = 0;
   3395 	/*
   3396 	 * Independent of how many mbufs, find the c_type inside the control
   3397 	 * structure and copy out the data.
   3398 	 */
   3399 	while (at < tlen) {
   3400 		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
   3401 			/* There is not enough room for one more. */
   3402 			return (found);
   3403 		}
   3404 		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
   3405 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
   3406 			/* We dont't have a complete CMSG header. */
   3407 			return (found);
   3408 		}
   3409 		if (((int)cmh.cmsg_len + at) > tlen) {
   3410 			/* We don't have the complete CMSG. */
   3411 			return (found);
   3412 		}
   3413 		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
   3414 		    ((c_type == cmh.cmsg_type) ||
   3415 		     ((c_type == SCTP_SNDRCV) &&
   3416 		      ((cmh.cmsg_type == SCTP_SNDINFO) ||
   3417 		       (cmh.cmsg_type == SCTP_PRINFO) ||
   3418 		       (cmh.cmsg_type == SCTP_AUTHINFO))))) {
   3419 			if (c_type == cmh.cmsg_type) {
   3420 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
   3421 					return (found);
   3422 				}
   3423 				/* It is exactly what we want. Copy it out. */
   3424 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
   3425 				return (1);
   3426 			} else {
   3427 				struct sctp_sndrcvinfo *sndrcvinfo;
   3428 
   3429 				sndrcvinfo = (struct sctp_sndrcvinfo *)data;
   3430 				if (found == 0) {
   3431 					if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
   3432 						return (found);
   3433 					}
   3434 					memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
   3435 				}
   3436 				switch (cmh.cmsg_type) {
   3437 				case SCTP_SNDINFO:
   3438 					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
   3439 						return (found);
   3440 					}
   3441 					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
   3442 					sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
   3443 					sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
   3444 					sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
   3445 					sndrcvinfo->sinfo_context = sndinfo.snd_context;
   3446 					sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
   3447 					break;
   3448 				case SCTP_PRINFO:
   3449 					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
   3450 						return (found);
   3451 					}
   3452 					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
   3453 					if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
   3454 						sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
   3455 					} else {
   3456 						sndrcvinfo->sinfo_timetolive = 0;
   3457 					}
   3458 					sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
   3459 					break;
   3460 				case SCTP_AUTHINFO:
   3461 					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
   3462 						return (found);
   3463 					}
   3464 					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
   3465 					sndrcvinfo->sinfo_keynumber_valid = 1;
   3466 					sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
   3467 					break;
   3468 				default:
   3469 					return (found);
   3470 				}
   3471 				found = 1;
   3472 			}
   3473 		}
   3474 		at += CMSG_ALIGN(cmh.cmsg_len);
   3475 	}
   3476 	return (found);
   3477 }
   3478 
   3479 static int
   3480 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
   3481 {
   3482 #if defined(__Userspace_os_Windows)
   3483 	WSACMSGHDR cmh;
   3484 #else
   3485 	struct cmsghdr cmh;
   3486 #endif
   3487 	int tlen, at;
   3488 	struct sctp_initmsg initmsg;
   3489 #ifdef INET
   3490 	struct sockaddr_in sin;
   3491 #endif
   3492 #ifdef INET6
   3493 	struct sockaddr_in6 sin6;
   3494 #endif
   3495 
   3496 	tlen = SCTP_BUF_LEN(control);
   3497 	at = 0;
   3498 	while (at < tlen) {
   3499 		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
   3500 			/* There is not enough room for one more. */
   3501 			*error = EINVAL;
   3502 			return (1);
   3503 		}
   3504 		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
   3505 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
   3506 			/* We dont't have a complete CMSG header. */
   3507 			*error = EINVAL;
   3508 			return (1);
   3509 		}
   3510 		if (((int)cmh.cmsg_len + at) > tlen) {
   3511 			/* We don't have the complete CMSG. */
   3512 			*error = EINVAL;
   3513 			return (1);
   3514 		}
   3515 		if (cmh.cmsg_level == IPPROTO_SCTP) {
   3516 			switch (cmh.cmsg_type) {
   3517 			case SCTP_INIT:
   3518 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
   3519 					*error = EINVAL;
   3520 					return (1);
   3521 				}
   3522 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
   3523 				if (initmsg.sinit_max_attempts)
   3524 					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
   3525 				if (initmsg.sinit_num_ostreams)
   3526 					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
   3527 				if (initmsg.sinit_max_instreams)
   3528 					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
   3529 				if (initmsg.sinit_max_init_timeo)
   3530 					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
   3531 				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
   3532 					struct sctp_stream_out *tmp_str;
   3533 					unsigned int i;
   3534 
   3535 					/* Default is NOT correct */
   3536 					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
   3537 						stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
   3538 					SCTP_TCB_UNLOCK(stcb);
   3539 					SCTP_MALLOC(tmp_str,
   3540 					            struct sctp_stream_out *,
   3541 					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
   3542 					            SCTP_M_STRMO);
   3543 					SCTP_TCB_LOCK(stcb);
   3544 					if (tmp_str != NULL) {
   3545 						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
   3546 						stcb->asoc.strmout = tmp_str;
   3547 						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
   3548 					} else {
   3549 						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
   3550 					}
   3551 					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
   3552 						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
   3553 						stcb->asoc.strmout[i].chunks_on_queues = 0;
   3554 						stcb->asoc.strmout[i].next_sequence_send = 0;
   3555 						stcb->asoc.strmout[i].stream_no = i;
   3556 						stcb->asoc.strmout[i].last_msg_incomplete = 0;
   3557 						stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
   3558 					}
   3559 				}
   3560 				break;
   3561 #ifdef INET
   3562 			case SCTP_DSTADDRV4:
   3563 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
   3564 					*error = EINVAL;
   3565 					return (1);
   3566 				}
   3567 				memset(&sin, 0, sizeof(struct sockaddr_in));
   3568 				sin.sin_family = AF_INET;
   3569 #ifdef HAVE_SIN_LEN
   3570 				sin.sin_len = sizeof(struct sockaddr_in);
   3571 #endif
   3572 				sin.sin_port = stcb->rport;
   3573 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
   3574 				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
   3575 				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
   3576 				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
   3577 					*error = EINVAL;
   3578 					return (1);
   3579 				}
   3580 				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
   3581 				                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
   3582 					*error = ENOBUFS;
   3583 					return (1);
   3584 				}
   3585 				break;
   3586 #endif
   3587 #ifdef INET6
   3588 			case SCTP_DSTADDRV6:
   3589 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
   3590 					*error = EINVAL;
   3591 					return (1);
   3592 				}
   3593 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
   3594 				sin6.sin6_family = AF_INET6;
   3595 #ifdef HAVE_SIN6_LEN
   3596 				sin6.sin6_len = sizeof(struct sockaddr_in6);
   3597 #endif
   3598 				sin6.sin6_port = stcb->rport;
   3599 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
   3600 				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
   3601 				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
   3602 					*error = EINVAL;
   3603 					return (1);
   3604 				}
   3605 #ifdef INET
   3606 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
   3607 					in6_sin6_2_sin(&sin, &sin6);
   3608 					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
   3609 					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
   3610 					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
   3611 						*error = EINVAL;
   3612 						return (1);
   3613 					}
   3614 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
   3615 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
   3616 						*error = ENOBUFS;
   3617 						return (1);
   3618 					}
   3619 				} else
   3620 #endif
   3621 					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
   3622 					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
   3623 						*error = ENOBUFS;
   3624 						return (1);
   3625 					}
   3626 				break;
   3627 #endif
   3628 			default:
   3629 				break;
   3630 			}
   3631 		}
   3632 		at += CMSG_ALIGN(cmh.cmsg_len);
   3633 	}
   3634 	return (0);
   3635 }
   3636 
   3637 static struct sctp_tcb *
   3638 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
   3639                            uint16_t port,
   3640                            struct mbuf *control,
   3641                            struct sctp_nets **net_p,
   3642                            int *error)
   3643 {
   3644 #if defined(__Userspace_os_Windows)
   3645 	WSACMSGHDR cmh;
   3646 #else
   3647 	struct cmsghdr cmh;
   3648 #endif
   3649 	int tlen, at;
   3650 	struct sctp_tcb *stcb;
   3651 	struct sockaddr *addr;
   3652 #ifdef INET
   3653 	struct sockaddr_in sin;
   3654 #endif
   3655 #ifdef INET6
   3656 	struct sockaddr_in6 sin6;
   3657 #endif
   3658 
   3659 	tlen = SCTP_BUF_LEN(control);
   3660 	at = 0;
   3661 	while (at < tlen) {
   3662 		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
   3663 			/* There is not enough room for one more. */
   3664 			*error = EINVAL;
   3665 			return (NULL);
   3666 		}
   3667 		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
   3668 		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
   3669 			/* We dont't have a complete CMSG header. */
   3670 			*error = EINVAL;
   3671 			return (NULL);
   3672 		}
   3673 		if (((int)cmh.cmsg_len + at) > tlen) {
   3674 			/* We don't have the complete CMSG. */
   3675 			*error = EINVAL;
   3676 			return (NULL);
   3677 		}
   3678 		if (cmh.cmsg_level == IPPROTO_SCTP) {
   3679 			switch (cmh.cmsg_type) {
   3680 #ifdef INET
   3681 			case SCTP_DSTADDRV4:
   3682 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
   3683 					*error = EINVAL;
   3684 					return (NULL);
   3685 				}
   3686 				memset(&sin, 0, sizeof(struct sockaddr_in));
   3687 				sin.sin_family = AF_INET;
   3688 #ifdef HAVE_SIN_LEN
   3689 				sin.sin_len = sizeof(struct sockaddr_in);
   3690 #endif
   3691 				sin.sin_port = port;
   3692 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
   3693 				addr = (struct sockaddr *)&sin;
   3694 				break;
   3695 #endif
   3696 #ifdef INET6
   3697 			case SCTP_DSTADDRV6:
   3698 				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
   3699 					*error = EINVAL;
   3700 					return (NULL);
   3701 				}
   3702 				memset(&sin6, 0, sizeof(struct sockaddr_in6));
   3703 				sin6.sin6_family = AF_INET6;
   3704 #ifdef HAVE_SIN6_LEN
   3705 				sin6.sin6_len = sizeof(struct sockaddr_in6);
   3706 #endif
   3707 				sin6.sin6_port = port;
   3708 				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
   3709 #ifdef INET
   3710 				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
   3711 					in6_sin6_2_sin(&sin, &sin6);
   3712 					addr = (struct sockaddr *)&sin;
   3713 				} else
   3714 #endif
   3715 					addr = (struct sockaddr *)&sin6;
   3716 				break;
   3717 #endif
   3718 			default:
   3719 				addr = NULL;
   3720 				break;
   3721 			}
   3722 			if (addr) {
   3723 				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
   3724 				if (stcb != NULL) {
   3725 					return (stcb);
   3726 				}
   3727 			}
   3728 		}
   3729 		at += CMSG_ALIGN(cmh.cmsg_len);
   3730 	}
   3731 	return (NULL);
   3732 }
   3733 
   3734 static struct mbuf *
   3735 sctp_add_cookie(struct mbuf *init, int init_offset,
   3736     struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
   3737 {
   3738 	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
   3739 	struct sctp_state_cookie *stc;
   3740 	struct sctp_paramhdr *ph;
   3741 	uint8_t *foo;
   3742 	int sig_offset;
   3743 	uint16_t cookie_sz;
   3744 
   3745 	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
   3746 				      sizeof(struct sctp_paramhdr)), 0,
   3747 				     M_NOWAIT, 1, MT_DATA);
   3748 	if (mret == NULL) {
   3749 		return (NULL);
   3750 	}
   3751 	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
   3752 	if (copy_init == NULL) {
   3753 		sctp_m_freem(mret);
   3754 		return (NULL);
   3755 	}
   3756 #ifdef SCTP_MBUF_LOGGING
   3757 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   3758 		struct mbuf *mat;
   3759 
   3760 		for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
   3761 			if (SCTP_BUF_IS_EXTENDED(mat)) {
   3762 				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
   3763 			}
   3764 		}
   3765 	}
   3766 #endif
   3767 	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
   3768 	    M_NOWAIT);
   3769 	if (copy_initack == NULL) {
   3770 		sctp_m_freem(mret);
   3771 		sctp_m_freem(copy_init);
   3772 		return (NULL);
   3773 	}
   3774 #ifdef SCTP_MBUF_LOGGING
   3775 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   3776 		struct mbuf *mat;
   3777 
   3778 		for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
   3779 			if (SCTP_BUF_IS_EXTENDED(mat)) {
   3780 				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
   3781 			}
   3782 		}
   3783 	}
   3784 #endif
   3785 	/* easy side we just drop it on the end */
   3786 	ph = mtod(mret, struct sctp_paramhdr *);
   3787 	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
   3788 	    sizeof(struct sctp_paramhdr);
   3789 	stc = (struct sctp_state_cookie *)((caddr_t)ph +
   3790 	    sizeof(struct sctp_paramhdr));
   3791 	ph->param_type = htons(SCTP_STATE_COOKIE);
   3792 	ph->param_length = 0;	/* fill in at the end */
   3793 	/* Fill in the stc cookie data */
   3794 	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
   3795 
   3796 	/* tack the INIT and then the INIT-ACK onto the chain */
   3797 	cookie_sz = 0;
   3798 	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
   3799 		cookie_sz += SCTP_BUF_LEN(m_at);
   3800 		if (SCTP_BUF_NEXT(m_at) == NULL) {
   3801 			SCTP_BUF_NEXT(m_at) = copy_init;
   3802 			break;
   3803 		}
   3804 	}
   3805 	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
   3806 		cookie_sz += SCTP_BUF_LEN(m_at);
   3807 		if (SCTP_BUF_NEXT(m_at) == NULL) {
   3808 			SCTP_BUF_NEXT(m_at) = copy_initack;
   3809 			break;
   3810 		}
   3811 	}
   3812 	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
   3813 		cookie_sz += SCTP_BUF_LEN(m_at);
   3814 		if (SCTP_BUF_NEXT(m_at) == NULL) {
   3815 			break;
   3816 		}
   3817 	}
   3818 	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
   3819 	if (sig == NULL) {
   3820 		/* no space, so free the entire chain */
   3821 		sctp_m_freem(mret);
   3822 		return (NULL);
   3823 	}
   3824 	SCTP_BUF_LEN(sig) = 0;
   3825 	SCTP_BUF_NEXT(m_at) = sig;
   3826 	sig_offset = 0;
   3827 	foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset);
   3828 	memset(foo, 0, SCTP_SIGNATURE_SIZE);
   3829 	*signature = foo;
   3830 	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
   3831 	cookie_sz += SCTP_SIGNATURE_SIZE;
   3832 	ph->param_length = htons(cookie_sz);
   3833 	return (mret);
   3834 }
   3835 
   3836 
   3837 static uint8_t
   3838 sctp_get_ect(struct sctp_tcb *stcb)
   3839 {
   3840 	if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
   3841 		return (SCTP_ECT0_BIT);
   3842 	} else {
   3843 		return (0);
   3844 	}
   3845 }
   3846 
   3847 #if defined(INET) || defined(INET6)
   3848 static void
   3849 sctp_handle_no_route(struct sctp_tcb *stcb,
   3850                      struct sctp_nets *net,
   3851                      int so_locked)
   3852 {
   3853 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
   3854 
   3855 	if (net) {
   3856 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
   3857 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
   3858 		if (net->dest_state & SCTP_ADDR_CONFIRMED) {
   3859 			if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
   3860 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
   3861 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
   3862 			                        stcb, 0,
   3863 			                        (void *)net,
   3864 			                        so_locked);
   3865 				net->dest_state &= ~SCTP_ADDR_REACHABLE;
   3866 				net->dest_state &= ~SCTP_ADDR_PF;
   3867 			}
   3868 		}
   3869 		if (stcb) {
   3870 			if (net == stcb->asoc.primary_destination) {
   3871 				/* need a new primary */
   3872 				struct sctp_nets *alt;
   3873 
   3874 				alt = sctp_find_alternate_net(stcb, net, 0);
   3875 				if (alt != net) {
   3876 					if (stcb->asoc.alternate) {
   3877 						sctp_free_remote_addr(stcb->asoc.alternate);
   3878 					}
   3879 					stcb->asoc.alternate = alt;
   3880 					atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
   3881 					if (net->ro._s_addr) {
   3882 						sctp_free_ifa(net->ro._s_addr);
   3883 						net->ro._s_addr = NULL;
   3884 					}
   3885 					net->src_addr_selected = 0;
   3886 				}
   3887 			}
   3888 		}
   3889 	}
   3890 }
   3891 #endif
   3892 
   3893 static int
   3894 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
   3895     struct sctp_tcb *stcb,	/* may be NULL */
   3896     struct sctp_nets *net,
   3897     struct sockaddr *to,
   3898     struct mbuf *m,
   3899     uint32_t auth_offset,
   3900     struct sctp_auth_chunk *auth,
   3901     uint16_t auth_keyid,
   3902     int nofragment_flag,
   3903     int ecn_ok,
   3904     int out_of_asoc_ok,
   3905     uint16_t src_port,
   3906     uint16_t dest_port,
   3907     uint32_t v_tag,
   3908     uint16_t port,
   3909     union sctp_sockstore *over_addr,
   3910 #if defined(__FreeBSD__)
   3911     uint8_t use_mflowid, uint32_t mflowid,
   3912 #endif
   3913 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   3914     int so_locked SCTP_UNUSED
   3915 #else
   3916     int so_locked
   3917 #endif
   3918     )
   3919 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
   3920 {
   3921 	/**
   3922 	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
   3923 	 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
   3924 	 * - fill in the HMAC digest of any AUTH chunk in the packet.
   3925 	 * - calculate and fill in the SCTP checksum.
   3926 	 * - prepend an IP address header.
   3927 	 * - if boundall use INADDR_ANY.
   3928 	 * - if boundspecific do source address selection.
   3929 	 * - set fragmentation option for ipV4.
   3930 	 * - On return from IP output, check/adjust mtu size of output
   3931 	 *   interface and smallest_mtu size as well.
   3932 	 */
   3933 	/* Will need ifdefs around this */
   3934 #ifdef __Panda__
   3935 	pakhandle_type o_pak;
   3936 #endif
   3937 	struct mbuf *newm;
   3938 	struct sctphdr *sctphdr;
   3939 	int packet_length;
   3940 	int ret;
   3941 #if defined(INET) || defined(INET6)
   3942 	uint32_t vrf_id;
   3943 #endif
   3944 #if defined(INET) || defined(INET6)
   3945 #if !defined(__Panda__)
   3946 	struct mbuf *o_pak;
   3947 #endif
   3948 	sctp_route_t *ro = NULL;
   3949 	struct udphdr *udp = NULL;
   3950 #endif
   3951 	uint8_t tos_value;
   3952 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   3953 	struct socket *so = NULL;
   3954 #endif
   3955 
   3956 #if defined(__APPLE__)
   3957 	if (so_locked) {
   3958 		sctp_lock_assert(SCTP_INP_SO(inp));
   3959 		SCTP_TCB_LOCK_ASSERT(stcb);
   3960 	} else {
   3961 		sctp_unlock_assert(SCTP_INP_SO(inp));
   3962 	}
   3963 #endif
   3964 	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
   3965 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
   3966 		sctp_m_freem(m);
   3967 		return (EFAULT);
   3968 	}
   3969 #if defined(INET) || defined(INET6)
   3970 	if (stcb) {
   3971 		vrf_id = stcb->asoc.vrf_id;
   3972 	} else {
   3973 		vrf_id = inp->def_vrf_id;
   3974 	}
   3975 #endif
   3976 	/* fill in the HMAC digest for any AUTH chunk in the packet */
   3977 	if ((auth != NULL) && (stcb != NULL)) {
   3978 		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
   3979 	}
   3980 
   3981 	if (net) {
   3982 		tos_value = net->dscp;
   3983 	} else if (stcb) {
   3984 		tos_value = stcb->asoc.default_dscp;
   3985 	} else {
   3986 		tos_value = inp->sctp_ep.default_dscp;
   3987 	}
   3988 
   3989 	switch (to->sa_family) {
   3990 #ifdef INET
   3991 	case AF_INET:
   3992 	{
   3993 		struct ip *ip = NULL;
   3994 		sctp_route_t iproute;
   3995 		int len;
   3996 
   3997 		len = sizeof(struct ip) + sizeof(struct sctphdr);
   3998 		if (port) {
   3999 			len += sizeof(struct udphdr);
   4000 		}
   4001 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
   4002 		if (newm == NULL) {
   4003 			sctp_m_freem(m);
   4004 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   4005 			return (ENOMEM);
   4006 		}
   4007 		SCTP_ALIGN_TO_END(newm, len);
   4008 		SCTP_BUF_LEN(newm) = len;
   4009 		SCTP_BUF_NEXT(newm) = m;
   4010 		m = newm;
   4011 #if defined(__FreeBSD__)
   4012 		if (net != NULL) {
   4013 #ifdef INVARIANTS
   4014 			if (net->flowidset == 0) {
   4015 				panic("Flow ID not set");
   4016 			}
   4017 #endif
   4018 			m->m_pkthdr.flowid = net->flowid;
   4019 			m->m_flags |= M_FLOWID;
   4020 		} else {
   4021 			if (use_mflowid != 0) {
   4022 				m->m_pkthdr.flowid = mflowid;
   4023 				m->m_flags |= M_FLOWID;
   4024 			}
   4025 		}
   4026 #endif
   4027 		packet_length = sctp_calculate_len(m);
   4028 		ip = mtod(m, struct ip *);
   4029 		ip->ip_v = IPVERSION;
   4030 		ip->ip_hl = (sizeof(struct ip) >> 2);
   4031 		if (tos_value == 0) {
   4032 			/*
   4033 			 * This means especially, that it is not set at the
   4034 			 * SCTP layer. So use the value from the IP layer.
   4035 			 */
   4036 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
   4037 			tos_value = inp->ip_inp.inp.inp_ip_tos;
   4038 #else
   4039 			tos_value = inp->inp_ip_tos;
   4040 #endif
   4041 		}
   4042 		tos_value &= 0xfc;
   4043 		if (ecn_ok) {
   4044 			tos_value |= sctp_get_ect(stcb);
   4045 		}
   4046                 if ((nofragment_flag) && (port == 0)) {
   4047 #if defined(__FreeBSD__)
   4048 #if __FreeBSD_version >= 1000000
   4049 			ip->ip_off = htons(IP_DF);
   4050 #else
   4051 			ip->ip_off = IP_DF;
   4052 #endif
   4053 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace__)
   4054 			ip->ip_off = IP_DF;
   4055 #else
   4056 			ip->ip_off = htons(IP_DF);
   4057 #endif
   4058 		} else {
   4059 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
   4060 			ip->ip_off = htons(0);
   4061 #else
   4062 			ip->ip_off = 0;
   4063 #endif
   4064 		}
   4065 #if defined(__FreeBSD__)
   4066 		/* FreeBSD has a function for ip_id's */
   4067 		ip->ip_id = ip_newid();
   4068 #elif defined(RANDOM_IP_ID)
   4069 		/* Apple has RANDOM_IP_ID switch */
   4070 		ip->ip_id = htons(ip_randomid());
   4071 #elif defined(__Userspace__)
   4072                 ip->ip_id = htons(SCTP_IP_ID(inp)++);
   4073 #else
   4074 		ip->ip_id = SCTP_IP_ID(inp)++;
   4075 #endif
   4076 
   4077 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
   4078 		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
   4079 #else
   4080 		ip->ip_ttl = inp->inp_ip_ttl;
   4081 #endif
   4082 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
   4083 		ip->ip_len = htons(packet_length);
   4084 #else
   4085 		ip->ip_len = packet_length;
   4086 #endif
   4087 		ip->ip_tos = tos_value;
   4088 		if (port) {
   4089 			ip->ip_p = IPPROTO_UDP;
   4090 		} else {
   4091 			ip->ip_p = IPPROTO_SCTP;
   4092 		}
   4093 		ip->ip_sum = 0;
   4094 		if (net == NULL) {
   4095 			ro = &iproute;
   4096 			memset(&iproute, 0, sizeof(iproute));
   4097 #ifdef HAVE_SA_LEN
   4098 			memcpy(&ro->ro_dst, to, to->sa_len);
   4099 #else
   4100 			memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
   4101 #endif
   4102 		} else {
   4103 			ro = (sctp_route_t *)&net->ro;
   4104 		}
   4105 		/* Now the address selection part */
   4106 		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
   4107 
   4108 		/* call the routine to select the src address */
   4109 		if (net && out_of_asoc_ok == 0) {
   4110 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
   4111 				sctp_free_ifa(net->ro._s_addr);
   4112 				net->ro._s_addr = NULL;
   4113 				net->src_addr_selected = 0;
   4114 				if (ro->ro_rt) {
   4115 					RTFREE(ro->ro_rt);
   4116 					ro->ro_rt = NULL;
   4117 				}
   4118 			}
   4119 			if (net->src_addr_selected == 0) {
   4120 				/* Cache the source address */
   4121 				net->ro._s_addr = sctp_source_address_selection(inp,stcb,
   4122 										ro, net, 0,
   4123 										vrf_id);
   4124 				net->src_addr_selected = 1;
   4125 			}
   4126 			if (net->ro._s_addr == NULL) {
   4127 				/* No route to host */
   4128 				net->src_addr_selected = 0;
   4129 				sctp_handle_no_route(stcb, net, so_locked);
   4130 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
   4131 				sctp_m_freem(m);
   4132 				return (EHOSTUNREACH);
   4133 			}
   4134 			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
   4135 		} else {
   4136 			if (over_addr == NULL) {
   4137 				struct sctp_ifa *_lsrc;
   4138 
   4139 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
   4140 				                                      net,
   4141 				                                      out_of_asoc_ok,
   4142 				                                      vrf_id);
   4143 				if (_lsrc == NULL) {
   4144 					sctp_handle_no_route(stcb, net, so_locked);
   4145 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
   4146 					sctp_m_freem(m);
   4147 					return (EHOSTUNREACH);
   4148 				}
   4149 				ip->ip_src = _lsrc->address.sin.sin_addr;
   4150 				sctp_free_ifa(_lsrc);
   4151 			} else {
   4152 				ip->ip_src = over_addr->sin.sin_addr;
   4153 				SCTP_RTALLOC(ro, vrf_id);
   4154 			}
   4155 		}
   4156 		if (port) {
   4157 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
   4158 				sctp_handle_no_route(stcb, net, so_locked);
   4159 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
   4160 				sctp_m_freem(m);
   4161 				return (EHOSTUNREACH);
   4162 			}
   4163 			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
   4164 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
   4165 			udp->uh_dport = port;
   4166 			udp->uh_ulen = htons(packet_length - sizeof(struct ip));
   4167 #if !defined(__Windows__) && !defined(__Userspace__)
   4168 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
   4169 			if (V_udp_cksum) {
   4170 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
   4171 			} else {
   4172 				udp->uh_sum = 0;
   4173 			}
   4174 #else
   4175 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
   4176 #endif
   4177 #else
   4178 			udp->uh_sum = 0;
   4179 #endif
   4180 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
   4181 		} else {
   4182 			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
   4183 		}
   4184 
   4185 		sctphdr->src_port = src_port;
   4186 		sctphdr->dest_port = dest_port;
   4187 		sctphdr->v_tag = v_tag;
   4188 		sctphdr->checksum = 0;
   4189 
   4190 		/*
   4191 		 * If source address selection fails and we find no route
   4192 		 * then the ip_output should fail as well with a
   4193 		 * NO_ROUTE_TO_HOST type error. We probably should catch
   4194 		 * that somewhere and abort the association right away
   4195 		 * (assuming this is an INIT being sent).
   4196 		 */
   4197 		if (ro->ro_rt == NULL) {
   4198 			/*
   4199 			 * src addr selection failed to find a route (or
   4200 			 * valid source addr), so we can't get there from
   4201 			 * here (yet)!
   4202 			 */
   4203 			sctp_handle_no_route(stcb, net, so_locked);
   4204 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
   4205 			sctp_m_freem(m);
   4206 			return (EHOSTUNREACH);
   4207 		}
   4208 		if (ro != &iproute) {
   4209 			memcpy(&iproute, ro, sizeof(*ro));
   4210 		}
   4211 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
   4212 			(uint32_t) (ntohl(ip->ip_src.s_addr)));
   4213 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
   4214 			(uint32_t)(ntohl(ip->ip_dst.s_addr)));
   4215 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
   4216 			(void *)ro->ro_rt);
   4217 
   4218 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
   4219 			/* failed to prepend data, give up */
   4220 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   4221 			sctp_m_freem(m);
   4222 			return (ENOMEM);
   4223 		}
   4224 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
   4225 		if (port) {
   4226 #if defined(SCTP_WITH_NO_CSUM)
   4227 			SCTP_STAT_INCR(sctps_sendnocrc);
   4228 #else
   4229 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
   4230 			SCTP_STAT_INCR(sctps_sendswcrc);
   4231 #endif
   4232 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
   4233 			if (V_udp_cksum) {
   4234 				SCTP_ENABLE_UDP_CSUM(o_pak);
   4235 			}
   4236 #else
   4237 			SCTP_ENABLE_UDP_CSUM(o_pak);
   4238 #endif
   4239 		} else {
   4240 #if defined(SCTP_WITH_NO_CSUM)
   4241 			SCTP_STAT_INCR(sctps_sendnocrc);
   4242 #else
   4243 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
   4244 			m->m_pkthdr.csum_flags = CSUM_SCTP;
   4245 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
   4246 			SCTP_STAT_INCR(sctps_sendhwcrc);
   4247 #else
   4248 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
   4249 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
   4250 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
   4251 				SCTP_STAT_INCR(sctps_sendswcrc);
   4252 			} else {
   4253 				SCTP_STAT_INCR(sctps_sendnocrc);
   4254 			}
   4255 #endif
   4256 #endif
   4257 		}
   4258 #ifdef SCTP_PACKET_LOGGING
   4259 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
   4260 			sctp_packet_log(o_pak);
   4261 #endif
   4262 		/* send it out.  table id is taken from stcb */
   4263 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   4264 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
   4265 			so = SCTP_INP_SO(inp);
   4266 			SCTP_SOCKET_UNLOCK(so, 0);
   4267 		}
   4268 #endif
   4269 		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
   4270 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   4271 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
   4272 			atomic_add_int(&stcb->asoc.refcnt, 1);
   4273 			SCTP_TCB_UNLOCK(stcb);
   4274 			SCTP_SOCKET_LOCK(so, 0);
   4275 			SCTP_TCB_LOCK(stcb);
   4276 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
   4277 		}
   4278 #endif
   4279 		SCTP_STAT_INCR(sctps_sendpackets);
   4280 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
   4281 		if (ret)
   4282 			SCTP_STAT_INCR(sctps_senderrors);
   4283 
   4284 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
   4285 		if (net == NULL) {
   4286 			/* free tempy routes */
   4287 #if defined(__FreeBSD__) && __FreeBSD_version > 901000
   4288 			RO_RTFREE(ro);
   4289 #else
   4290 			if (ro->ro_rt) {
   4291 				RTFREE(ro->ro_rt);
   4292 				ro->ro_rt = NULL;
   4293 			}
   4294 #endif
   4295 		} else {
   4296 			/* PMTU check versus smallest asoc MTU goes here */
   4297 			if ((ro->ro_rt != NULL) &&
   4298 			    (net->ro._s_addr)) {
   4299 				uint32_t mtu;
   4300 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
   4301 				if (net->port) {
   4302 					mtu -= sizeof(struct udphdr);
   4303 				}
   4304 				if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
   4305 					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
   4306 					net->mtu = mtu;
   4307 				}
   4308 			} else if (ro->ro_rt == NULL) {
   4309 				/* route was freed */
   4310 				if (net->ro._s_addr &&
   4311 				    net->src_addr_selected) {
   4312 					sctp_free_ifa(net->ro._s_addr);
   4313 					net->ro._s_addr = NULL;
   4314 				}
   4315 				net->src_addr_selected = 0;
   4316 			}
   4317 		}
   4318 		return (ret);
   4319 	}
   4320 #endif
   4321 #ifdef INET6
   4322 	case AF_INET6:
   4323 	{
   4324 		uint32_t flowlabel, flowinfo;
   4325 		struct ip6_hdr *ip6h;
   4326 		struct route_in6 ip6route;
   4327 #if !(defined(__Panda__) || defined(__Userspace__))
   4328 		struct ifnet *ifp;
   4329 #endif
   4330 		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
   4331 		int prev_scope = 0;
   4332 #ifdef SCTP_EMBEDDED_V6_SCOPE
   4333 		struct sockaddr_in6 lsa6_storage;
   4334 		int error;
   4335 #endif
   4336 		u_short prev_port = 0;
   4337 		int len;
   4338 
   4339 		if (net) {
   4340 			flowlabel = net->flowlabel;
   4341 		} else if (stcb) {
   4342 			flowlabel = stcb->asoc.default_flowlabel;
   4343 		} else {
   4344 			flowlabel = inp->sctp_ep.default_flowlabel;
   4345 		}
   4346 		if (flowlabel == 0) {
   4347 			/*
   4348 			 * This means especially, that it is not set at the
   4349 			 * SCTP layer. So use the value from the IP layer.
   4350 			 */
   4351 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
   4352 			flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
   4353 #else
   4354 			flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
   4355 #endif
   4356 		}
   4357 		flowlabel &= 0x000fffff;
   4358 		len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
   4359 		if (port) {
   4360 			len += sizeof(struct udphdr);
   4361 		}
   4362 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
   4363 		if (newm == NULL) {
   4364 			sctp_m_freem(m);
   4365 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   4366 			return (ENOMEM);
   4367 		}
   4368 		SCTP_ALIGN_TO_END(newm, len);
   4369 		SCTP_BUF_LEN(newm) = len;
   4370 		SCTP_BUF_NEXT(newm) = m;
   4371 		m = newm;
   4372 #if defined(__FreeBSD__)
   4373 		if (net != NULL) {
   4374 #ifdef INVARIANTS
   4375 			if (net->flowidset == 0) {
   4376 				panic("Flow ID not set");
   4377 			}
   4378 #endif
   4379 			m->m_pkthdr.flowid = net->flowid;
   4380 			m->m_flags |= M_FLOWID;
   4381 		} else {
   4382 			if (use_mflowid != 0) {
   4383 				m->m_pkthdr.flowid = mflowid;
   4384 				m->m_flags |= M_FLOWID;
   4385 			}
   4386 		}
   4387 #endif
   4388 		packet_length = sctp_calculate_len(m);
   4389 
   4390 		ip6h = mtod(m, struct ip6_hdr *);
   4391 		/* protect *sin6 from overwrite */
   4392 		sin6 = (struct sockaddr_in6 *)to;
   4393 		tmp = *sin6;
   4394 		sin6 = &tmp;
   4395 
   4396 #ifdef SCTP_EMBEDDED_V6_SCOPE
   4397 		/* KAME hack: embed scopeid */
   4398 #if defined(__APPLE__)
   4399 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
   4400 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
   4401 #else
   4402 		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
   4403 #endif
   4404 #elif defined(SCTP_KAME)
   4405 		if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
   4406 #else
   4407 		if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
   4408 #endif
   4409 		{
   4410 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   4411 			return (EINVAL);
   4412 		}
   4413 #endif /* SCTP_EMBEDDED_V6_SCOPE */
   4414 		if (net == NULL) {
   4415 			memset(&ip6route, 0, sizeof(ip6route));
   4416 			ro = (sctp_route_t *)&ip6route;
   4417 #ifdef HAVE_SIN6_LEN
   4418 			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
   4419 #else
   4420 			memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
   4421 #endif
   4422 		} else {
   4423 			ro = (sctp_route_t *)&net->ro;
   4424 		}
   4425 		/*
   4426 		 * We assume here that inp_flow is in host byte order within
   4427 		 * the TCB!
   4428 		 */
   4429 		if (tos_value == 0) {
   4430 			/*
   4431 			 * This means especially, that it is not set at the
   4432 			 * SCTP layer. So use the value from the IP layer.
   4433 			 */
   4434 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
   4435 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
   4436 			tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
   4437 #else
   4438 			tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
   4439 #endif
   4440 #endif
   4441 		}
   4442 		tos_value &= 0xfc;
   4443 		if (ecn_ok) {
   4444 			tos_value |= sctp_get_ect(stcb);
   4445 		}
   4446 		flowinfo = 0x06;
   4447 		flowinfo <<= 8;
   4448 		flowinfo |= tos_value;
   4449 		flowinfo <<= 20;
   4450 		flowinfo |= flowlabel;
   4451 		ip6h->ip6_flow = htonl(flowinfo);
   4452 		if (port) {
   4453 			ip6h->ip6_nxt = IPPROTO_UDP;
   4454 		} else {
   4455 			ip6h->ip6_nxt = IPPROTO_SCTP;
   4456 		}
   4457 		ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
   4458 		ip6h->ip6_dst = sin6->sin6_addr;
   4459 
   4460 		/*
   4461 		 * Add SRC address selection here: we can only reuse to a
   4462 		 * limited degree the kame src-addr-sel, since we can try
   4463 		 * their selection but it may not be bound.
   4464 		 */
   4465 		bzero(&lsa6_tmp, sizeof(lsa6_tmp));
   4466 		lsa6_tmp.sin6_family = AF_INET6;
   4467 #ifdef HAVE_SIN6_LEN
   4468 		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
   4469 #endif
   4470 		lsa6 = &lsa6_tmp;
   4471 		if (net && out_of_asoc_ok == 0) {
   4472 			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
   4473 				sctp_free_ifa(net->ro._s_addr);
   4474 				net->ro._s_addr = NULL;
   4475 				net->src_addr_selected = 0;
   4476 				if (ro->ro_rt) {
   4477 					RTFREE(ro->ro_rt);
   4478 					ro->ro_rt = NULL;
   4479 				}
   4480 			}
   4481 			if (net->src_addr_selected == 0) {
   4482 #ifdef SCTP_EMBEDDED_V6_SCOPE
   4483 				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
   4484 				/* KAME hack: embed scopeid */
   4485 #if defined(__APPLE__)
   4486 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
   4487 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
   4488 #else
   4489 				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
   4490 #endif
   4491 #elif defined(SCTP_KAME)
   4492 				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
   4493 #else
   4494 				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
   4495 #endif
   4496 				{
   4497 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   4498 					return (EINVAL);
   4499 				}
   4500 #endif /* SCTP_EMBEDDED_V6_SCOPE */
   4501 				/* Cache the source address */
   4502 				net->ro._s_addr = sctp_source_address_selection(inp,
   4503 										stcb,
   4504 										ro,
   4505 										net,
   4506 										0,
   4507 										vrf_id);
   4508 #ifdef SCTP_EMBEDDED_V6_SCOPE
   4509 #ifdef SCTP_KAME
   4510 				(void)sa6_recoverscope(sin6);
   4511 #else
   4512 				(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
   4513 #endif	/* SCTP_KAME */
   4514 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
   4515 				net->src_addr_selected = 1;
   4516 			}
   4517 			if (net->ro._s_addr == NULL) {
   4518 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
   4519 				net->src_addr_selected = 0;
   4520 				sctp_handle_no_route(stcb, net, so_locked);
   4521 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
   4522 				sctp_m_freem(m);
   4523 				return (EHOSTUNREACH);
   4524 			}
   4525 			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
   4526 		} else {
   4527 #ifdef SCTP_EMBEDDED_V6_SCOPE
   4528 			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
   4529 			/* KAME hack: embed scopeid */
   4530 #if defined(__APPLE__)
   4531 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
   4532 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
   4533 #else
   4534 			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
   4535 #endif
   4536 #elif defined(SCTP_KAME)
   4537 			if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
   4538 #else
   4539 			if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
   4540 #endif
   4541 			  {
   4542 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   4543 				return (EINVAL);
   4544 			  }
   4545 #endif /* SCTP_EMBEDDED_V6_SCOPE */
   4546 			if (over_addr == NULL) {
   4547 				struct sctp_ifa *_lsrc;
   4548 
   4549 				_lsrc = sctp_source_address_selection(inp, stcb, ro,
   4550 				                                      net,
   4551 				                                      out_of_asoc_ok,
   4552 				                                      vrf_id);
   4553 				if (_lsrc == NULL) {
   4554 					sctp_handle_no_route(stcb, net, so_locked);
   4555 					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
   4556 					sctp_m_freem(m);
   4557 					return (EHOSTUNREACH);
   4558 				}
   4559 				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
   4560 				sctp_free_ifa(_lsrc);
   4561 			} else {
   4562 				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
   4563 				SCTP_RTALLOC(ro, vrf_id);
   4564 			}
   4565 #ifdef SCTP_EMBEDDED_V6_SCOPE
   4566 #ifdef SCTP_KAME
   4567 			(void)sa6_recoverscope(sin6);
   4568 #else
   4569 			(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
   4570 #endif	/* SCTP_KAME */
   4571 #endif	/* SCTP_EMBEDDED_V6_SCOPE */
   4572 		}
   4573 		lsa6->sin6_port = inp->sctp_lport;
   4574 
   4575 		if (ro->ro_rt == NULL) {
   4576 			/*
   4577 			 * src addr selection failed to find a route (or
   4578 			 * valid source addr), so we can't get there from
   4579 			 * here!
   4580 			 */
   4581 			sctp_handle_no_route(stcb, net, so_locked);
   4582 			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
   4583 			sctp_m_freem(m);
   4584 			return (EHOSTUNREACH);
   4585 		}
   4586 #ifndef SCOPEDROUTING
   4587 #ifdef SCTP_EMBEDDED_V6_SCOPE
   4588 		/*
   4589 		 * XXX: sa6 may not have a valid sin6_scope_id in the
   4590 		 * non-SCOPEDROUTING case.
   4591 		 */
   4592 		bzero(&lsa6_storage, sizeof(lsa6_storage));
   4593 		lsa6_storage.sin6_family = AF_INET6;
   4594 #ifdef HAVE_SIN6_LEN
   4595 		lsa6_storage.sin6_len = sizeof(lsa6_storage);
   4596 #endif
   4597 #ifdef SCTP_KAME
   4598 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
   4599 		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
   4600 #else
   4601 		if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
   4602 		    NULL)) != 0) {
   4603 #endif				/* SCTP_KAME */
   4604 			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
   4605 			sctp_m_freem(m);
   4606 			return (error);
   4607 		}
   4608 		/* XXX */
   4609 		lsa6_storage.sin6_addr = lsa6->sin6_addr;
   4610 		lsa6_storage.sin6_port = inp->sctp_lport;
   4611 		lsa6 = &lsa6_storage;
   4612 #endif /* SCTP_EMBEDDED_V6_SCOPE */
   4613 #endif /* SCOPEDROUTING */
   4614 		ip6h->ip6_src = lsa6->sin6_addr;
   4615 
   4616 		if (port) {
   4617 			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
   4618 				sctp_handle_no_route(stcb, net, so_locked);
   4619 				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
   4620 				sctp_m_freem(m);
   4621 				return (EHOSTUNREACH);
   4622 			}
   4623 			udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
   4624 			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
   4625 			udp->uh_dport = port;
   4626 			udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
   4627 			udp->uh_sum = 0;
   4628 			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
   4629 		} else {
   4630 			sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
   4631 		}
   4632 
   4633 		sctphdr->src_port = src_port;
   4634 		sctphdr->dest_port = dest_port;
   4635 		sctphdr->v_tag = v_tag;
   4636 		sctphdr->checksum = 0;
   4637 
   4638 		/*
   4639 		 * We set the hop limit now since there is a good chance
   4640 		 * that our ro pointer is now filled
   4641 		 */
   4642 		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
   4643 #if !(defined(__Panda__) || defined(__Userspace__))
   4644 		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
   4645 #endif
   4646 
   4647 #ifdef SCTP_DEBUG
   4648 		/* Copy to be sure something bad is not happening */
   4649 		sin6->sin6_addr = ip6h->ip6_dst;
   4650 		lsa6->sin6_addr = ip6h->ip6_src;
   4651 #endif
   4652 
   4653 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
   4654 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
   4655 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
   4656 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
   4657 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
   4658 		if (net) {
   4659 			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
   4660 			/* preserve the port and scope for link local send */
   4661 			prev_scope = sin6->sin6_scope_id;
   4662 			prev_port = sin6->sin6_port;
   4663 		}
   4664 
   4665 		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
   4666 			/* failed to prepend data, give up */
   4667 			sctp_m_freem(m);
   4668 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   4669 			return (ENOMEM);
   4670 		}
   4671 		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
   4672 		if (port) {
   4673 #if defined(SCTP_WITH_NO_CSUM)
   4674 			SCTP_STAT_INCR(sctps_sendnocrc);
   4675 #else
   4676 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
   4677 			SCTP_STAT_INCR(sctps_sendswcrc);
   4678 #endif
   4679 #if defined(__Windows__)
   4680 			udp->uh_sum = 0;
   4681 #elif !defined(__Userspace__)
   4682 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
   4683 				udp->uh_sum = 0xffff;
   4684 			}
   4685 #endif
   4686 		} else {
   4687 #if defined(SCTP_WITH_NO_CSUM)
   4688 			SCTP_STAT_INCR(sctps_sendnocrc);
   4689 #else
   4690 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
   4691 #if __FreeBSD_version < 900000
   4692 			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
   4693 			SCTP_STAT_INCR(sctps_sendswcrc);
   4694 #else
   4695 #if __FreeBSD_version > 901000
   4696 			m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
   4697 #else
   4698 			m->m_pkthdr.csum_flags = CSUM_SCTP;
   4699 #endif
   4700 			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
   4701 			SCTP_STAT_INCR(sctps_sendhwcrc);
   4702 #endif
   4703 #else
   4704 			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
   4705 			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
   4706 				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
   4707 				SCTP_STAT_INCR(sctps_sendswcrc);
   4708 			} else {
   4709 				SCTP_STAT_INCR(sctps_sendnocrc);
   4710 			}
   4711 #endif
   4712 #endif
   4713 		}
   4714 		/* send it out. table id is taken from stcb */
   4715 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   4716 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
   4717 			so = SCTP_INP_SO(inp);
   4718 			SCTP_SOCKET_UNLOCK(so, 0);
   4719 		}
   4720 #endif
   4721 #ifdef SCTP_PACKET_LOGGING
   4722 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
   4723 			sctp_packet_log(o_pak);
   4724 #endif
   4725 #if !(defined(__Panda__) || defined(__Userspace__))
   4726 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
   4727 #else
   4728 		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
   4729 #endif
   4730 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   4731 		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
   4732 			atomic_add_int(&stcb->asoc.refcnt, 1);
   4733 			SCTP_TCB_UNLOCK(stcb);
   4734 			SCTP_SOCKET_LOCK(so, 0);
   4735 			SCTP_TCB_LOCK(stcb);
   4736 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
   4737 		}
   4738 #endif
   4739 		if (net) {
   4740 			/* for link local this must be done */
   4741 			sin6->sin6_scope_id = prev_scope;
   4742 			sin6->sin6_port = prev_port;
   4743 		}
   4744 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
   4745 		SCTP_STAT_INCR(sctps_sendpackets);
   4746 		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
   4747 		if (ret) {
   4748 			SCTP_STAT_INCR(sctps_senderrors);
   4749 		}
   4750 		if (net == NULL) {
   4751 			/* Now if we had a temp route free it */
   4752 #if defined(__FreeBSD__) && __FreeBSD_version > 901000
   4753 			RO_RTFREE(ro);
   4754 #else
   4755 			if (ro->ro_rt) {
   4756 				RTFREE(ro->ro_rt);
   4757 				ro->ro_rt = NULL;
   4758 			}
   4759 #endif
   4760 		} else {
   4761 			/* PMTU check versus smallest asoc MTU goes here */
   4762 			if (ro->ro_rt == NULL) {
   4763 				/* Route was freed */
   4764 				if (net->ro._s_addr &&
   4765 				    net->src_addr_selected) {
   4766 					sctp_free_ifa(net->ro._s_addr);
   4767 					net->ro._s_addr = NULL;
   4768 				}
   4769 				net->src_addr_selected = 0;
   4770 			}
   4771 			if ((ro->ro_rt != NULL) &&
   4772 			    (net->ro._s_addr)) {
   4773 				uint32_t mtu;
   4774 				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
   4775 				if (mtu &&
   4776 				    (stcb->asoc.smallest_mtu > mtu)) {
   4777 					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
   4778 					net->mtu = mtu;
   4779 					if (net->port) {
   4780 						net->mtu -= sizeof(struct udphdr);
   4781 					}
   4782 				}
   4783 			}
   4784 #if !defined(__Panda__) && !defined(__Userspace__)
   4785 			else if (ifp) {
   4786 #if defined(__Windows__)
   4787 #define ND_IFINFO(ifp)	(ifp)
   4788 #define linkmtu		if_mtu
   4789 #endif
   4790 				if (ND_IFINFO(ifp)->linkmtu &&
   4791 				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
   4792 					sctp_mtu_size_reset(inp,
   4793 					    &stcb->asoc,
   4794 					    ND_IFINFO(ifp)->linkmtu);
   4795 				}
   4796 			}
   4797 #endif
   4798 		}
   4799 		return (ret);
   4800 	}
   4801 #endif
   4802 #if defined(__Userspace__)
   4803 	case AF_CONN:
   4804 	{
   4805 		char *buffer;
   4806 		struct sockaddr_conn *sconn;
   4807 		int len;
   4808 
   4809 		sconn = (struct sockaddr_conn *)to;
   4810 		len = sizeof(struct sctphdr);
   4811 		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
   4812 		if (newm == NULL) {
   4813 			sctp_m_freem(m);
   4814 			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   4815 			return (ENOMEM);
   4816 		}
   4817 		SCTP_ALIGN_TO_END(newm, len);
   4818 		SCTP_BUF_LEN(newm) = len;
   4819 		SCTP_BUF_NEXT(newm) = m;
   4820 		m = newm;
   4821 		packet_length = sctp_calculate_len(m);
   4822 		sctphdr = mtod(m, struct sctphdr *);
   4823 		sctphdr->src_port = src_port;
   4824 		sctphdr->dest_port = dest_port;
   4825 		sctphdr->v_tag = v_tag;
   4826 		sctphdr->checksum = 0;
   4827 #if defined(SCTP_WITH_NO_CSUM)
   4828 		SCTP_STAT_INCR(sctps_sendnocrc);
   4829 #else
   4830 		sctphdr->checksum = sctp_calculate_cksum(m, 0);
   4831 		SCTP_STAT_INCR(sctps_sendswcrc);
   4832 #endif
   4833 		if (tos_value == 0) {
   4834 			tos_value = inp->ip_inp.inp.inp_ip_tos;
   4835 		}
   4836 		tos_value &= 0xfc;
   4837 		if (ecn_ok) {
   4838 			tos_value |= sctp_get_ect(stcb);
   4839 		}
   4840 		/* Don't alloc/free for each packet */
   4841 		if ((buffer = malloc(packet_length)) != NULL) {
   4842 			m_copydata(m, 0, packet_length, buffer);
   4843 			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
   4844 			free(buffer);
   4845 		} else {
   4846 			ret = ENOMEM;
   4847 		}
   4848 		sctp_m_freem(m);
   4849 		return (ret);
   4850 	}
   4851 #endif
   4852 	default:
   4853 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
   4854 		        ((struct sockaddr *)to)->sa_family);
   4855 		sctp_m_freem(m);
   4856 		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
   4857 		return (EFAULT);
   4858 	}
   4859 }
   4860 
   4861 
   4862 void
   4863 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
   4864 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   4865     SCTP_UNUSED
   4866 #endif
   4867     )
   4868 {
   4869 	struct mbuf *m;
   4870 	struct sctp_nets *net;
   4871 	struct sctp_init_chunk *init;
   4872 	struct sctp_supported_addr_param *sup_addr;
   4873 	struct sctp_adaptation_layer_indication *ali;
   4874 	struct sctp_supported_chunk_types_param *pr_supported;
   4875 	struct sctp_paramhdr *ph;
   4876 	int cnt_inits_to = 0;
   4877 	int ret;
   4878 	uint16_t num_ext, chunk_len, padding_len, parameter_len;
   4879 
   4880 #if defined(__APPLE__)
   4881 	if (so_locked) {
   4882 		sctp_lock_assert(SCTP_INP_SO(inp));
   4883 	} else {
   4884 		sctp_unlock_assert(SCTP_INP_SO(inp));
   4885 	}
   4886 #endif
   4887 	/* INIT's always go to the primary (and usually ONLY address) */
   4888 	net = stcb->asoc.primary_destination;
   4889 	if (net == NULL) {
   4890 		net = TAILQ_FIRST(&stcb->asoc.nets);
   4891 		if (net == NULL) {
   4892 			/* TSNH */
   4893 			return;
   4894 		}
   4895 		/* we confirm any address we send an INIT to */
   4896 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
   4897 		(void)sctp_set_primary_addr(stcb, NULL, net);
   4898 	} else {
   4899 		/* we confirm any address we send an INIT to */
   4900 		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
   4901 	}
   4902 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
   4903 #ifdef INET6
   4904 	if (net->ro._l_addr.sa.sa_family == AF_INET6) {
   4905 		/*
   4906 		 * special hook, if we are sending to link local it will not
   4907 		 * show up in our private address count.
   4908 		 */
   4909 		if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
   4910 			cnt_inits_to = 1;
   4911 	}
   4912 #endif
   4913 	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   4914 		/* This case should not happen */
   4915 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
   4916 		return;
   4917 	}
   4918 	/* start the INIT timer */
   4919 	sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
   4920 
   4921 	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
   4922 	if (m == NULL) {
   4923 		/* No memory, INIT timer will re-attempt. */
   4924 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
   4925 		return;
   4926 	}
   4927 	chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
   4928 	padding_len = 0;
   4929 	/*
   4930 	 * assume peer supports asconf in order to be able to queue
   4931 	 * local address changes while an INIT is in flight and before
   4932 	 * the assoc is established.
   4933 	 */
   4934 	stcb->asoc.peer_supports_asconf = 1;
   4935 	/* Now lets put the chunk header in place */
   4936 	init = mtod(m, struct sctp_init_chunk *);
   4937 	/* now the chunk header */
   4938 	init->ch.chunk_type = SCTP_INITIATION;
   4939 	init->ch.chunk_flags = 0;
   4940 	/* fill in later from mbuf we build */
   4941 	init->ch.chunk_length = 0;
   4942 	/* place in my tag */
   4943 	init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
   4944 	/* set up some of the credits. */
   4945 	init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
   4946 	                              SCTP_MINIMAL_RWND));
   4947 	init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
   4948 	init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
   4949 	init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
   4950 
   4951 	if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
   4952 		uint8_t i;
   4953 
   4954 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
   4955 		if (stcb->asoc.scope.ipv4_addr_legal) {
   4956 			parameter_len += (uint16_t)sizeof(uint16_t);
   4957 		}
   4958 		if (stcb->asoc.scope.ipv6_addr_legal) {
   4959 			parameter_len += (uint16_t)sizeof(uint16_t);
   4960 		}
   4961 		sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
   4962 		sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
   4963 		sup_addr->ph.param_length = htons(parameter_len);
   4964 		i = 0;
   4965 		if (stcb->asoc.scope.ipv4_addr_legal) {
   4966 			sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
   4967 		}
   4968 		if (stcb->asoc.scope.ipv6_addr_legal) {
   4969 			sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
   4970 		}
   4971 		padding_len = 4 - 2 * i;
   4972 		chunk_len += parameter_len;
   4973 	}
   4974 
   4975 	/* Adaptation layer indication parameter */
   4976 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
   4977 		if (padding_len > 0) {
   4978 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
   4979 			chunk_len += padding_len;
   4980 			padding_len = 0;
   4981 		}
   4982 		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
   4983 		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
   4984 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
   4985 		ali->ph.param_length = htons(parameter_len);
   4986 		ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
   4987 		chunk_len += parameter_len;
   4988 	}
   4989 
   4990 	if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
   4991 		/* Add NAT friendly parameter. */
   4992 		if (padding_len > 0) {
   4993 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
   4994 			chunk_len += padding_len;
   4995 			padding_len = 0;
   4996 		}
   4997 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
   4998 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
   4999 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
   5000 		ph->param_length = htons(parameter_len);
   5001 		chunk_len += parameter_len;
   5002 	}
   5003 
   5004 	/* now any cookie time extensions */
   5005 	if (stcb->asoc.cookie_preserve_req) {
   5006 		struct sctp_cookie_perserve_param *cookie_preserve;
   5007 
   5008 		if (padding_len > 0) {
   5009 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
   5010 			chunk_len += padding_len;
   5011 			padding_len = 0;
   5012 		}
   5013 		parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
   5014 		cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
   5015 		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
   5016 		cookie_preserve->ph.param_length = htons(parameter_len);
   5017 		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
   5018 		stcb->asoc.cookie_preserve_req = 0;
   5019 		chunk_len += parameter_len;
   5020 	}
   5021 
   5022 	/* ECN parameter */
   5023 	if (stcb->asoc.ecn_allowed == 1) {
   5024 		if (padding_len > 0) {
   5025 			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
   5026 			chunk_len += padding_len;
   5027 			padding_len = 0;
   5028 		}
   5029 		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
   5030 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
   5031 		ph->param_type = htons(SCTP_ECN_CAPABLE);
   5032 		ph->param_length = htons(parameter_len);
   5033 		chunk_len += parameter_len;
   5034 	}
   5035 
   5036 	/* And now tell the peer we do support PR-SCTP. */
   5037 	if (padding_len > 0) {
   5038 		memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
   5039 		chunk_len += padding_len;
   5040 		padding_len = 0;
   5041 	}
   5042 	parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
   5043 	ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
   5044 	ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
   5045 	ph->param_length = htons(parameter_len);
   5046 	chunk_len += parameter_len;
   5047 
   5048 	/* And now tell the peer we do all the extensions */
   5049 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
   5050 	pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
   5051 	num_ext = 0;
   5052 	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
   5053 	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
   5054 	pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
   5055 	pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
   5056 	pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
   5057 	if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
   5058 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
   5059 	}
   5060 	if (stcb->asoc.sctp_nr_sack_on_off == 1) {
   5061 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
   5062 	}
   5063 	parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
   5064 	pr_supported->ph.param_length = htons(parameter_len);
   5065 	padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
   5066 	chunk_len += parameter_len;
   5067 
   5068 	/* add authentication parameters */
   5069 	if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
   5070 		/* attach RANDOM parameter, if available */
   5071 		if (stcb->asoc.authinfo.random != NULL) {
   5072 			struct sctp_auth_random *randp;
   5073 
   5074 			if (padding_len > 0) {
   5075 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
   5076 				chunk_len += padding_len;
   5077 				padding_len = 0;
   5078 			}
   5079 			randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
   5080 			parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
   5081 			/* random key already contains the header */
   5082 			memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
   5083 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
   5084 			chunk_len += parameter_len;
   5085 		}
   5086 		/* add HMAC_ALGO parameter */
   5087 		if ((stcb->asoc.local_hmacs != NULL) &&
   5088 		    (stcb->asoc.local_hmacs->num_algo > 0)) {
   5089 			struct sctp_auth_hmac_algo *hmacs;
   5090 
   5091 			if (padding_len > 0) {
   5092 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
   5093 				chunk_len += padding_len;
   5094 				padding_len = 0;
   5095 			}
   5096 			hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
   5097 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
   5098 			                           stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
   5099 			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
   5100 			hmacs->ph.param_length = htons(parameter_len);
   5101 			sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
   5102 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
   5103 			chunk_len += parameter_len;
   5104 		}
   5105 		/* add CHUNKS parameter */
   5106 		if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) {
   5107 			struct sctp_auth_chunk_list *chunks;
   5108 
   5109 			if (padding_len > 0) {
   5110 				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
   5111 				chunk_len += padding_len;
   5112 				padding_len = 0;
   5113 			}
   5114 			chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
   5115 			parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
   5116 			                           sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
   5117 			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
   5118 			chunks->ph.param_length = htons(parameter_len);
   5119 			sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
   5120 			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
   5121 			chunk_len += parameter_len;
   5122 		}
   5123 	}
   5124 	SCTP_BUF_LEN(m) = chunk_len;
   5125 
   5126 	/* now the addresses */
   5127 	/* To optimize this we could put the scoping stuff
   5128 	 * into a structure and remove the individual uint8's from
   5129 	 * the assoc structure. Then we could just sifa in the
   5130 	 * address within the stcb. But for now this is a quick
   5131 	 * hack to get the address stuff teased apart.
   5132 	 */
   5133 	sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len);
   5134 
   5135 	init->ch.chunk_length = htons(chunk_len);
   5136 	if (padding_len > 0) {
   5137 		struct mbuf *m_at, *mp_last;
   5138 
   5139 		mp_last = NULL;
   5140 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
   5141 			if (SCTP_BUF_NEXT(m_at) == NULL)
   5142 				mp_last = m_at;
   5143 		}
   5144 		if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) {
   5145 			sctp_m_freem(m);
   5146 			return;
   5147 		}
   5148 	}
   5149 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
   5150 	ret = sctp_lowlevel_chunk_output(inp, stcb, net,
   5151 	                                 (struct sockaddr *)&net->ro._l_addr,
   5152 	                                 m, 0, NULL, 0, 0, 0, 0,
   5153 	                                 inp->sctp_lport, stcb->rport, htonl(0),
   5154 	                                 net->port, NULL,
   5155 #if defined(__FreeBSD__)
   5156 	                                 0, 0,
   5157 #endif
   5158 	                                 so_locked);
   5159 	SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
   5160 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   5161 	(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
   5162 }
   5163 
   5164 struct mbuf *
   5165 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
   5166 	int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
   5167 {
   5168 	/*
   5169 	 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
   5170 	 * being equal to the beginning of the params i.e. (iphlen +
   5171 	 * sizeof(struct sctp_init_msg) parse through the parameters to the
   5172 	 * end of the mbuf verifying that all parameters are known.
   5173 	 *
   5174 	 * For unknown parameters build and return a mbuf with
   5175 	 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
   5176 	 * processing this chunk stop, and set *abort_processing to 1.
   5177 	 *
   5178 	 * By having param_offset be pre-set to where parameters begin it is
   5179 	 * hoped that this routine may be reused in the future by new
   5180 	 * features.
   5181 	 */
   5182 	struct sctp_paramhdr *phdr, params;
   5183 
   5184 	struct mbuf *mat, *op_err;
   5185 	char tempbuf[SCTP_PARAM_BUFFER_SIZE];
   5186 	int at, limit, pad_needed;
   5187 	uint16_t ptype, plen, padded_size;
   5188 	int err_at;
   5189 
   5190 	*abort_processing = 0;
   5191 	mat = in_initpkt;
   5192 	err_at = 0;
   5193 	limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
   5194 	at = param_offset;
   5195 	op_err = NULL;
   5196 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
   5197 	phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
   5198 	while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
   5199 		ptype = ntohs(phdr->param_type);
   5200 		plen = ntohs(phdr->param_length);
   5201 		if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
   5202 			/* wacked parameter */
   5203 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
   5204 			goto invalid_size;
   5205 		}
   5206 		limit -= SCTP_SIZE32(plen);
   5207 		/*-
   5208 		 * All parameters for all chunks that we know/understand are
   5209 		 * listed here. We process them other places and make
   5210 		 * appropriate stop actions per the upper bits. However this
   5211 		 * is the generic routine processor's can call to get back
   5212 		 * an operr.. to either incorporate (init-ack) or send.
   5213 		 */
   5214 		padded_size = SCTP_SIZE32(plen);
   5215 		switch (ptype) {
   5216 			/* Param's with variable size */
   5217 		case SCTP_HEARTBEAT_INFO:
   5218 		case SCTP_STATE_COOKIE:
   5219 		case SCTP_UNRECOG_PARAM:
   5220 		case SCTP_ERROR_CAUSE_IND:
   5221 			/* ok skip fwd */
   5222 			at += padded_size;
   5223 			break;
   5224 			/* Param's with variable size within a range */
   5225 		case SCTP_CHUNK_LIST:
   5226 		case SCTP_SUPPORTED_CHUNK_EXT:
   5227 			if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
   5228 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
   5229 				goto invalid_size;
   5230 			}
   5231 			at += padded_size;
   5232 			break;
   5233 		case SCTP_SUPPORTED_ADDRTYPE:
   5234 			if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
   5235 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
   5236 				goto invalid_size;
   5237 			}
   5238 			at += padded_size;
   5239 			break;
   5240 		case SCTP_RANDOM:
   5241 			if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
   5242 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
   5243 				goto invalid_size;
   5244 			}
   5245 			at += padded_size;
   5246 			break;
   5247 		case SCTP_SET_PRIM_ADDR:
   5248 		case SCTP_DEL_IP_ADDRESS:
   5249 		case SCTP_ADD_IP_ADDRESS:
   5250 			if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
   5251 			    (padded_size != sizeof(struct sctp_asconf_addr_param))) {
   5252 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
   5253 				goto invalid_size;
   5254 			}
   5255 			at += padded_size;
   5256 			break;
   5257 			/* Param's with a fixed size */
   5258 		case SCTP_IPV4_ADDRESS:
   5259 			if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
   5260 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
   5261 				goto invalid_size;
   5262 			}
   5263 			at += padded_size;
   5264 			break;
   5265 		case SCTP_IPV6_ADDRESS:
   5266 			if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
   5267 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
   5268 				goto invalid_size;
   5269 			}
   5270 			at += padded_size;
   5271 			break;
   5272 		case SCTP_COOKIE_PRESERVE:
   5273 			if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
   5274 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
   5275 				goto invalid_size;
   5276 			}
   5277 			at += padded_size;
   5278 			break;
   5279 		case SCTP_HAS_NAT_SUPPORT:
   5280 		  *nat_friendly = 1;
   5281 		  /* fall through */
   5282 		case SCTP_PRSCTP_SUPPORTED:
   5283 
   5284 			if (padded_size != sizeof(struct sctp_paramhdr)) {
   5285 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
   5286 				goto invalid_size;
   5287 			}
   5288 			at += padded_size;
   5289 			break;
   5290 		case SCTP_ECN_CAPABLE:
   5291 			if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
   5292 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
   5293 				goto invalid_size;
   5294 			}
   5295 			at += padded_size;
   5296 			break;
   5297 		case SCTP_ULP_ADAPTATION:
   5298 			if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
   5299 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen);
   5300 				goto invalid_size;
   5301 			}
   5302 			at += padded_size;
   5303 			break;
   5304 		case SCTP_SUCCESS_REPORT:
   5305 			if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
   5306 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
   5307 				goto invalid_size;
   5308 			}
   5309 			at += padded_size;
   5310 			break;
   5311 		case SCTP_HOSTNAME_ADDRESS:
   5312 		{
   5313 			/* We can NOT handle HOST NAME addresses!! */
   5314 			int l_len;
   5315 
   5316 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
   5317 			*abort_processing = 1;
   5318 			if (op_err == NULL) {
   5319 				/* Ok need to try to get a mbuf */
   5320 #ifdef INET6
   5321 				l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
   5322 #else
   5323 				l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
   5324 #endif
   5325 				l_len += plen;
   5326 				l_len += sizeof(struct sctp_paramhdr);
   5327 				op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
   5328 				if (op_err) {
   5329 					SCTP_BUF_LEN(op_err) = 0;
   5330 					/*
   5331 					 * pre-reserve space for ip and sctp
   5332 					 * header  and chunk hdr
   5333 					 */
   5334 #ifdef INET6
   5335 					SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
   5336 #else
   5337 					SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
   5338 #endif
   5339 					SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
   5340 					SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
   5341 				}
   5342 			}
   5343 			if (op_err) {
   5344 				/* If we have space */
   5345 				struct sctp_paramhdr s;
   5346 
   5347 				if (err_at % 4) {
   5348 					uint32_t cpthis = 0;
   5349 
   5350 					pad_needed = 4 - (err_at % 4);
   5351 					m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
   5352 					err_at += pad_needed;
   5353 				}
   5354 				s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
   5355 				s.param_length = htons(sizeof(s) + plen);
   5356 				m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
   5357 				err_at += sizeof(s);
   5358 				phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
   5359 				if (phdr == NULL) {
   5360 					sctp_m_freem(op_err);
   5361 					/*
   5362 					 * we are out of memory but we still
   5363 					 * need to have a look at what to do
   5364 					 * (the system is in trouble
   5365 					 * though).
   5366 					 */
   5367 					return (NULL);
   5368 				}
   5369 				m_copyback(op_err, err_at, plen, (caddr_t)phdr);
   5370 			}
   5371 			return (op_err);
   5372 			break;
   5373 		}
   5374 		default:
   5375 			/*
   5376 			 * we do not recognize the parameter figure out what
   5377 			 * we do.
   5378 			 */
   5379 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
   5380 			if ((ptype & 0x4000) == 0x4000) {
   5381 				/* Report bit is set?? */
   5382 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
   5383 				if (op_err == NULL) {
   5384 					int l_len;
   5385 					/* Ok need to try to get an mbuf */
   5386 #ifdef INET6
   5387 					l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
   5388 #else
   5389 					l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
   5390 #endif
   5391 					l_len += plen;
   5392 					l_len += sizeof(struct sctp_paramhdr);
   5393 					op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
   5394 					if (op_err) {
   5395 						SCTP_BUF_LEN(op_err) = 0;
   5396 #ifdef INET6
   5397 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
   5398 #else
   5399 						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
   5400 #endif
   5401 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
   5402 						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
   5403 					}
   5404 				}
   5405 				if (op_err) {
   5406 					/* If we have space */
   5407 					struct sctp_paramhdr s;
   5408 
   5409 					if (err_at % 4) {
   5410 						uint32_t cpthis = 0;
   5411 
   5412 						pad_needed = 4 - (err_at % 4);
   5413 						m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
   5414 						err_at += pad_needed;
   5415 					}
   5416 					s.param_type = htons(SCTP_UNRECOG_PARAM);
   5417 					s.param_length = htons(sizeof(s) + plen);
   5418 					m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
   5419 					err_at += sizeof(s);
   5420 					if (plen > sizeof(tempbuf)) {
   5421 						plen = sizeof(tempbuf);
   5422 					}
   5423 					phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
   5424 					if (phdr == NULL) {
   5425 						sctp_m_freem(op_err);
   5426 						/*
   5427 						 * we are out of memory but
   5428 						 * we still need to have a
   5429 						 * look at what to do (the
   5430 						 * system is in trouble
   5431 						 * though).
   5432 						 */
   5433 						op_err = NULL;
   5434 						goto more_processing;
   5435 					}
   5436 					m_copyback(op_err, err_at, plen, (caddr_t)phdr);
   5437 					err_at += plen;
   5438 				}
   5439 			}
   5440 		more_processing:
   5441 			if ((ptype & 0x8000) == 0x0000) {
   5442 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
   5443 				return (op_err);
   5444 			} else {
   5445 				/* skip this chunk and continue processing */
   5446 				SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
   5447 				at += SCTP_SIZE32(plen);
   5448 			}
   5449 			break;
   5450 
   5451 		}
   5452 		phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
   5453 	}
   5454 	return (op_err);
   5455  invalid_size:
   5456 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
   5457 	*abort_processing = 1;
   5458 	if ((op_err == NULL) && phdr) {
   5459 		int l_len;
   5460 #ifdef INET6
   5461 		l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
   5462 #else
   5463 		l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
   5464 #endif
   5465 		l_len += (2 * sizeof(struct sctp_paramhdr));
   5466 		op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
   5467 		if (op_err) {
   5468 			SCTP_BUF_LEN(op_err) = 0;
   5469 #ifdef INET6
   5470 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
   5471 #else
   5472 			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
   5473 #endif
   5474 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
   5475 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
   5476 		}
   5477 	}
   5478 	if ((op_err) && phdr) {
   5479 		struct sctp_paramhdr s;
   5480 
   5481 		if (err_at % 4) {
   5482 			uint32_t cpthis = 0;
   5483 
   5484 			pad_needed = 4 - (err_at % 4);
   5485 			m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
   5486 			err_at += pad_needed;
   5487 		}
   5488 		s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
   5489 		s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
   5490 		m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
   5491 		err_at += sizeof(s);
   5492 		/* Only copy back the p-hdr that caused the issue */
   5493 		m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
   5494 	}
   5495 	return (op_err);
   5496 }
   5497 
   5498 static int
   5499 sctp_are_there_new_addresses(struct sctp_association *asoc,
   5500     struct mbuf *in_initpkt, int offset, struct sockaddr *src)
   5501 {
   5502 	/*
   5503 	 * Given a INIT packet, look through the packet to verify that there
   5504 	 * are NO new addresses. As we go through the parameters add reports
   5505 	 * of any un-understood parameters that require an error.  Also we
   5506 	 * must return (1) to drop the packet if we see a un-understood
   5507 	 * parameter that tells us to drop the chunk.
   5508 	 */
   5509 	struct sockaddr *sa_touse;
   5510 	struct sockaddr *sa;
   5511 	struct sctp_paramhdr *phdr, params;
   5512 	uint16_t ptype, plen;
   5513 	uint8_t fnd;
   5514 	struct sctp_nets *net;
   5515 #ifdef INET
   5516 	struct sockaddr_in sin4, *sa4;
   5517 #endif
   5518 #ifdef INET6
   5519 	struct sockaddr_in6 sin6, *sa6;
   5520 #endif
   5521 
   5522 #ifdef INET
   5523 	memset(&sin4, 0, sizeof(sin4));
   5524 	sin4.sin_family = AF_INET;
   5525 #ifdef HAVE_SIN_LEN
   5526 	sin4.sin_len = sizeof(sin4);
   5527 #endif
   5528 #endif
   5529 #ifdef INET6
   5530 	memset(&sin6, 0, sizeof(sin6));
   5531 	sin6.sin6_family = AF_INET6;
   5532 #ifdef HAVE_SIN6_LEN
   5533 	sin6.sin6_len = sizeof(sin6);
   5534 #endif
   5535 #endif
   5536 	/* First what about the src address of the pkt ? */
   5537 	fnd = 0;
   5538 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   5539 		sa = (struct sockaddr *)&net->ro._l_addr;
   5540 		if (sa->sa_family == src->sa_family) {
   5541 #ifdef INET
   5542 			if (sa->sa_family == AF_INET) {
   5543 				struct sockaddr_in *src4;
   5544 
   5545 				sa4 = (struct sockaddr_in *)sa;
   5546 				src4 = (struct sockaddr_in *)src;
   5547 				if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
   5548 					fnd = 1;
   5549 					break;
   5550 				}
   5551 			}
   5552 #endif
   5553 #ifdef INET6
   5554 			if (sa->sa_family == AF_INET6) {
   5555 				struct sockaddr_in6 *src6;
   5556 
   5557 				sa6 = (struct sockaddr_in6 *)sa;
   5558 				src6 = (struct sockaddr_in6 *)src;
   5559 				if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
   5560 					fnd = 1;
   5561 					break;
   5562 				}
   5563 			}
   5564 #endif
   5565 		}
   5566 	}
   5567 	if (fnd == 0) {
   5568 		/* New address added! no need to look futher. */
   5569 		return (1);
   5570 	}
   5571 	/* Ok so far lets munge through the rest of the packet */
   5572 	offset += sizeof(struct sctp_init_chunk);
   5573 	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
   5574 	while (phdr) {
   5575 		sa_touse = NULL;
   5576 		ptype = ntohs(phdr->param_type);
   5577 		plen = ntohs(phdr->param_length);
   5578 		switch (ptype) {
   5579 #ifdef INET
   5580 		case SCTP_IPV4_ADDRESS:
   5581 		{
   5582 			struct sctp_ipv4addr_param *p4, p4_buf;
   5583 
   5584 			phdr = sctp_get_next_param(in_initpkt, offset,
   5585 			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
   5586 			if (plen != sizeof(struct sctp_ipv4addr_param) ||
   5587 			    phdr == NULL) {
   5588 				return (1);
   5589 			}
   5590 			p4 = (struct sctp_ipv4addr_param *)phdr;
   5591 			sin4.sin_addr.s_addr = p4->addr;
   5592 			sa_touse = (struct sockaddr *)&sin4;
   5593 			break;
   5594 		}
   5595 #endif
   5596 #ifdef INET6
   5597 		case SCTP_IPV6_ADDRESS:
   5598 		{
   5599 			struct sctp_ipv6addr_param *p6, p6_buf;
   5600 
   5601 			phdr = sctp_get_next_param(in_initpkt, offset,
   5602 			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
   5603 			if (plen != sizeof(struct sctp_ipv6addr_param) ||
   5604 			    phdr == NULL) {
   5605 				return (1);
   5606 			}
   5607 			p6 = (struct sctp_ipv6addr_param *)phdr;
   5608 			memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
   5609 			    sizeof(p6->addr));
   5610 			sa_touse = (struct sockaddr *)&sin6;
   5611 			break;
   5612 		}
   5613 #endif
   5614 		default:
   5615 			sa_touse = NULL;
   5616 			break;
   5617 		}
   5618 		if (sa_touse) {
   5619 			/* ok, sa_touse points to one to check */
   5620 			fnd = 0;
   5621 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   5622 				sa = (struct sockaddr *)&net->ro._l_addr;
   5623 				if (sa->sa_family != sa_touse->sa_family) {
   5624 					continue;
   5625 				}
   5626 #ifdef INET
   5627 				if (sa->sa_family == AF_INET) {
   5628 					sa4 = (struct sockaddr_in *)sa;
   5629 					if (sa4->sin_addr.s_addr ==
   5630 					    sin4.sin_addr.s_addr) {
   5631 						fnd = 1;
   5632 						break;
   5633 					}
   5634 				}
   5635 #endif
   5636 #ifdef INET6
   5637 				if (sa->sa_family == AF_INET6) {
   5638 					sa6 = (struct sockaddr_in6 *)sa;
   5639 					if (SCTP6_ARE_ADDR_EQUAL(
   5640 					    sa6, &sin6)) {
   5641 						fnd = 1;
   5642 						break;
   5643 					}
   5644 				}
   5645 #endif
   5646 			}
   5647 			if (!fnd) {
   5648 				/* New addr added! no need to look further */
   5649 				return (1);
   5650 			}
   5651 		}
   5652 		offset += SCTP_SIZE32(plen);
   5653 		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
   5654 	}
   5655 	return (0);
   5656 }
   5657 
   5658 /*
   5659  * Given a MBUF chain that was sent into us containing an INIT. Build a
   5660  * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done
   5661  * a pullup to include IPv6/4header, SCTP header and initial part of INIT
   5662  * message (i.e. the struct sctp_init_msg).
   5663  */
   5664 void
   5665 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
   5666                        struct mbuf *init_pkt, int iphlen, int offset,
   5667                        struct sockaddr *src, struct sockaddr *dst,
   5668                        struct sctphdr *sh, struct sctp_init_chunk *init_chk,
   5669 #if defined(__FreeBSD__)
   5670 		       uint8_t use_mflowid, uint32_t mflowid,
   5671 #endif
   5672                        uint32_t vrf_id, uint16_t port, int hold_inp_lock)
   5673 {
   5674 	struct sctp_association *asoc;
   5675 	struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
   5676 	struct sctp_init_ack_chunk *initack;
   5677 	struct sctp_adaptation_layer_indication *ali;
   5678 	struct sctp_ecn_supported_param *ecn;
   5679 	struct sctp_prsctp_supported_param *prsctp;
   5680 	struct sctp_supported_chunk_types_param *pr_supported;
   5681 	union sctp_sockstore *over_addr;
   5682 #ifdef INET
   5683 	struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
   5684 	struct sockaddr_in *src4 = (struct sockaddr_in *)src;
   5685 	struct sockaddr_in *sin;
   5686 #endif
   5687 #ifdef INET6
   5688 	struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
   5689 	struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
   5690 	struct sockaddr_in6 *sin6;
   5691 #endif
   5692 #if defined(__Userspace__)
   5693 	struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
   5694 	struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
   5695 	struct sockaddr_conn *sconn;
   5696 #endif
   5697 	struct sockaddr *to;
   5698 	struct sctp_state_cookie stc;
   5699 	struct sctp_nets *net = NULL;
   5700 	uint8_t *signature = NULL;
   5701 	int cnt_inits_to = 0;
   5702 	uint16_t his_limit, i_want;
   5703 	int abort_flag, padval;
   5704 	int num_ext;
   5705 	int p_len;
   5706 	int nat_friendly = 0;
   5707 	struct socket *so;
   5708 
   5709 	if (stcb) {
   5710 		asoc = &stcb->asoc;
   5711 	} else {
   5712 		asoc = NULL;
   5713 	}
   5714 	mp_last = NULL;
   5715 	if ((asoc != NULL) &&
   5716 	    (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
   5717 	    (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) {
   5718 		/* new addresses, out of here in non-cookie-wait states */
   5719 		/*
   5720 		 * Send a ABORT, we don't add the new address error clause
   5721 		 * though we even set the T bit and copy in the 0 tag.. this
   5722 		 * looks no different than if no listener was present.
   5723 		 */
   5724 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
   5725 		                             "Address added");
   5726 		sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
   5727 #if defined(__FreeBSD__)
   5728 		                use_mflowid, mflowid,
   5729 #endif
   5730 		                vrf_id, port);
   5731 		return;
   5732 	}
   5733 	abort_flag = 0;
   5734 	op_err = sctp_arethere_unrecognized_parameters(init_pkt,
   5735 						       (offset + sizeof(struct sctp_init_chunk)),
   5736 						       &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
   5737 	if (abort_flag) {
   5738 	do_a_abort:
   5739 		if (op_err == NULL) {
   5740 			char msg[SCTP_DIAG_INFO_LEN];
   5741 
   5742 			snprintf(msg, sizeof(msg), "%s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
   5743 			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
   5744 			                             msg);
   5745 		}
   5746 		sctp_send_abort(init_pkt, iphlen, src, dst, sh,
   5747 				init_chk->init.initiate_tag, op_err,
   5748 #if defined(__FreeBSD__)
   5749 		                use_mflowid, mflowid,
   5750 #endif
   5751 		                vrf_id, port);
   5752 		return;
   5753 	}
   5754 	m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
   5755 	if (m == NULL) {
   5756 		/* No memory, INIT timer will re-attempt. */
   5757 		if (op_err)
   5758 			sctp_m_freem(op_err);
   5759 		return;
   5760 	}
   5761 	SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
   5762 
   5763 	/*
   5764 	 * We might not overwrite the identification[] completely and on
   5765 	 * some platforms time_entered will contain some padding.
   5766 	 * Therefore zero out the cookie to avoid putting
   5767 	 * uninitialized memory on the wire.
   5768 	 */
   5769 	memset(&stc, 0, sizeof(struct sctp_state_cookie));
   5770 
   5771 	/* the time I built cookie */
   5772 	(void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
   5773 
   5774 	/* populate any tie tags */
   5775 	if (asoc != NULL) {
   5776 		/* unlock before tag selections */
   5777 		stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
   5778 		stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
   5779 		stc.cookie_life = asoc->cookie_life;
   5780 		net = asoc->primary_destination;
   5781 	} else {
   5782 		stc.tie_tag_my_vtag = 0;
   5783 		stc.tie_tag_peer_vtag = 0;
   5784 		/* life I will award this cookie */
   5785 		stc.cookie_life = inp->sctp_ep.def_cookie_life;
   5786 	}
   5787 
   5788 	/* copy in the ports for later check */
   5789 	stc.myport = sh->dest_port;
   5790 	stc.peerport = sh->src_port;
   5791 
   5792 	/*
   5793 	 * If we wanted to honor cookie life extentions, we would add to
   5794 	 * stc.cookie_life. For now we should NOT honor any extension
   5795 	 */
   5796 	stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
   5797 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
   5798 		stc.ipv6_addr_legal = 1;
   5799 		if (SCTP_IPV6_V6ONLY(inp)) {
   5800 			stc.ipv4_addr_legal = 0;
   5801 		} else {
   5802 			stc.ipv4_addr_legal = 1;
   5803 		}
   5804 #if defined(__Userspace__)
   5805 		stc.conn_addr_legal = 0;
   5806 #endif
   5807 	} else {
   5808 		stc.ipv6_addr_legal = 0;
   5809 #if defined(__Userspace__)
   5810 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
   5811 			stc.conn_addr_legal = 1;
   5812 			stc.ipv4_addr_legal = 0;
   5813 		} else {
   5814 			stc.conn_addr_legal = 0;
   5815 			stc.ipv4_addr_legal = 1;
   5816 		}
   5817 #else
   5818 		stc.ipv4_addr_legal = 1;
   5819 #endif
   5820 	}
   5821 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
   5822 	stc.ipv4_scope = 1;
   5823 #else
   5824 	stc.ipv4_scope = 0;
   5825 #endif
   5826 	if (net == NULL) {
   5827 		to = src;
   5828 		switch (dst->sa_family) {
   5829 #ifdef INET
   5830 		case AF_INET:
   5831 		{
   5832 			/* lookup address */
   5833 			stc.address[0] = src4->sin_addr.s_addr;
   5834 			stc.address[1] = 0;
   5835 			stc.address[2] = 0;
   5836 			stc.address[3] = 0;
   5837 			stc.addr_type = SCTP_IPV4_ADDRESS;
   5838 			/* local from address */
   5839 			stc.laddress[0] = dst4->sin_addr.s_addr;
   5840 			stc.laddress[1] = 0;
   5841 			stc.laddress[2] = 0;
   5842 			stc.laddress[3] = 0;
   5843 			stc.laddr_type = SCTP_IPV4_ADDRESS;
   5844 			/* scope_id is only for v6 */
   5845 			stc.scope_id = 0;
   5846 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
   5847 			if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) {
   5848 				stc.ipv4_scope = 1;
   5849 			}
   5850 #else
   5851 			stc.ipv4_scope = 1;
   5852 #endif				/* SCTP_DONT_DO_PRIVADDR_SCOPE */
   5853 			/* Must use the address in this case */
   5854 			if (sctp_is_address_on_local_host(src, vrf_id)) {
   5855 				stc.loopback_scope = 1;
   5856 				stc.ipv4_scope = 1;
   5857 				stc.site_scope = 1;
   5858 				stc.local_scope = 0;
   5859 			}
   5860 			break;
   5861 		}
   5862 #endif
   5863 #ifdef INET6
   5864 		case AF_INET6:
   5865 		{
   5866 			stc.addr_type = SCTP_IPV6_ADDRESS;
   5867 			memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
   5868 #if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
   5869 			stc.scope_id = in6_getscope(&src6->sin6_addr);
   5870 #else
   5871 			stc.scope_id = 0;
   5872 #endif
   5873 			if (sctp_is_address_on_local_host(src, vrf_id)) {
   5874 				stc.loopback_scope = 1;
   5875 				stc.local_scope = 0;
   5876 				stc.site_scope = 1;
   5877 				stc.ipv4_scope = 1;
   5878 			} else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) {
   5879 				/*
   5880 				 * If the new destination is a LINK_LOCAL we
   5881 				 * must have common both site and local
   5882 				 * scope. Don't set local scope though since
   5883 				 * we must depend on the source to be added
   5884 				 * implicitly. We cannot assure just because
   5885 				 * we share one link that all links are
   5886 				 * common.
   5887 				 */
   5888 #if defined(__APPLE__)
   5889 				/* Mac OS X currently doesn't have in6_getscope() */
   5890 				stc.scope_id = src6->sin6_addr.s6_addr16[1];
   5891 #endif
   5892 				stc.local_scope = 0;
   5893 				stc.site_scope = 1;
   5894 				stc.ipv4_scope = 1;
   5895 				/*
   5896 				 * we start counting for the private address
   5897 				 * stuff at 1. since the link local we
   5898 				 * source from won't show up in our scoped
   5899 				 * count.
   5900 				 */
   5901 				cnt_inits_to = 1;
   5902 				/* pull out the scope_id from incoming pkt */
   5903 			} else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) {
   5904 				/*
   5905 				 * If the new destination is SITE_LOCAL then
   5906 				 * we must have site scope in common.
   5907 				 */
   5908 				stc.site_scope = 1;
   5909 			}
   5910 			memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
   5911 			stc.laddr_type = SCTP_IPV6_ADDRESS;
   5912 			break;
   5913 		}
   5914 #endif
   5915 #if defined(__Userspace__)
   5916 		case AF_CONN:
   5917 		{
   5918 			/* lookup address */
   5919 			stc.address[0] = 0;
   5920 			stc.address[1] = 0;
   5921 			stc.address[2] = 0;
   5922 			stc.address[3] = 0;
   5923 			memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
   5924 			stc.addr_type = SCTP_CONN_ADDRESS;
   5925 			/* local from address */
   5926 			stc.laddress[0] = 0;
   5927 			stc.laddress[1] = 0;
   5928 			stc.laddress[2] = 0;
   5929 			stc.laddress[3] = 0;
   5930 			memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
   5931 			stc.laddr_type = SCTP_CONN_ADDRESS;
   5932 			/* scope_id is only for v6 */
   5933 			stc.scope_id = 0;
   5934 			break;
   5935 		}
   5936 #endif
   5937 		default:
   5938 			/* TSNH */
   5939 			goto do_a_abort;
   5940 			break;
   5941 		}
   5942 	} else {
   5943 		/* set the scope per the existing tcb */
   5944 
   5945 #ifdef INET6
   5946 		struct sctp_nets *lnet;
   5947 #endif
   5948 
   5949 		stc.loopback_scope = asoc->scope.loopback_scope;
   5950 		stc.ipv4_scope = asoc->scope.ipv4_local_scope;
   5951 		stc.site_scope = asoc->scope.site_scope;
   5952 		stc.local_scope = asoc->scope.local_scope;
   5953 #ifdef INET6
   5954 		/* Why do we not consider IPv4 LL addresses? */
   5955 		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
   5956 			if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
   5957 				if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
   5958 					/*
   5959 					 * if we have a LL address, start
   5960 					 * counting at 1.
   5961 					 */
   5962 					cnt_inits_to = 1;
   5963 				}
   5964 			}
   5965 		}
   5966 #endif
   5967 		/* use the net pointer */
   5968 		to = (struct sockaddr *)&net->ro._l_addr;
   5969 		switch (to->sa_family) {
   5970 #ifdef INET
   5971 		case AF_INET:
   5972 			sin = (struct sockaddr_in *)to;
   5973 			stc.address[0] = sin->sin_addr.s_addr;
   5974 			stc.address[1] = 0;
   5975 			stc.address[2] = 0;
   5976 			stc.address[3] = 0;
   5977 			stc.addr_type = SCTP_IPV4_ADDRESS;
   5978 			if (net->src_addr_selected == 0) {
   5979 				/*
   5980 				 * strange case here, the INIT should have
   5981 				 * did the selection.
   5982 				 */
   5983 				net->ro._s_addr = sctp_source_address_selection(inp,
   5984 										stcb, (sctp_route_t *)&net->ro,
   5985 										net, 0, vrf_id);
   5986 				if (net->ro._s_addr == NULL)
   5987 					return;
   5988 
   5989 				net->src_addr_selected = 1;
   5990 
   5991 			}
   5992 			stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
   5993 			stc.laddress[1] = 0;
   5994 			stc.laddress[2] = 0;
   5995 			stc.laddress[3] = 0;
   5996 			stc.laddr_type = SCTP_IPV4_ADDRESS;
   5997 			/* scope_id is only for v6 */
   5998 			stc.scope_id = 0;
   5999 			break;
   6000 #endif
   6001 #ifdef INET6
   6002 		case AF_INET6:
   6003 			sin6 = (struct sockaddr_in6 *)to;
   6004 			memcpy(&stc.address, &sin6->sin6_addr,
   6005 			       sizeof(struct in6_addr));
   6006 			stc.addr_type = SCTP_IPV6_ADDRESS;
   6007 			stc.scope_id = sin6->sin6_scope_id;
   6008 			if (net->src_addr_selected == 0) {
   6009 				/*
   6010 				 * strange case here, the INIT should have
   6011 				 * done the selection.
   6012 				 */
   6013 				net->ro._s_addr = sctp_source_address_selection(inp,
   6014 										stcb, (sctp_route_t *)&net->ro,
   6015 										net, 0, vrf_id);
   6016 				if (net->ro._s_addr == NULL)
   6017 					return;
   6018 
   6019 				net->src_addr_selected = 1;
   6020 			}
   6021 			memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
   6022 			       sizeof(struct in6_addr));
   6023 			stc.laddr_type = SCTP_IPV6_ADDRESS;
   6024 			break;
   6025 #endif
   6026 #if defined(__Userspace__)
   6027 		case AF_CONN:
   6028 			sconn = (struct sockaddr_conn *)to;
   6029 			stc.address[0] = 0;
   6030 			stc.address[1] = 0;
   6031 			stc.address[2] = 0;
   6032 			stc.address[3] = 0;
   6033 			memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
   6034 			stc.addr_type = SCTP_CONN_ADDRESS;
   6035 			stc.laddress[0] = 0;
   6036 			stc.laddress[1] = 0;
   6037 			stc.laddress[2] = 0;
   6038 			stc.laddress[3] = 0;
   6039 			memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
   6040 			stc.laddr_type = SCTP_CONN_ADDRESS;
   6041 			stc.scope_id = 0;
   6042 			break;
   6043 #endif
   6044 		}
   6045 	}
   6046 	/* Now lets put the SCTP header in place */
   6047 	initack = mtod(m, struct sctp_init_ack_chunk *);
   6048 	/* Save it off for quick ref */
   6049 	stc.peers_vtag = init_chk->init.initiate_tag;
   6050 	/* who are we */
   6051 	memcpy(stc.identification, SCTP_VERSION_STRING,
   6052 	       min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
   6053 	memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
   6054 	/* now the chunk header */
   6055 	initack->ch.chunk_type = SCTP_INITIATION_ACK;
   6056 	initack->ch.chunk_flags = 0;
   6057 	/* fill in later from mbuf we build */
   6058 	initack->ch.chunk_length = 0;
   6059 	/* place in my tag */
   6060 	if ((asoc != NULL) &&
   6061 	    ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
   6062 	     (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
   6063 	     (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
   6064 		/* re-use the v-tags and init-seq here */
   6065 		initack->init.initiate_tag = htonl(asoc->my_vtag);
   6066 		initack->init.initial_tsn = htonl(asoc->init_seq_number);
   6067 	} else {
   6068 		uint32_t vtag, itsn;
   6069 		if (hold_inp_lock) {
   6070 			SCTP_INP_INCR_REF(inp);
   6071 			SCTP_INP_RUNLOCK(inp);
   6072 		}
   6073 		if (asoc) {
   6074 			atomic_add_int(&asoc->refcnt, 1);
   6075 			SCTP_TCB_UNLOCK(stcb);
   6076 		new_tag:
   6077 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
   6078 			if ((asoc->peer_supports_nat)  && (vtag == asoc->my_vtag)) {
   6079 				/* Got a duplicate vtag on some guy behind a nat
   6080 				 * make sure we don't use it.
   6081 				 */
   6082 				goto new_tag;
   6083 			}
   6084 			initack->init.initiate_tag = htonl(vtag);
   6085 			/* get a TSN to use too */
   6086 			itsn = sctp_select_initial_TSN(&inp->sctp_ep);
   6087 			initack->init.initial_tsn = htonl(itsn);
   6088 			SCTP_TCB_LOCK(stcb);
   6089 			atomic_add_int(&asoc->refcnt, -1);
   6090 		} else {
   6091 			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
   6092 			initack->init.initiate_tag = htonl(vtag);
   6093 			/* get a TSN to use too */
   6094 			initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
   6095 		}
   6096 		if (hold_inp_lock) {
   6097 			SCTP_INP_RLOCK(inp);
   6098 			SCTP_INP_DECR_REF(inp);
   6099 		}
   6100 	}
   6101 	/* save away my tag to */
   6102 	stc.my_vtag = initack->init.initiate_tag;
   6103 
   6104 	/* set up some of the credits. */
   6105 	so = inp->sctp_socket;
   6106 	if (so == NULL) {
   6107 		/* memory problem */
   6108 		sctp_m_freem(m);
   6109 		return;
   6110 	} else {
   6111 		initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
   6112 	}
   6113 	/* set what I want */
   6114 	his_limit = ntohs(init_chk->init.num_inbound_streams);
   6115 	/* choose what I want */
   6116 	if (asoc != NULL) {
   6117 		if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
   6118 			i_want = asoc->streamoutcnt;
   6119 		} else {
   6120 			i_want = inp->sctp_ep.pre_open_stream_count;
   6121 		}
   6122 	} else {
   6123 		i_want = inp->sctp_ep.pre_open_stream_count;
   6124 	}
   6125 	if (his_limit < i_want) {
   6126 		/* I Want more :< */
   6127 		initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
   6128 	} else {
   6129 		/* I can have what I want :> */
   6130 		initack->init.num_outbound_streams = htons(i_want);
   6131 	}
   6132 	/* tell him his limit. */
   6133 	initack->init.num_inbound_streams =
   6134 		htons(inp->sctp_ep.max_open_streams_intome);
   6135 
   6136 	/* adaptation layer indication parameter */
   6137 	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
   6138 		ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
   6139 		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
   6140 		ali->ph.param_length = htons(sizeof(*ali));
   6141 		ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
   6142 		SCTP_BUF_LEN(m) += sizeof(*ali);
   6143 		ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
   6144 	} else {
   6145 		ecn = (struct sctp_ecn_supported_param *)((caddr_t)initack + sizeof(*initack));
   6146 	}
   6147 
   6148 	/* ECN parameter */
   6149 	if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
   6150 	    (inp->sctp_ecn_enable == 1)) {
   6151 		ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
   6152 		ecn->ph.param_length = htons(sizeof(*ecn));
   6153 		SCTP_BUF_LEN(m) += sizeof(*ecn);
   6154 
   6155 		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
   6156 								sizeof(*ecn));
   6157 	} else {
   6158 		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
   6159 	}
   6160 	/* And now tell the peer we do  pr-sctp */
   6161 	prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
   6162 	prsctp->ph.param_length = htons(sizeof(*prsctp));
   6163 	SCTP_BUF_LEN(m) += sizeof(*prsctp);
   6164 	if (nat_friendly) {
   6165 		/* Add NAT friendly parameter */
   6166 		struct sctp_paramhdr *ph;
   6167 
   6168 		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
   6169 		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
   6170 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
   6171 		SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
   6172 	}
   6173 	/* And now tell the peer we do all the extensions */
   6174 	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
   6175 	pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
   6176 	num_ext = 0;
   6177 	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
   6178 	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
   6179 	pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
   6180 	pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
   6181 	pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
   6182 	if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
   6183 		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
   6184 	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
   6185 		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
   6186 	p_len = sizeof(*pr_supported) + num_ext;
   6187 	pr_supported->ph.param_length = htons(p_len);
   6188 	bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
   6189 	SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
   6190 
   6191 	/* add authentication parameters */
   6192 	if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
   6193 		struct sctp_auth_random *randp;
   6194 		struct sctp_auth_hmac_algo *hmacs;
   6195 		struct sctp_auth_chunk_list *chunks;
   6196 		uint16_t random_len;
   6197 
   6198 		/* generate and add RANDOM parameter */
   6199 		random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
   6200 		randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
   6201 		randp->ph.param_type = htons(SCTP_RANDOM);
   6202 		p_len = sizeof(*randp) + random_len;
   6203 		randp->ph.param_length = htons(p_len);
   6204 		SCTP_READ_RANDOM(randp->random_data, random_len);
   6205 		/* zero out any padding required */
   6206 		bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
   6207 		SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
   6208 
   6209 		/* add HMAC_ALGO parameter */
   6210 		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
   6211 		p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
   6212 						(uint8_t *) hmacs->hmac_ids);
   6213 		if (p_len > 0) {
   6214 			p_len += sizeof(*hmacs);
   6215 			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
   6216 			hmacs->ph.param_length = htons(p_len);
   6217 			/* zero out any padding required */
   6218 			bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
   6219 			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
   6220 		}
   6221 		/* add CHUNKS parameter */
   6222 		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m));
   6223 		p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
   6224 						   chunks->chunk_types);
   6225 		if (p_len > 0) {
   6226 			p_len += sizeof(*chunks);
   6227 			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
   6228 			chunks->ph.param_length = htons(p_len);
   6229 			/* zero out any padding required */
   6230 			bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
   6231 			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
   6232 		}
   6233 	}
   6234 	m_at = m;
   6235 	/* now the addresses */
   6236 	{
   6237 		struct sctp_scoping scp;
   6238 		/* To optimize this we could put the scoping stuff
   6239 		 * into a structure and remove the individual uint8's from
   6240 		 * the stc structure. Then we could just sifa in the
   6241 		 * address within the stc.. but for now this is a quick
   6242 		 * hack to get the address stuff teased apart.
   6243 		 */
   6244  		scp.ipv4_addr_legal = stc.ipv4_addr_legal;
   6245 		scp.ipv6_addr_legal = stc.ipv6_addr_legal;
   6246 #if defined(__Userspace__)
   6247 		scp.conn_addr_legal = stc.conn_addr_legal;
   6248 #endif
   6249 		scp.loopback_scope = stc.loopback_scope;
   6250 		scp.ipv4_local_scope = stc.ipv4_scope;
   6251 		scp.local_scope = stc.local_scope;
   6252 		scp.site_scope = stc.site_scope;
   6253 		m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL);
   6254 	}
   6255 
   6256 	/* tack on the operational error if present */
   6257 	if (op_err) {
   6258 		struct mbuf *ol;
   6259 		int llen;
   6260 		llen = 0;
   6261 		ol = op_err;
   6262 
   6263 		while (ol) {
   6264 			llen += SCTP_BUF_LEN(ol);
   6265 			ol = SCTP_BUF_NEXT(ol);
   6266 		}
   6267 		if (llen % 4) {
   6268 			/* must add a pad to the param */
   6269 			uint32_t cpthis = 0;
   6270 			int padlen;
   6271 
   6272 			padlen = 4 - (llen % 4);
   6273 			m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
   6274 		}
   6275 		while (SCTP_BUF_NEXT(m_at) != NULL) {
   6276 			m_at = SCTP_BUF_NEXT(m_at);
   6277 		}
   6278 		SCTP_BUF_NEXT(m_at) = op_err;
   6279 		while (SCTP_BUF_NEXT(m_at) != NULL) {
   6280 			m_at = SCTP_BUF_NEXT(m_at);
   6281 		}
   6282 	}
   6283 	/* pre-calulate the size and update pkt header and chunk header */
   6284 	p_len = 0;
   6285 	for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
   6286 		p_len += SCTP_BUF_LEN(m_tmp);
   6287 		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
   6288 			/* m_tmp should now point to last one */
   6289 			break;
   6290 		}
   6291 	}
   6292 
   6293 	/* Now we must build a cookie */
   6294 	m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
   6295 	if (m_cookie == NULL) {
   6296 		/* memory problem */
   6297 		sctp_m_freem(m);
   6298 		return;
   6299 	}
   6300 	/* Now append the cookie to the end and update the space/size */
   6301 	SCTP_BUF_NEXT(m_tmp) = m_cookie;
   6302 
   6303 	for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
   6304 		p_len += SCTP_BUF_LEN(m_tmp);
   6305 		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
   6306 			/* m_tmp should now point to last one */
   6307 			mp_last = m_tmp;
   6308 			break;
   6309 		}
   6310 	}
   6311 	/* Place in the size, but we don't include
   6312 	 * the last pad (if any) in the INIT-ACK.
   6313 	 */
   6314 	initack->ch.chunk_length = htons(p_len);
   6315 
   6316 	/* Time to sign the cookie, we don't sign over the cookie
   6317 	 * signature though thus we set trailer.
   6318 	 */
   6319 	(void)sctp_hmac_m(SCTP_HMAC,
   6320 			  (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
   6321 			  SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
   6322 			  (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
   6323 	/*
   6324 	 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return
   6325 	 * here since the timer will drive a retranmission.
   6326 	 */
   6327 	padval = p_len % 4;
   6328 	if ((padval) && (mp_last)) {
   6329 		/* see my previous comments on mp_last */
   6330 		if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
   6331 			/* Houston we have a problem, no space */
   6332 			sctp_m_freem(m);
   6333 			return;
   6334 		}
   6335 	}
   6336 	if (stc.loopback_scope) {
   6337 		over_addr = (union sctp_sockstore *)dst;
   6338 	} else {
   6339 		over_addr = NULL;
   6340 	}
   6341 
   6342 	(void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
   6343 	                                 0, 0,
   6344 	                                 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
   6345 	                                 port, over_addr,
   6346 #if defined(__FreeBSD__)
   6347 	                                 use_mflowid, mflowid,
   6348 #endif
   6349 	                                 SCTP_SO_NOT_LOCKED);
   6350 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   6351 }
   6352 
   6353 
   6354 static void
   6355 sctp_prune_prsctp(struct sctp_tcb *stcb,
   6356     struct sctp_association *asoc,
   6357     struct sctp_sndrcvinfo *srcv,
   6358     int dataout)
   6359 {
   6360 	int freed_spc = 0;
   6361 	struct sctp_tmit_chunk *chk, *nchk;
   6362 
   6363 	SCTP_TCB_LOCK_ASSERT(stcb);
   6364 	if ((asoc->peer_supports_prsctp) &&
   6365 	    (asoc->sent_queue_cnt_removeable > 0)) {
   6366 		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
   6367 			/*
   6368 			 * Look for chunks marked with the PR_SCTP flag AND
   6369 			 * the buffer space flag. If the one being sent is
   6370 			 * equal or greater priority then purge the old one
   6371 			 * and free some space.
   6372 			 */
   6373 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
   6374 				/*
   6375 				 * This one is PR-SCTP AND buffer space
   6376 				 * limited type
   6377 				 */
   6378 				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
   6379 					/*
   6380 					 * Lower numbers equates to higher
   6381 					 * priority so if the one we are
   6382 					 * looking at has a larger or equal
   6383 					 * priority we want to drop the data
   6384 					 * and NOT retransmit it.
   6385 					 */
   6386 					if (chk->data) {
   6387 						/*
   6388 						 * We release the book_size
   6389 						 * if the mbuf is here
   6390 						 */
   6391 						int ret_spc;
   6392 						uint8_t sent;
   6393 
   6394 						if (chk->sent > SCTP_DATAGRAM_UNSENT)
   6395 							sent = 1;
   6396 						else
   6397 							sent = 0;
   6398 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
   6399 						    sent,
   6400 						    SCTP_SO_LOCKED);
   6401 						freed_spc += ret_spc;
   6402 						if (freed_spc >= dataout) {
   6403 							return;
   6404 						}
   6405 					}	/* if chunk was present */
   6406 				}	/* if of sufficent priority */
   6407 			}	/* if chunk has enabled */
   6408 		}		/* tailqforeach */
   6409 
   6410 		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
   6411 			/* Here we must move to the sent queue and mark */
   6412 			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
   6413 				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
   6414 					if (chk->data) {
   6415 						/*
   6416 						 * We release the book_size
   6417 						 * if the mbuf is here
   6418 						 */
   6419 						int ret_spc;
   6420 
   6421 						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
   6422 						    0, SCTP_SO_LOCKED);
   6423 
   6424 						freed_spc += ret_spc;
   6425 						if (freed_spc >= dataout) {
   6426 							return;
   6427 						}
   6428 					}	/* end if chk->data */
   6429 				}	/* end if right class */
   6430 			}	/* end if chk pr-sctp */
   6431 		}		/* tailqforeachsafe (chk) */
   6432 	}			/* if enabled in asoc */
   6433 }
   6434 
   6435 int
   6436 sctp_get_frag_point(struct sctp_tcb *stcb,
   6437     struct sctp_association *asoc)
   6438 {
   6439 	int siz, ovh;
   6440 
   6441 	/*
   6442 	 * For endpoints that have both v6 and v4 addresses we must reserve
   6443 	 * room for the ipv6 header, for those that are only dealing with V4
   6444 	 * we use a larger frag point.
   6445 	 */
   6446 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
   6447 		ovh = SCTP_MED_OVERHEAD;
   6448 	} else {
   6449 		ovh = SCTP_MED_V4_OVERHEAD;
   6450 	}
   6451 
   6452 	if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
   6453 		siz = asoc->smallest_mtu - ovh;
   6454 	else
   6455 		siz = (stcb->asoc.sctp_frag_point - ovh);
   6456 	/*
   6457 	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
   6458 	 */
   6459 	/* A data chunk MUST fit in a cluster */
   6460 	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
   6461 	/* } */
   6462 
   6463 	/* adjust for an AUTH chunk if DATA requires auth */
   6464 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
   6465 		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
   6466 
   6467 	if (siz % 4) {
   6468 		/* make it an even word boundary please */
   6469 		siz -= (siz % 4);
   6470 	}
   6471 	return (siz);
   6472 }
   6473 
   6474 static void
   6475 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
   6476 {
   6477 	/*
   6478 	 * We assume that the user wants PR_SCTP_TTL if the user
   6479 	 * provides a positive lifetime but does not specify any
   6480 	 * PR_SCTP policy.
   6481 	 */
   6482 	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
   6483 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
   6484 	} else if (sp->timetolive > 0) {
   6485 		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
   6486 		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
   6487 	} else {
   6488 		return;
   6489 	}
   6490 	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
   6491 	case CHUNK_FLAGS_PR_SCTP_BUF:
   6492 		/*
   6493 		 * Time to live is a priority stored in tv_sec when
   6494 		 * doing the buffer drop thing.
   6495 		 */
   6496 		sp->ts.tv_sec = sp->timetolive;
   6497 		sp->ts.tv_usec = 0;
   6498 		break;
   6499 	case CHUNK_FLAGS_PR_SCTP_TTL:
   6500 	{
   6501 		struct timeval tv;
   6502 		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
   6503 		tv.tv_sec = sp->timetolive / 1000;
   6504 		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
   6505 		/* TODO sctp_constants.h needs alternative time macros when
   6506 		 *  _KERNEL is undefined.
   6507 		 */
   6508 #ifndef __FreeBSD__
   6509 		timeradd(&sp->ts, &tv, &sp->ts);
   6510 #else
   6511 		timevaladd(&sp->ts, &tv);
   6512 #endif
   6513 	}
   6514 		break;
   6515 	case CHUNK_FLAGS_PR_SCTP_RTX:
   6516 		/*
   6517 		 * Time to live is a the number or retransmissions
   6518 		 * stored in tv_sec.
   6519 		 */
   6520 		sp->ts.tv_sec = sp->timetolive;
   6521 		sp->ts.tv_usec = 0;
   6522 		break;
   6523 	default:
   6524 		SCTPDBG(SCTP_DEBUG_USRREQ1,
   6525 			"Unknown PR_SCTP policy %u.\n",
   6526 			PR_SCTP_POLICY(sp->sinfo_flags));
   6527 		break;
   6528 	}
   6529 }
   6530 
   6531 static int
   6532 sctp_msg_append(struct sctp_tcb *stcb,
   6533 		struct sctp_nets *net,
   6534 		struct mbuf *m,
   6535 		struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
   6536 {
   6537 	int error = 0;
   6538 	struct mbuf *at;
   6539 	struct sctp_stream_queue_pending *sp = NULL;
   6540 	struct sctp_stream_out *strm;
   6541 
   6542 	/* Given an mbuf chain, put it
   6543 	 * into the association send queue and
   6544 	 * place it on the wheel
   6545 	 */
   6546 	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
   6547 		/* Invalid stream number */
   6548 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   6549 		error = EINVAL;
   6550 		goto out_now;
   6551 	}
   6552 	if ((stcb->asoc.stream_locked) &&
   6553 	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
   6554 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   6555 		error = EINVAL;
   6556 		goto out_now;
   6557 	}
   6558 	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
   6559 	/* Now can we send this? */
   6560 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
   6561 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
   6562 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
   6563 	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
   6564 		/* got data while shutting down */
   6565 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
   6566 		error = ECONNRESET;
   6567 		goto out_now;
   6568 	}
   6569 	sctp_alloc_a_strmoq(stcb, sp);
   6570 	if (sp == NULL) {
   6571 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   6572 		error = ENOMEM;
   6573 		goto out_now;
   6574 	}
   6575 	sp->sinfo_flags = srcv->sinfo_flags;
   6576 	sp->timetolive = srcv->sinfo_timetolive;
   6577 	sp->ppid = srcv->sinfo_ppid;
   6578 	sp->context = srcv->sinfo_context;
   6579 	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
   6580 		sp->net = net;
   6581 		atomic_add_int(&sp->net->ref_count, 1);
   6582 	} else {
   6583 		sp->net = NULL;
   6584 	}
   6585 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
   6586 	sp->stream = srcv->sinfo_stream;
   6587 	sp->msg_is_complete = 1;
   6588 	sp->sender_all_done = 1;
   6589 	sp->some_taken = 0;
   6590 	sp->data = m;
   6591 	sp->tail_mbuf = NULL;
   6592 	sctp_set_prsctp_policy(sp);
   6593 	/* We could in theory (for sendall) sifa the length
   6594 	 * in, but we would still have to hunt through the
   6595 	 * chain since we need to setup the tail_mbuf
   6596 	 */
   6597 	sp->length = 0;
   6598 	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
   6599 		if (SCTP_BUF_NEXT(at) == NULL)
   6600 			sp->tail_mbuf = at;
   6601 		sp->length += SCTP_BUF_LEN(at);
   6602 	}
   6603 	if (srcv->sinfo_keynumber_valid) {
   6604 		sp->auth_keyid = srcv->sinfo_keynumber;
   6605 	} else {
   6606 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
   6607 	}
   6608 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
   6609 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
   6610 		sp->holds_key_ref = 1;
   6611 	}
   6612 	if (hold_stcb_lock == 0) {
   6613 		SCTP_TCB_SEND_LOCK(stcb);
   6614 	}
   6615 	sctp_snd_sb_alloc(stcb, sp->length);
   6616 	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
   6617 	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
   6618 	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
   6619 	m = NULL;
   6620 	if (hold_stcb_lock == 0) {
   6621 		SCTP_TCB_SEND_UNLOCK(stcb);
   6622 	}
   6623 out_now:
   6624 	if (m) {
   6625 		sctp_m_freem(m);
   6626 	}
   6627 	return (error);
   6628 }
   6629 
   6630 
   6631 static struct mbuf *
   6632 sctp_copy_mbufchain(struct mbuf *clonechain,
   6633 		    struct mbuf *outchain,
   6634 		    struct mbuf **endofchain,
   6635 		    int can_take_mbuf,
   6636 		    int sizeofcpy,
   6637 		    uint8_t copy_by_ref)
   6638 {
   6639 	struct mbuf *m;
   6640 	struct mbuf *appendchain;
   6641 	caddr_t cp;
   6642 	int len;
   6643 
   6644 	if (endofchain == NULL) {
   6645 		/* error */
   6646 	error_out:
   6647 		if (outchain)
   6648 			sctp_m_freem(outchain);
   6649 		return (NULL);
   6650 	}
   6651 	if (can_take_mbuf) {
   6652 		appendchain = clonechain;
   6653 	} else {
   6654 		if (!copy_by_ref &&
   6655 #if defined(__Panda__)
   6656 		    0
   6657 #else
   6658 		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
   6659 #endif
   6660 		    ) {
   6661 			/* Its not in a cluster */
   6662 			if (*endofchain == NULL) {
   6663 				/* lets get a mbuf cluster */
   6664 				if (outchain == NULL) {
   6665 					/* This is the general case */
   6666 				new_mbuf:
   6667 					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
   6668 					if (outchain == NULL) {
   6669 						goto error_out;
   6670 					}
   6671 					SCTP_BUF_LEN(outchain) = 0;
   6672 					*endofchain = outchain;
   6673 					/* get the prepend space */
   6674 					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
   6675 				} else {
   6676 					/* We really should not get a NULL in endofchain */
   6677 					/* find end */
   6678 					m = outchain;
   6679 					while (m) {
   6680 						if (SCTP_BUF_NEXT(m) == NULL) {
   6681 							*endofchain = m;
   6682 							break;
   6683 						}
   6684 						m = SCTP_BUF_NEXT(m);
   6685 					}
   6686 					/* sanity */
   6687 					if (*endofchain == NULL) {
   6688 						/* huh, TSNH XXX maybe we should panic */
   6689 						sctp_m_freem(outchain);
   6690 						goto new_mbuf;
   6691 					}
   6692 				}
   6693 				/* get the new end of length */
   6694 				len = M_TRAILINGSPACE(*endofchain);
   6695 			} else {
   6696 				/* how much is left at the end? */
   6697 				len = M_TRAILINGSPACE(*endofchain);
   6698 			}
   6699 			/* Find the end of the data, for appending */
   6700 			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
   6701 
   6702 			/* Now lets copy it out */
   6703 			if (len >= sizeofcpy) {
   6704 				/* It all fits, copy it in */
   6705 				m_copydata(clonechain, 0, sizeofcpy, cp);
   6706 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
   6707 			} else {
   6708 				/* fill up the end of the chain */
   6709 				if (len > 0) {
   6710 					m_copydata(clonechain, 0, len, cp);
   6711 					SCTP_BUF_LEN((*endofchain)) += len;
   6712 					/* now we need another one */
   6713 					sizeofcpy -= len;
   6714 				}
   6715 				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
   6716 				if (m == NULL) {
   6717 					/* We failed */
   6718 					goto error_out;
   6719 				}
   6720 				SCTP_BUF_NEXT((*endofchain)) = m;
   6721 				*endofchain = m;
   6722 				cp = mtod((*endofchain), caddr_t);
   6723 				m_copydata(clonechain, len, sizeofcpy, cp);
   6724 				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
   6725 			}
   6726 			return (outchain);
   6727 		} else {
   6728 			/* copy the old fashion way */
   6729 			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
   6730 #ifdef SCTP_MBUF_LOGGING
   6731 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   6732 				struct mbuf *mat;
   6733 
   6734 				for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
   6735 					if (SCTP_BUF_IS_EXTENDED(mat)) {
   6736 						sctp_log_mb(mat, SCTP_MBUF_ICOPY);
   6737 					}
   6738 				}
   6739 			}
   6740 #endif
   6741 		}
   6742 	}
   6743 	if (appendchain == NULL) {
   6744 		/* error */
   6745 		if (outchain)
   6746 			sctp_m_freem(outchain);
   6747 		return (NULL);
   6748 	}
   6749 	if (outchain) {
   6750 		/* tack on to the end */
   6751 		if (*endofchain != NULL) {
   6752 			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
   6753 		} else {
   6754 			m = outchain;
   6755 			while (m) {
   6756 				if (SCTP_BUF_NEXT(m) == NULL) {
   6757 					SCTP_BUF_NEXT(m) = appendchain;
   6758 					break;
   6759 				}
   6760 				m = SCTP_BUF_NEXT(m);
   6761 			}
   6762 		}
   6763 		/*
   6764 		 * save off the end and update the end-chain
   6765 		 * postion
   6766 		 */
   6767 		m = appendchain;
   6768 		while (m) {
   6769 			if (SCTP_BUF_NEXT(m) == NULL) {
   6770 				*endofchain = m;
   6771 				break;
   6772 			}
   6773 			m = SCTP_BUF_NEXT(m);
   6774 		}
   6775 		return (outchain);
   6776 	} else {
   6777 		/* save off the end and update the end-chain postion */
   6778 		m = appendchain;
   6779 		while (m) {
   6780 			if (SCTP_BUF_NEXT(m) == NULL) {
   6781 				*endofchain = m;
   6782 				break;
   6783 			}
   6784 			m = SCTP_BUF_NEXT(m);
   6785 		}
   6786 		return (appendchain);
   6787 	}
   6788 }
   6789 
   6790 static int
   6791 sctp_med_chunk_output(struct sctp_inpcb *inp,
   6792 		      struct sctp_tcb *stcb,
   6793 		      struct sctp_association *asoc,
   6794 		      int *num_out,
   6795 		      int *reason_code,
   6796 		      int control_only, int from_where,
   6797 		      struct timeval *now, int *now_filled, int frag_point, int so_locked
   6798 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   6799 		      SCTP_UNUSED
   6800 #endif
   6801                       );
   6802 
   6803 static void
   6804 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
   6805     uint32_t val SCTP_UNUSED)
   6806 {
   6807 	struct sctp_copy_all *ca;
   6808 	struct mbuf *m;
   6809 	int ret = 0;
   6810 	int added_control = 0;
   6811 	int un_sent, do_chunk_output = 1;
   6812 	struct sctp_association *asoc;
   6813 	struct sctp_nets *net;
   6814 
   6815 	ca = (struct sctp_copy_all *)ptr;
   6816 	if (ca->m == NULL) {
   6817 		return;
   6818 	}
   6819 	if (ca->inp != inp) {
   6820 		/* TSNH */
   6821 		return;
   6822 	}
   6823 	if (ca->sndlen > 0) {
   6824 		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
   6825 		if (m == NULL) {
   6826 			/* can't copy so we are done */
   6827 			ca->cnt_failed++;
   6828 			return;
   6829 		}
   6830 #ifdef SCTP_MBUF_LOGGING
   6831 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   6832 			struct mbuf *mat;
   6833 
   6834 			for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
   6835 				if (SCTP_BUF_IS_EXTENDED(mat)) {
   6836 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
   6837 				}
   6838 			}
   6839 		}
   6840 #endif
   6841 	} else {
   6842 		m = NULL;
   6843 	}
   6844 	SCTP_TCB_LOCK_ASSERT(stcb);
   6845 	if (stcb->asoc.alternate) {
   6846 		net = stcb->asoc.alternate;
   6847 	} else {
   6848 		net = stcb->asoc.primary_destination;
   6849 	}
   6850 	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
   6851 		/* Abort this assoc with m as the user defined reason */
   6852 		if (m != NULL) {
   6853 			SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
   6854 		} else {
   6855 			m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
   6856 			                          0, M_NOWAIT, 1, MT_DATA);
   6857 			SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
   6858 		}
   6859 		if (m != NULL) {
   6860 			struct sctp_paramhdr *ph;
   6861 
   6862 			ph = mtod(m, struct sctp_paramhdr *);
   6863 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
   6864 			ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen);
   6865 		}
   6866 		/* We add one here to keep the assoc from
   6867 		 * dis-appearing on us.
   6868 		 */
   6869 		atomic_add_int(&stcb->asoc.refcnt, 1);
   6870 		sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
   6871 		/* sctp_abort_an_association calls sctp_free_asoc()
   6872 		 * free association will NOT free it since we
   6873 		 * incremented the refcnt .. we do this to prevent
   6874 		 * it being freed and things getting tricky since
   6875 		 * we could end up (from free_asoc) calling inpcb_free
   6876 		 * which would get a recursive lock call to the
   6877 		 * iterator lock.. But as a consequence of that the
   6878 		 * stcb will return to us un-locked.. since free_asoc
   6879 		 * returns with either no TCB or the TCB unlocked, we
   6880 		 * must relock.. to unlock in the iterator timer :-0
   6881 		 */
   6882 		SCTP_TCB_LOCK(stcb);
   6883 		atomic_add_int(&stcb->asoc.refcnt, -1);
   6884 		goto no_chunk_output;
   6885 	} else {
   6886 		if (m) {
   6887 			ret = sctp_msg_append(stcb, net, m,
   6888 					      &ca->sndrcv, 1);
   6889 		}
   6890 		asoc = &stcb->asoc;
   6891 		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
   6892 			/* shutdown this assoc */
   6893 			int cnt;
   6894 			cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
   6895 
   6896 			if (TAILQ_EMPTY(&asoc->send_queue) &&
   6897 			    TAILQ_EMPTY(&asoc->sent_queue) &&
   6898 			    (cnt == 0)) {
   6899 				if (asoc->locked_on_sending) {
   6900 					goto abort_anyway;
   6901 				}
   6902 				/* there is nothing queued to send, so I'm done... */
   6903 				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
   6904 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
   6905 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
   6906 					/* only send SHUTDOWN the first time through */
   6907 					if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
   6908 						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   6909 					}
   6910 					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
   6911 					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
   6912 					sctp_stop_timers_for_shutdown(stcb);
   6913 					sctp_send_shutdown(stcb, net);
   6914 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
   6915 							 net);
   6916 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
   6917 							 asoc->primary_destination);
   6918 					added_control = 1;
   6919 					do_chunk_output = 0;
   6920 				}
   6921 			} else {
   6922 				/*
   6923 				 * we still got (or just got) data to send, so set
   6924 				 * SHUTDOWN_PENDING
   6925 				 */
   6926 				/*
   6927 				 * XXX sockets draft says that SCTP_EOF should be
   6928 				 * sent with no data.  currently, we will allow user
   6929 				 * data to be sent first and move to
   6930 				 * SHUTDOWN-PENDING
   6931 				 */
   6932 				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
   6933 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
   6934 				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
   6935 					if (asoc->locked_on_sending) {
   6936 						/* Locked to send out the data */
   6937 						struct sctp_stream_queue_pending *sp;
   6938 						sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
   6939 						if (sp) {
   6940 							if ((sp->length == 0) && (sp->msg_is_complete == 0))
   6941 								asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
   6942 						}
   6943 					}
   6944 					asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
   6945 					if (TAILQ_EMPTY(&asoc->send_queue) &&
   6946 					    TAILQ_EMPTY(&asoc->sent_queue) &&
   6947 					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
   6948 					abort_anyway:
   6949 						atomic_add_int(&stcb->asoc.refcnt, 1);
   6950 						sctp_abort_an_association(stcb->sctp_ep, stcb,
   6951 									  NULL, SCTP_SO_NOT_LOCKED);
   6952 						atomic_add_int(&stcb->asoc.refcnt, -1);
   6953 						goto no_chunk_output;
   6954 					}
   6955 					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
   6956 							 asoc->primary_destination);
   6957 				}
   6958 			}
   6959 
   6960 		}
   6961 	}
   6962 	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
   6963 		   (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
   6964 
   6965 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
   6966 	    (stcb->asoc.total_flight > 0) &&
   6967 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
   6968 		do_chunk_output = 0;
   6969 	}
   6970 	if (do_chunk_output)
   6971 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
   6972 	else if (added_control) {
   6973 		int num_out = 0, reason = 0, now_filled = 0;
   6974 		struct timeval now;
   6975 		int frag_point;
   6976 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
   6977 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
   6978 				      &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
   6979 	}
   6980  no_chunk_output:
   6981 	if (ret) {
   6982 		ca->cnt_failed++;
   6983 	} else {
   6984 		ca->cnt_sent++;
   6985 	}
   6986 }
   6987 
   6988 static void
   6989 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
   6990 {
   6991 	struct sctp_copy_all *ca;
   6992 
   6993 	ca = (struct sctp_copy_all *)ptr;
   6994 	/*
   6995 	 * Do a notify here? Kacheong suggests that the notify be done at
   6996 	 * the send time.. so you would push up a notification if any send
   6997 	 * failed. Don't know if this is feasable since the only failures we
   6998 	 * have is "memory" related and if you cannot get an mbuf to send
   6999 	 * the data you surely can't get an mbuf to send up to notify the
   7000 	 * user you can't send the data :->
   7001 	 */
   7002 
   7003 	/* now free everything */
   7004 	sctp_m_freem(ca->m);
   7005 	SCTP_FREE(ca, SCTP_M_COPYAL);
   7006 }
   7007 
   7008 
   7009 #define	MC_ALIGN(m, len) do {						\
   7010 	SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1));	\
   7011 } while (0)
   7012 
   7013 
   7014 
   7015 static struct mbuf *
   7016 sctp_copy_out_all(struct uio *uio, int len)
   7017 {
   7018 	struct mbuf *ret, *at;
   7019 	int left, willcpy, cancpy, error;
   7020 
   7021 	ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
   7022 	if (ret == NULL) {
   7023 		/* TSNH */
   7024 		return (NULL);
   7025 	}
   7026 	left = len;
   7027 	SCTP_BUF_LEN(ret) = 0;
   7028 	/* save space for the data chunk header */
   7029 	cancpy = M_TRAILINGSPACE(ret);
   7030 	willcpy = min(cancpy, left);
   7031 	at = ret;
   7032 	while (left > 0) {
   7033 		/* Align data to the end */
   7034 		error = uiomove(mtod(at, caddr_t), willcpy, uio);
   7035 		if (error) {
   7036 	err_out_now:
   7037 			sctp_m_freem(at);
   7038 			return (NULL);
   7039 		}
   7040 		SCTP_BUF_LEN(at) = willcpy;
   7041 		SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
   7042 		left -= willcpy;
   7043 		if (left > 0) {
   7044 			SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
   7045 			if (SCTP_BUF_NEXT(at) == NULL) {
   7046 				goto err_out_now;
   7047 			}
   7048 			at = SCTP_BUF_NEXT(at);
   7049 			SCTP_BUF_LEN(at) = 0;
   7050 			cancpy = M_TRAILINGSPACE(at);
   7051 			willcpy = min(cancpy, left);
   7052 		}
   7053 	}
   7054 	return (ret);
   7055 }
   7056 
   7057 static int
   7058 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
   7059     struct sctp_sndrcvinfo *srcv)
   7060 {
   7061 	int ret;
   7062 	struct sctp_copy_all *ca;
   7063 
   7064 	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
   7065 		    SCTP_M_COPYAL);
   7066 	if (ca == NULL) {
   7067 		sctp_m_freem(m);
   7068 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   7069 		return (ENOMEM);
   7070 	}
   7071 	memset(ca, 0, sizeof(struct sctp_copy_all));
   7072 
   7073 	ca->inp = inp;
   7074 	if (srcv) {
   7075 		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
   7076 	}
   7077 	/*
   7078 	 * take off the sendall flag, it would be bad if we failed to do
   7079 	 * this :-0
   7080 	 */
   7081 	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
   7082 	/* get length and mbuf chain */
   7083 	if (uio) {
   7084 #if defined(__APPLE__)
   7085 #if defined(APPLE_LEOPARD)
   7086 		ca->sndlen = uio->uio_resid;
   7087 #else
   7088 		ca->sndlen = uio_resid(uio);
   7089 #endif
   7090 #else
   7091 		ca->sndlen = uio->uio_resid;
   7092 #endif
   7093 #if defined(__APPLE__)
   7094 		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
   7095 #endif
   7096 		ca->m = sctp_copy_out_all(uio, ca->sndlen);
   7097 #if defined(__APPLE__)
   7098 		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
   7099 #endif
   7100 		if (ca->m == NULL) {
   7101 			SCTP_FREE(ca, SCTP_M_COPYAL);
   7102 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   7103 			return (ENOMEM);
   7104 		}
   7105 	} else {
   7106 		/* Gather the length of the send */
   7107 		struct mbuf *mat;
   7108 
   7109 		ca->sndlen = 0;
   7110 		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
   7111 			ca->sndlen += SCTP_BUF_LEN(mat);
   7112 		}
   7113 	}
   7114 	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
   7115 				     SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
   7116 				     SCTP_ASOC_ANY_STATE,
   7117 				     (void *)ca, 0,
   7118 				     sctp_sendall_completes, inp, 1);
   7119 	if (ret) {
   7120 		SCTP_PRINTF("Failed to initiate iterator for sendall\n");
   7121 		SCTP_FREE(ca, SCTP_M_COPYAL);
   7122 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
   7123 		return (EFAULT);
   7124 	}
   7125 	return (0);
   7126 }
   7127 
   7128 
   7129 void
   7130 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
   7131 {
   7132 	struct sctp_tmit_chunk *chk, *nchk;
   7133 
   7134 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
   7135 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
   7136 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
   7137 			if (chk->data) {
   7138 				sctp_m_freem(chk->data);
   7139 				chk->data = NULL;
   7140 			}
   7141 			asoc->ctrl_queue_cnt--;
   7142 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   7143 		}
   7144 	}
   7145 }
   7146 
   7147 void
   7148 sctp_toss_old_asconf(struct sctp_tcb *stcb)
   7149 {
   7150 	struct sctp_association *asoc;
   7151 	struct sctp_tmit_chunk *chk, *nchk;
   7152 	struct sctp_asconf_chunk *acp;
   7153 
   7154 	asoc = &stcb->asoc;
   7155 	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
   7156 		/* find SCTP_ASCONF chunk in queue */
   7157 		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
   7158 			if (chk->data) {
   7159 				acp = mtod(chk->data, struct sctp_asconf_chunk *);
   7160 				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
   7161 					/* Not Acked yet */
   7162 					break;
   7163 				}
   7164 			}
   7165 			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
   7166 			if (chk->data) {
   7167 				sctp_m_freem(chk->data);
   7168 				chk->data = NULL;
   7169 			}
   7170 			asoc->ctrl_queue_cnt--;
   7171 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   7172 		}
   7173 	}
   7174 }
   7175 
   7176 
   7177 static void
   7178 sctp_clean_up_datalist(struct sctp_tcb *stcb,
   7179     struct sctp_association *asoc,
   7180     struct sctp_tmit_chunk **data_list,
   7181     int bundle_at,
   7182     struct sctp_nets *net)
   7183 {
   7184 	int i;
   7185 	struct sctp_tmit_chunk *tp1;
   7186 
   7187 	for (i = 0; i < bundle_at; i++) {
   7188 		/* off of the send queue */
   7189 		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
   7190 		asoc->send_queue_cnt--;
   7191 		if (i > 0) {
   7192 			/*
   7193 			 * Any chunk NOT 0 you zap the time chunk 0 gets
   7194 			 * zapped or set based on if a RTO measurment is
   7195 			 * needed.
   7196 			 */
   7197 			data_list[i]->do_rtt = 0;
   7198 		}
   7199 		/* record time */
   7200 		data_list[i]->sent_rcv_time = net->last_sent_time;
   7201 		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
   7202 		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
   7203 		if (data_list[i]->whoTo == NULL) {
   7204 			data_list[i]->whoTo = net;
   7205 			atomic_add_int(&net->ref_count, 1);
   7206 		}
   7207 		/* on to the sent queue */
   7208 		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
   7209 		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
   7210 			struct sctp_tmit_chunk *tpp;
   7211 
   7212 			/* need to move back */
   7213 		back_up_more:
   7214 			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
   7215 			if (tpp == NULL) {
   7216 				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
   7217 				goto all_done;
   7218 			}
   7219 			tp1 = tpp;
   7220 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
   7221 				goto back_up_more;
   7222 			}
   7223 			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
   7224 		} else {
   7225 			TAILQ_INSERT_TAIL(&asoc->sent_queue,
   7226 					  data_list[i],
   7227 					  sctp_next);
   7228 		}
   7229 	all_done:
   7230 		/* This does not lower until the cum-ack passes it */
   7231 		asoc->sent_queue_cnt++;
   7232 		if ((asoc->peers_rwnd <= 0) &&
   7233 		    (asoc->total_flight == 0) &&
   7234 		    (bundle_at == 1)) {
   7235 			/* Mark the chunk as being a window probe */
   7236 			SCTP_STAT_INCR(sctps_windowprobed);
   7237 		}
   7238 #ifdef SCTP_AUDITING_ENABLED
   7239 		sctp_audit_log(0xC2, 3);
   7240 #endif
   7241 		data_list[i]->sent = SCTP_DATAGRAM_SENT;
   7242 		data_list[i]->snd_count = 1;
   7243 		data_list[i]->rec.data.chunk_was_revoked = 0;
   7244 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   7245 			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
   7246 				       data_list[i]->whoTo->flight_size,
   7247 				       data_list[i]->book_size,
   7248 				       (uintptr_t)data_list[i]->whoTo,
   7249 				       data_list[i]->rec.data.TSN_seq);
   7250 		}
   7251 		sctp_flight_size_increase(data_list[i]);
   7252 		sctp_total_flight_increase(stcb, data_list[i]);
   7253 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
   7254 			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
   7255 			      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
   7256 		}
   7257 		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
   7258 						    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
   7259 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   7260 			/* SWS sender side engages */
   7261 			asoc->peers_rwnd = 0;
   7262 		}
   7263 	}
   7264 	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
   7265 		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
   7266 	}
   7267 }
   7268 
   7269 static void
   7270 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
   7271 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   7272 	SCTP_UNUSED
   7273 #endif
   7274 )
   7275 {
   7276 	struct sctp_tmit_chunk *chk, *nchk;
   7277 
   7278 	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
   7279 		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
   7280 		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) ||	/* EY */
   7281 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
   7282 		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
   7283 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
   7284 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
   7285 		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
   7286 		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
   7287 		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
   7288 		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
   7289 		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
   7290 		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
   7291 			/* Stray chunks must be cleaned up */
   7292 	clean_up_anyway:
   7293 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
   7294 			if (chk->data) {
   7295 				sctp_m_freem(chk->data);
   7296 				chk->data = NULL;
   7297 			}
   7298 			asoc->ctrl_queue_cnt--;
   7299 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
   7300 				asoc->fwd_tsn_cnt--;
   7301 			sctp_free_a_chunk(stcb, chk, so_locked);
   7302 		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
   7303 			/* special handling, we must look into the param */
   7304 			if (chk != asoc->str_reset) {
   7305 				goto clean_up_anyway;
   7306 			}
   7307 		}
   7308 	}
   7309 }
   7310 
   7311 
   7312 static int
   7313 sctp_can_we_split_this(struct sctp_tcb *stcb,
   7314                        uint32_t length,
   7315                        uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
   7316 {
   7317 	/* Make a decision on if I should split a
   7318 	 * msg into multiple parts. This is only asked of
   7319 	 * incomplete messages.
   7320 	 */
   7321 	if (eeor_on) {
   7322 		/* If we are doing EEOR we need to always send
   7323 		 * it if its the entire thing, since it might
   7324 		 * be all the guy is putting in the hopper.
   7325 		 */
   7326 		if (goal_mtu >= length) {
   7327 			/*-
   7328 			 * If we have data outstanding,
   7329 			 * we get another chance when the sack
   7330 			 * arrives to transmit - wait for more data
   7331 			 */
   7332 			if (stcb->asoc.total_flight == 0) {
   7333 				/* If nothing is in flight, we zero
   7334 				 * the packet counter.
   7335 				 */
   7336 				return (length);
   7337 			}
   7338 			return (0);
   7339 
   7340 		} else {
   7341 			/* You can fill the rest */
   7342 			return (goal_mtu);
   7343 		}
   7344 	}
   7345 	/*-
   7346 	 * For those strange folk that make the send buffer
   7347 	 * smaller than our fragmentation point, we can't
   7348 	 * get a full msg in so we have to allow splitting.
   7349 	 */
   7350 	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
   7351 		return (length);
   7352 	}
   7353 
   7354 	if ((length <= goal_mtu) ||
   7355 	    ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
   7356 		/* Sub-optimial residual don't split in non-eeor mode. */
   7357 		return (0);
   7358 	}
   7359 	/* If we reach here length is larger
   7360 	 * than the goal_mtu. Do we wish to split
   7361 	 * it for the sake of packet putting together?
   7362 	 */
   7363 	if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
   7364 		/* Its ok to split it */
   7365 		return (min(goal_mtu, frag_point));
   7366 	}
   7367 	/* Nope, can't split */
   7368 	return (0);
   7369 
   7370 }
   7371 
   7372 static uint32_t
   7373 sctp_move_to_outqueue(struct sctp_tcb *stcb,
   7374                       struct sctp_stream_out *strq,
   7375                       uint32_t goal_mtu,
   7376                       uint32_t frag_point,
   7377                       int *locked,
   7378                       int *giveup,
   7379                       int eeor_mode,
   7380                       int *bail,
   7381                       int so_locked
   7382 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   7383                       SCTP_UNUSED
   7384 #endif
   7385 	)
   7386 {
   7387 	/* Move from the stream to the send_queue keeping track of the total */
   7388 	struct sctp_association *asoc;
   7389 	struct sctp_stream_queue_pending *sp;
   7390 	struct sctp_tmit_chunk *chk;
   7391 	struct sctp_data_chunk *dchkh;
   7392 	uint32_t to_move, length;
   7393 	uint8_t rcv_flags = 0;
   7394 	uint8_t some_taken;
   7395 	uint8_t send_lock_up = 0;
   7396 
   7397 	SCTP_TCB_LOCK_ASSERT(stcb);
   7398 	asoc = &stcb->asoc;
   7399 one_more_time:
   7400 	/*sa_ignore FREED_MEMORY*/
   7401 	sp = TAILQ_FIRST(&strq->outqueue);
   7402 	if (sp == NULL) {
   7403 		*locked = 0;
   7404 		if (send_lock_up == 0) {
   7405 			SCTP_TCB_SEND_LOCK(stcb);
   7406 			send_lock_up = 1;
   7407 		}
   7408 		sp = TAILQ_FIRST(&strq->outqueue);
   7409 		if (sp) {
   7410 			goto one_more_time;
   7411 		}
   7412 		if (strq->last_msg_incomplete) {
   7413 			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
   7414 			            strq->stream_no,
   7415 			            strq->last_msg_incomplete);
   7416 			strq->last_msg_incomplete = 0;
   7417 		}
   7418 		to_move = 0;
   7419 		if (send_lock_up) {
   7420 			SCTP_TCB_SEND_UNLOCK(stcb);
   7421 			send_lock_up = 0;
   7422 		}
   7423 		goto out_of;
   7424 	}
   7425 	if ((sp->msg_is_complete) && (sp->length == 0)) {
   7426 		if (sp->sender_all_done) {
   7427 			/* We are doing differed cleanup. Last
   7428 			 * time through when we took all the data
   7429 			 * the sender_all_done was not set.
   7430 			 */
   7431 			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
   7432 				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
   7433 				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
   7434 				            sp->sender_all_done,
   7435 				            sp->length,
   7436 				            sp->msg_is_complete,
   7437 				            sp->put_last_out,
   7438 				            send_lock_up);
   7439 			}
   7440 			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up  == 0)) {
   7441 				SCTP_TCB_SEND_LOCK(stcb);
   7442 				send_lock_up = 1;
   7443 			}
   7444 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
   7445 			TAILQ_REMOVE(&strq->outqueue, sp, next);
   7446 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
   7447 			if (sp->net) {
   7448 				sctp_free_remote_addr(sp->net);
   7449 				sp->net = NULL;
   7450 			}
   7451 			if (sp->data) {
   7452 				sctp_m_freem(sp->data);
   7453 				sp->data = NULL;
   7454 			}
   7455 			sctp_free_a_strmoq(stcb, sp, so_locked);
   7456 			/* we can't be locked to it */
   7457 			*locked = 0;
   7458 			stcb->asoc.locked_on_sending = NULL;
   7459 			if (send_lock_up) {
   7460 				SCTP_TCB_SEND_UNLOCK(stcb);
   7461 				send_lock_up = 0;
   7462 			}
   7463 			/* back to get the next msg */
   7464 			goto one_more_time;
   7465 		} else {
   7466 			/* sender just finished this but
   7467 			 * still holds a reference
   7468 			 */
   7469 			*locked = 1;
   7470 			*giveup = 1;
   7471 			to_move = 0;
   7472 			goto out_of;
   7473 		}
   7474 	} else {
   7475 		/* is there some to get */
   7476 		if (sp->length == 0) {
   7477 			/* no */
   7478 			*locked = 1;
   7479 			*giveup = 1;
   7480 			to_move = 0;
   7481 			goto out_of;
   7482 		} else if (sp->discard_rest) {
   7483 			if (send_lock_up == 0) {
   7484 				SCTP_TCB_SEND_LOCK(stcb);
   7485 				send_lock_up = 1;
   7486 			}
   7487 			/* Whack down the size */
   7488 			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
   7489 			if ((stcb->sctp_socket != NULL) &&	     \
   7490 			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
   7491 			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
   7492 				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
   7493 			}
   7494 			if (sp->data) {
   7495 				sctp_m_freem(sp->data);
   7496 				sp->data = NULL;
   7497 				sp->tail_mbuf = NULL;
   7498 			}
   7499 			sp->length = 0;
   7500 			sp->some_taken = 1;
   7501 			*locked = 1;
   7502 			*giveup = 1;
   7503 			to_move = 0;
   7504 			goto out_of;
   7505 		}
   7506 	}
   7507 	some_taken = sp->some_taken;
   7508 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
   7509 		sp->msg_is_complete = 1;
   7510 	}
   7511 re_look:
   7512 	length = sp->length;
   7513 	if (sp->msg_is_complete) {
   7514 		/* The message is complete */
   7515 		to_move = min(length, frag_point);
   7516 		if (to_move == length) {
   7517 			/* All of it fits in the MTU */
   7518 			if (sp->some_taken) {
   7519 				rcv_flags |= SCTP_DATA_LAST_FRAG;
   7520 				sp->put_last_out = 1;
   7521 			} else {
   7522 				rcv_flags |= SCTP_DATA_NOT_FRAG;
   7523 				sp->put_last_out = 1;
   7524 			}
   7525 		} else {
   7526 			/* Not all of it fits, we fragment */
   7527 			if (sp->some_taken == 0) {
   7528 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
   7529 			}
   7530 			sp->some_taken = 1;
   7531 		}
   7532 	} else {
   7533 		to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
   7534 		if (to_move) {
   7535 			/*-
   7536 			 * We use a snapshot of length in case it
   7537 			 * is expanding during the compare.
   7538 			 */
   7539 			uint32_t llen;
   7540 
   7541 			llen = length;
   7542 			if (to_move >= llen) {
   7543 				to_move = llen;
   7544 				if (send_lock_up == 0) {
   7545 					/*-
   7546 					 * We are taking all of an incomplete msg
   7547 					 * thus we need a send lock.
   7548 					 */
   7549 					SCTP_TCB_SEND_LOCK(stcb);
   7550 					send_lock_up = 1;
   7551 					if (sp->msg_is_complete) {
   7552 						/* the sender finished the msg */
   7553 						goto re_look;
   7554 					}
   7555 				}
   7556 			}
   7557 			if (sp->some_taken == 0) {
   7558 				rcv_flags |= SCTP_DATA_FIRST_FRAG;
   7559 				sp->some_taken = 1;
   7560 			}
   7561 		} else {
   7562 			/* Nothing to take. */
   7563 			if (sp->some_taken) {
   7564 				*locked = 1;
   7565 			}
   7566 			*giveup = 1;
   7567 			to_move = 0;
   7568 			goto out_of;
   7569 		}
   7570 	}
   7571 
   7572 	/* If we reach here, we can copy out a chunk */
   7573 	sctp_alloc_a_chunk(stcb, chk);
   7574 	if (chk == NULL) {
   7575 		/* No chunk memory */
   7576 		*giveup = 1;
   7577 		to_move = 0;
   7578 		goto out_of;
   7579 	}
   7580 	/* Setup for unordered if needed by looking
   7581 	 * at the user sent info flags.
   7582 	 */
   7583 	if (sp->sinfo_flags & SCTP_UNORDERED) {
   7584 		rcv_flags |= SCTP_DATA_UNORDERED;
   7585 	}
   7586 	if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
   7587 	    ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
   7588 		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
   7589 	}
   7590 	/* clear out the chunk before setting up */
   7591 	memset(chk, 0, sizeof(*chk));
   7592 	chk->rec.data.rcv_flags = rcv_flags;
   7593 
   7594 	if (to_move >= length) {
   7595 		/* we think we can steal the whole thing */
   7596 		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
   7597 			SCTP_TCB_SEND_LOCK(stcb);
   7598 			send_lock_up = 1;
   7599 		}
   7600 		if (to_move < sp->length) {
   7601 			/* bail, it changed */
   7602 			goto dont_do_it;
   7603 		}
   7604 		chk->data = sp->data;
   7605 		chk->last_mbuf = sp->tail_mbuf;
   7606 		/* register the stealing */
   7607 		sp->data = sp->tail_mbuf = NULL;
   7608 	} else {
   7609 		struct mbuf *m;
   7610 	dont_do_it:
   7611 		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
   7612 		chk->last_mbuf = NULL;
   7613 		if (chk->data == NULL) {
   7614 			sp->some_taken = some_taken;
   7615 			sctp_free_a_chunk(stcb, chk, so_locked);
   7616 			*bail = 1;
   7617 			to_move = 0;
   7618 			goto out_of;
   7619 		}
   7620 #ifdef SCTP_MBUF_LOGGING
   7621 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   7622 			struct mbuf *mat;
   7623 
   7624 			for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
   7625 				if (SCTP_BUF_IS_EXTENDED(mat)) {
   7626 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
   7627 				}
   7628 			}
   7629 		}
   7630 #endif
   7631 		/* Pull off the data */
   7632 		m_adj(sp->data, to_move);
   7633 		/* Now lets work our way down and compact it */
   7634 		m = sp->data;
   7635 		while (m && (SCTP_BUF_LEN(m) == 0)) {
   7636 			sp->data  = SCTP_BUF_NEXT(m);
   7637 			SCTP_BUF_NEXT(m) = NULL;
   7638 			if (sp->tail_mbuf == m) {
   7639 				/*-
   7640 				 * Freeing tail? TSNH since
   7641 				 * we supposedly were taking less
   7642 				 * than the sp->length.
   7643 				 */
   7644 #ifdef INVARIANTS
   7645 				panic("Huh, freing tail? - TSNH");
   7646 #else
   7647 				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
   7648 				sp->tail_mbuf = sp->data = NULL;
   7649 				sp->length = 0;
   7650 #endif
   7651 
   7652 			}
   7653 			sctp_m_free(m);
   7654 			m = sp->data;
   7655 		}
   7656 	}
   7657 	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
   7658 		chk->copy_by_ref = 1;
   7659 	} else {
   7660 		chk->copy_by_ref = 0;
   7661 	}
   7662 	/* get last_mbuf and counts of mb useage
   7663 	 * This is ugly but hopefully its only one mbuf.
   7664 	 */
   7665 	if (chk->last_mbuf == NULL) {
   7666 		chk->last_mbuf = chk->data;
   7667 		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
   7668 			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
   7669 		}
   7670 	}
   7671 
   7672 	if (to_move > length) {
   7673 		/*- This should not happen either
   7674 		 * since we always lower to_move to the size
   7675 		 * of sp->length if its larger.
   7676 		 */
   7677 #ifdef INVARIANTS
   7678 		panic("Huh, how can to_move be larger?");
   7679 #else
   7680 		SCTP_PRINTF("Huh, how can to_move be larger?\n");
   7681 		sp->length = 0;
   7682 #endif
   7683 	} else {
   7684 		atomic_subtract_int(&sp->length, to_move);
   7685 	}
   7686 	if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
   7687 		/* Not enough room for a chunk header, get some */
   7688 		struct mbuf *m;
   7689 		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
   7690 		if (m == NULL) {
   7691 			/*
   7692 			 * we're in trouble here. _PREPEND below will free
   7693 			 * all the data if there is no leading space, so we
   7694 			 * must put the data back and restore.
   7695 			 */
   7696 			if (send_lock_up == 0) {
   7697 				SCTP_TCB_SEND_LOCK(stcb);
   7698 				send_lock_up = 1;
   7699 			}
   7700 			if (chk->data == NULL) {
   7701 				/* unsteal the data */
   7702 				sp->data = chk->data;
   7703 				sp->tail_mbuf = chk->last_mbuf;
   7704 			} else {
   7705 				struct mbuf *m_tmp;
   7706 				/* reassemble the data */
   7707 				m_tmp = sp->data;
   7708 				sp->data = chk->data;
   7709 				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
   7710 			}
   7711 			sp->some_taken = some_taken;
   7712 			atomic_add_int(&sp->length, to_move);
   7713 			chk->data = NULL;
   7714 			*bail = 1;
   7715 			sctp_free_a_chunk(stcb, chk, so_locked);
   7716 			to_move = 0;
   7717 			goto out_of;
   7718 		} else {
   7719 			SCTP_BUF_LEN(m) = 0;
   7720 			SCTP_BUF_NEXT(m) = chk->data;
   7721 			chk->data = m;
   7722 			M_ALIGN(chk->data, 4);
   7723 		}
   7724 	}
   7725 	SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
   7726 	if (chk->data == NULL) {
   7727 		/* HELP, TSNH since we assured it would not above? */
   7728 #ifdef INVARIANTS
   7729 		panic("prepend failes HELP?");
   7730 #else
   7731 		SCTP_PRINTF("prepend fails HELP?\n");
   7732 		sctp_free_a_chunk(stcb, chk, so_locked);
   7733 #endif
   7734 		*bail = 1;
   7735 		to_move = 0;
   7736 		goto out_of;
   7737 	}
   7738 	sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
   7739 	chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
   7740 	chk->book_size_scale = 0;
   7741 	chk->sent = SCTP_DATAGRAM_UNSENT;
   7742 
   7743 	chk->flags = 0;
   7744 	chk->asoc = &stcb->asoc;
   7745 	chk->pad_inplace = 0;
   7746 	chk->no_fr_allowed = 0;
   7747 	chk->rec.data.stream_seq = strq->next_sequence_send;
   7748 	if ((rcv_flags & SCTP_DATA_LAST_FRAG) &&
   7749 	    !(rcv_flags & SCTP_DATA_UNORDERED)) {
   7750 		strq->next_sequence_send++;
   7751 	}
   7752 	chk->rec.data.stream_number = sp->stream;
   7753 	chk->rec.data.payloadtype = sp->ppid;
   7754 	chk->rec.data.context = sp->context;
   7755 	chk->rec.data.doing_fast_retransmit = 0;
   7756 
   7757 	chk->rec.data.timetodrop = sp->ts;
   7758 	chk->flags = sp->act_flags;
   7759 
   7760 	if (sp->net) {
   7761 		chk->whoTo = sp->net;
   7762 		atomic_add_int(&chk->whoTo->ref_count, 1);
   7763 	} else
   7764 		chk->whoTo = NULL;
   7765 
   7766 	if (sp->holds_key_ref) {
   7767 		chk->auth_keyid = sp->auth_keyid;
   7768 		sctp_auth_key_acquire(stcb, chk->auth_keyid);
   7769 		chk->holds_key_ref = 1;
   7770 	}
   7771 
   7772 #if defined(__FreeBSD__) || defined(__Panda__)
   7773 	chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
   7774 #else
   7775 	chk->rec.data.TSN_seq = asoc->sending_seq++;
   7776 #endif
   7777 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
   7778 		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
   7779 		               (uintptr_t)stcb, sp->length,
   7780 		               (uint32_t)((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
   7781 		               chk->rec.data.TSN_seq);
   7782 	}
   7783 	dchkh = mtod(chk->data, struct sctp_data_chunk *);
   7784 	/*
   7785 	 * Put the rest of the things in place now. Size was done
   7786 	 * earlier in previous loop prior to padding.
   7787 	 */
   7788 
   7789 #ifdef SCTP_ASOCLOG_OF_TSNS
   7790 	SCTP_TCB_LOCK_ASSERT(stcb);
   7791 	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
   7792 		asoc->tsn_out_at = 0;
   7793 		asoc->tsn_out_wrapped = 1;
   7794 	}
   7795 	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
   7796 	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
   7797 	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
   7798 	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
   7799 	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
   7800 	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
   7801 	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
   7802 	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
   7803 	asoc->tsn_out_at++;
   7804 #endif
   7805 
   7806 	dchkh->ch.chunk_type = SCTP_DATA;
   7807 	dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
   7808 	dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
   7809 	dchkh->dp.stream_id = htons(strq->stream_no);
   7810 	dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
   7811 	dchkh->dp.protocol_id = chk->rec.data.payloadtype;
   7812 	dchkh->ch.chunk_length = htons(chk->send_size);
   7813 	/* Now advance the chk->send_size by the actual pad needed. */
   7814 	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
   7815 		/* need a pad */
   7816 		struct mbuf *lm;
   7817 		int pads;
   7818 
   7819 		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
   7820 		if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
   7821 			chk->pad_inplace = 1;
   7822 		}
   7823 		if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
   7824 			/* pad added an mbuf */
   7825 			chk->last_mbuf = lm;
   7826 		}
   7827 		chk->send_size += pads;
   7828 	}
   7829 	if (PR_SCTP_ENABLED(chk->flags)) {
   7830 		asoc->pr_sctp_cnt++;
   7831 	}
   7832 	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
   7833 		/* All done pull and kill the message */
   7834 		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
   7835 		if (sp->put_last_out == 0) {
   7836 			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
   7837 			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
   7838 			            sp->sender_all_done,
   7839 			            sp->length,
   7840 			            sp->msg_is_complete,
   7841 			            sp->put_last_out,
   7842 			            send_lock_up);
   7843 		}
   7844 		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
   7845 			SCTP_TCB_SEND_LOCK(stcb);
   7846 			send_lock_up = 1;
   7847 		}
   7848 		TAILQ_REMOVE(&strq->outqueue, sp, next);
   7849 		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
   7850 		if (sp->net) {
   7851 			sctp_free_remote_addr(sp->net);
   7852 			sp->net = NULL;
   7853 		}
   7854 		if (sp->data) {
   7855 			sctp_m_freem(sp->data);
   7856 			sp->data = NULL;
   7857 		}
   7858 		sctp_free_a_strmoq(stcb, sp, so_locked);
   7859 
   7860 		/* we can't be locked to it */
   7861 		*locked = 0;
   7862 		stcb->asoc.locked_on_sending = NULL;
   7863 	} else {
   7864 		/* more to go, we are locked */
   7865 		*locked = 1;
   7866 	}
   7867 	asoc->chunks_on_out_queue++;
   7868 	strq->chunks_on_queues++;
   7869 	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
   7870 	asoc->send_queue_cnt++;
   7871 out_of:
   7872 	if (send_lock_up) {
   7873 		SCTP_TCB_SEND_UNLOCK(stcb);
   7874 	}
   7875 	return (to_move);
   7876 }
   7877 
   7878 
   7879 static void
   7880 sctp_fill_outqueue(struct sctp_tcb *stcb,
   7881     struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
   7882 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   7883 	SCTP_UNUSED
   7884 #endif
   7885 )
   7886 {
   7887 	struct sctp_association *asoc;
   7888 	struct sctp_stream_out *strq;
   7889 	int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
   7890 	int locked, giveup;
   7891 
   7892 	SCTP_TCB_LOCK_ASSERT(stcb);
   7893 	asoc = &stcb->asoc;
   7894 	switch (net->ro._l_addr.sa.sa_family) {
   7895 #ifdef INET
   7896 		case AF_INET:
   7897 			goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
   7898 			break;
   7899 #endif
   7900 #ifdef INET6
   7901 		case AF_INET6:
   7902 			goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
   7903 			break;
   7904 #endif
   7905 #if defined(__Userspace__)
   7906 		case AF_CONN:
   7907 			goal_mtu = net->mtu - sizeof(struct sctphdr);
   7908 			break;
   7909 #endif
   7910 		default:
   7911 			/* TSNH */
   7912 			goal_mtu = net->mtu;
   7913 			break;
   7914 	}
   7915 	/* Need an allowance for the data chunk header too */
   7916 	goal_mtu -= sizeof(struct sctp_data_chunk);
   7917 
   7918 	/* must make even word boundary */
   7919 	goal_mtu &= 0xfffffffc;
   7920 	if (asoc->locked_on_sending) {
   7921 		/* We are stuck on one stream until the message completes. */
   7922 		strq = asoc->locked_on_sending;
   7923 		locked = 1;
   7924 	} else {
   7925 		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
   7926 		locked = 0;
   7927 	}
   7928 	while ((goal_mtu > 0) && strq) {
   7929 		giveup = 0;
   7930 		bail = 0;
   7931 		moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
   7932 						       &giveup, eeor_mode, &bail, so_locked);
   7933 		if (moved_how_much)
   7934 			stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
   7935 
   7936 		if (locked) {
   7937 			asoc->locked_on_sending = strq;
   7938 			if ((moved_how_much == 0) || (giveup) || bail)
   7939 				/* no more to move for now */
   7940 				break;
   7941 		} else {
   7942 			asoc->locked_on_sending = NULL;
   7943 			if ((giveup) || bail) {
   7944 				break;
   7945 			}
   7946 			strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
   7947 			if (strq == NULL) {
   7948 				break;
   7949 			}
   7950 		}
   7951 		total_moved += moved_how_much;
   7952 		goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
   7953 		goal_mtu &= 0xfffffffc;
   7954 	}
   7955 	if (bail)
   7956 		*quit_now = 1;
   7957 
   7958 	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
   7959 
   7960 	if (total_moved == 0) {
   7961 		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
   7962 		    (net == stcb->asoc.primary_destination)) {
   7963 			/* ran dry for primary network net */
   7964 			SCTP_STAT_INCR(sctps_primary_randry);
   7965 		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
   7966 			/* ran dry with CMT on */
   7967 			SCTP_STAT_INCR(sctps_cmt_randry);
   7968 		}
   7969 	}
   7970 }
   7971 
   7972 void
   7973 sctp_fix_ecn_echo(struct sctp_association *asoc)
   7974 {
   7975 	struct sctp_tmit_chunk *chk;
   7976 
   7977 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
   7978 		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
   7979 			chk->sent = SCTP_DATAGRAM_UNSENT;
   7980 		}
   7981 	}
   7982 }
   7983 
   7984 void
   7985 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
   7986 {
   7987 	struct sctp_association *asoc;
   7988 	struct sctp_tmit_chunk *chk;
   7989 	struct sctp_stream_queue_pending *sp;
   7990 	unsigned int i;
   7991 
   7992 	if (net == NULL) {
   7993 		return;
   7994 	}
   7995 	asoc = &stcb->asoc;
   7996 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
   7997 		TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
   7998 			if (sp->net == net) {
   7999 				sctp_free_remote_addr(sp->net);
   8000 				sp->net = NULL;
   8001 			}
   8002 		}
   8003 	}
   8004 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
   8005 		if (chk->whoTo == net) {
   8006 			sctp_free_remote_addr(chk->whoTo);
   8007 			chk->whoTo = NULL;
   8008 		}
   8009 	}
   8010 }
   8011 
   8012 int
   8013 sctp_med_chunk_output(struct sctp_inpcb *inp,
   8014 		      struct sctp_tcb *stcb,
   8015 		      struct sctp_association *asoc,
   8016 		      int *num_out,
   8017 		      int *reason_code,
   8018 		      int control_only, int from_where,
   8019 		      struct timeval *now, int *now_filled, int frag_point, int so_locked
   8020 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   8021 		      SCTP_UNUSED
   8022 #endif
   8023 	)
   8024 {
   8025 	/**
   8026 	 * Ok this is the generic chunk service queue. we must do the
   8027 	 * following: - Service the stream queue that is next, moving any
   8028 	 * message (note I must get a complete message i.e. FIRST/MIDDLE and
   8029 	 * LAST to the out queue in one pass) and assigning TSN's - Check to
   8030 	 * see if the cwnd/rwnd allows any output, if so we go ahead and
   8031 	 * fomulate and send the low level chunks. Making sure to combine
   8032 	 * any control in the control chunk queue also.
   8033 	 */
   8034 	struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
   8035 	struct mbuf *outchain, *endoutchain;
   8036 	struct sctp_tmit_chunk *chk, *nchk;
   8037 
   8038 	/* temp arrays for unlinking */
   8039 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
   8040 	int no_fragmentflg, error;
   8041 	unsigned int max_rwnd_per_dest, max_send_per_dest;
   8042 	int one_chunk, hbflag, skip_data_for_this_net;
   8043 	int asconf, cookie, no_out_cnt;
   8044 	int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
   8045 	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
   8046 	int tsns_sent = 0;
   8047 	uint32_t auth_offset = 0;
   8048 	struct sctp_auth_chunk *auth = NULL;
   8049 	uint16_t auth_keyid;
   8050 	int override_ok = 1;
   8051 	int skip_fill_up = 0;
   8052 	int data_auth_reqd = 0;
   8053 	/* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
   8054 	   the destination. */
   8055 	int quit_now = 0;
   8056 
   8057 #if defined(__APPLE__)
   8058 	if (so_locked) {
   8059 		sctp_lock_assert(SCTP_INP_SO(inp));
   8060 	} else {
   8061 		sctp_unlock_assert(SCTP_INP_SO(inp));
   8062 	}
   8063 #endif
   8064 	*num_out = 0;
   8065 	auth_keyid = stcb->asoc.authinfo.active_keyid;
   8066 
   8067 	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
   8068 	    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
   8069 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
   8070 		eeor_mode = 1;
   8071 	} else {
   8072 		eeor_mode = 0;
   8073 	}
   8074 	ctl_cnt = no_out_cnt = asconf = cookie = 0;
   8075 	/*
   8076 	 * First lets prime the pump. For each destination, if there is room
   8077 	 * in the flight size, attempt to pull an MTU's worth out of the
   8078 	 * stream queues into the general send_queue
   8079 	 */
   8080 #ifdef SCTP_AUDITING_ENABLED
   8081 	sctp_audit_log(0xC2, 2);
   8082 #endif
   8083 	SCTP_TCB_LOCK_ASSERT(stcb);
   8084 	hbflag = 0;
   8085 	if ((control_only) || (asoc->stream_reset_outstanding))
   8086 		no_data_chunks = 1;
   8087 	else
   8088 		no_data_chunks = 0;
   8089 
   8090 	/* Nothing to possible to send? */
   8091 	if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
   8092 	     (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
   8093 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
   8094 	    TAILQ_EMPTY(&asoc->send_queue) &&
   8095 	    stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
   8096 	nothing_to_send:
   8097 		*reason_code = 9;
   8098 		return (0);
   8099 	}
   8100 	if (asoc->peers_rwnd == 0) {
   8101 		/* No room in peers rwnd */
   8102 		*reason_code = 1;
   8103 		if (asoc->total_flight > 0) {
   8104 			/* we are allowed one chunk in flight */
   8105 			no_data_chunks = 1;
   8106 		}
   8107 	}
   8108 	if (stcb->asoc.ecn_echo_cnt_onq) {
   8109 		/* Record where a sack goes, if any */
   8110 		if (no_data_chunks &&
   8111 		    (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
   8112 			/* Nothing but ECNe to send - we don't do that */
   8113 			goto nothing_to_send;
   8114 		}
   8115 		TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
   8116 			if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
   8117 			    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
   8118 				sack_goes_to = chk->whoTo;
   8119 				break;
   8120 			}
   8121 		}
   8122 	}
   8123 	max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
   8124 	if (stcb->sctp_socket)
   8125 		max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
   8126 	else
   8127 		max_send_per_dest = 0;
   8128 	if (no_data_chunks == 0) {
   8129 		/* How many non-directed chunks are there? */
   8130 		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
   8131 			if (chk->whoTo == NULL) {
   8132 				/* We already have non-directed
   8133 				 * chunks on the queue, no need
   8134 				 * to do a fill-up.
   8135 				 */
   8136 				skip_fill_up = 1;
   8137 				break;
   8138 			}
   8139 		}
   8140 
   8141 	}
   8142 	if ((no_data_chunks == 0) &&
   8143 	    (skip_fill_up == 0) &&
   8144 	    (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
   8145 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   8146 			/*
   8147 			 * This for loop we are in takes in
   8148 			 * each net, if its's got space in cwnd and
   8149 			 * has data sent to it (when CMT is off) then it
   8150 			 * calls sctp_fill_outqueue for the net. This gets
   8151 			 * data on the send queue for that network.
   8152 			 *
   8153 			 * In sctp_fill_outqueue TSN's are assigned and
   8154 			 * data is copied out of the stream buffers. Note
   8155 			 * mostly copy by reference (we hope).
   8156 			 */
   8157 			net->window_probe = 0;
   8158 			if ((net != stcb->asoc.alternate) &&
   8159 			    ((net->dest_state & SCTP_ADDR_PF) ||
   8160 			     (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
   8161 			     (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
   8162 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   8163 					sctp_log_cwnd(stcb, net, 1,
   8164 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
   8165 				}
   8166 			        continue;
   8167 			}
   8168 			if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
   8169 			    (net->flight_size == 0)) {
   8170 				(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
   8171 			}
   8172 			if (net->flight_size >= net->cwnd) {
   8173 				/* skip this network, no room - can't fill */
   8174 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   8175 					sctp_log_cwnd(stcb, net, 3,
   8176 						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
   8177 				}
   8178 				continue;
   8179 			}
   8180 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   8181 				sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
   8182 			}
   8183 			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
   8184 			if (quit_now) {
   8185 				/* memory alloc failure */
   8186 				no_data_chunks = 1;
   8187 				break;
   8188 			}
   8189 		}
   8190 	}
   8191 	/* now service each destination and send out what we can for it */
   8192 	/* Nothing to send? */
   8193 	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
   8194 	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
   8195 	    TAILQ_EMPTY(&asoc->send_queue)) {
   8196 		*reason_code = 8;
   8197 		return (0);
   8198 	}
   8199 
   8200 	if (asoc->sctp_cmt_on_off > 0) {
   8201 		/* get the last start point */
   8202 		start_at = asoc->last_net_cmt_send_started;
   8203 		if (start_at == NULL) {
   8204 			/* null so to beginning */
   8205 			start_at = TAILQ_FIRST(&asoc->nets);
   8206 		} else {
   8207 			start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
   8208 			if (start_at == NULL) {
   8209 				start_at = TAILQ_FIRST(&asoc->nets);
   8210 			}
   8211 		}
   8212 		asoc->last_net_cmt_send_started = start_at;
   8213 	} else {
   8214 		start_at = TAILQ_FIRST(&asoc->nets);
   8215 	}
   8216 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
   8217 		if (chk->whoTo == NULL) {
   8218 			if (asoc->alternate) {
   8219 				chk->whoTo = asoc->alternate;
   8220 			} else {
   8221 				chk->whoTo = asoc->primary_destination;
   8222 			}
   8223 			atomic_add_int(&chk->whoTo->ref_count, 1);
   8224 		}
   8225 	}
   8226 	old_start_at = NULL;
   8227 again_one_more_time:
   8228 	for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
   8229 		/* how much can we send? */
   8230 		/* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
   8231 		if (old_start_at && (old_start_at == net)) {
   8232 			/* through list ocmpletely. */
   8233 			break;
   8234 		}
   8235 		tsns_sent = 0xa;
   8236 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
   8237 		    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
   8238 		    (net->flight_size >= net->cwnd)) {
   8239 			/* Nothing on control or asconf and flight is full, we can skip
   8240 			 * even in the CMT case.
   8241 			 */
   8242 			continue;
   8243 		}
   8244 		bundle_at = 0;
   8245 		endoutchain = outchain = NULL;
   8246 		no_fragmentflg = 1;
   8247 		one_chunk = 0;
   8248 		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
   8249 			skip_data_for_this_net = 1;
   8250 		} else {
   8251 			skip_data_for_this_net = 0;
   8252 		}
   8253 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) || defined(__APPLE__))
   8254 		if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
   8255 			/*
   8256 			 * if we have a route and an ifp check to see if we
   8257 			 * have room to send to this guy
   8258 			 */
   8259 			struct ifnet *ifp;
   8260 
   8261 			ifp = net->ro.ro_rt->rt_ifp;
   8262 			if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
   8263 				SCTP_STAT_INCR(sctps_ifnomemqueued);
   8264 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
   8265 					sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
   8266 				}
   8267 				continue;
   8268 			}
   8269 		}
   8270 #endif
   8271 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
   8272 #ifdef INET
   8273 		case AF_INET:
   8274 			mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
   8275 			break;
   8276 #endif
   8277 #ifdef INET6
   8278 		case AF_INET6:
   8279 			mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
   8280 			break;
   8281 #endif
   8282 #if defined(__Userspace__)
   8283 		case AF_CONN:
   8284 			mtu = net->mtu - sizeof(struct sctphdr);
   8285 			break;
   8286 #endif
   8287 		default:
   8288 			/* TSNH */
   8289 			mtu = net->mtu;
   8290 			break;
   8291 		}
   8292 		mx_mtu = mtu;
   8293 		to_out = 0;
   8294 		if (mtu > asoc->peers_rwnd) {
   8295 			if (asoc->total_flight > 0) {
   8296 				/* We have a packet in flight somewhere */
   8297 				r_mtu = asoc->peers_rwnd;
   8298 			} else {
   8299 				/* We are always allowed to send one MTU out */
   8300 				one_chunk = 1;
   8301 				r_mtu = mtu;
   8302 			}
   8303 		} else {
   8304 			r_mtu = mtu;
   8305 		}
   8306 		/************************/
   8307 		/* ASCONF transmission */
   8308 		/************************/
   8309 		/* Now first lets go through the asconf queue */
   8310 		TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
   8311 			if (chk->rec.chunk_id.id != SCTP_ASCONF) {
   8312 				continue;
   8313 			}
   8314 			if (chk->whoTo == NULL) {
   8315 				if (asoc->alternate == NULL) {
   8316 					if (asoc->primary_destination != net) {
   8317 						break;
   8318 					}
   8319 				} else {
   8320 					if (asoc->alternate != net) {
   8321 						break;
   8322 					}
   8323 				}
   8324 			} else {
   8325 				if (chk->whoTo != net) {
   8326 					break;
   8327 				}
   8328 			}
   8329 			if (chk->data == NULL) {
   8330 				break;
   8331 			}
   8332 			if (chk->sent != SCTP_DATAGRAM_UNSENT &&
   8333 			    chk->sent != SCTP_DATAGRAM_RESEND) {
   8334 				break;
   8335 			}
   8336 			/*
   8337 			 * if no AUTH is yet included and this chunk
   8338 			 * requires it, make sure to account for it.  We
   8339 			 * don't apply the size until the AUTH chunk is
   8340 			 * actually added below in case there is no room for
   8341 			 * this chunk. NOTE: we overload the use of "omtu"
   8342 			 * here
   8343 			 */
   8344 			if ((auth == NULL) &&
   8345 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
   8346 							stcb->asoc.peer_auth_chunks)) {
   8347 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
   8348 			} else
   8349 				omtu = 0;
   8350 			/* Here we do NOT factor the r_mtu */
   8351 			if ((chk->send_size < (int)(mtu - omtu)) ||
   8352 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
   8353 				/*
   8354 				 * We probably should glom the mbuf chain
   8355 				 * from the chk->data for control but the
   8356 				 * problem is it becomes yet one more level
   8357 				 * of tracking to do if for some reason
   8358 				 * output fails. Then I have got to
   8359 				 * reconstruct the merged control chain.. el
   8360 				 * yucko.. for now we take the easy way and
   8361 				 * do the copy
   8362 				 */
   8363 				/*
   8364 				 * Add an AUTH chunk, if chunk requires it
   8365 				 * save the offset into the chain for AUTH
   8366 				 */
   8367 				if ((auth == NULL) &&
   8368 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
   8369 								 stcb->asoc.peer_auth_chunks))) {
   8370 					outchain = sctp_add_auth_chunk(outchain,
   8371 								       &endoutchain,
   8372 								       &auth,
   8373 								       &auth_offset,
   8374 								       stcb,
   8375 								       chk->rec.chunk_id.id);
   8376 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   8377 				}
   8378 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
   8379 							       (int)chk->rec.chunk_id.can_take_data,
   8380 							       chk->send_size, chk->copy_by_ref);
   8381 				if (outchain == NULL) {
   8382 					*reason_code = 8;
   8383 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   8384 					return (ENOMEM);
   8385 				}
   8386 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   8387 				/* update our MTU size */
   8388 				if (mtu > (chk->send_size + omtu))
   8389 					mtu -= (chk->send_size + omtu);
   8390 				else
   8391 					mtu = 0;
   8392 				to_out += (chk->send_size + omtu);
   8393 				/* Do clear IP_DF ? */
   8394 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
   8395 					no_fragmentflg = 0;
   8396 				}
   8397 				if (chk->rec.chunk_id.can_take_data)
   8398 					chk->data = NULL;
   8399 				/*
   8400 				 * set hb flag since we can
   8401 				 * use these for RTO
   8402 				 */
   8403 				hbflag = 1;
   8404 				asconf = 1;
   8405 				/*
   8406 				 * should sysctl this: don't
   8407 				 * bundle data with ASCONF
   8408 				 * since it requires AUTH
   8409 				 */
   8410 				no_data_chunks = 1;
   8411 				chk->sent = SCTP_DATAGRAM_SENT;
   8412 				if (chk->whoTo == NULL) {
   8413 					chk->whoTo = net;
   8414 					atomic_add_int(&net->ref_count, 1);
   8415 				}
   8416 				chk->snd_count++;
   8417 				if (mtu == 0) {
   8418 					/*
   8419 					 * Ok we are out of room but we can
   8420 					 * output without effecting the
   8421 					 * flight size since this little guy
   8422 					 * is a control only packet.
   8423 					 */
   8424 					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
   8425 					/*
   8426 					 * do NOT clear the asconf
   8427 					 * flag as it is used to do
   8428 					 * appropriate source address
   8429 					 * selection.
   8430 					 */
   8431 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
   8432 					                                        (struct sockaddr *)&net->ro._l_addr,
   8433 					                                        outchain, auth_offset, auth,
   8434 					                                        stcb->asoc.authinfo.active_keyid,
   8435 					                                        no_fragmentflg, 0, asconf,
   8436 					                                        inp->sctp_lport, stcb->rport,
   8437 					                                        htonl(stcb->asoc.peer_vtag),
   8438 					                                        net->port, NULL,
   8439 #if defined(__FreeBSD__)
   8440 					                                        0, 0,
   8441 #endif
   8442 					                                        so_locked))) {
   8443 						if (error == ENOBUFS) {
   8444 							asoc->ifp_had_enobuf = 1;
   8445 							SCTP_STAT_INCR(sctps_lowlevelerr);
   8446 						}
   8447 						if (from_where == 0) {
   8448 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
   8449 						}
   8450 						if (*now_filled == 0) {
   8451 							(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
   8452 							*now_filled = 1;
   8453 							*now = net->last_sent_time;
   8454 						} else {
   8455 							net->last_sent_time = *now;
   8456 						}
   8457 						hbflag = 0;
   8458 						/* error, could not output */
   8459 						if (error == EHOSTUNREACH) {
   8460 							/*
   8461 							 * Destination went
   8462 							 * unreachable
   8463 							 * during this send
   8464 							 */
   8465 							sctp_move_chunks_from_net(stcb, net);
   8466 						}
   8467 						*reason_code = 7;
   8468 						continue;
   8469 					} else
   8470 						asoc->ifp_had_enobuf = 0;
   8471 					if (*now_filled == 0) {
   8472 						(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
   8473 						*now_filled = 1;
   8474 						*now = net->last_sent_time;
   8475 					} else {
   8476 						net->last_sent_time = *now;
   8477 					}
   8478 					hbflag = 0;
   8479 					/*
   8480 					 * increase the number we sent, if a
   8481 					 * cookie is sent we don't tell them
   8482 					 * any was sent out.
   8483 					 */
   8484 					outchain = endoutchain = NULL;
   8485 					auth = NULL;
   8486 					auth_offset = 0;
   8487 					if (!no_out_cnt)
   8488 						*num_out += ctl_cnt;
   8489 					/* recalc a clean slate and setup */
   8490 					switch (net->ro._l_addr.sa.sa_family) {
   8491 #ifdef INET
   8492 						case AF_INET:
   8493 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
   8494 							break;
   8495 #endif
   8496 #ifdef INET6
   8497 						case AF_INET6:
   8498 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
   8499 							break;
   8500 #endif
   8501 #if defined(__Userspace__)
   8502 						case AF_CONN:
   8503 							mtu = net->mtu - sizeof(struct sctphdr);
   8504 							break;
   8505 #endif
   8506 						default:
   8507 							/* TSNH */
   8508 							mtu = net->mtu;
   8509 							break;
   8510 					}
   8511 					to_out = 0;
   8512 					no_fragmentflg = 1;
   8513 				}
   8514 			}
   8515 		}
   8516 		/************************/
   8517 		/* Control transmission */
   8518 		/************************/
   8519 		/* Now first lets go through the control queue */
   8520 		TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
   8521 			if ((sack_goes_to) &&
   8522 			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
   8523 			    (chk->whoTo != sack_goes_to)) {
   8524 				/*
   8525 				 * if we have a sack in queue, and we are looking at an
   8526 				 * ecn echo that is NOT queued to where the sack is going..
   8527 				 */
   8528 				if (chk->whoTo == net) {
   8529 					/* Don't transmit it to where its going (current net) */
   8530 					continue;
   8531 				} else if (sack_goes_to == net) {
   8532 					/* But do transmit it to this address */
   8533 					goto skip_net_check;
   8534 				}
   8535 			}
   8536 			if (chk->whoTo == NULL) {
   8537 				if (asoc->alternate == NULL) {
   8538 					if (asoc->primary_destination != net) {
   8539 						continue;
   8540 					}
   8541 				} else {
   8542 					if (asoc->alternate != net) {
   8543 						continue;
   8544 					}
   8545 				}
   8546 			} else {
   8547 				if (chk->whoTo != net) {
   8548 					continue;
   8549 				}
   8550 			}
   8551 		skip_net_check:
   8552 			if (chk->data == NULL) {
   8553 				continue;
   8554 			}
   8555 			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
   8556 				/*
   8557 				 * It must be unsent. Cookies and ASCONF's
   8558 				 * hang around but there timers will force
   8559 				 * when marked for resend.
   8560 				 */
   8561 				continue;
   8562 			}
   8563 			/*
   8564 			 * if no AUTH is yet included and this chunk
   8565 			 * requires it, make sure to account for it.  We
   8566 			 * don't apply the size until the AUTH chunk is
   8567 			 * actually added below in case there is no room for
   8568 			 * this chunk. NOTE: we overload the use of "omtu"
   8569 			 * here
   8570 			 */
   8571 			if ((auth == NULL) &&
   8572 			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
   8573 							stcb->asoc.peer_auth_chunks)) {
   8574 				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
   8575 			} else
   8576 				omtu = 0;
   8577 			/* Here we do NOT factor the r_mtu */
   8578 			if ((chk->send_size <= (int)(mtu - omtu)) ||
   8579 			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
   8580 				/*
   8581 				 * We probably should glom the mbuf chain
   8582 				 * from the chk->data for control but the
   8583 				 * problem is it becomes yet one more level
   8584 				 * of tracking to do if for some reason
   8585 				 * output fails. Then I have got to
   8586 				 * reconstruct the merged control chain.. el
   8587 				 * yucko.. for now we take the easy way and
   8588 				 * do the copy
   8589 				 */
   8590 				/*
   8591 				 * Add an AUTH chunk, if chunk requires it
   8592 				 * save the offset into the chain for AUTH
   8593 				 */
   8594 				if ((auth == NULL) &&
   8595 				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
   8596 								 stcb->asoc.peer_auth_chunks))) {
   8597 					outchain = sctp_add_auth_chunk(outchain,
   8598 								       &endoutchain,
   8599 								       &auth,
   8600 								       &auth_offset,
   8601 								       stcb,
   8602 								       chk->rec.chunk_id.id);
   8603 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   8604 				}
   8605 				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
   8606 							       (int)chk->rec.chunk_id.can_take_data,
   8607 							       chk->send_size, chk->copy_by_ref);
   8608 				if (outchain == NULL) {
   8609 					*reason_code = 8;
   8610 					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   8611 					return (ENOMEM);
   8612 				}
   8613 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   8614 				/* update our MTU size */
   8615 				if (mtu > (chk->send_size + omtu))
   8616 					mtu -= (chk->send_size + omtu);
   8617 				else
   8618 					mtu = 0;
   8619 				to_out += (chk->send_size + omtu);
   8620 				/* Do clear IP_DF ? */
   8621 				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
   8622 					no_fragmentflg = 0;
   8623 				}
   8624 				if (chk->rec.chunk_id.can_take_data)
   8625 					chk->data = NULL;
   8626 				/* Mark things to be removed, if needed */
   8627 				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
   8628 				    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
   8629 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
   8630 				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
   8631 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
   8632 				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
   8633 				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
   8634 				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
   8635 				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
   8636 				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
   8637 				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
   8638 					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
   8639 						hbflag = 1;
   8640 					}
   8641 					/* remove these chunks at the end */
   8642 					if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
   8643 					    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
   8644 						/* turn off the timer */
   8645 						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
   8646 							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
   8647 									inp, stcb, net, SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_1);
   8648 						}
   8649 					}
   8650 					ctl_cnt++;
   8651 				} else {
   8652 					/*
   8653 					 * Other chunks, since they have
   8654 					 * timers running (i.e. COOKIE)
   8655 					 * we just "trust" that it
   8656 					 * gets sent or retransmitted.
   8657 					 */
   8658 					ctl_cnt++;
   8659 					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
   8660 						cookie = 1;
   8661 						no_out_cnt = 1;
   8662 					} else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
   8663 						/*
   8664 						 * Increment ecne send count here
   8665 						 * this means we may be over-zealous in
   8666 						 * our counting if the send fails, but its
   8667 						 * the best place to do it (we used to do
   8668 						 * it in the queue of the chunk, but that did
   8669 						 * not tell how many times it was sent.
   8670 						 */
   8671 						SCTP_STAT_INCR(sctps_sendecne);
   8672 					}
   8673 					chk->sent = SCTP_DATAGRAM_SENT;
   8674 					if (chk->whoTo == NULL) {
   8675 						chk->whoTo = net;
   8676 						atomic_add_int(&net->ref_count, 1);
   8677 					}
   8678 					chk->snd_count++;
   8679 				}
   8680 				if (mtu == 0) {
   8681 					/*
   8682 					 * Ok we are out of room but we can
   8683 					 * output without effecting the
   8684 					 * flight size since this little guy
   8685 					 * is a control only packet.
   8686 					 */
   8687 					if (asconf) {
   8688 						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
   8689 						/*
   8690 						 * do NOT clear the asconf
   8691 						 * flag as it is used to do
   8692 						 * appropriate source address
   8693 						 * selection.
   8694 						 */
   8695 					}
   8696 					if (cookie) {
   8697 						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
   8698 						cookie = 0;
   8699 					}
   8700 					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
   8701 					                                        (struct sockaddr *)&net->ro._l_addr,
   8702 					                                        outchain,
   8703 					                                        auth_offset, auth,
   8704 					                                        stcb->asoc.authinfo.active_keyid,
   8705 					                                        no_fragmentflg, 0, asconf,
   8706 					                                        inp->sctp_lport, stcb->rport,
   8707 					                                        htonl(stcb->asoc.peer_vtag),
   8708 					                                        net->port, NULL,
   8709 #if defined(__FreeBSD__)
   8710 					                                        0, 0,
   8711 #endif
   8712 					                                        so_locked))) {
   8713 						if (error == ENOBUFS) {
   8714 							asoc->ifp_had_enobuf = 1;
   8715 							SCTP_STAT_INCR(sctps_lowlevelerr);
   8716 						}
   8717 						if (from_where == 0) {
   8718 							SCTP_STAT_INCR(sctps_lowlevelerrusr);
   8719 						}
   8720 						/* error, could not output */
   8721 						if (hbflag) {
   8722 							if (*now_filled == 0) {
   8723 								(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
   8724 								*now_filled = 1;
   8725 								*now = net->last_sent_time;
   8726 							} else {
   8727 								net->last_sent_time = *now;
   8728 							}
   8729 							hbflag = 0;
   8730 						}
   8731 						if (error == EHOSTUNREACH) {
   8732 							/*
   8733 							 * Destination went
   8734 							 * unreachable
   8735 							 * during this send
   8736 							 */
   8737 							sctp_move_chunks_from_net(stcb, net);
   8738 						}
   8739 						*reason_code = 7;
   8740 						continue;
   8741 					} else
   8742 						asoc->ifp_had_enobuf = 0;
   8743 					/* Only HB or ASCONF advances time */
   8744 					if (hbflag) {
   8745 						if (*now_filled == 0) {
   8746 							(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
   8747 							*now_filled = 1;
   8748 							*now = net->last_sent_time;
   8749 						} else {
   8750 							net->last_sent_time = *now;
   8751 						}
   8752 						hbflag = 0;
   8753 					}
   8754 					/*
   8755 					 * increase the number we sent, if a
   8756 					 * cookie is sent we don't tell them
   8757 					 * any was sent out.
   8758 					 */
   8759 					outchain = endoutchain = NULL;
   8760 					auth = NULL;
   8761 					auth_offset = 0;
   8762 					if (!no_out_cnt)
   8763 						*num_out += ctl_cnt;
   8764 					/* recalc a clean slate and setup */
   8765 					switch (net->ro._l_addr.sa.sa_family) {
   8766 #ifdef INET
   8767 						case AF_INET:
   8768 							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
   8769 							break;
   8770 #endif
   8771 #ifdef INET6
   8772 						case AF_INET6:
   8773 							mtu = net->mtu - SCTP_MIN_OVERHEAD;
   8774 							break;
   8775 #endif
   8776 #if defined(__Userspace__)
   8777 						case AF_CONN:
   8778 							mtu = net->mtu - sizeof(struct sctphdr);
   8779 							break;
   8780 #endif
   8781 						default:
   8782 							/* TSNH */
   8783 							mtu = net->mtu;
   8784 							break;
   8785 					}
   8786 					to_out = 0;
   8787 					no_fragmentflg = 1;
   8788 				}
   8789 			}
   8790 		}
   8791 		/* JRI: if dest is in PF state, do not send data to it */
   8792 		if ((asoc->sctp_cmt_on_off > 0) &&
   8793 		    (net != stcb->asoc.alternate) &&
   8794 		    (net->dest_state & SCTP_ADDR_PF)) {
   8795 			goto no_data_fill;
   8796 		}
   8797 		if (net->flight_size >= net->cwnd) {
   8798 			goto no_data_fill;
   8799 		}
   8800 		if ((asoc->sctp_cmt_on_off > 0) &&
   8801 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
   8802 		    (net->flight_size > max_rwnd_per_dest)) {
   8803 			goto no_data_fill;
   8804 		}
   8805 		/*
   8806 		 * We need a specific accounting for the usage of the
   8807 		 * send buffer. We also need to check the number of messages
   8808 		 * per net. For now, this is better than nothing and it
   8809 		 * disabled by default...
   8810 		 */
   8811 		if ((asoc->sctp_cmt_on_off > 0) &&
   8812 		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
   8813 		    (max_send_per_dest > 0) &&
   8814 		    (net->flight_size > max_send_per_dest)) {
   8815 			goto no_data_fill;
   8816 		}
   8817 		/*********************/
   8818 		/* Data transmission */
   8819 		/*********************/
   8820 		/*
   8821 		 * if AUTH for DATA is required and no AUTH has been added
   8822 		 * yet, account for this in the mtu now... if no data can be
   8823 		 * bundled, this adjustment won't matter anyways since the
   8824 		 * packet will be going out...
   8825 		 */
   8826 		data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
   8827 							     stcb->asoc.peer_auth_chunks);
   8828 		if (data_auth_reqd && (auth == NULL)) {
   8829 			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
   8830 		}
   8831 		/* now lets add any data within the MTU constraints */
   8832 		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
   8833 #ifdef INET
   8834 		case AF_INET:
   8835 			if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
   8836 				omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
   8837 			else
   8838 				omtu = 0;
   8839 			break;
   8840 #endif
   8841 #ifdef INET6
   8842 		case AF_INET6:
   8843 			if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
   8844 				omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
   8845 			else
   8846 				omtu = 0;
   8847 			break;
   8848 #endif
   8849 #if defined(__Userspace__)
   8850 		case AF_CONN:
   8851 			if (net->mtu > sizeof(struct sctphdr)) {
   8852 				omtu = net->mtu - sizeof(struct sctphdr);
   8853 			} else {
   8854 				omtu = 0;
   8855 			}
   8856 			break;
   8857 #endif
   8858 		default:
   8859 			/* TSNH */
   8860 			omtu = 0;
   8861 			break;
   8862 		}
   8863 		if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
   8864 		     (skip_data_for_this_net == 0)) ||
   8865 		    (cookie)) {
   8866 			TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
   8867 				if (no_data_chunks) {
   8868 					/* let only control go out */
   8869 					*reason_code = 1;
   8870 					break;
   8871 				}
   8872 				if (net->flight_size >= net->cwnd) {
   8873 					/* skip this net, no room for data */
   8874 					*reason_code = 2;
   8875 					break;
   8876 				}
   8877 				if ((chk->whoTo != NULL) &&
   8878 				    (chk->whoTo != net)) {
   8879 					/* Don't send the chunk on this net */
   8880 					continue;
   8881 				}
   8882 
   8883 				if (asoc->sctp_cmt_on_off == 0) {
   8884 					if ((asoc->alternate) &&
   8885 					    (asoc->alternate != net) &&
   8886 					    (chk->whoTo == NULL)) {
   8887 						continue;
   8888 					} else if ((net != asoc->primary_destination) &&
   8889 						   (asoc->alternate == NULL) &&
   8890 						   (chk->whoTo == NULL)) {
   8891 						continue;
   8892 					}
   8893 				}
   8894 				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
   8895 					/*-
   8896 					 * strange, we have a chunk that is
   8897 					 * to big for its destination and
   8898 					 * yet no fragment ok flag.
   8899 					 * Something went wrong when the
   8900 					 * PMTU changed...we did not mark
   8901 					 * this chunk for some reason?? I
   8902 					 * will fix it here by letting IP
   8903 					 * fragment it for now and printing
   8904 					 * a warning. This really should not
   8905 					 * happen ...
   8906 					 */
   8907 					SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
   8908 						    chk->send_size, mtu);
   8909 					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
   8910 				}
   8911 				if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
   8912 				    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
   8913 					struct sctp_data_chunk *dchkh;
   8914 
   8915 					dchkh = mtod(chk->data, struct sctp_data_chunk *);
   8916 					dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
   8917 				}
   8918 				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
   8919 				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
   8920 					/* ok we will add this one */
   8921 
   8922 					/*
   8923 					 * Add an AUTH chunk, if chunk
   8924 					 * requires it, save the offset into
   8925 					 * the chain for AUTH
   8926 					 */
   8927 					if (data_auth_reqd) {
   8928 						if (auth == NULL) {
   8929 							outchain = sctp_add_auth_chunk(outchain,
   8930 										       &endoutchain,
   8931 										       &auth,
   8932 										       &auth_offset,
   8933 										       stcb,
   8934 										       SCTP_DATA);
   8935 							auth_keyid = chk->auth_keyid;
   8936 							override_ok = 0;
   8937 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   8938 						} else if (override_ok) {
   8939 							/* use this data's keyid */
   8940 							auth_keyid = chk->auth_keyid;
   8941 							override_ok = 0;
   8942 						} else if (auth_keyid != chk->auth_keyid) {
   8943 							/* different keyid, so done bundling */
   8944 							break;
   8945 						}
   8946 					}
   8947 					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
   8948 								       chk->send_size, chk->copy_by_ref);
   8949 					if (outchain == NULL) {
   8950 						SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
   8951 						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   8952 							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
   8953 						}
   8954 						*reason_code = 3;
   8955 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   8956 						return (ENOMEM);
   8957 					}
   8958 					/* upate our MTU size */
   8959 					/* Do clear IP_DF ? */
   8960 					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
   8961 						no_fragmentflg = 0;
   8962 					}
   8963 					/* unsigned subtraction of mtu */
   8964 					if (mtu > chk->send_size)
   8965 						mtu -= chk->send_size;
   8966 					else
   8967 						mtu = 0;
   8968 					/* unsigned subtraction of r_mtu */
   8969 					if (r_mtu > chk->send_size)
   8970 						r_mtu -= chk->send_size;
   8971 					else
   8972 						r_mtu = 0;
   8973 
   8974 					to_out += chk->send_size;
   8975 					if ((to_out > mx_mtu) && no_fragmentflg) {
   8976 #ifdef INVARIANTS
   8977 						panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
   8978 #else
   8979 						SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
   8980 							    mx_mtu, to_out);
   8981 #endif
   8982 					}
   8983 					chk->window_probe = 0;
   8984 					data_list[bundle_at++] = chk;
   8985 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
   8986 						break;
   8987 					}
   8988 					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
   8989 						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
   8990 							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
   8991 						} else {
   8992 							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
   8993 						}
   8994 						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
   8995 						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
   8996 							/* Count number of user msg's that were fragmented
   8997 							 * we do this by counting when we see a LAST fragment
   8998 							 * only.
   8999 							 */
   9000 							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
   9001 					}
   9002 					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
   9003 						if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
   9004 							data_list[0]->window_probe = 1;
   9005 							net->window_probe = 1;
   9006 						}
   9007 						break;
   9008 					}
   9009 				} else {
   9010 					/*
   9011 					 * Must be sent in order of the
   9012 					 * TSN's (on a network)
   9013 					 */
   9014 					break;
   9015 				}
   9016 			}	/* for (chunk gather loop for this net) */
   9017 		}		/* if asoc.state OPEN */
   9018 	no_data_fill:
   9019 		/* Is there something to send for this destination? */
   9020 		if (outchain) {
   9021 			/* We may need to start a control timer or two */
   9022 			if (asconf) {
   9023 				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
   9024 						 stcb, net);
   9025 				/*
   9026 				 * do NOT clear the asconf flag as it is used
   9027 				 * to do appropriate source address selection.
   9028 				 */
   9029 			}
   9030 			if (cookie) {
   9031 				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
   9032 				cookie = 0;
   9033 			}
   9034 			/* must start a send timer if data is being sent */
   9035 			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
   9036 				/*
   9037 				 * no timer running on this destination
   9038 				 * restart it.
   9039 				 */
   9040 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
   9041 			}
   9042 			/* Now send it, if there is anything to send :> */
   9043 			if ((error = sctp_lowlevel_chunk_output(inp,
   9044 			                                        stcb,
   9045 			                                        net,
   9046 			                                        (struct sockaddr *)&net->ro._l_addr,
   9047 			                                        outchain,
   9048 			                                        auth_offset,
   9049 			                                        auth,
   9050 			                                        auth_keyid,
   9051 			                                        no_fragmentflg,
   9052 			                                        bundle_at,
   9053 			                                        asconf,
   9054 			                                        inp->sctp_lport, stcb->rport,
   9055 			                                        htonl(stcb->asoc.peer_vtag),
   9056 			                                        net->port, NULL,
   9057 #if defined(__FreeBSD__)
   9058 			                                        0, 0,
   9059 #endif
   9060 			                                        so_locked))) {
   9061 				/* error, we could not output */
   9062 				if (error == ENOBUFS) {
   9063 					SCTP_STAT_INCR(sctps_lowlevelerr);
   9064 					asoc->ifp_had_enobuf = 1;
   9065 				}
   9066 				if (from_where == 0) {
   9067 					SCTP_STAT_INCR(sctps_lowlevelerrusr);
   9068 				}
   9069 				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
   9070 				if (hbflag) {
   9071 					if (*now_filled == 0) {
   9072 						(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
   9073 						*now_filled = 1;
   9074 						*now = net->last_sent_time;
   9075 					} else {
   9076 						net->last_sent_time = *now;
   9077 					}
   9078 					hbflag = 0;
   9079 				}
   9080 				if (error == EHOSTUNREACH) {
   9081 					/*
   9082 					 * Destination went unreachable
   9083 					 * during this send
   9084 					 */
   9085 					sctp_move_chunks_from_net(stcb, net);
   9086 				}
   9087 				*reason_code = 6;
   9088 				/*-
   9089 				 * I add this line to be paranoid. As far as
   9090 				 * I can tell the continue, takes us back to
   9091 				 * the top of the for, but just to make sure
   9092 				 * I will reset these again here.
   9093 				 */
   9094 				ctl_cnt = bundle_at = 0;
   9095 				continue; /* This takes us back to the for() for the nets. */
   9096 			} else {
   9097 				asoc->ifp_had_enobuf = 0;
   9098 			}
   9099 			endoutchain = NULL;
   9100 			auth = NULL;
   9101 			auth_offset = 0;
   9102 			if (bundle_at || hbflag) {
   9103 				/* For data/asconf and hb set time */
   9104 				if (*now_filled == 0) {
   9105 					(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
   9106 					*now_filled = 1;
   9107 					*now = net->last_sent_time;
   9108 				} else {
   9109 					net->last_sent_time = *now;
   9110 				}
   9111 			}
   9112 			if (!no_out_cnt) {
   9113 				*num_out += (ctl_cnt + bundle_at);
   9114 			}
   9115 			if (bundle_at) {
   9116 				/* setup for a RTO measurement */
   9117 				tsns_sent = data_list[0]->rec.data.TSN_seq;
   9118 				/* fill time if not already filled */
   9119 				if (*now_filled == 0) {
   9120 					(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
   9121 					*now_filled = 1;
   9122 					*now = asoc->time_last_sent;
   9123 				} else {
   9124 					asoc->time_last_sent = *now;
   9125 				}
   9126 				if (net->rto_needed) {
   9127 					data_list[0]->do_rtt = 1;
   9128 					net->rto_needed = 0;
   9129 				}
   9130 				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
   9131 				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
   9132 			}
   9133 			if (one_chunk) {
   9134 				break;
   9135 			}
   9136 		}
   9137 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   9138 			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
   9139 		}
   9140 	}
   9141 	if (old_start_at == NULL) {
   9142 		old_start_at = start_at;
   9143 		start_at = TAILQ_FIRST(&asoc->nets);
   9144 		if (old_start_at)
   9145 			goto again_one_more_time;
   9146 	}
   9147 
   9148 	/*
   9149 	 * At the end there should be no NON timed chunks hanging on this
   9150 	 * queue.
   9151 	 */
   9152 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   9153 		sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
   9154 	}
   9155 	if ((*num_out == 0) && (*reason_code == 0)) {
   9156 		*reason_code = 4;
   9157 	} else {
   9158 		*reason_code = 5;
   9159 	}
   9160 	sctp_clean_up_ctl(stcb, asoc, so_locked);
   9161 	return (0);
   9162 }
   9163 
   9164 void
   9165 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
   9166 {
   9167 	/*-
   9168 	 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
   9169 	 * the control chunk queue.
   9170 	 */
   9171 	struct sctp_chunkhdr *hdr;
   9172 	struct sctp_tmit_chunk *chk;
   9173 	struct mbuf *mat;
   9174 
   9175 	SCTP_TCB_LOCK_ASSERT(stcb);
   9176 	sctp_alloc_a_chunk(stcb, chk);
   9177 	if (chk == NULL) {
   9178 		/* no memory */
   9179 		sctp_m_freem(op_err);
   9180 		return;
   9181 	}
   9182 	chk->copy_by_ref = 0;
   9183 	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
   9184 	if (op_err == NULL) {
   9185 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   9186 		return;
   9187 	}
   9188 	chk->send_size = 0;
   9189 	mat = op_err;
   9190 	while (mat != NULL) {
   9191 		chk->send_size += SCTP_BUF_LEN(mat);
   9192 		mat = SCTP_BUF_NEXT(mat);
   9193 	}
   9194 	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
   9195 	chk->rec.chunk_id.can_take_data = 1;
   9196 	chk->sent = SCTP_DATAGRAM_UNSENT;
   9197 	chk->snd_count = 0;
   9198 	chk->flags = 0;
   9199 	chk->asoc = &stcb->asoc;
   9200 	chk->data = op_err;
   9201 	chk->whoTo = NULL;
   9202 	hdr = mtod(op_err, struct sctp_chunkhdr *);
   9203 	hdr->chunk_type = SCTP_OPERATION_ERROR;
   9204 	hdr->chunk_flags = 0;
   9205 	hdr->chunk_length = htons(chk->send_size);
   9206 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
   9207 	    chk,
   9208 	    sctp_next);
   9209 	chk->asoc->ctrl_queue_cnt++;
   9210 }
   9211 
   9212 int
   9213 sctp_send_cookie_echo(struct mbuf *m,
   9214     int offset,
   9215     struct sctp_tcb *stcb,
   9216     struct sctp_nets *net)
   9217 {
   9218 	/*-
   9219 	 * pull out the cookie and put it at the front of the control chunk
   9220 	 * queue.
   9221 	 */
   9222 	int at;
   9223 	struct mbuf *cookie;
   9224 	struct sctp_paramhdr parm, *phdr;
   9225 	struct sctp_chunkhdr *hdr;
   9226 	struct sctp_tmit_chunk *chk;
   9227 	uint16_t ptype, plen;
   9228 
   9229 	/* First find the cookie in the param area */
   9230 	cookie = NULL;
   9231 	at = offset + sizeof(struct sctp_init_chunk);
   9232 
   9233 	SCTP_TCB_LOCK_ASSERT(stcb);
   9234 	do {
   9235 		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
   9236 		if (phdr == NULL) {
   9237 			return (-3);
   9238 		}
   9239 		ptype = ntohs(phdr->param_type);
   9240 		plen = ntohs(phdr->param_length);
   9241 		if (ptype == SCTP_STATE_COOKIE) {
   9242 			int pad;
   9243 
   9244 			/* found the cookie */
   9245 			if ((pad = (plen % 4))) {
   9246 				plen += 4 - pad;
   9247 			}
   9248 			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
   9249 			if (cookie == NULL) {
   9250 				/* No memory */
   9251 				return (-2);
   9252 			}
   9253 #ifdef SCTP_MBUF_LOGGING
   9254 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   9255 				struct mbuf *mat;
   9256 
   9257 				for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
   9258 					if (SCTP_BUF_IS_EXTENDED(mat)) {
   9259 						sctp_log_mb(mat, SCTP_MBUF_ICOPY);
   9260 					}
   9261 				}
   9262 			}
   9263 #endif
   9264 			break;
   9265 		}
   9266 		at += SCTP_SIZE32(plen);
   9267 	} while (phdr);
   9268 	if (cookie == NULL) {
   9269 		/* Did not find the cookie */
   9270 		return (-3);
   9271 	}
   9272 	/* ok, we got the cookie lets change it into a cookie echo chunk */
   9273 
   9274 	/* first the change from param to cookie */
   9275 	hdr = mtod(cookie, struct sctp_chunkhdr *);
   9276 	hdr->chunk_type = SCTP_COOKIE_ECHO;
   9277 	hdr->chunk_flags = 0;
   9278 	/* get the chunk stuff now and place it in the FRONT of the queue */
   9279 	sctp_alloc_a_chunk(stcb, chk);
   9280 	if (chk == NULL) {
   9281 		/* no memory */
   9282 		sctp_m_freem(cookie);
   9283 		return (-5);
   9284 	}
   9285 	chk->copy_by_ref = 0;
   9286 	chk->send_size = plen;
   9287 	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
   9288 	chk->rec.chunk_id.can_take_data = 0;
   9289 	chk->sent = SCTP_DATAGRAM_UNSENT;
   9290 	chk->snd_count = 0;
   9291 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
   9292 	chk->asoc = &stcb->asoc;
   9293 	chk->data = cookie;
   9294 	chk->whoTo = net;
   9295 	atomic_add_int(&chk->whoTo->ref_count, 1);
   9296 	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
   9297 	chk->asoc->ctrl_queue_cnt++;
   9298 	return (0);
   9299 }
   9300 
   9301 void
   9302 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
   9303     struct mbuf *m,
   9304     int offset,
   9305     int chk_length,
   9306     struct sctp_nets *net)
   9307 {
   9308 	/*
   9309 	 * take a HB request and make it into a HB ack and send it.
   9310 	 */
   9311 	struct mbuf *outchain;
   9312 	struct sctp_chunkhdr *chdr;
   9313 	struct sctp_tmit_chunk *chk;
   9314 
   9315 
   9316 	if (net == NULL)
   9317 		/* must have a net pointer */
   9318 		return;
   9319 
   9320 	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
   9321 	if (outchain == NULL) {
   9322 		/* gak out of memory */
   9323 		return;
   9324 	}
   9325 #ifdef SCTP_MBUF_LOGGING
   9326 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   9327 		struct mbuf *mat;
   9328 
   9329 		for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
   9330 			if (SCTP_BUF_IS_EXTENDED(mat)) {
   9331 				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
   9332 			}
   9333 		}
   9334 	}
   9335 #endif
   9336 	chdr = mtod(outchain, struct sctp_chunkhdr *);
   9337 	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
   9338 	chdr->chunk_flags = 0;
   9339 	if (chk_length % 4) {
   9340 		/* need pad */
   9341 		uint32_t cpthis = 0;
   9342 		int padlen;
   9343 
   9344 		padlen = 4 - (chk_length % 4);
   9345 		m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
   9346 	}
   9347 	sctp_alloc_a_chunk(stcb, chk);
   9348 	if (chk == NULL) {
   9349 		/* no memory */
   9350 		sctp_m_freem(outchain);
   9351 		return;
   9352 	}
   9353 	chk->copy_by_ref = 0;
   9354 	chk->send_size = chk_length;
   9355 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
   9356 	chk->rec.chunk_id.can_take_data = 1;
   9357 	chk->sent = SCTP_DATAGRAM_UNSENT;
   9358 	chk->snd_count = 0;
   9359 	chk->flags = 0;
   9360 	chk->asoc = &stcb->asoc;
   9361 	chk->data = outchain;
   9362 	chk->whoTo = net;
   9363 	atomic_add_int(&chk->whoTo->ref_count, 1);
   9364 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
   9365 	chk->asoc->ctrl_queue_cnt++;
   9366 }
   9367 
   9368 void
   9369 sctp_send_cookie_ack(struct sctp_tcb *stcb)
   9370 {
   9371 	/* formulate and queue a cookie-ack back to sender */
   9372 	struct mbuf *cookie_ack;
   9373 	struct sctp_chunkhdr *hdr;
   9374 	struct sctp_tmit_chunk *chk;
   9375 
   9376 	SCTP_TCB_LOCK_ASSERT(stcb);
   9377 
   9378 	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
   9379 	if (cookie_ack == NULL) {
   9380 		/* no mbuf's */
   9381 		return;
   9382 	}
   9383 	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
   9384 	sctp_alloc_a_chunk(stcb, chk);
   9385 	if (chk == NULL) {
   9386 		/* no memory */
   9387 		sctp_m_freem(cookie_ack);
   9388 		return;
   9389 	}
   9390 	chk->copy_by_ref = 0;
   9391 	chk->send_size = sizeof(struct sctp_chunkhdr);
   9392 	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
   9393 	chk->rec.chunk_id.can_take_data = 1;
   9394 	chk->sent = SCTP_DATAGRAM_UNSENT;
   9395 	chk->snd_count = 0;
   9396 	chk->flags = 0;
   9397 	chk->asoc = &stcb->asoc;
   9398 	chk->data = cookie_ack;
   9399 	if (chk->asoc->last_control_chunk_from != NULL) {
   9400 		chk->whoTo = chk->asoc->last_control_chunk_from;
   9401 		atomic_add_int(&chk->whoTo->ref_count, 1);
   9402 	} else {
   9403 		chk->whoTo = NULL;
   9404 	}
   9405 	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
   9406 	hdr->chunk_type = SCTP_COOKIE_ACK;
   9407 	hdr->chunk_flags = 0;
   9408 	hdr->chunk_length = htons(chk->send_size);
   9409 	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
   9410 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
   9411 	chk->asoc->ctrl_queue_cnt++;
   9412 	return;
   9413 }
   9414 
   9415 
   9416 void
   9417 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
   9418 {
   9419 	/* formulate and queue a SHUTDOWN-ACK back to the sender */
   9420 	struct mbuf *m_shutdown_ack;
   9421 	struct sctp_shutdown_ack_chunk *ack_cp;
   9422 	struct sctp_tmit_chunk *chk;
   9423 
   9424 	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
   9425 	if (m_shutdown_ack == NULL) {
   9426 		/* no mbuf's */
   9427 		return;
   9428 	}
   9429 	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
   9430 	sctp_alloc_a_chunk(stcb, chk);
   9431 	if (chk == NULL) {
   9432 		/* no memory */
   9433 		sctp_m_freem(m_shutdown_ack);
   9434 		return;
   9435 	}
   9436 	chk->copy_by_ref = 0;
   9437 	chk->send_size = sizeof(struct sctp_chunkhdr);
   9438 	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
   9439 	chk->rec.chunk_id.can_take_data = 1;
   9440 	chk->sent = SCTP_DATAGRAM_UNSENT;
   9441 	chk->snd_count = 0;
   9442 	chk->flags = 0;
   9443 	chk->asoc = &stcb->asoc;
   9444 	chk->data = m_shutdown_ack;
   9445 	chk->whoTo = net;
   9446 	if (chk->whoTo) {
   9447 		atomic_add_int(&chk->whoTo->ref_count, 1);
   9448 	}
   9449 	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
   9450 	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
   9451 	ack_cp->ch.chunk_flags = 0;
   9452 	ack_cp->ch.chunk_length = htons(chk->send_size);
   9453 	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
   9454 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
   9455 	chk->asoc->ctrl_queue_cnt++;
   9456 	return;
   9457 }
   9458 
   9459 void
   9460 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
   9461 {
   9462 	/* formulate and queue a SHUTDOWN to the sender */
   9463 	struct mbuf *m_shutdown;
   9464 	struct sctp_shutdown_chunk *shutdown_cp;
   9465 	struct sctp_tmit_chunk *chk;
   9466 
   9467 	m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
   9468 	if (m_shutdown == NULL) {
   9469 		/* no mbuf's */
   9470 		return;
   9471 	}
   9472 	SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
   9473 	sctp_alloc_a_chunk(stcb, chk);
   9474 	if (chk == NULL) {
   9475 		/* no memory */
   9476 		sctp_m_freem(m_shutdown);
   9477 		return;
   9478 	}
   9479 	chk->copy_by_ref = 0;
   9480 	chk->send_size = sizeof(struct sctp_shutdown_chunk);
   9481 	chk->rec.chunk_id.id = SCTP_SHUTDOWN;
   9482 	chk->rec.chunk_id.can_take_data = 1;
   9483 	chk->sent = SCTP_DATAGRAM_UNSENT;
   9484 	chk->snd_count = 0;
   9485 	chk->flags = 0;
   9486 	chk->asoc = &stcb->asoc;
   9487 	chk->data = m_shutdown;
   9488 	chk->whoTo = net;
   9489 	if (chk->whoTo) {
   9490 		atomic_add_int(&chk->whoTo->ref_count, 1);
   9491 	}
   9492 	shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
   9493 	shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
   9494 	shutdown_cp->ch.chunk_flags = 0;
   9495 	shutdown_cp->ch.chunk_length = htons(chk->send_size);
   9496 	shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
   9497 	SCTP_BUF_LEN(m_shutdown) = chk->send_size;
   9498 	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
   9499 	chk->asoc->ctrl_queue_cnt++;
   9500 	return;
   9501 }
   9502 
   9503 void
   9504 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
   9505 {
   9506 	/*
   9507 	 * formulate and queue an ASCONF to the peer.
   9508 	 * ASCONF parameters should be queued on the assoc queue.
   9509 	 */
   9510 	struct sctp_tmit_chunk *chk;
   9511 	struct mbuf *m_asconf;
   9512 	int len;
   9513 
   9514 	SCTP_TCB_LOCK_ASSERT(stcb);
   9515 
   9516 	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
   9517 	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
   9518 		/* can't send a new one if there is one in flight already */
   9519 		return;
   9520 	}
   9521 
   9522 	/* compose an ASCONF chunk, maximum length is PMTU */
   9523 	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
   9524 	if (m_asconf == NULL) {
   9525 		return;
   9526 	}
   9527 
   9528 	sctp_alloc_a_chunk(stcb, chk);
   9529 	if (chk == NULL) {
   9530 		/* no memory */
   9531 		sctp_m_freem(m_asconf);
   9532 		return;
   9533 	}
   9534 
   9535 	chk->copy_by_ref = 0;
   9536 	chk->data = m_asconf;
   9537 	chk->send_size = len;
   9538 	chk->rec.chunk_id.id = SCTP_ASCONF;
   9539 	chk->rec.chunk_id.can_take_data = 0;
   9540 	chk->sent = SCTP_DATAGRAM_UNSENT;
   9541 	chk->snd_count = 0;
   9542 	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
   9543 	chk->asoc = &stcb->asoc;
   9544 	chk->whoTo = net;
   9545 	if (chk->whoTo) {
   9546 		atomic_add_int(&chk->whoTo->ref_count, 1);
   9547 	}
   9548 	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
   9549 	chk->asoc->ctrl_queue_cnt++;
   9550 	return;
   9551 }
   9552 
   9553 void
   9554 sctp_send_asconf_ack(struct sctp_tcb *stcb)
   9555 {
   9556 	/*
   9557 	 * formulate and queue a asconf-ack back to sender.
   9558 	 * the asconf-ack must be stored in the tcb.
   9559 	 */
   9560 	struct sctp_tmit_chunk *chk;
   9561 	struct sctp_asconf_ack *ack, *latest_ack;
   9562 	struct mbuf *m_ack;
   9563 	struct sctp_nets *net = NULL;
   9564 
   9565 	SCTP_TCB_LOCK_ASSERT(stcb);
   9566 	/* Get the latest ASCONF-ACK */
   9567 	latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
   9568 	if (latest_ack == NULL) {
   9569 		return;
   9570 	}
   9571 	if (latest_ack->last_sent_to != NULL &&
   9572 	    latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
   9573 		/* we're doing a retransmission */
   9574 		net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
   9575 		if (net == NULL) {
   9576 			/* no alternate */
   9577 			if (stcb->asoc.last_control_chunk_from == NULL) {
   9578 				if (stcb->asoc.alternate) {
   9579 					net = stcb->asoc.alternate;
   9580 				} else {
   9581 					net = stcb->asoc.primary_destination;
   9582 				}
   9583 			} else {
   9584 				net = stcb->asoc.last_control_chunk_from;
   9585 			}
   9586 		}
   9587 	} else {
   9588 		/* normal case */
   9589 		if (stcb->asoc.last_control_chunk_from == NULL) {
   9590 			if (stcb->asoc.alternate) {
   9591 				net = stcb->asoc.alternate;
   9592 			} else {
   9593 				net = stcb->asoc.primary_destination;
   9594 			}
   9595 		} else {
   9596 			net = stcb->asoc.last_control_chunk_from;
   9597 		}
   9598 	}
   9599 	latest_ack->last_sent_to = net;
   9600 
   9601 	TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
   9602 		if (ack->data == NULL) {
   9603 			continue;
   9604 		}
   9605 
   9606 		/* copy the asconf_ack */
   9607 		m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
   9608 		if (m_ack == NULL) {
   9609 			/* couldn't copy it */
   9610 			return;
   9611 		}
   9612 #ifdef SCTP_MBUF_LOGGING
   9613 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   9614 			struct mbuf *mat;
   9615 
   9616 			for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
   9617 				if (SCTP_BUF_IS_EXTENDED(mat)) {
   9618 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
   9619 				}
   9620 			}
   9621 		}
   9622 #endif
   9623 
   9624 		sctp_alloc_a_chunk(stcb, chk);
   9625 		if (chk == NULL) {
   9626 			/* no memory */
   9627 			if (m_ack)
   9628 				sctp_m_freem(m_ack);
   9629 			return;
   9630 		}
   9631 		chk->copy_by_ref = 0;
   9632 
   9633 		chk->whoTo = net;
   9634 		if (chk->whoTo) {
   9635 			atomic_add_int(&chk->whoTo->ref_count, 1);
   9636 		}
   9637 		chk->data = m_ack;
   9638 		chk->send_size = 0;
   9639 		/* Get size */
   9640 		chk->send_size = ack->len;
   9641 		chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
   9642 		chk->rec.chunk_id.can_take_data = 1;
   9643 		chk->sent = SCTP_DATAGRAM_UNSENT;
   9644 		chk->snd_count = 0;
   9645 		chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
   9646 		chk->asoc = &stcb->asoc;
   9647 
   9648 		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
   9649 		chk->asoc->ctrl_queue_cnt++;
   9650 	}
   9651 	return;
   9652 }
   9653 
   9654 
   9655 static int
   9656 sctp_chunk_retransmission(struct sctp_inpcb *inp,
   9657     struct sctp_tcb *stcb,
   9658     struct sctp_association *asoc,
   9659     int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
   9660 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   9661     SCTP_UNUSED
   9662 #endif
   9663     )
   9664 {
   9665 	/*-
   9666 	 * send out one MTU of retransmission. If fast_retransmit is
   9667 	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
   9668 	 * rwnd. For a Cookie or Asconf in the control chunk queue we
   9669 	 * retransmit them by themselves.
   9670 	 *
   9671 	 * For data chunks we will pick out the lowest TSN's in the sent_queue
   9672 	 * marked for resend and bundle them all together (up to a MTU of
   9673 	 * destination). The address to send to should have been
   9674 	 * selected/changed where the retransmission was marked (i.e. in FR
   9675 	 * or t3-timeout routines).
   9676 	 */
   9677 	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
   9678 	struct sctp_tmit_chunk *chk, *fwd;
   9679 	struct mbuf *m, *endofchain;
   9680 	struct sctp_nets *net = NULL;
   9681 	uint32_t tsns_sent = 0;
   9682 	int no_fragmentflg, bundle_at, cnt_thru;
   9683 	unsigned int mtu;
   9684 	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
   9685 	struct sctp_auth_chunk *auth = NULL;
   9686 	uint32_t auth_offset = 0;
   9687 	uint16_t auth_keyid;
   9688 	int override_ok = 1;
   9689 	int data_auth_reqd = 0;
   9690 	uint32_t dmtu = 0;
   9691 
   9692 #if defined(__APPLE__)
   9693 	if (so_locked) {
   9694 		sctp_lock_assert(SCTP_INP_SO(inp));
   9695 	} else {
   9696 		sctp_unlock_assert(SCTP_INP_SO(inp));
   9697 	}
   9698 #endif
   9699 	SCTP_TCB_LOCK_ASSERT(stcb);
   9700 	tmr_started = ctl_cnt = bundle_at = error = 0;
   9701 	no_fragmentflg = 1;
   9702 	fwd_tsn = 0;
   9703 	*cnt_out = 0;
   9704 	fwd = NULL;
   9705 	endofchain = m = NULL;
   9706 	auth_keyid = stcb->asoc.authinfo.active_keyid;
   9707 #ifdef SCTP_AUDITING_ENABLED
   9708 	sctp_audit_log(0xC3, 1);
   9709 #endif
   9710 	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
   9711 	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
   9712 		SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
   9713 			asoc->sent_queue_retran_cnt);
   9714 		asoc->sent_queue_cnt = 0;
   9715 		asoc->sent_queue_cnt_removeable = 0;
   9716 		/* send back 0/0 so we enter normal transmission */
   9717 		*cnt_out = 0;
   9718 		return (0);
   9719 	}
   9720 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
   9721 		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
   9722 		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
   9723 		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
   9724 			if (chk->sent != SCTP_DATAGRAM_RESEND) {
   9725 				continue;
   9726 			}
   9727 			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
   9728 				if (chk != asoc->str_reset) {
   9729 					/*
   9730 					 * not eligible for retran if its
   9731 					 * not ours
   9732 					 */
   9733 					continue;
   9734 				}
   9735 			}
   9736 			ctl_cnt++;
   9737 			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
   9738 				fwd_tsn = 1;
   9739 			}
   9740 			/*
   9741 			 * Add an AUTH chunk, if chunk requires it save the
   9742 			 * offset into the chain for AUTH
   9743 			 */
   9744 			if ((auth == NULL) &&
   9745 			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
   9746 							 stcb->asoc.peer_auth_chunks))) {
   9747 				m = sctp_add_auth_chunk(m, &endofchain,
   9748 							&auth, &auth_offset,
   9749 							stcb,
   9750 							chk->rec.chunk_id.id);
   9751 				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   9752 			}
   9753 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
   9754 			break;
   9755 		}
   9756 	}
   9757 	one_chunk = 0;
   9758 	cnt_thru = 0;
   9759 	/* do we have control chunks to retransmit? */
   9760 	if (m != NULL) {
   9761 		/* Start a timer no matter if we suceed or fail */
   9762 		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
   9763 			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
   9764 		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
   9765 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
   9766 		chk->snd_count++;	/* update our count */
   9767 		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
   9768 		                                        (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
   9769 		                                        auth_offset, auth, stcb->asoc.authinfo.active_keyid,
   9770 		                                        no_fragmentflg, 0, 0,
   9771 		                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
   9772 		                                        chk->whoTo->port, NULL,
   9773 #if defined(__FreeBSD__)
   9774 		                                        0, 0,
   9775 #endif
   9776 		                                        so_locked))) {
   9777 			SCTP_STAT_INCR(sctps_lowlevelerr);
   9778 			return (error);
   9779 		}
   9780 		endofchain = NULL;
   9781 		auth = NULL;
   9782 		auth_offset = 0;
   9783 		/*
   9784 		 * We don't want to mark the net->sent time here since this
   9785 		 * we use this for HB and retrans cannot measure RTT
   9786 		 */
   9787 		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
   9788 		*cnt_out += 1;
   9789 		chk->sent = SCTP_DATAGRAM_SENT;
   9790 		sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
   9791 		if (fwd_tsn == 0) {
   9792 			return (0);
   9793 		} else {
   9794 			/* Clean up the fwd-tsn list */
   9795 			sctp_clean_up_ctl(stcb, asoc, so_locked);
   9796 			return (0);
   9797 		}
   9798 	}
   9799 	/*
   9800 	 * Ok, it is just data retransmission we need to do or that and a
   9801 	 * fwd-tsn with it all.
   9802 	 */
   9803 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
   9804 		return (SCTP_RETRAN_DONE);
   9805 	}
   9806 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
   9807 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
   9808 		/* not yet open, resend the cookie and that is it */
   9809 		return (1);
   9810 	}
   9811 #ifdef SCTP_AUDITING_ENABLED
   9812 	sctp_auditing(20, inp, stcb, NULL);
   9813 #endif
   9814 	data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
   9815 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
   9816 		if (chk->sent != SCTP_DATAGRAM_RESEND) {
   9817 			/* No, not sent to this net or not ready for rtx */
   9818 			continue;
   9819 		}
   9820 		if (chk->data == NULL) {
   9821 			SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
   9822 			            chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
   9823 			continue;
   9824 		}
   9825 		if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
   9826 		    (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
   9827 			/* Gak, we have exceeded max unlucky retran, abort! */
   9828 			SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
   9829 				    chk->snd_count,
   9830 				    SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
   9831 			atomic_add_int(&stcb->asoc.refcnt, 1);
   9832 			sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
   9833 			SCTP_TCB_LOCK(stcb);
   9834 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
   9835 			return (SCTP_RETRAN_EXIT);
   9836 		}
   9837 		/* pick up the net */
   9838 		net = chk->whoTo;
   9839 		switch (net->ro._l_addr.sa.sa_family) {
   9840 #ifdef INET
   9841 			case AF_INET:
   9842 				mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
   9843 				break;
   9844 #endif
   9845 #ifdef INET6
   9846 			case AF_INET6:
   9847 				mtu = net->mtu - SCTP_MIN_OVERHEAD;
   9848 				break;
   9849 #endif
   9850 #if defined(__Userspace__)
   9851 			case AF_CONN:
   9852 				mtu = net->mtu - sizeof(struct sctphdr);
   9853 				break;
   9854 #endif
   9855 			default:
   9856 				/* TSNH */
   9857 				mtu = net->mtu;
   9858 				break;
   9859 		}
   9860 
   9861 		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
   9862 			/* No room in peers rwnd */
   9863 			uint32_t tsn;
   9864 
   9865 			tsn = asoc->last_acked_seq + 1;
   9866 			if (tsn == chk->rec.data.TSN_seq) {
   9867 				/*
   9868 				 * we make a special exception for this
   9869 				 * case. The peer has no rwnd but is missing
   9870 				 * the lowest chunk.. which is probably what
   9871 				 * is holding up the rwnd.
   9872 				 */
   9873 				goto one_chunk_around;
   9874 			}
   9875 			return (1);
   9876 		}
   9877 	one_chunk_around:
   9878 		if (asoc->peers_rwnd < mtu) {
   9879 			one_chunk = 1;
   9880 			if ((asoc->peers_rwnd == 0) &&
   9881 			    (asoc->total_flight == 0)) {
   9882 				chk->window_probe = 1;
   9883 				chk->whoTo->window_probe = 1;
   9884 			}
   9885 		}
   9886 #ifdef SCTP_AUDITING_ENABLED
   9887 		sctp_audit_log(0xC3, 2);
   9888 #endif
   9889 		bundle_at = 0;
   9890 		m = NULL;
   9891 		net->fast_retran_ip = 0;
   9892 		if (chk->rec.data.doing_fast_retransmit == 0) {
   9893 			/*
   9894 			 * if no FR in progress skip destination that have
   9895 			 * flight_size > cwnd.
   9896 			 */
   9897 			if (net->flight_size >= net->cwnd) {
   9898 				continue;
   9899 			}
   9900 		} else {
   9901 			/*
   9902 			 * Mark the destination net to have FR recovery
   9903 			 * limits put on it.
   9904 			 */
   9905 			*fr_done = 1;
   9906 			net->fast_retran_ip = 1;
   9907 		}
   9908 
   9909 		/*
   9910 		 * if no AUTH is yet included and this chunk requires it,
   9911 		 * make sure to account for it.  We don't apply the size
   9912 		 * until the AUTH chunk is actually added below in case
   9913 		 * there is no room for this chunk.
   9914 		 */
   9915 		if (data_auth_reqd && (auth == NULL)) {
   9916 			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
   9917 		} else
   9918 			dmtu = 0;
   9919 
   9920 		if ((chk->send_size <= (mtu - dmtu)) ||
   9921 		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
   9922 			/* ok we will add this one */
   9923 			if (data_auth_reqd) {
   9924 				if (auth == NULL) {
   9925 					m = sctp_add_auth_chunk(m,
   9926 								&endofchain,
   9927 								&auth,
   9928 								&auth_offset,
   9929 								stcb,
   9930 								SCTP_DATA);
   9931 					auth_keyid = chk->auth_keyid;
   9932 					override_ok = 0;
   9933 					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   9934 				} else if (override_ok) {
   9935 					auth_keyid = chk->auth_keyid;
   9936 					override_ok = 0;
   9937 				} else if (chk->auth_keyid != auth_keyid) {
   9938 					/* different keyid, so done bundling */
   9939 					break;
   9940 				}
   9941 			}
   9942 			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
   9943 			if (m == NULL) {
   9944 				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   9945 				return (ENOMEM);
   9946 			}
   9947 			/* Do clear IP_DF ? */
   9948 			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
   9949 				no_fragmentflg = 0;
   9950 			}
   9951 			/* upate our MTU size */
   9952 			if (mtu > (chk->send_size + dmtu))
   9953 				mtu -= (chk->send_size + dmtu);
   9954 			else
   9955 				mtu = 0;
   9956 			data_list[bundle_at++] = chk;
   9957 			if (one_chunk && (asoc->total_flight <= 0)) {
   9958 				SCTP_STAT_INCR(sctps_windowprobed);
   9959 			}
   9960 		}
   9961 		if (one_chunk == 0) {
   9962 			/*
   9963 			 * now are there anymore forward from chk to pick
   9964 			 * up?
   9965 			 */
   9966 			for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
   9967 				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
   9968 					/* Nope, not for retran */
   9969 					continue;
   9970 				}
   9971 				if (fwd->whoTo != net) {
   9972 					/* Nope, not the net in question */
   9973 					continue;
   9974 				}
   9975 				if (data_auth_reqd && (auth == NULL)) {
   9976 					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
   9977 				} else
   9978 					dmtu = 0;
   9979 				if (fwd->send_size <= (mtu - dmtu)) {
   9980 					if (data_auth_reqd) {
   9981 						if (auth == NULL) {
   9982 							m = sctp_add_auth_chunk(m,
   9983 										&endofchain,
   9984 										&auth,
   9985 										&auth_offset,
   9986 										stcb,
   9987 										SCTP_DATA);
   9988 							auth_keyid = fwd->auth_keyid;
   9989 							override_ok = 0;
   9990 							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   9991 						} else if (override_ok) {
   9992 							auth_keyid = fwd->auth_keyid;
   9993 							override_ok = 0;
   9994 						} else if (fwd->auth_keyid != auth_keyid) {
   9995 							/* different keyid, so done bundling */
   9996 							break;
   9997 						}
   9998 					}
   9999 					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
   10000 					if (m == NULL) {
   10001 						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   10002 						return (ENOMEM);
   10003 					}
   10004 					/* Do clear IP_DF ? */
   10005 					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
   10006 						no_fragmentflg = 0;
   10007 					}
   10008 					/* upate our MTU size */
   10009 					if (mtu > (fwd->send_size + dmtu))
   10010 						mtu -= (fwd->send_size + dmtu);
   10011 					else
   10012 						mtu = 0;
   10013 					data_list[bundle_at++] = fwd;
   10014 					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
   10015 						break;
   10016 					}
   10017 				} else {
   10018 					/* can't fit so we are done */
   10019 					break;
   10020 				}
   10021 			}
   10022 		}
   10023 		/* Is there something to send for this destination? */
   10024 		if (m) {
   10025 			/*
   10026 			 * No matter if we fail/or suceed we should start a
   10027 			 * timer. A failure is like a lost IP packet :-)
   10028 			 */
   10029 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   10030 				/*
   10031 				 * no timer running on this destination
   10032 				 * restart it.
   10033 				 */
   10034 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
   10035 				tmr_started = 1;
   10036 			}
   10037 			/* Now lets send it, if there is anything to send :> */
   10038 			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
   10039 			                                        (struct sockaddr *)&net->ro._l_addr, m,
   10040 			                                        auth_offset, auth, auth_keyid,
   10041 			                                        no_fragmentflg, 0, 0,
   10042 			                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
   10043 			                                        net->port, NULL,
   10044 #if defined(__FreeBSD__)
   10045 			                                        0, 0,
   10046 #endif
   10047 			                                        so_locked))) {
   10048 				/* error, we could not output */
   10049 				SCTP_STAT_INCR(sctps_lowlevelerr);
   10050 				return (error);
   10051 			}
   10052 			endofchain = NULL;
   10053 			auth = NULL;
   10054 			auth_offset = 0;
   10055 			/* For HB's */
   10056 			/*
   10057 			 * We don't want to mark the net->sent time here
   10058 			 * since this we use this for HB and retrans cannot
   10059 			 * measure RTT
   10060 			 */
   10061 			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
   10062 
   10063 			/* For auto-close */
   10064 			cnt_thru++;
   10065 			if (*now_filled == 0) {
   10066 				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
   10067 				*now = asoc->time_last_sent;
   10068 				*now_filled = 1;
   10069 			} else {
   10070 				asoc->time_last_sent = *now;
   10071 			}
   10072 			*cnt_out += bundle_at;
   10073 #ifdef SCTP_AUDITING_ENABLED
   10074 			sctp_audit_log(0xC4, bundle_at);
   10075 #endif
   10076 			if (bundle_at) {
   10077 				tsns_sent = data_list[0]->rec.data.TSN_seq;
   10078 			}
   10079 			for (i = 0; i < bundle_at; i++) {
   10080 				SCTP_STAT_INCR(sctps_sendretransdata);
   10081 				data_list[i]->sent = SCTP_DATAGRAM_SENT;
   10082 				/*
   10083 				 * When we have a revoked data, and we
   10084 				 * retransmit it, then we clear the revoked
   10085 				 * flag since this flag dictates if we
   10086 				 * subtracted from the fs
   10087 				 */
   10088 				if (data_list[i]->rec.data.chunk_was_revoked) {
   10089 					/* Deflate the cwnd */
   10090 					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
   10091 					data_list[i]->rec.data.chunk_was_revoked = 0;
   10092 				}
   10093 				data_list[i]->snd_count++;
   10094 				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
   10095 				/* record the time */
   10096 				data_list[i]->sent_rcv_time = asoc->time_last_sent;
   10097 				if (data_list[i]->book_size_scale) {
   10098 					/*
   10099 					 * need to double the book size on
   10100 					 * this one
   10101 					 */
   10102 					data_list[i]->book_size_scale = 0;
   10103 					/* Since we double the booksize, we must
   10104 					 * also double the output queue size, since this
   10105 					 * get shrunk when we free by this amount.
   10106 					 */
   10107 					atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
   10108 					data_list[i]->book_size *= 2;
   10109 
   10110 
   10111 				} else {
   10112 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
   10113 						sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
   10114 						      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
   10115 					}
   10116 					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
   10117 									    (uint32_t) (data_list[i]->send_size +
   10118 											SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
   10119 				}
   10120 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   10121 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
   10122 						       data_list[i]->whoTo->flight_size,
   10123 						       data_list[i]->book_size,
   10124 						       (uintptr_t)data_list[i]->whoTo,
   10125 						       data_list[i]->rec.data.TSN_seq);
   10126 				}
   10127 				sctp_flight_size_increase(data_list[i]);
   10128 				sctp_total_flight_increase(stcb, data_list[i]);
   10129 				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   10130 					/* SWS sender side engages */
   10131 					asoc->peers_rwnd = 0;
   10132 				}
   10133 				if ((i == 0) &&
   10134 				    (data_list[i]->rec.data.doing_fast_retransmit)) {
   10135 					SCTP_STAT_INCR(sctps_sendfastretrans);
   10136 					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
   10137 					    (tmr_started == 0)) {
   10138 						/*-
   10139 						 * ok we just fast-retrans'd
   10140 						 * the lowest TSN, i.e the
   10141 						 * first on the list. In
   10142 						 * this case we want to give
   10143 						 * some more time to get a
   10144 						 * SACK back without a
   10145 						 * t3-expiring.
   10146 						 */
   10147 						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
   10148 								SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_4);
   10149 						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
   10150 					}
   10151 				}
   10152 			}
   10153 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   10154 				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
   10155 			}
   10156 #ifdef SCTP_AUDITING_ENABLED
   10157 			sctp_auditing(21, inp, stcb, NULL);
   10158 #endif
   10159 		} else {
   10160 			/* None will fit */
   10161 			return (1);
   10162 		}
   10163 		if (asoc->sent_queue_retran_cnt <= 0) {
   10164 			/* all done we have no more to retran */
   10165 			asoc->sent_queue_retran_cnt = 0;
   10166 			break;
   10167 		}
   10168 		if (one_chunk) {
   10169 			/* No more room in rwnd */
   10170 			return (1);
   10171 		}
   10172 		/* stop the for loop here. we sent out a packet */
   10173 		break;
   10174 	}
   10175 	return (0);
   10176 }
   10177 
   10178 static void
   10179 sctp_timer_validation(struct sctp_inpcb *inp,
   10180     struct sctp_tcb *stcb,
   10181     struct sctp_association *asoc)
   10182 {
   10183 	struct sctp_nets *net;
   10184 
   10185 	/* Validate that a timer is running somewhere */
   10186 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   10187 		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   10188 			/* Here is a timer */
   10189 			return;
   10190 		}
   10191 	}
   10192 	SCTP_TCB_LOCK_ASSERT(stcb);
   10193 	/* Gak, we did not have a timer somewhere */
   10194 	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
   10195 	if (asoc->alternate) {
   10196 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
   10197 	} else {
   10198 		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
   10199 	}
   10200 	return;
   10201 }
   10202 
   10203 void
   10204 sctp_chunk_output (struct sctp_inpcb *inp,
   10205     struct sctp_tcb *stcb,
   10206     int from_where,
   10207     int so_locked
   10208 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   10209     SCTP_UNUSED
   10210 #endif
   10211     )
   10212 {
   10213 	/*-
   10214 	 * Ok this is the generic chunk service queue. we must do the
   10215 	 * following:
   10216 	 * - See if there are retransmits pending, if so we must
   10217 	 *   do these first.
   10218 	 * - Service the stream queue that is next, moving any
   10219 	 *   message (note I must get a complete message i.e.
   10220 	 *   FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
   10221 	 *   TSN's
   10222 	 * - Check to see if the cwnd/rwnd allows any output, if so we
   10223 	 *   go ahead and fomulate and send the low level chunks. Making sure
   10224 	 *   to combine any control in the control chunk queue also.
   10225 	 */
   10226 	struct sctp_association *asoc;
   10227 	struct sctp_nets *net;
   10228 	int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
   10229 	unsigned int burst_cnt = 0;
   10230 	struct timeval now;
   10231 	int now_filled = 0;
   10232 	int nagle_on;
   10233 	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
   10234 	int un_sent = 0;
   10235 	int fr_done;
   10236 	unsigned int tot_frs = 0;
   10237 
   10238 #if defined(__APPLE__)
   10239 	if (so_locked) {
   10240 		sctp_lock_assert(SCTP_INP_SO(inp));
   10241 	} else {
   10242 		sctp_unlock_assert(SCTP_INP_SO(inp));
   10243 	}
   10244 #endif
   10245 	asoc = &stcb->asoc;
   10246 	/* The Nagle algorithm is only applied when handling a send call. */
   10247 	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
   10248 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
   10249 			nagle_on = 0;
   10250 		} else {
   10251 			nagle_on = 1;
   10252 		}
   10253 	} else {
   10254 		nagle_on = 0;
   10255 	}
   10256 	SCTP_TCB_LOCK_ASSERT(stcb);
   10257 
   10258 	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
   10259 
   10260 	if ((un_sent <= 0) &&
   10261 	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
   10262 	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
   10263 	    (asoc->sent_queue_retran_cnt == 0)) {
   10264 		/* Nothing to do unless there is something to be sent left */
   10265 		return;
   10266 	}
   10267 	/* Do we have something to send, data or control AND
   10268 	 * a sack timer running, if so piggy-back the sack.
   10269 	 */
   10270  	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
   10271 		sctp_send_sack(stcb, so_locked);
   10272 		(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
   10273 	}
   10274 	while (asoc->sent_queue_retran_cnt) {
   10275 		/*-
   10276 		 * Ok, it is retransmission time only, we send out only ONE
   10277 		 * packet with a single call off to the retran code.
   10278 		 */
   10279 		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
   10280 			/*-
   10281 			 * Special hook for handling cookiess discarded
   10282 			 * by peer that carried data. Send cookie-ack only
   10283 			 * and then the next call with get the retran's.
   10284 			 */
   10285  			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
   10286 						    from_where,
   10287 						    &now, &now_filled, frag_point, so_locked);
   10288 			return;
   10289 		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
   10290 			/* if its not from a HB then do it */
   10291 			fr_done = 0;
   10292 			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
   10293 			if (fr_done) {
   10294 				tot_frs++;
   10295 			}
   10296 		} else {
   10297 			/*
   10298 			 * its from any other place, we don't allow retran
   10299 			 * output (only control)
   10300 			 */
   10301 			ret = 1;
   10302 		}
   10303 		if (ret > 0) {
   10304 			/* Can't send anymore */
   10305 			/*-
   10306 			 * now lets push out control by calling med-level
   10307 			 * output once. this assures that we WILL send HB's
   10308 			 * if queued too.
   10309 			 */
   10310 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
   10311 						    from_where,
   10312 						    &now, &now_filled, frag_point, so_locked);
   10313 #ifdef SCTP_AUDITING_ENABLED
   10314 			sctp_auditing(8, inp, stcb, NULL);
   10315 #endif
   10316 			sctp_timer_validation(inp, stcb, asoc);
   10317 			return;
   10318 		}
   10319 		if (ret < 0) {
   10320 			/*-
   10321 			 * The count was off.. retran is not happening so do
   10322 			 * the normal retransmission.
   10323 			 */
   10324 #ifdef SCTP_AUDITING_ENABLED
   10325 			sctp_auditing(9, inp, stcb, NULL);
   10326 #endif
   10327 			if (ret == SCTP_RETRAN_EXIT) {
   10328 				return;
   10329 			}
   10330 			break;
   10331 		}
   10332 		if (from_where == SCTP_OUTPUT_FROM_T3) {
   10333 			/* Only one transmission allowed out of a timeout */
   10334 #ifdef SCTP_AUDITING_ENABLED
   10335 			sctp_auditing(10, inp, stcb, NULL);
   10336 #endif
   10337 			/* Push out any control */
   10338 			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
   10339 						    &now, &now_filled, frag_point, so_locked);
   10340 			return;
   10341 		}
   10342 		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
   10343 			/* Hit FR burst limit */
   10344 			return;
   10345 		}
   10346 		if ((num_out == 0) && (ret == 0)) {
   10347 			/* No more retrans to send */
   10348 			break;
   10349 		}
   10350 	}
   10351 #ifdef SCTP_AUDITING_ENABLED
   10352 	sctp_auditing(12, inp, stcb, NULL);
   10353 #endif
   10354 	/* Check for bad destinations, if they exist move chunks around. */
   10355 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   10356 		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
   10357 			/*-
   10358 			 * if possible move things off of this address we
   10359 			 * still may send below due to the dormant state but
   10360 			 * we try to find an alternate address to send to
   10361 			 * and if we have one we move all queued data on the
   10362 			 * out wheel to this alternate address.
   10363 			 */
   10364 			if (net->ref_count > 1)
   10365 				sctp_move_chunks_from_net(stcb, net);
   10366 		} else {
   10367 			/*-
   10368 			 * if ((asoc->sat_network) || (net->addr_is_local))
   10369 			 * { burst_limit = asoc->max_burst *
   10370 			 * SCTP_SAT_NETWORK_BURST_INCR; }
   10371 			 */
   10372 			if (asoc->max_burst > 0) {
   10373 				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
   10374 					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
   10375 						/* JRS - Use the congestion control given in the congestion control module */
   10376 						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
   10377 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
   10378 							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
   10379 						}
   10380 						SCTP_STAT_INCR(sctps_maxburstqueued);
   10381 					}
   10382 					net->fast_retran_ip = 0;
   10383 				} else {
   10384 					if (net->flight_size == 0) {
   10385 						/* Should be decaying the cwnd here */
   10386 						;
   10387 					}
   10388 				}
   10389 			}
   10390 		}
   10391 
   10392 	}
   10393 	burst_cnt = 0;
   10394 	do {
   10395 		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
   10396 					      &reason_code, 0, from_where,
   10397 					      &now, &now_filled, frag_point, so_locked);
   10398 		if (error) {
   10399 			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
   10400 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
   10401 				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
   10402 			}
   10403 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   10404 				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
   10405 				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
   10406 			}
   10407 			break;
   10408 		}
   10409 		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
   10410 
   10411 		tot_out += num_out;
   10412 		burst_cnt++;
   10413 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   10414 			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
   10415 			if (num_out == 0) {
   10416 				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
   10417 			}
   10418 		}
   10419 		if (nagle_on) {
   10420 			/*
   10421 			 * When the Nagle algorithm is used, look at how much
   10422 			 * is unsent, then if its smaller than an MTU and we
   10423 			 * have data in flight we stop, except if we are
   10424 			 * handling a fragmented user message.
   10425 			 */
   10426 			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
   10427 			           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
   10428 			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
   10429 			    (stcb->asoc.total_flight > 0) &&
   10430 			    ((stcb->asoc.locked_on_sending == NULL) ||
   10431 			     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
   10432 				break;
   10433 			}
   10434 		}
   10435 		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
   10436 		    TAILQ_EMPTY(&asoc->send_queue) &&
   10437 		    stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
   10438 			/* Nothing left to send */
   10439 			break;
   10440 		}
   10441 		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
   10442 			/* Nothing left to send */
   10443 			break;
   10444 		}
   10445 	} while (num_out &&
   10446 	         ((asoc->max_burst == 0) ||
   10447 		  SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
   10448 		  (burst_cnt < asoc->max_burst)));
   10449 
   10450 	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
   10451 		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
   10452 			SCTP_STAT_INCR(sctps_maxburstqueued);
   10453 			asoc->burst_limit_applied = 1;
   10454 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
   10455 				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
   10456 			}
   10457 		} else {
   10458 			asoc->burst_limit_applied = 0;
   10459 		}
   10460 	}
   10461 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   10462 		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
   10463 	}
   10464 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
   10465 		tot_out);
   10466 
   10467 	/*-
   10468 	 * Now we need to clean up the control chunk chain if a ECNE is on
   10469 	 * it. It must be marked as UNSENT again so next call will continue
   10470 	 * to send it until such time that we get a CWR, to remove it.
   10471 	 */
   10472 	if (stcb->asoc.ecn_echo_cnt_onq)
   10473 		sctp_fix_ecn_echo(asoc);
   10474 	return;
   10475 }
   10476 
   10477 
   10478 int
   10479 sctp_output(
   10480 	struct sctp_inpcb *inp,
   10481 #if defined(__Panda__)
   10482 	pakhandle_type m,
   10483 #else
   10484 	struct mbuf *m,
   10485 #endif
   10486 	struct sockaddr *addr,
   10487 #if defined(__Panda__)
   10488 	pakhandle_type control,
   10489 #else
   10490 	struct mbuf *control,
   10491 #endif
   10492 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
   10493 	struct thread *p,
   10494 #elif defined(__Windows__)
   10495 	PKTHREAD p,
   10496 #else
   10497 #if defined(__APPLE__)
   10498 	struct proc *p SCTP_UNUSED,
   10499 #else
   10500 	struct proc *p,
   10501 #endif
   10502 #endif
   10503 	int flags)
   10504 {
   10505 	if (inp == NULL) {
   10506 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   10507 		return (EINVAL);
   10508 	}
   10509 
   10510 	if (inp->sctp_socket == NULL) {
   10511 		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   10512 		return (EINVAL);
   10513 	}
   10514 	return (sctp_sosend(inp->sctp_socket,
   10515 			    addr,
   10516 			    (struct uio *)NULL,
   10517 			    m,
   10518 			    control,
   10519 #if defined(__APPLE__) || defined(__Panda__)
   10520 			    flags
   10521 #else
   10522 			    flags, p
   10523 #endif
   10524 			));
   10525 }
   10526 
   10527 void
   10528 send_forward_tsn(struct sctp_tcb *stcb,
   10529 		 struct sctp_association *asoc)
   10530 {
   10531         struct sctp_tmit_chunk *chk;
   10532 	struct sctp_forward_tsn_chunk *fwdtsn;
   10533 	uint32_t advance_peer_ack_point;
   10534 
   10535         SCTP_TCB_LOCK_ASSERT(stcb);
   10536 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
   10537 		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
   10538 			/* mark it to unsent */
   10539 			chk->sent = SCTP_DATAGRAM_UNSENT;
   10540 			chk->snd_count = 0;
   10541 			/* Do we correct its output location? */
   10542 			if (chk->whoTo) {
   10543 				sctp_free_remote_addr(chk->whoTo);
   10544 				chk->whoTo = NULL;
   10545 			}
   10546 			goto sctp_fill_in_rest;
   10547 		}
   10548 	}
   10549 	/* Ok if we reach here we must build one */
   10550 	sctp_alloc_a_chunk(stcb, chk);
   10551 	if (chk == NULL) {
   10552 		return;
   10553 	}
   10554 	asoc->fwd_tsn_cnt++;
   10555 	chk->copy_by_ref = 0;
   10556 	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
   10557 	chk->rec.chunk_id.can_take_data = 0;
   10558 	chk->asoc = asoc;
   10559 	chk->whoTo = NULL;
   10560 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
   10561 	if (chk->data == NULL) {
   10562 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   10563 		return;
   10564 	}
   10565 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
   10566 	chk->sent = SCTP_DATAGRAM_UNSENT;
   10567 	chk->snd_count = 0;
   10568 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
   10569 	asoc->ctrl_queue_cnt++;
   10570 sctp_fill_in_rest:
   10571 	/*-
   10572 	 * Here we go through and fill out the part that deals with
   10573 	 * stream/seq of the ones we skip.
   10574 	 */
   10575 	SCTP_BUF_LEN(chk->data) = 0;
   10576 	{
   10577 		struct sctp_tmit_chunk *at, *tp1, *last;
   10578 		struct sctp_strseq *strseq;
   10579 		unsigned int cnt_of_space, i, ovh;
   10580 		unsigned int space_needed;
   10581 		unsigned int cnt_of_skipped = 0;
   10582 
   10583 		TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
   10584 			if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
   10585 			    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
   10586 				/* no more to look at */
   10587 				break;
   10588 			}
   10589 			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
   10590 				/* We don't report these */
   10591 				continue;
   10592 			}
   10593 			cnt_of_skipped++;
   10594 		}
   10595 		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
   10596 		    (cnt_of_skipped * sizeof(struct sctp_strseq)));
   10597 
   10598 		cnt_of_space = M_TRAILINGSPACE(chk->data);
   10599 
   10600 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
   10601 			ovh = SCTP_MIN_OVERHEAD;
   10602 		} else {
   10603 			ovh = SCTP_MIN_V4_OVERHEAD;
   10604 		}
   10605 		if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
   10606 			/* trim to a mtu size */
   10607 			cnt_of_space = asoc->smallest_mtu - ovh;
   10608 		}
   10609 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
   10610 			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
   10611 				       0xff, 0, cnt_of_skipped,
   10612 				       asoc->advanced_peer_ack_point);
   10613 
   10614 		}
   10615 		advance_peer_ack_point = asoc->advanced_peer_ack_point;
   10616 		if (cnt_of_space < space_needed) {
   10617 			/*-
   10618 			 * ok we must trim down the chunk by lowering the
   10619 			 * advance peer ack point.
   10620 			 */
   10621 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
   10622 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
   10623 					       0xff, 0xff, cnt_of_space,
   10624 					       space_needed);
   10625 			}
   10626 			cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
   10627 			cnt_of_skipped /= sizeof(struct sctp_strseq);
   10628 			/*-
   10629 			 * Go through and find the TSN that will be the one
   10630 			 * we report.
   10631 			 */
   10632 			at = TAILQ_FIRST(&asoc->sent_queue);
   10633 			if (at != NULL) {
   10634 				for (i = 0; i < cnt_of_skipped; i++) {
   10635 					tp1 = TAILQ_NEXT(at, sctp_next);
   10636 					if (tp1 == NULL) {
   10637 						break;
   10638 					}
   10639 					at = tp1;
   10640 				}
   10641 			}
   10642 			if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
   10643 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
   10644 					       0xff, cnt_of_skipped, at->rec.data.TSN_seq,
   10645 					       asoc->advanced_peer_ack_point);
   10646 			}
   10647 			last = at;
   10648 			/*-
   10649 			 * last now points to last one I can report, update
   10650 			 * peer ack point
   10651 			 */
   10652 			if (last)
   10653 				advance_peer_ack_point = last->rec.data.TSN_seq;
   10654 			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
   10655 			               cnt_of_skipped * sizeof(struct sctp_strseq);
   10656 		}
   10657 		chk->send_size = space_needed;
   10658 		/* Setup the chunk */
   10659 		fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
   10660 		fwdtsn->ch.chunk_length = htons(chk->send_size);
   10661 		fwdtsn->ch.chunk_flags = 0;
   10662 		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
   10663 		fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
   10664 		SCTP_BUF_LEN(chk->data) = chk->send_size;
   10665 		fwdtsn++;
   10666 		/*-
   10667 		 * Move pointer to after the fwdtsn and transfer to the
   10668 		 * strseq pointer.
   10669 		 */
   10670 		strseq = (struct sctp_strseq *)fwdtsn;
   10671 		/*-
   10672 		 * Now populate the strseq list. This is done blindly
   10673 		 * without pulling out duplicate stream info. This is
   10674 		 * inefficent but won't harm the process since the peer will
   10675 		 * look at these in sequence and will thus release anything.
   10676 		 * It could mean we exceed the PMTU and chop off some that
   10677 		 * we could have included.. but this is unlikely (aka 1432/4
   10678 		 * would mean 300+ stream seq's would have to be reported in
   10679 		 * one FWD-TSN. With a bit of work we can later FIX this to
   10680 		 * optimize and pull out duplcates.. but it does add more
   10681 		 * overhead. So for now... not!
   10682 		 */
   10683 		at = TAILQ_FIRST(&asoc->sent_queue);
   10684 		for (i = 0; i < cnt_of_skipped; i++) {
   10685 			tp1 = TAILQ_NEXT(at, sctp_next);
   10686 			if (tp1 == NULL)
   10687 				break;
   10688 			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
   10689 				/* We don't report these */
   10690 				i--;
   10691 				at = tp1;
   10692 				continue;
   10693 			}
   10694 			if (at->rec.data.TSN_seq == advance_peer_ack_point) {
   10695 				at->rec.data.fwd_tsn_cnt = 0;
   10696 			}
   10697 			strseq->stream = ntohs(at->rec.data.stream_number);
   10698 			strseq->sequence = ntohs(at->rec.data.stream_seq);
   10699 			strseq++;
   10700 			at = tp1;
   10701 		}
   10702 	}
   10703 	return;
   10704 }
   10705 
   10706 void
   10707 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
   10708 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   10709 	SCTP_UNUSED
   10710 #endif
   10711 )
   10712 {
   10713 	/*-
   10714 	 * Queue up a SACK or NR-SACK in the control queue.
   10715 	 * We must first check to see if a SACK or NR-SACK is
   10716 	 * somehow on the control queue.
   10717 	 * If so, we will take and and remove the old one.
   10718 	 */
   10719 	struct sctp_association *asoc;
   10720 	struct sctp_tmit_chunk *chk, *a_chk;
   10721 	struct sctp_sack_chunk *sack;
   10722 	struct sctp_nr_sack_chunk *nr_sack;
   10723 	struct sctp_gap_ack_block *gap_descriptor;
   10724 	struct sack_track *selector;
   10725 	int mergeable = 0;
   10726 	int offset;
   10727 	caddr_t limit;
   10728 	uint32_t *dup;
   10729 	int limit_reached = 0;
   10730 	unsigned int i, siz, j;
   10731 	unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
   10732 	int num_dups = 0;
   10733 	int space_req;
   10734 	uint32_t highest_tsn;
   10735 	uint8_t flags;
   10736 	uint8_t type;
   10737 	uint8_t tsn_map;
   10738 
   10739 	if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
   10740 	    (stcb->asoc.peer_supports_nr_sack == 1)) {
   10741 		type = SCTP_NR_SELECTIVE_ACK;
   10742 	} else {
   10743 		type = SCTP_SELECTIVE_ACK;
   10744 	}
   10745 	a_chk = NULL;
   10746 	asoc = &stcb->asoc;
   10747 	SCTP_TCB_LOCK_ASSERT(stcb);
   10748 	if (asoc->last_data_chunk_from == NULL) {
   10749 		/* Hmm we never received anything */
   10750 		return;
   10751 	}
   10752 	sctp_slide_mapping_arrays(stcb);
   10753 	sctp_set_rwnd(stcb, asoc);
   10754 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
   10755 		if (chk->rec.chunk_id.id == type) {
   10756 			/* Hmm, found a sack already on queue, remove it */
   10757 			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
   10758 			asoc->ctrl_queue_cnt--;
   10759 			a_chk = chk;
   10760 			if (a_chk->data) {
   10761 				sctp_m_freem(a_chk->data);
   10762 				a_chk->data = NULL;
   10763 			}
   10764 			if (a_chk->whoTo) {
   10765 				sctp_free_remote_addr(a_chk->whoTo);
   10766 				a_chk->whoTo = NULL;
   10767 			}
   10768 			break;
   10769 		}
   10770 	}
   10771 	if (a_chk == NULL) {
   10772 		sctp_alloc_a_chunk(stcb, a_chk);
   10773 		if (a_chk == NULL) {
   10774 			/* No memory so we drop the idea, and set a timer */
   10775 			if (stcb->asoc.delayed_ack) {
   10776 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
   10777 				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
   10778 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
   10779 				    stcb->sctp_ep, stcb, NULL);
   10780 			} else {
   10781 				stcb->asoc.send_sack = 1;
   10782 			}
   10783 			return;
   10784 		}
   10785 		a_chk->copy_by_ref = 0;
   10786 		a_chk->rec.chunk_id.id = type;
   10787 		a_chk->rec.chunk_id.can_take_data = 1;
   10788 	}
   10789 	/* Clear our pkt counts */
   10790 	asoc->data_pkts_seen = 0;
   10791 
   10792 	a_chk->asoc = asoc;
   10793 	a_chk->snd_count = 0;
   10794 	a_chk->send_size = 0;	/* fill in later */
   10795 	a_chk->sent = SCTP_DATAGRAM_UNSENT;
   10796 	a_chk->whoTo = NULL;
   10797 
   10798 	if ((asoc->numduptsns) ||
   10799 	    (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
   10800 		/*-
   10801 		 * Ok, we have some duplicates or the destination for the
   10802 		 * sack is unreachable, lets see if we can select an
   10803 		 * alternate than asoc->last_data_chunk_from
   10804 		 */
   10805 		if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
   10806 		    (asoc->used_alt_onsack > asoc->numnets)) {
   10807 			/* We used an alt last time, don't this time */
   10808 			a_chk->whoTo = NULL;
   10809 		} else {
   10810 			asoc->used_alt_onsack++;
   10811 			a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
   10812 		}
   10813 		if (a_chk->whoTo == NULL) {
   10814 			/* Nope, no alternate */
   10815 			a_chk->whoTo = asoc->last_data_chunk_from;
   10816 			asoc->used_alt_onsack = 0;
   10817 		}
   10818 	} else {
   10819 		/*
   10820 		 * No duplicates so we use the last place we received data
   10821 		 * from.
   10822 		 */
   10823 		asoc->used_alt_onsack = 0;
   10824 		a_chk->whoTo = asoc->last_data_chunk_from;
   10825 	}
   10826 	if (a_chk->whoTo) {
   10827 		atomic_add_int(&a_chk->whoTo->ref_count, 1);
   10828 	}
   10829 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
   10830 		highest_tsn = asoc->highest_tsn_inside_map;
   10831 	} else {
   10832 		highest_tsn = asoc->highest_tsn_inside_nr_map;
   10833 	}
   10834 	if (highest_tsn == asoc->cumulative_tsn) {
   10835 		/* no gaps */
   10836 		if (type == SCTP_SELECTIVE_ACK) {
   10837 			space_req = sizeof(struct sctp_sack_chunk);
   10838 		} else {
   10839 			space_req = sizeof(struct sctp_nr_sack_chunk);
   10840 		}
   10841 	} else {
   10842 		/* gaps get a cluster */
   10843 		space_req = MCLBYTES;
   10844 	}
   10845 	/* Ok now lets formulate a MBUF with our sack */
   10846 	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
   10847 	if ((a_chk->data == NULL) ||
   10848 	    (a_chk->whoTo == NULL)) {
   10849 		/* rats, no mbuf memory */
   10850 		if (a_chk->data) {
   10851 			/* was a problem with the destination */
   10852 			sctp_m_freem(a_chk->data);
   10853 			a_chk->data = NULL;
   10854 		}
   10855 		sctp_free_a_chunk(stcb, a_chk, so_locked);
   10856 		/* sa_ignore NO_NULL_CHK */
   10857 		if (stcb->asoc.delayed_ack) {
   10858 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
   10859 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
   10860 			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
   10861 			    stcb->sctp_ep, stcb, NULL);
   10862 		} else {
   10863 			stcb->asoc.send_sack = 1;
   10864 		}
   10865 		return;
   10866 	}
   10867 	/* ok, lets go through and fill it in */
   10868 	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
   10869 	space = M_TRAILINGSPACE(a_chk->data);
   10870 	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
   10871 		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
   10872 	}
   10873 	limit = mtod(a_chk->data, caddr_t);
   10874 	limit += space;
   10875 
   10876 	flags = 0;
   10877 
   10878 	if ((asoc->sctp_cmt_on_off > 0) &&
   10879 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
   10880 		/*-
   10881 		 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
   10882 		 * received, then set high bit to 1, else 0. Reset
   10883 		 * pkts_rcvd.
   10884 		 */
   10885 		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
   10886 		asoc->cmt_dac_pkts_rcvd = 0;
   10887 	}
   10888 #ifdef SCTP_ASOCLOG_OF_TSNS
   10889 	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
   10890 	stcb->asoc.cumack_log_atsnt++;
   10891 	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
   10892 		stcb->asoc.cumack_log_atsnt = 0;
   10893 	}
   10894 #endif
   10895 	/* reset the readers interpretation */
   10896 	stcb->freed_by_sorcv_sincelast = 0;
   10897 
   10898 	if (type == SCTP_SELECTIVE_ACK) {
   10899 		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
   10900 		nr_sack = NULL;
   10901 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
   10902 		if (highest_tsn > asoc->mapping_array_base_tsn) {
   10903 			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
   10904 		} else {
   10905 			siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
   10906 		}
   10907 	} else {
   10908 		sack = NULL;
   10909 		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
   10910 		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
   10911 		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
   10912 			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
   10913 		} else {
   10914 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
   10915 		}
   10916 	}
   10917 
   10918 	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
   10919 		offset = 1;
   10920 	} else {
   10921 		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
   10922 	}
   10923 	if (((type == SCTP_SELECTIVE_ACK) &&
   10924 	     SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
   10925 	    ((type == SCTP_NR_SELECTIVE_ACK) &&
   10926 	     SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
   10927 		/* we have a gap .. maybe */
   10928 		for (i = 0; i < siz; i++) {
   10929 			tsn_map = asoc->mapping_array[i];
   10930 			if (type == SCTP_SELECTIVE_ACK) {
   10931 				tsn_map |= asoc->nr_mapping_array[i];
   10932 			}
   10933 			if (i == 0) {
   10934 				/*
   10935 				 * Clear all bits corresponding to TSNs
   10936 				 * smaller or equal to the cumulative TSN.
   10937 				 */
   10938 				tsn_map &= (~0 << (1 - offset));
   10939 			}
   10940 			selector = &sack_array[tsn_map];
   10941 			if (mergeable && selector->right_edge) {
   10942 				/*
   10943 				 * Backup, left and right edges were ok to
   10944 				 * merge.
   10945 				 */
   10946 				num_gap_blocks--;
   10947 				gap_descriptor--;
   10948 			}
   10949 			if (selector->num_entries == 0)
   10950 				mergeable = 0;
   10951 			else {
   10952 				for (j = 0; j < selector->num_entries; j++) {
   10953 					if (mergeable && selector->right_edge) {
   10954 						/*
   10955 						 * do a merge by NOT setting
   10956 						 * the left side
   10957 						 */
   10958 						mergeable = 0;
   10959 					} else {
   10960 						/*
   10961 						 * no merge, set the left
   10962 						 * side
   10963 						 */
   10964 						mergeable = 0;
   10965 						gap_descriptor->start = htons((selector->gaps[j].start + offset));
   10966 					}
   10967 					gap_descriptor->end = htons((selector->gaps[j].end + offset));
   10968 					num_gap_blocks++;
   10969 					gap_descriptor++;
   10970 					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
   10971 						/* no more room */
   10972 						limit_reached = 1;
   10973 						break;
   10974 					}
   10975 				}
   10976 				if (selector->left_edge) {
   10977 					mergeable = 1;
   10978 				}
   10979 			}
   10980 			if (limit_reached) {
   10981 				/* Reached the limit stop */
   10982 				break;
   10983 			}
   10984 			offset += 8;
   10985 		}
   10986 	}
   10987 	if ((type == SCTP_NR_SELECTIVE_ACK) &&
   10988 	    (limit_reached == 0)) {
   10989 
   10990 		mergeable = 0;
   10991 
   10992 		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
   10993 			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
   10994 		} else {
   10995 			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
   10996 		}
   10997 
   10998 		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
   10999 			offset = 1;
   11000 		} else {
   11001 			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
   11002 		}
   11003 		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
   11004 			/* we have a gap .. maybe */
   11005 			for (i = 0; i < siz; i++) {
   11006 				tsn_map = asoc->nr_mapping_array[i];
   11007 				if (i == 0) {
   11008 					/*
   11009 					 * Clear all bits corresponding to TSNs
   11010 					 * smaller or equal to the cumulative TSN.
   11011 					 */
   11012 					tsn_map &= (~0 << (1 - offset));
   11013 				}
   11014 				selector = &sack_array[tsn_map];
   11015 				if (mergeable && selector->right_edge) {
   11016 					/*
   11017 					* Backup, left and right edges were ok to
   11018 					* merge.
   11019 					*/
   11020 					num_nr_gap_blocks--;
   11021 					gap_descriptor--;
   11022 				}
   11023 				if (selector->num_entries == 0)
   11024 					mergeable = 0;
   11025 				else {
   11026 					for (j = 0; j < selector->num_entries; j++) {
   11027 						if (mergeable && selector->right_edge) {
   11028 							/*
   11029 							* do a merge by NOT setting
   11030 							* the left side
   11031 							*/
   11032 							mergeable = 0;
   11033 						} else {
   11034 							/*
   11035 							* no merge, set the left
   11036 							* side
   11037 							*/
   11038 							mergeable = 0;
   11039 							gap_descriptor->start = htons((selector->gaps[j].start + offset));
   11040 						}
   11041 						gap_descriptor->end = htons((selector->gaps[j].end + offset));
   11042 						num_nr_gap_blocks++;
   11043 						gap_descriptor++;
   11044 						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
   11045 							/* no more room */
   11046 							limit_reached = 1;
   11047 							break;
   11048 						}
   11049 					}
   11050 					if (selector->left_edge) {
   11051 						mergeable = 1;
   11052 					}
   11053 				}
   11054 				if (limit_reached) {
   11055 					/* Reached the limit stop */
   11056 					break;
   11057 				}
   11058 				offset += 8;
   11059 			}
   11060 		}
   11061 	}
   11062 	/* now we must add any dups we are going to report. */
   11063 	if ((limit_reached == 0) && (asoc->numduptsns)) {
   11064 		dup = (uint32_t *) gap_descriptor;
   11065 		for (i = 0; i < asoc->numduptsns; i++) {
   11066 			*dup = htonl(asoc->dup_tsns[i]);
   11067 			dup++;
   11068 			num_dups++;
   11069 			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
   11070 				/* no more room */
   11071 				break;
   11072 			}
   11073 		}
   11074 		asoc->numduptsns = 0;
   11075 	}
   11076 	/*
   11077 	 * now that the chunk is prepared queue it to the control chunk
   11078 	 * queue.
   11079 	 */
   11080 	if (type == SCTP_SELECTIVE_ACK) {
   11081 		a_chk->send_size = sizeof(struct sctp_sack_chunk) +
   11082 		                   (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
   11083 		                   num_dups * sizeof(int32_t);
   11084 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
   11085 		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
   11086 		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
   11087 		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
   11088 		sack->sack.num_dup_tsns = htons(num_dups);
   11089 		sack->ch.chunk_type = type;
   11090 		sack->ch.chunk_flags = flags;
   11091 		sack->ch.chunk_length = htons(a_chk->send_size);
   11092 	} else {
   11093 		a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
   11094 		                   (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
   11095 		                   num_dups * sizeof(int32_t);
   11096 		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
   11097 		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
   11098 		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
   11099 		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
   11100 		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
   11101 		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
   11102 		nr_sack->nr_sack.reserved = 0;
   11103 		nr_sack->ch.chunk_type = type;
   11104 		nr_sack->ch.chunk_flags = flags;
   11105 		nr_sack->ch.chunk_length = htons(a_chk->send_size);
   11106 	}
   11107 	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
   11108 	asoc->my_last_reported_rwnd = asoc->my_rwnd;
   11109 	asoc->ctrl_queue_cnt++;
   11110 	asoc->send_sack = 0;
   11111 	SCTP_STAT_INCR(sctps_sendsacks);
   11112 	return;
   11113 }
   11114 
   11115 void
   11116 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
   11117 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   11118     SCTP_UNUSED
   11119 #endif
   11120     )
   11121 {
   11122 	struct mbuf *m_abort, *m, *m_last;
   11123 	struct mbuf *m_out, *m_end = NULL;
   11124 	struct sctp_abort_chunk *abort;
   11125 	struct sctp_auth_chunk *auth = NULL;
   11126 	struct sctp_nets *net;
   11127 	uint32_t vtag;
   11128 	uint32_t auth_offset = 0;
   11129 	uint16_t cause_len, chunk_len, padding_len;
   11130 
   11131 #if defined(__APPLE__)
   11132 	if (so_locked) {
   11133 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
   11134 	} else {
   11135 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
   11136 	}
   11137 #endif
   11138 	SCTP_TCB_LOCK_ASSERT(stcb);
   11139 	/*-
   11140 	 * Add an AUTH chunk, if chunk requires it and save the offset into
   11141 	 * the chain for AUTH
   11142 	 */
   11143 	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
   11144 	                                stcb->asoc.peer_auth_chunks)) {
   11145 		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
   11146 					    stcb, SCTP_ABORT_ASSOCIATION);
   11147 		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   11148 	} else {
   11149 		m_out = NULL;
   11150 	}
   11151 	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
   11152 	if (m_abort == NULL) {
   11153 		if (m_out) {
   11154 			sctp_m_freem(m_out);
   11155 		}
   11156 		if (operr) {
   11157 			sctp_m_freem(operr);
   11158 		}
   11159 		return;
   11160 	}
   11161 	/* link in any error */
   11162 	SCTP_BUF_NEXT(m_abort) = operr;
   11163 	cause_len = 0;
   11164 	m_last = NULL;
   11165 	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
   11166 		cause_len += (uint16_t)SCTP_BUF_LEN(m);
   11167 		if (SCTP_BUF_NEXT(m) == NULL) {
   11168 			m_last = m;
   11169 		}
   11170 	}
   11171 	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
   11172 	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
   11173 	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
   11174 	if (m_out == NULL) {
   11175 		/* NO Auth chunk prepended, so reserve space in front */
   11176 		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
   11177 		m_out = m_abort;
   11178 	} else {
   11179 		/* Put AUTH chunk at the front of the chain */
   11180 		SCTP_BUF_NEXT(m_end) = m_abort;
   11181 	}
   11182 	if (stcb->asoc.alternate) {
   11183 		net = stcb->asoc.alternate;
   11184 	} else {
   11185 		net = stcb->asoc.primary_destination;
   11186 	}
   11187 	/* Fill in the ABORT chunk header. */
   11188 	abort = mtod(m_abort, struct sctp_abort_chunk *);
   11189 	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
   11190 	if (stcb->asoc.peer_vtag == 0) {
   11191 		/* This happens iff the assoc is in COOKIE-WAIT state. */
   11192 		vtag = stcb->asoc.my_vtag;
   11193 		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
   11194 	} else {
   11195 		vtag = stcb->asoc.peer_vtag;
   11196 		abort->ch.chunk_flags = 0;
   11197 	}
   11198 	abort->ch.chunk_length = htons(chunk_len);
   11199 	/* Add padding, if necessary. */
   11200 	if (padding_len > 0) {
   11201 		if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) {
   11202 			sctp_m_freem(m_out);
   11203 			return;
   11204 		}
   11205 	}
   11206 	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
   11207 	                                 (struct sockaddr *)&net->ro._l_addr,
   11208 	                                 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
   11209 	                                 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
   11210 	                                 stcb->asoc.primary_destination->port, NULL,
   11211 #if defined(__FreeBSD__)
   11212 	                                 0, 0,
   11213 #endif
   11214 	                                 so_locked);
   11215 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   11216 }
   11217 
   11218 void
   11219 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
   11220                             struct sctp_nets *net,
   11221                             int reflect_vtag)
   11222 {
   11223 	/* formulate and SEND a SHUTDOWN-COMPLETE */
   11224 	struct mbuf *m_shutdown_comp;
   11225 	struct sctp_shutdown_complete_chunk *shutdown_complete;
   11226 	uint32_t vtag;
   11227 	uint8_t flags;
   11228 
   11229 	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
   11230 	if (m_shutdown_comp == NULL) {
   11231 		/* no mbuf's */
   11232 		return;
   11233 	}
   11234 	if (reflect_vtag) {
   11235 		flags = SCTP_HAD_NO_TCB;
   11236 		vtag = stcb->asoc.my_vtag;
   11237 	} else {
   11238 		flags = 0;
   11239 		vtag = stcb->asoc.peer_vtag;
   11240 	}
   11241 	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
   11242 	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
   11243 	shutdown_complete->ch.chunk_flags = flags;
   11244 	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
   11245 	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
   11246 	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
   11247 	                                 (struct sockaddr *)&net->ro._l_addr,
   11248 	                                 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
   11249 	                                 stcb->sctp_ep->sctp_lport, stcb->rport,
   11250 	                                 htonl(vtag),
   11251 	                                 net->port, NULL,
   11252 #if defined(__FreeBSD__)
   11253 	                                 0, 0,
   11254 #endif
   11255 	                                 SCTP_SO_NOT_LOCKED);
   11256 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   11257 	return;
   11258 }
   11259 
   11260 #if defined(__FreeBSD__)
   11261 static void
   11262 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
   11263                    struct sctphdr *sh, uint32_t vtag,
   11264                    uint8_t type, struct mbuf *cause,
   11265                    uint8_t use_mflowid, uint32_t mflowid,
   11266                    uint32_t vrf_id, uint16_t port)
   11267 #else
   11268 static void
   11269 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
   11270                    struct sctphdr *sh, uint32_t vtag,
   11271                    uint8_t type, struct mbuf *cause,
   11272                    uint32_t vrf_id SCTP_UNUSED, uint16_t port)
   11273 #endif
   11274 {
   11275 #ifdef __Panda__
   11276 	pakhandle_type o_pak;
   11277 #else
   11278 	struct mbuf *o_pak;
   11279 #endif
   11280 	struct mbuf *mout;
   11281 	struct sctphdr *shout;
   11282 	struct sctp_chunkhdr *ch;
   11283 	struct udphdr *udp;
   11284 	int len, cause_len, padding_len;
   11285 #if defined(INET) || defined(INET6)
   11286 	int ret;
   11287 #endif
   11288 #ifdef INET
   11289 #if defined(__APPLE__) || defined(__Panda__)
   11290 	sctp_route_t ro;
   11291 #endif
   11292 	struct sockaddr_in *src_sin, *dst_sin;
   11293 	struct ip *ip;
   11294 #endif
   11295 #ifdef INET6
   11296 	struct sockaddr_in6 *src_sin6, *dst_sin6;
   11297 	struct ip6_hdr *ip6;
   11298 #endif
   11299 
   11300 	/* Compute the length of the cause and add final padding. */
   11301 	cause_len = 0;
   11302 	if (cause != NULL) {
   11303 		struct mbuf *m_at, *m_last = NULL;
   11304 
   11305 		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
   11306 			if (SCTP_BUF_NEXT(m_at) == NULL)
   11307 				m_last = m_at;
   11308 			cause_len += SCTP_BUF_LEN(m_at);
   11309 		}
   11310 		padding_len = cause_len % 4;
   11311 		if (padding_len != 0) {
   11312 			padding_len = 4 - padding_len;
   11313 		}
   11314 		if (padding_len != 0) {
   11315 			if (sctp_add_pad_tombuf(m_last, padding_len)) {
   11316 				sctp_m_freem(cause);
   11317 				return;
   11318 			}
   11319 		}
   11320 	} else {
   11321 		padding_len = 0;
   11322 	}
   11323 	/* Get an mbuf for the header. */
   11324 	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
   11325 	switch (dst->sa_family) {
   11326 #ifdef INET
   11327 	case AF_INET:
   11328 		len += sizeof(struct ip);
   11329 		break;
   11330 #endif
   11331 #ifdef INET6
   11332 	case AF_INET6:
   11333 		len += sizeof(struct ip6_hdr);
   11334 		break;
   11335 #endif
   11336 	default:
   11337 		break;
   11338 	}
   11339 	if (port) {
   11340 		len += sizeof(struct udphdr);
   11341 	}
   11342 #if defined(__APPLE__)
   11343 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
   11344 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
   11345 #else
   11346 	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
   11347 #endif
   11348 #else
   11349 	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
   11350 #endif
   11351 	if (mout == NULL) {
   11352 		if (cause) {
   11353 			sctp_m_freem(cause);
   11354 		}
   11355 		return;
   11356 	}
   11357 #if defined(__APPLE__)
   11358 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
   11359 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
   11360 #else
   11361 	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
   11362 #endif
   11363 #else
   11364 	SCTP_BUF_RESV_UF(mout, max_linkhdr);
   11365 #endif
   11366 	SCTP_BUF_LEN(mout) = len;
   11367 	SCTP_BUF_NEXT(mout) = cause;
   11368 #if defined(__FreeBSD__)
   11369 	if (use_mflowid != 0) {
   11370 		mout->m_pkthdr.flowid = mflowid;
   11371 		mout->m_flags |= M_FLOWID;
   11372 	}
   11373 #endif
   11374 #ifdef INET
   11375 	ip = NULL;
   11376 #endif
   11377 #ifdef INET6
   11378 	ip6 = NULL;
   11379 #endif
   11380 	switch (dst->sa_family) {
   11381 #ifdef INET
   11382 	case AF_INET:
   11383 		src_sin = (struct sockaddr_in *)src;
   11384 		dst_sin = (struct sockaddr_in *)dst;
   11385 		ip = mtod(mout, struct ip *);
   11386 		ip->ip_v = IPVERSION;
   11387 		ip->ip_hl = (sizeof(struct ip) >> 2);
   11388 		ip->ip_tos = 0;
   11389 #if defined(__FreeBSD__)
   11390 		ip->ip_id = ip_newid();
   11391 #elif defined(__APPLE__)
   11392 #if RANDOM_IP_ID
   11393 		ip->ip_id = ip_randomid();
   11394 #else
   11395 		ip->ip_id = htons(ip_id++);
   11396 #endif
   11397 #else
   11398                 ip->ip_id = htons(ip_id++);
   11399 #endif
   11400 		ip->ip_off = 0;
   11401 		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
   11402 		if (port) {
   11403 			ip->ip_p = IPPROTO_UDP;
   11404 		} else {
   11405 			ip->ip_p = IPPROTO_SCTP;
   11406 		}
   11407 		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
   11408 		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
   11409 		ip->ip_sum = 0;
   11410 		len = sizeof(struct ip);
   11411 		shout = (struct sctphdr *)((caddr_t)ip + len);
   11412 		break;
   11413 #endif
   11414 #ifdef INET6
   11415 	case AF_INET6:
   11416 		src_sin6 = (struct sockaddr_in6 *)src;
   11417 		dst_sin6 = (struct sockaddr_in6 *)dst;
   11418 		ip6 = mtod(mout, struct ip6_hdr *);
   11419 		ip6->ip6_flow = htonl(0x60000000);
   11420 #if defined(__FreeBSD__)
   11421 		if (V_ip6_auto_flowlabel) {
   11422 			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
   11423 		}
   11424 #endif
   11425 #if defined(__Userspace__)
   11426 		ip6->ip6_hlim = IPv6_HOP_LIMIT;
   11427 #else
   11428 		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
   11429 #endif
   11430 		if (port) {
   11431 			ip6->ip6_nxt = IPPROTO_UDP;
   11432 		} else {
   11433 			ip6->ip6_nxt = IPPROTO_SCTP;
   11434 		}
   11435 		ip6->ip6_src = dst_sin6->sin6_addr;
   11436 		ip6->ip6_dst = src_sin6->sin6_addr;
   11437 		len = sizeof(struct ip6_hdr);
   11438 		shout = (struct sctphdr *)((caddr_t)ip6 + len);
   11439 		break;
   11440 #endif
   11441 	default:
   11442 		len = 0;
   11443 		shout = mtod(mout, struct sctphdr *);
   11444 		break;
   11445 	}
   11446 	if (port) {
   11447 		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
   11448 			sctp_m_freem(mout);
   11449 			return;
   11450 		}
   11451 		udp = (struct udphdr *)shout;
   11452 		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
   11453 		udp->uh_dport = port;
   11454 		udp->uh_sum = 0;
   11455 		udp->uh_ulen = htons(sizeof(struct udphdr) +
   11456 		                     sizeof(struct sctphdr) +
   11457 		                     sizeof(struct sctp_chunkhdr) +
   11458 		                     cause_len + padding_len);
   11459 		len += sizeof(struct udphdr);
   11460 		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
   11461 	} else {
   11462 		udp = NULL;
   11463 	}
   11464 	shout->src_port = sh->dest_port;
   11465 	shout->dest_port = sh->src_port;
   11466 	shout->checksum = 0;
   11467 	if (vtag) {
   11468 		shout->v_tag = htonl(vtag);
   11469 	} else {
   11470 		shout->v_tag = sh->v_tag;
   11471 	}
   11472 	len += sizeof(struct sctphdr);
   11473 	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
   11474 	ch->chunk_type = type;
   11475 	if (vtag) {
   11476 		ch->chunk_flags = 0;
   11477 	} else {
   11478 		ch->chunk_flags = SCTP_HAD_NO_TCB;
   11479 	}
   11480 	ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
   11481 	len += sizeof(struct sctp_chunkhdr);
   11482 	len += cause_len + padding_len;
   11483 
   11484 	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
   11485 		sctp_m_freem(mout);
   11486 		return;
   11487 	}
   11488 	SCTP_ATTACH_CHAIN(o_pak, mout, len);
   11489 	switch (dst->sa_family) {
   11490 #ifdef INET
   11491 	case AF_INET:
   11492 #if defined(__APPLE__) || defined(__Panda__)
   11493 		/* zap the stack pointer to the route */
   11494 		bzero(&ro, sizeof(sctp_route_t));
   11495 #if defined(__Panda__)
   11496 		ro._l_addr.sa.sa_family = AF_INET;
   11497 #endif
   11498 #endif
   11499 		if (port) {
   11500 #if !defined(__Windows__) && !defined(__Userspace__)
   11501 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
   11502 			if (V_udp_cksum) {
   11503 				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
   11504 			} else {
   11505 				udp->uh_sum = 0;
   11506 			}
   11507 #else
   11508 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
   11509 #endif
   11510 #else
   11511 			udp->uh_sum = 0;
   11512 #endif
   11513 		}
   11514 #if defined(__FreeBSD__)
   11515 #if __FreeBSD_version >= 1000000
   11516 		ip->ip_len = htons(len);
   11517 #else
   11518 		ip->ip_len = len;
   11519 #endif
   11520 #elif defined(__APPLE__) || defined(__Userspace__)
   11521 		ip->ip_len = len;
   11522 #else
   11523 		ip->ip_len = htons(len);
   11524 #endif
   11525 		if (port) {
   11526 #if defined(SCTP_WITH_NO_CSUM)
   11527 			SCTP_STAT_INCR(sctps_sendnocrc);
   11528 #else
   11529 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
   11530 			SCTP_STAT_INCR(sctps_sendswcrc);
   11531 #endif
   11532 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
   11533 			if (V_udp_cksum) {
   11534 				SCTP_ENABLE_UDP_CSUM(o_pak);
   11535 			}
   11536 #else
   11537 			SCTP_ENABLE_UDP_CSUM(o_pak);
   11538 #endif
   11539 		} else {
   11540 #if defined(SCTP_WITH_NO_CSUM)
   11541 			SCTP_STAT_INCR(sctps_sendnocrc);
   11542 #else
   11543 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
   11544 			mout->m_pkthdr.csum_flags = CSUM_SCTP;
   11545 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
   11546 			SCTP_STAT_INCR(sctps_sendhwcrc);
   11547 #else
   11548 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
   11549 			SCTP_STAT_INCR(sctps_sendswcrc);
   11550 #endif
   11551 #endif
   11552 		}
   11553 #ifdef SCTP_PACKET_LOGGING
   11554 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
   11555 			sctp_packet_log(o_pak);
   11556 		}
   11557 #endif
   11558 #if defined(__APPLE__) || defined(__Panda__)
   11559 		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
   11560 		/* Free the route if we got one back */
   11561 		if (ro.ro_rt) {
   11562 			RTFREE(ro.ro_rt);
   11563 			ro.ro_rt = NULL;
   11564 		}
   11565 #else
   11566 		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
   11567 #endif
   11568 		break;
   11569 #endif
   11570 #ifdef INET6
   11571 	case AF_INET6:
   11572 		ip6->ip6_plen = len - sizeof(struct ip6_hdr);
   11573 		if (port) {
   11574 #if defined(SCTP_WITH_NO_CSUM)
   11575 			SCTP_STAT_INCR(sctps_sendnocrc);
   11576 #else
   11577 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
   11578 			SCTP_STAT_INCR(sctps_sendswcrc);
   11579 #endif
   11580 #if defined(__Windows__)
   11581 			udp->uh_sum = 0;
   11582 #elif !defined(__Userspace__)
   11583 			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
   11584 				udp->uh_sum = 0xffff;
   11585 			}
   11586 #endif
   11587 		} else {
   11588 #if defined(SCTP_WITH_NO_CSUM)
   11589 			SCTP_STAT_INCR(sctps_sendnocrc);
   11590 #else
   11591 #if defined(__FreeBSD__) && __FreeBSD_version >= 900000
   11592 #if __FreeBSD_version > 901000
   11593 			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
   11594 #else
   11595 			mout->m_pkthdr.csum_flags = CSUM_SCTP;
   11596 #endif
   11597 			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
   11598 			SCTP_STAT_INCR(sctps_sendhwcrc);
   11599 #else
   11600 			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
   11601 			SCTP_STAT_INCR(sctps_sendswcrc);
   11602 #endif
   11603 #endif
   11604 		}
   11605 #ifdef SCTP_PACKET_LOGGING
   11606 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
   11607 			sctp_packet_log(o_pak);
   11608 		}
   11609 #endif
   11610 		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
   11611 		break;
   11612 #endif
   11613 #if defined(__Userspace__)
   11614 	case AF_CONN:
   11615 	{
   11616 		char *buffer;
   11617 		struct sockaddr_conn *sconn;
   11618 
   11619 		sconn = (struct sockaddr_conn *)src;
   11620 #if defined(SCTP_WITH_NO_CSUM)
   11621 		SCTP_STAT_INCR(sctps_sendnocrc);
   11622 #else
   11623 		shout->checksum = sctp_calculate_cksum(mout, 0);
   11624 		SCTP_STAT_INCR(sctps_sendswcrc);
   11625 #endif
   11626 #ifdef SCTP_PACKET_LOGGING
   11627 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
   11628 			sctp_packet_log(mout);
   11629 		}
   11630 #endif
   11631 		/* Don't alloc/free for each packet */
   11632 		if ((buffer = malloc(len)) != NULL) {
   11633 			m_copydata(mout, 0, len, buffer);
   11634 			SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
   11635 			free(buffer);
   11636 		}
   11637 		sctp_m_freem(mout);
   11638 		break;
   11639 	}
   11640 #endif
   11641 	default:
   11642 		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
   11643 		        dst->sa_family);
   11644 		sctp_m_freem(mout);
   11645 		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
   11646 		return;
   11647 	}
   11648 	SCTP_STAT_INCR(sctps_sendpackets);
   11649 	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
   11650 	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
   11651 	return;
   11652 }
   11653 
   11654 void
   11655 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
   11656                              struct sctphdr *sh,
   11657 #if defined(__FreeBSD__)
   11658                              uint8_t use_mflowid, uint32_t mflowid,
   11659 #endif
   11660                              uint32_t vrf_id, uint16_t port)
   11661 {
   11662 	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
   11663 #if defined(__FreeBSD__)
   11664 	                   use_mflowid, mflowid,
   11665 #endif
   11666 	                   vrf_id, port);
   11667 }
   11668 
   11669 void
   11670 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net,int so_locked
   11671 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
   11672 	SCTP_UNUSED
   11673 #endif
   11674 )
   11675 {
   11676 	struct sctp_tmit_chunk *chk;
   11677 	struct sctp_heartbeat_chunk *hb;
   11678 	struct timeval now;
   11679 
   11680 	SCTP_TCB_LOCK_ASSERT(stcb);
   11681 	if (net == NULL) {
   11682 		return;
   11683 	}
   11684 	(void)SCTP_GETTIME_TIMEVAL(&now);
   11685 	switch (net->ro._l_addr.sa.sa_family) {
   11686 #ifdef INET
   11687 	case AF_INET:
   11688 		break;
   11689 #endif
   11690 #ifdef INET6
   11691 	case AF_INET6:
   11692 		break;
   11693 #endif
   11694 #if defined(__Userspace__)
   11695 	case AF_CONN:
   11696 		break;
   11697 #endif
   11698 	default:
   11699 		return;
   11700 	}
   11701 	sctp_alloc_a_chunk(stcb, chk);
   11702 	if (chk == NULL) {
   11703 		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
   11704 		return;
   11705 	}
   11706 
   11707 	chk->copy_by_ref = 0;
   11708 	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
   11709 	chk->rec.chunk_id.can_take_data = 1;
   11710 	chk->asoc = &stcb->asoc;
   11711 	chk->send_size = sizeof(struct sctp_heartbeat_chunk);
   11712 
   11713 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
   11714 	if (chk->data == NULL) {
   11715 		sctp_free_a_chunk(stcb, chk, so_locked);
   11716 		return;
   11717 	}
   11718 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
   11719 	SCTP_BUF_LEN(chk->data) = chk->send_size;
   11720 	chk->sent = SCTP_DATAGRAM_UNSENT;
   11721 	chk->snd_count = 0;
   11722 	chk->whoTo = net;
   11723 	atomic_add_int(&chk->whoTo->ref_count, 1);
   11724 	/* Now we have a mbuf that we can fill in with the details */
   11725 	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
   11726 	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
   11727 	/* fill out chunk header */
   11728 	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
   11729 	hb->ch.chunk_flags = 0;
   11730 	hb->ch.chunk_length = htons(chk->send_size);
   11731 	/* Fill out hb parameter */
   11732 	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
   11733 	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
   11734 	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
   11735 	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
   11736 	/* Did our user request this one, put it in */
   11737 	hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
   11738 #ifdef HAVE_SA_LEN
   11739 	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
   11740 #else
   11741 	switch (net->ro._l_addr.sa.sa_family) {
   11742 #ifdef INET
   11743 	case AF_INET:
   11744 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
   11745 		break;
   11746 #endif
   11747 #ifdef INET6
   11748 	case AF_INET6:
   11749 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
   11750 		break;
   11751 #endif
   11752 #if defined(__Userspace__)
   11753 	case AF_CONN:
   11754 		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
   11755 		break;
   11756 #endif
   11757 	default:
   11758 		hb->heartbeat.hb_info.addr_len = 0;
   11759 		break;
   11760 	}
   11761 #endif
   11762 	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
   11763 		/*
   11764 		 * we only take from the entropy pool if the address is not
   11765 		 * confirmed.
   11766 		 */
   11767 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
   11768 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
   11769 	} else {
   11770 		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
   11771 		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
   11772 	}
   11773 	switch (net->ro._l_addr.sa.sa_family) {
   11774 #ifdef INET
   11775 	case AF_INET:
   11776 		memcpy(hb->heartbeat.hb_info.address,
   11777 		       &net->ro._l_addr.sin.sin_addr,
   11778 		       sizeof(net->ro._l_addr.sin.sin_addr));
   11779 		break;
   11780 #endif
   11781 #ifdef INET6
   11782 	case AF_INET6:
   11783 		memcpy(hb->heartbeat.hb_info.address,
   11784 		       &net->ro._l_addr.sin6.sin6_addr,
   11785 		       sizeof(net->ro._l_addr.sin6.sin6_addr));
   11786 		break;
   11787 #endif
   11788 #if defined(__Userspace__)
   11789 	case AF_CONN:
   11790 		memcpy(hb->heartbeat.hb_info.address,
   11791 		       &net->ro._l_addr.sconn.sconn_addr,
   11792 		       sizeof(net->ro._l_addr.sconn.sconn_addr));
   11793 		break;
   11794 #endif
   11795 	default:
   11796 		return;
   11797 		break;
   11798 	}
   11799 	net->hb_responded = 0;
   11800 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
   11801 	stcb->asoc.ctrl_queue_cnt++;
   11802 	SCTP_STAT_INCR(sctps_sendheartbeat);
   11803 	return;
   11804 }
   11805 
   11806 void
   11807 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
   11808 		   uint32_t high_tsn)
   11809 {
   11810 	struct sctp_association *asoc;
   11811 	struct sctp_ecne_chunk *ecne;
   11812 	struct sctp_tmit_chunk *chk;
   11813 
   11814 	if (net == NULL) {
   11815 		return;
   11816 	}
   11817 	asoc = &stcb->asoc;
   11818 	SCTP_TCB_LOCK_ASSERT(stcb);
   11819 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
   11820 		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
   11821 			/* found a previous ECN_ECHO update it if needed */
   11822 			uint32_t cnt, ctsn;
   11823 			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
   11824 			ctsn = ntohl(ecne->tsn);
   11825 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
   11826 				ecne->tsn = htonl(high_tsn);
   11827 				SCTP_STAT_INCR(sctps_queue_upd_ecne);
   11828 			}
   11829 			cnt = ntohl(ecne->num_pkts_since_cwr);
   11830 			cnt++;
   11831 			ecne->num_pkts_since_cwr = htonl(cnt);
   11832 			return;
   11833 		}
   11834 	}
   11835 	/* nope could not find one to update so we must build one */
   11836 	sctp_alloc_a_chunk(stcb, chk);
   11837 	if (chk == NULL) {
   11838 		return;
   11839 	}
   11840 	chk->copy_by_ref = 0;
   11841 	SCTP_STAT_INCR(sctps_queue_upd_ecne);
   11842 	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
   11843 	chk->rec.chunk_id.can_take_data = 0;
   11844 	chk->asoc = &stcb->asoc;
   11845 	chk->send_size = sizeof(struct sctp_ecne_chunk);
   11846 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
   11847 	if (chk->data == NULL) {
   11848 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   11849 		return;
   11850 	}
   11851 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
   11852 	SCTP_BUF_LEN(chk->data) = chk->send_size;
   11853 	chk->sent = SCTP_DATAGRAM_UNSENT;
   11854 	chk->snd_count = 0;
   11855 	chk->whoTo = net;
   11856 	atomic_add_int(&chk->whoTo->ref_count, 1);
   11857 
   11858 	stcb->asoc.ecn_echo_cnt_onq++;
   11859 	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
   11860 	ecne->ch.chunk_type = SCTP_ECN_ECHO;
   11861 	ecne->ch.chunk_flags = 0;
   11862 	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
   11863 	ecne->tsn = htonl(high_tsn);
   11864 	ecne->num_pkts_since_cwr = htonl(1);
   11865 	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
   11866 	asoc->ctrl_queue_cnt++;
   11867 }
   11868 
   11869 void
   11870 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
   11871     struct mbuf *m, int len, int iphlen, int bad_crc)
   11872 {
   11873 	struct sctp_association *asoc;
   11874 	struct sctp_pktdrop_chunk *drp;
   11875 	struct sctp_tmit_chunk *chk;
   11876 	uint8_t *datap;
   11877 	int was_trunc = 0;
   11878 	int fullsz = 0;
   11879 	long spc;
   11880 	int offset;
   11881 	struct sctp_chunkhdr *ch, chunk_buf;
   11882 	unsigned int chk_length;
   11883 
   11884         if (!stcb) {
   11885             return;
   11886         }
   11887 	asoc = &stcb->asoc;
   11888 	SCTP_TCB_LOCK_ASSERT(stcb);
   11889 	if (asoc->peer_supports_pktdrop == 0) {
   11890 		/*-
   11891 		 * peer must declare support before I send one.
   11892 		 */
   11893 		return;
   11894 	}
   11895 	if (stcb->sctp_socket == NULL) {
   11896 		return;
   11897 	}
   11898 	sctp_alloc_a_chunk(stcb, chk);
   11899 	if (chk == NULL) {
   11900 		return;
   11901 	}
   11902 	chk->copy_by_ref = 0;
   11903 	len -= iphlen;
   11904 	chk->send_size = len;
   11905         /* Validate that we do not have an ABORT in here. */
   11906 	offset = iphlen + sizeof(struct sctphdr);
   11907 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
   11908 						   sizeof(*ch), (uint8_t *) & chunk_buf);
   11909 	while (ch != NULL) {
   11910 		chk_length = ntohs(ch->chunk_length);
   11911 		if (chk_length < sizeof(*ch)) {
   11912 			/* break to abort land */
   11913 			break;
   11914 		}
   11915 		switch (ch->chunk_type) {
   11916 		case SCTP_PACKET_DROPPED:
   11917 		case SCTP_ABORT_ASSOCIATION:
   11918 		case SCTP_INITIATION_ACK:
   11919 			/**
   11920 			 * We don't respond with an PKT-DROP to an ABORT
   11921 			 * or PKT-DROP. We also do not respond to an
   11922 			 * INIT-ACK, because we can't know if the initiation
   11923 			 * tag is correct or not.
   11924 			 */
   11925 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   11926 			return;
   11927 		default:
   11928 			break;
   11929 		}
   11930 		offset += SCTP_SIZE32(chk_length);
   11931 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
   11932 		    sizeof(*ch), (uint8_t *) & chunk_buf);
   11933 	}
   11934 
   11935 	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
   11936 	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
   11937 		/* only send 1 mtu worth, trim off the
   11938 		 * excess on the end.
   11939 		 */
   11940 		fullsz = len;
   11941 		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
   11942 		was_trunc = 1;
   11943 	}
   11944 	chk->asoc = &stcb->asoc;
   11945 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
   11946 	if (chk->data == NULL) {
   11947 jump_out:
   11948 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   11949 		return;
   11950 	}
   11951 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
   11952 	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
   11953 	if (drp == NULL) {
   11954 		sctp_m_freem(chk->data);
   11955 		chk->data = NULL;
   11956 		goto jump_out;
   11957 	}
   11958 	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
   11959 	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
   11960 	chk->book_size_scale = 0;
   11961 	if (was_trunc) {
   11962 		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
   11963 		drp->trunc_len = htons(fullsz);
   11964 		/* Len is already adjusted to size minus overhead above
   11965 		 * take out the pkt_drop chunk itself from it.
   11966 		 */
   11967 		chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
   11968 		len = chk->send_size;
   11969 	} else {
   11970 		/* no truncation needed */
   11971 		drp->ch.chunk_flags = 0;
   11972 		drp->trunc_len = htons(0);
   11973 	}
   11974 	if (bad_crc) {
   11975 		drp->ch.chunk_flags |= SCTP_BADCRC;
   11976 	}
   11977 	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
   11978 	SCTP_BUF_LEN(chk->data) = chk->send_size;
   11979 	chk->sent = SCTP_DATAGRAM_UNSENT;
   11980 	chk->snd_count = 0;
   11981 	if (net) {
   11982 		/* we should hit here */
   11983 		chk->whoTo = net;
   11984 		atomic_add_int(&chk->whoTo->ref_count, 1);
   11985 	} else {
   11986 		chk->whoTo = NULL;
   11987 	}
   11988 	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
   11989 	chk->rec.chunk_id.can_take_data = 1;
   11990 	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
   11991 	drp->ch.chunk_length = htons(chk->send_size);
   11992 	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
   11993 	if (spc < 0) {
   11994 		spc = 0;
   11995 	}
   11996 	drp->bottle_bw = htonl(spc);
   11997 	if (asoc->my_rwnd) {
   11998 		drp->current_onq = htonl(asoc->size_on_reasm_queue +
   11999 		    asoc->size_on_all_streams +
   12000 		    asoc->my_rwnd_control_len +
   12001 		    stcb->sctp_socket->so_rcv.sb_cc);
   12002 	} else {
   12003 		/*-
   12004 		 * If my rwnd is 0, possibly from mbuf depletion as well as
   12005 		 * space used, tell the peer there is NO space aka onq == bw
   12006 		 */
   12007 		drp->current_onq = htonl(spc);
   12008 	}
   12009 	drp->reserved = 0;
   12010 	datap = drp->data;
   12011 	m_copydata(m, iphlen, len, (caddr_t)datap);
   12012 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
   12013 	asoc->ctrl_queue_cnt++;
   12014 }
   12015 
   12016 void
   12017 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
   12018 {
   12019 	struct sctp_association *asoc;
   12020 	struct sctp_cwr_chunk *cwr;
   12021 	struct sctp_tmit_chunk *chk;
   12022 
   12023 	SCTP_TCB_LOCK_ASSERT(stcb);
   12024 	if (net == NULL) {
   12025 		return;
   12026 	}
   12027 	asoc = &stcb->asoc;
   12028 	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
   12029 		if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
   12030 			/* found a previous CWR queued to same destination update it if needed */
   12031 			uint32_t ctsn;
   12032 			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
   12033 			ctsn = ntohl(cwr->tsn);
   12034 			if (SCTP_TSN_GT(high_tsn, ctsn)) {
   12035 				cwr->tsn = htonl(high_tsn);
   12036 			}
   12037 			if (override & SCTP_CWR_REDUCE_OVERRIDE) {
   12038 				/* Make sure override is carried */
   12039 				cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
   12040 			}
   12041 			return;
   12042 		}
   12043 	}
   12044 	sctp_alloc_a_chunk(stcb, chk);
   12045 	if (chk == NULL) {
   12046 		return;
   12047 	}
   12048 	chk->copy_by_ref = 0;
   12049 	chk->rec.chunk_id.id = SCTP_ECN_CWR;
   12050 	chk->rec.chunk_id.can_take_data = 1;
   12051 	chk->asoc = &stcb->asoc;
   12052 	chk->send_size = sizeof(struct sctp_cwr_chunk);
   12053 	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
   12054 	if (chk->data == NULL) {
   12055 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   12056 		return;
   12057 	}
   12058 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
   12059 	SCTP_BUF_LEN(chk->data) = chk->send_size;
   12060 	chk->sent = SCTP_DATAGRAM_UNSENT;
   12061 	chk->snd_count = 0;
   12062 	chk->whoTo = net;
   12063 	atomic_add_int(&chk->whoTo->ref_count, 1);
   12064 	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
   12065 	cwr->ch.chunk_type = SCTP_ECN_CWR;
   12066 	cwr->ch.chunk_flags = override;
   12067 	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
   12068 	cwr->tsn = htonl(high_tsn);
   12069 	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
   12070 	asoc->ctrl_queue_cnt++;
   12071 }
   12072 
   12073 void
   12074 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
   12075                           int number_entries, uint16_t * list,
   12076                           uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
   12077 {
   12078 	uint16_t len, old_len, i;
   12079 	struct sctp_stream_reset_out_request *req_out;
   12080 	struct sctp_chunkhdr *ch;
   12081 
   12082 	ch = mtod(chk->data, struct sctp_chunkhdr *);
   12083 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
   12084 
   12085 	/* get to new offset for the param. */
   12086 	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
   12087 	/* now how long will this param be? */
   12088 	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
   12089 	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
   12090 	req_out->ph.param_length = htons(len);
   12091 	req_out->request_seq = htonl(seq);
   12092 	req_out->response_seq = htonl(resp_seq);
   12093 	req_out->send_reset_at_tsn = htonl(last_sent);
   12094 	if (number_entries) {
   12095 		for (i = 0; i < number_entries; i++) {
   12096 			req_out->list_of_streams[i] = htons(list[i]);
   12097 		}
   12098 	}
   12099 	if (SCTP_SIZE32(len) > len) {
   12100 		/*-
   12101 		 * Need to worry about the pad we may end up adding to the
   12102 		 * end. This is easy since the struct is either aligned to 4
   12103 		 * bytes or 2 bytes off.
   12104 		 */
   12105 		req_out->list_of_streams[number_entries] = 0;
   12106 	}
   12107 	/* now fix the chunk length */
   12108 	ch->chunk_length = htons(len + old_len);
   12109 	chk->book_size = len + old_len;
   12110 	chk->book_size_scale = 0;
   12111 	chk->send_size = SCTP_SIZE32(chk->book_size);
   12112 	SCTP_BUF_LEN(chk->data) = chk->send_size;
   12113 	return;
   12114 }
   12115 
   12116 static void
   12117 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
   12118                          int number_entries, uint16_t *list,
   12119                          uint32_t seq)
   12120 {
   12121 	uint16_t len, old_len, i;
   12122 	struct sctp_stream_reset_in_request *req_in;
   12123 	struct sctp_chunkhdr *ch;
   12124 
   12125 	ch = mtod(chk->data, struct sctp_chunkhdr *);
   12126 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
   12127 
   12128 	/* get to new offset for the param. */
   12129 	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
   12130 	/* now how long will this param be? */
   12131 	len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
   12132 	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
   12133 	req_in->ph.param_length = htons(len);
   12134 	req_in->request_seq = htonl(seq);
   12135 	if (number_entries) {
   12136 		for (i = 0; i < number_entries; i++) {
   12137 			req_in->list_of_streams[i] = htons(list[i]);
   12138 		}
   12139 	}
   12140 	if (SCTP_SIZE32(len) > len) {
   12141 		/*-
   12142 		 * Need to worry about the pad we may end up adding to the
   12143 		 * end. This is easy since the struct is either aligned to 4
   12144 		 * bytes or 2 bytes off.
   12145 		 */
   12146 		req_in->list_of_streams[number_entries] = 0;
   12147 	}
   12148 	/* now fix the chunk length */
   12149 	ch->chunk_length = htons(len + old_len);
   12150 	chk->book_size = len + old_len;
   12151 	chk->book_size_scale = 0;
   12152 	chk->send_size = SCTP_SIZE32(chk->book_size);
   12153 	SCTP_BUF_LEN(chk->data) = chk->send_size;
   12154 	return;
   12155 }
   12156 
   12157 static void
   12158 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
   12159                           uint32_t seq)
   12160 {
   12161 	uint16_t len, old_len;
   12162 	struct sctp_stream_reset_tsn_request *req_tsn;
   12163 	struct sctp_chunkhdr *ch;
   12164 
   12165 	ch = mtod(chk->data, struct sctp_chunkhdr *);
   12166 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
   12167 
   12168 	/* get to new offset for the param. */
   12169 	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
   12170 	/* now how long will this param be? */
   12171 	len = sizeof(struct sctp_stream_reset_tsn_request);
   12172 	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
   12173 	req_tsn->ph.param_length = htons(len);
   12174 	req_tsn->request_seq = htonl(seq);
   12175 
   12176 	/* now fix the chunk length */
   12177 	ch->chunk_length = htons(len + old_len);
   12178 	chk->send_size = len + old_len;
   12179 	chk->book_size = SCTP_SIZE32(chk->send_size);
   12180 	chk->book_size_scale = 0;
   12181 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
   12182 	return;
   12183 }
   12184 
   12185 void
   12186 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
   12187                              uint32_t resp_seq, uint32_t result)
   12188 {
   12189 	uint16_t len, old_len;
   12190 	struct sctp_stream_reset_response *resp;
   12191 	struct sctp_chunkhdr *ch;
   12192 
   12193 	ch = mtod(chk->data, struct sctp_chunkhdr *);
   12194 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
   12195 
   12196 	/* get to new offset for the param. */
   12197 	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
   12198 	/* now how long will this param be? */
   12199 	len = sizeof(struct sctp_stream_reset_response);
   12200 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
   12201 	resp->ph.param_length = htons(len);
   12202 	resp->response_seq = htonl(resp_seq);
   12203 	resp->result = ntohl(result);
   12204 
   12205 	/* now fix the chunk length */
   12206 	ch->chunk_length = htons(len + old_len);
   12207 	chk->book_size = len + old_len;
   12208 	chk->book_size_scale = 0;
   12209 	chk->send_size = SCTP_SIZE32(chk->book_size);
   12210 	SCTP_BUF_LEN(chk->data) = chk->send_size;
   12211 	return;
   12212 }
   12213 
   12214 void
   12215 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
   12216                                  uint32_t resp_seq, uint32_t result,
   12217                                  uint32_t send_una, uint32_t recv_next)
   12218 {
   12219 	uint16_t len, old_len;
   12220 	struct sctp_stream_reset_response_tsn *resp;
   12221 	struct sctp_chunkhdr *ch;
   12222 
   12223 	ch = mtod(chk->data, struct sctp_chunkhdr *);
   12224 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
   12225 
   12226 	/* get to new offset for the param. */
   12227 	resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
   12228 	/* now how long will this param be? */
   12229 	len = sizeof(struct sctp_stream_reset_response_tsn);
   12230 	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
   12231 	resp->ph.param_length = htons(len);
   12232 	resp->response_seq = htonl(resp_seq);
   12233 	resp->result = htonl(result);
   12234 	resp->senders_next_tsn = htonl(send_una);
   12235 	resp->receivers_next_tsn = htonl(recv_next);
   12236 
   12237 	/* now fix the chunk length */
   12238 	ch->chunk_length = htons(len + old_len);
   12239 	chk->book_size = len + old_len;
   12240 	chk->send_size = SCTP_SIZE32(chk->book_size);
   12241 	chk->book_size_scale = 0;
   12242 	SCTP_BUF_LEN(chk->data) = chk->send_size;
   12243 	return;
   12244 }
   12245 
   12246 static void
   12247 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
   12248 		       uint32_t seq,
   12249 		       uint16_t adding)
   12250 {
   12251 	uint16_t len, old_len;
   12252 	struct sctp_chunkhdr *ch;
   12253 	struct sctp_stream_reset_add_strm *addstr;
   12254 
   12255 	ch = mtod(chk->data, struct sctp_chunkhdr *);
   12256 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
   12257 
   12258 	/* get to new offset for the param. */
   12259 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
   12260 	/* now how long will this param be? */
   12261 	len = sizeof(struct sctp_stream_reset_add_strm);
   12262 
   12263 	/* Fill it out. */
   12264 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
   12265 	addstr->ph.param_length = htons(len);
   12266 	addstr->request_seq = htonl(seq);
   12267 	addstr->number_of_streams = htons(adding);
   12268 	addstr->reserved = 0;
   12269 
   12270 	/* now fix the chunk length */
   12271 	ch->chunk_length = htons(len + old_len);
   12272 	chk->send_size = len + old_len;
   12273 	chk->book_size = SCTP_SIZE32(chk->send_size);
   12274 	chk->book_size_scale = 0;
   12275 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
   12276 	return;
   12277 }
   12278 
   12279 static void
   12280 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
   12281                       uint32_t seq,
   12282                       uint16_t adding)
   12283 {
   12284 	uint16_t len, old_len;
   12285 	struct sctp_chunkhdr *ch;
   12286 	struct sctp_stream_reset_add_strm *addstr;
   12287 
   12288 	ch = mtod(chk->data, struct sctp_chunkhdr *);
   12289 	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
   12290 
   12291 	/* get to new offset for the param. */
   12292 	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
   12293 	/* now how long will this param be? */
   12294 	len = sizeof(struct sctp_stream_reset_add_strm);
   12295 	/* Fill it out. */
   12296 	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
   12297 	addstr->ph.param_length = htons(len);
   12298 	addstr->request_seq = htonl(seq);
   12299 	addstr->number_of_streams = htons(adding);
   12300 	addstr->reserved = 0;
   12301 
   12302 	/* now fix the chunk length */
   12303 	ch->chunk_length = htons(len + old_len);
   12304 	chk->send_size = len + old_len;
   12305 	chk->book_size = SCTP_SIZE32(chk->send_size);
   12306 	chk->book_size_scale = 0;
   12307 	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
   12308 	return;
   12309 }
   12310 
   12311 int
   12312 sctp_send_str_reset_req(struct sctp_tcb *stcb,
   12313                         int number_entries, uint16_t *list,
   12314                         uint8_t send_out_req,
   12315                         uint8_t send_in_req,
   12316                         uint8_t send_tsn_req,
   12317                         uint8_t add_stream,
   12318                         uint16_t adding_o,
   12319                         uint16_t adding_i, uint8_t peer_asked)
   12320 {
   12321 
   12322 	struct sctp_association *asoc;
   12323 	struct sctp_tmit_chunk *chk;
   12324 	struct sctp_chunkhdr *ch;
   12325 	uint32_t seq;
   12326 
   12327 	asoc = &stcb->asoc;
   12328 	if (asoc->stream_reset_outstanding) {
   12329 		/*-
   12330 		 * Already one pending, must get ACK back to clear the flag.
   12331 		 */
   12332 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
   12333 		return (EBUSY);
   12334 	}
   12335 	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
   12336 	    (add_stream == 0)) {
   12337 		/* nothing to do */
   12338 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   12339 		return (EINVAL);
   12340 	}
   12341 	if (send_tsn_req && (send_out_req || send_in_req)) {
   12342 		/* error, can't do that */
   12343 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   12344 		return (EINVAL);
   12345 	}
   12346 	sctp_alloc_a_chunk(stcb, chk);
   12347 	if (chk == NULL) {
   12348 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12349 		return (ENOMEM);
   12350 	}
   12351 	chk->copy_by_ref = 0;
   12352 	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
   12353 	chk->rec.chunk_id.can_take_data = 0;
   12354 	chk->asoc = &stcb->asoc;
   12355 	chk->book_size = sizeof(struct sctp_chunkhdr);
   12356 	chk->send_size = SCTP_SIZE32(chk->book_size);
   12357 	chk->book_size_scale = 0;
   12358 
   12359 	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
   12360 	if (chk->data == NULL) {
   12361 		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
   12362 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12363 		return (ENOMEM);
   12364 	}
   12365 	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
   12366 
   12367 	/* setup chunk parameters */
   12368 	chk->sent = SCTP_DATAGRAM_UNSENT;
   12369 	chk->snd_count = 0;
   12370 	if (stcb->asoc.alternate) {
   12371 		chk->whoTo = stcb->asoc.alternate;
   12372 	} else {
   12373 		chk->whoTo = stcb->asoc.primary_destination;
   12374 	}
   12375 	atomic_add_int(&chk->whoTo->ref_count, 1);
   12376 	ch = mtod(chk->data, struct sctp_chunkhdr *);
   12377 	ch->chunk_type = SCTP_STREAM_RESET;
   12378 	ch->chunk_flags = 0;
   12379 	ch->chunk_length = htons(chk->book_size);
   12380 	SCTP_BUF_LEN(chk->data) = chk->send_size;
   12381 
   12382 	seq = stcb->asoc.str_reset_seq_out;
   12383 	if (send_out_req) {
   12384 		sctp_add_stream_reset_out(chk, number_entries, list,
   12385 					  seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
   12386 		asoc->stream_reset_out_is_outstanding = 1;
   12387 		seq++;
   12388 		asoc->stream_reset_outstanding++;
   12389 	}
   12390 	if ((add_stream & 1) &&
   12391 	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
   12392 		/* Need to allocate more */
   12393 		struct sctp_stream_out *oldstream;
   12394 		struct sctp_stream_queue_pending *sp, *nsp;
   12395 		int i;
   12396 
   12397 		oldstream = stcb->asoc.strmout;
   12398 		/* get some more */
   12399 		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
   12400 			    ((stcb->asoc.streamoutcnt+adding_o) * sizeof(struct sctp_stream_out)),
   12401 			    SCTP_M_STRMO);
   12402 		if (stcb->asoc.strmout == NULL) {
   12403 			uint8_t x;
   12404 			stcb->asoc.strmout = oldstream;
   12405 			/* Turn off the bit */
   12406 			x = add_stream & 0xfe;
   12407 			add_stream = x;
   12408 			goto skip_stuff;
   12409 		}
   12410 		/* Ok now we proceed with copying the old out stuff and
   12411 		 * initializing the new stuff.
   12412 		 */
   12413 		SCTP_TCB_SEND_LOCK(stcb);
   12414 		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
   12415 		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
   12416 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
   12417 			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
   12418 			stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
   12419 			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
   12420 			stcb->asoc.strmout[i].stream_no = i;
   12421 			stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
   12422 			/* now anything on those queues? */
   12423 			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
   12424 				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
   12425 				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
   12426 			}
   12427 			/* Now move assoc pointers too */
   12428 			if (stcb->asoc.last_out_stream == &oldstream[i]) {
   12429 				stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
   12430 			}
   12431 			if (stcb->asoc.locked_on_sending == &oldstream[i]) {
   12432 				stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
   12433 			}
   12434 		}
   12435 		/* now the new streams */
   12436 		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
   12437 		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
   12438 			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
   12439 			stcb->asoc.strmout[i].chunks_on_queues = 0;
   12440 			stcb->asoc.strmout[i].next_sequence_send = 0x0;
   12441 			stcb->asoc.strmout[i].stream_no = i;
   12442 			stcb->asoc.strmout[i].last_msg_incomplete = 0;
   12443 			stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
   12444 		}
   12445 		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
   12446 		SCTP_FREE(oldstream, SCTP_M_STRMO);
   12447 		SCTP_TCB_SEND_UNLOCK(stcb);
   12448 	}
   12449 skip_stuff:
   12450 	if ((add_stream & 1) && (adding_o > 0)) {
   12451 		asoc->strm_pending_add_size = adding_o;
   12452 		asoc->peer_req_out = peer_asked;
   12453 		sctp_add_an_out_stream(chk, seq, adding_o);
   12454 		seq++;
   12455 		asoc->stream_reset_outstanding++;
   12456 	}
   12457 	if ((add_stream & 2) && (adding_i > 0)) {
   12458 		sctp_add_an_in_stream(chk, seq, adding_i);
   12459 		seq++;
   12460 		asoc->stream_reset_outstanding++;
   12461 	}
   12462 	if (send_in_req) {
   12463 		sctp_add_stream_reset_in(chk, number_entries, list, seq);
   12464 		seq++;
   12465 		asoc->stream_reset_outstanding++;
   12466 	}
   12467 	if (send_tsn_req) {
   12468 		sctp_add_stream_reset_tsn(chk, seq);
   12469 		asoc->stream_reset_outstanding++;
   12470 	}
   12471 	asoc->str_reset = chk;
   12472 	/* insert the chunk for sending */
   12473 	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
   12474 			  chk,
   12475 			  sctp_next);
   12476 	asoc->ctrl_queue_cnt++;
   12477 	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
   12478 	return (0);
   12479 }
   12480 
   12481 void
   12482 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
   12483                 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
   12484 #if defined(__FreeBSD__)
   12485                 uint8_t use_mflowid, uint32_t mflowid,
   12486 #endif
   12487                 uint32_t vrf_id, uint16_t port)
   12488 {
   12489 	/* Don't respond to an ABORT with an ABORT. */
   12490 	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
   12491 		if (cause)
   12492 			sctp_m_freem(cause);
   12493 		return;
   12494 	}
   12495 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
   12496 #if defined(__FreeBSD__)
   12497 	                   use_mflowid, mflowid,
   12498 #endif
   12499 	                   vrf_id, port);
   12500 	return;
   12501 }
   12502 
   12503 void
   12504 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
   12505                    struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
   12506 #if defined(__FreeBSD__)
   12507                    uint8_t use_mflowid, uint32_t mflowid,
   12508 #endif
   12509                    uint32_t vrf_id, uint16_t port)
   12510 {
   12511 	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
   12512 #if defined(__FreeBSD__)
   12513 	                   use_mflowid, mflowid,
   12514 #endif
   12515 	                   vrf_id, port);
   12516 	return;
   12517 }
   12518 
   12519 static struct mbuf *
   12520 sctp_copy_resume(struct uio *uio,
   12521 		 int max_send_len,
   12522 #if defined(__FreeBSD__) && __FreeBSD_version > 602000
   12523 		 int user_marks_eor,
   12524 #endif
   12525 		 int *error,
   12526 		 uint32_t *sndout,
   12527 		 struct mbuf **new_tail)
   12528 {
   12529 #if defined(__Panda__)
   12530 	struct mbuf *m;
   12531 
   12532 	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
   12533 			(user_marks_eor ? M_EOR : 0));
   12534 	if (m == NULL) {
   12535 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12536 		*error = ENOMEM;
   12537 	} else {
   12538 		*sndout = m_length(m, NULL);
   12539 		*new_tail = m_last(m);
   12540 	}
   12541 	return (m);
   12542 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000
   12543 	struct mbuf *m;
   12544 
   12545 	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
   12546 		(M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
   12547 	if (m == NULL) {
   12548 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12549 		*error = ENOMEM;
   12550 	} else {
   12551 		*sndout = m_length(m, NULL);
   12552 		*new_tail = m_last(m);
   12553 	}
   12554 	return (m);
   12555 #else
   12556 	int left, cancpy, willcpy;
   12557 	struct mbuf *m, *head;
   12558 
   12559 #if defined(__APPLE__)
   12560 #if defined(APPLE_LEOPARD)
   12561         left = min(uio->uio_resid, max_send_len);
   12562 #else
   12563         left = min(uio_resid(uio), max_send_len);
   12564 #endif
   12565 #else
   12566         left = min(uio->uio_resid, max_send_len);
   12567 #endif
   12568 	/* Always get a header just in case */
   12569 	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
   12570 	if (head == NULL) {
   12571 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12572 		*error = ENOMEM;
   12573 		return (NULL);
   12574 	}
   12575 	cancpy = M_TRAILINGSPACE(head);
   12576 	willcpy = min(cancpy, left);
   12577 	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
   12578 	if (*error) {
   12579 		sctp_m_freem(head);
   12580 		return (NULL);
   12581 	}
   12582 	*sndout += willcpy;
   12583 	left -= willcpy;
   12584 	SCTP_BUF_LEN(head) = willcpy;
   12585 	m = head;
   12586 	*new_tail = head;
   12587 	while (left > 0) {
   12588 		/* move in user data */
   12589 		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
   12590 		if (SCTP_BUF_NEXT(m) == NULL) {
   12591 			sctp_m_freem(head);
   12592 			*new_tail = NULL;
   12593 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12594 			*error = ENOMEM;
   12595 			return (NULL);
   12596 		}
   12597 		m = SCTP_BUF_NEXT(m);
   12598 		cancpy = M_TRAILINGSPACE(m);
   12599 		willcpy = min(cancpy, left);
   12600 		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
   12601 		if (*error) {
   12602 			sctp_m_freem(head);
   12603 			*new_tail = NULL;
   12604 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
   12605 			*error = EFAULT;
   12606 			return (NULL);
   12607 		}
   12608 		SCTP_BUF_LEN(m) = willcpy;
   12609 		left -= willcpy;
   12610 		*sndout += willcpy;
   12611 		*new_tail = m;
   12612 		if (left == 0) {
   12613 			SCTP_BUF_NEXT(m) = NULL;
   12614 		}
   12615 	}
   12616 	return (head);
   12617 #endif
   12618 }
   12619 
   12620 static int
   12621 sctp_copy_one(struct sctp_stream_queue_pending *sp,
   12622 	      struct uio *uio,
   12623 	      int resv_upfront)
   12624 {
   12625 	int left;
   12626 #if defined(__Panda__)
   12627 	left = sp->length;
   12628 	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
   12629 			       resv_upfront, 0);
   12630 	if (sp->data == NULL) {
   12631 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12632 		return (ENOMEM);
   12633 	}
   12634 
   12635 	sp->tail_mbuf = m_last(sp->data);
   12636 	return (0);
   12637 
   12638 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000
   12639 	left = sp->length;
   12640 	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
   12641 			       resv_upfront, 0);
   12642 	if (sp->data == NULL) {
   12643 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12644 		return (ENOMEM);
   12645 	}
   12646 
   12647 	sp->tail_mbuf = m_last(sp->data);
   12648 	return (0);
   12649 #else
   12650 	int cancpy, willcpy, error;
   12651 	struct mbuf *m, *head;
   12652 	int cpsz = 0;
   12653 
   12654 	/* First one gets a header */
   12655 	left = sp->length;
   12656 	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
   12657 	if (m == NULL) {
   12658 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12659 		return (ENOMEM);
   12660 	}
   12661 	/*-
   12662 	 * Add this one for m in now, that way if the alloc fails we won't
   12663 	 * have a bad cnt.
   12664 	 */
   12665 	SCTP_BUF_RESV_UF(m, resv_upfront);
   12666 	cancpy = M_TRAILINGSPACE(m);
   12667 	willcpy = min(cancpy, left);
   12668 	while (left > 0) {
   12669 		/* move in user data */
   12670 		error = uiomove(mtod(m, caddr_t), willcpy, uio);
   12671 		if (error) {
   12672 			sctp_m_freem(head);
   12673 			return (error);
   12674 		}
   12675 		SCTP_BUF_LEN(m) = willcpy;
   12676 		left -= willcpy;
   12677 		cpsz += willcpy;
   12678 		if (left > 0) {
   12679 			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
   12680 			if (SCTP_BUF_NEXT(m) == NULL) {
   12681 				/*
   12682 				 * the head goes back to caller, he can free
   12683 				 * the rest
   12684 				 */
   12685 				sctp_m_freem(head);
   12686 				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12687 				return (ENOMEM);
   12688 			}
   12689 			m = SCTP_BUF_NEXT(m);
   12690 			cancpy = M_TRAILINGSPACE(m);
   12691 			willcpy = min(cancpy, left);
   12692 		} else {
   12693 			sp->tail_mbuf = m;
   12694 			SCTP_BUF_NEXT(m) = NULL;
   12695 		}
   12696 	}
   12697 	sp->data = head;
   12698 	sp->length = cpsz;
   12699 	return (0);
   12700 #endif
   12701 }
   12702 
   12703 
   12704 
   12705 static struct sctp_stream_queue_pending *
   12706 sctp_copy_it_in(struct sctp_tcb *stcb,
   12707     struct sctp_association *asoc,
   12708     struct sctp_sndrcvinfo *srcv,
   12709     struct uio *uio,
   12710     struct sctp_nets *net,
   12711     int max_send_len,
   12712     int user_marks_eor,
   12713     int *error)
   12714 
   12715 {
   12716 	/*-
   12717 	 * This routine must be very careful in its work. Protocol
   12718 	 * processing is up and running so care must be taken to spl...()
   12719 	 * when you need to do something that may effect the stcb/asoc. The
   12720 	 * sb is locked however. When data is copied the protocol processing
   12721 	 * should be enabled since this is a slower operation...
   12722 	 */
   12723 	struct sctp_stream_queue_pending *sp = NULL;
   12724 	int resv_in_first;
   12725 
   12726 	*error = 0;
   12727 	/* Now can we send this? */
   12728 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
   12729 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
   12730 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
   12731 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
   12732 		/* got data while shutting down */
   12733 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
   12734 		*error = ECONNRESET;
   12735 		goto out_now;
   12736 	}
   12737 	sctp_alloc_a_strmoq(stcb, sp);
   12738 	if (sp == NULL) {
   12739 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   12740 		*error = ENOMEM;
   12741 		goto out_now;
   12742 	}
   12743 	sp->act_flags = 0;
   12744 	sp->sender_all_done = 0;
   12745 	sp->sinfo_flags = srcv->sinfo_flags;
   12746 	sp->timetolive = srcv->sinfo_timetolive;
   12747 	sp->ppid = srcv->sinfo_ppid;
   12748 	sp->context = srcv->sinfo_context;
   12749 	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
   12750 
   12751 	sp->stream = srcv->sinfo_stream;
   12752 #if defined(__APPLE__)
   12753 #if defined(APPLE_LEOPARD)
   12754 	sp->length = min(uio->uio_resid, max_send_len);
   12755 #else
   12756 	sp->length = min(uio_resid(uio), max_send_len);
   12757 #endif
   12758 #else
   12759 	sp->length = min(uio->uio_resid, max_send_len);
   12760 #endif
   12761 #if defined(__APPLE__)
   12762 #if defined(APPLE_LEOPARD)
   12763 	if ((sp->length == (uint32_t)uio->uio_resid) &&
   12764 #else
   12765 	if ((sp->length == (uint32_t)uio_resid(uio)) &&
   12766 #endif
   12767 #else
   12768 	if ((sp->length == (uint32_t)uio->uio_resid) &&
   12769 #endif
   12770 	    ((user_marks_eor == 0) ||
   12771 	     (srcv->sinfo_flags & SCTP_EOF) ||
   12772 	     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
   12773 		sp->msg_is_complete = 1;
   12774 	} else {
   12775 		sp->msg_is_complete = 0;
   12776 	}
   12777 	sp->sender_all_done = 0;
   12778 	sp->some_taken = 0;
   12779 	sp->put_last_out = 0;
   12780 	resv_in_first = sizeof(struct sctp_data_chunk);
   12781 	sp->data = sp->tail_mbuf = NULL;
   12782 	if (sp->length == 0) {
   12783 		*error = 0;
   12784 		goto skip_copy;
   12785 	}
   12786 	if (srcv->sinfo_keynumber_valid) {
   12787 		sp->auth_keyid = srcv->sinfo_keynumber;
   12788 	} else {
   12789 		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
   12790 	}
   12791 	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
   12792 		sctp_auth_key_acquire(stcb, sp->auth_keyid);
   12793 		sp->holds_key_ref = 1;
   12794 	}
   12795 #if defined(__APPLE__)
   12796 	SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
   12797 #endif
   12798 	*error = sctp_copy_one(sp, uio, resv_in_first);
   12799 #if defined(__APPLE__)
   12800 	SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
   12801 #endif
   12802  skip_copy:
   12803 	if (*error) {
   12804 		sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
   12805 		sp = NULL;
   12806 	} else {
   12807 		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
   12808 			sp->net = net;
   12809 			atomic_add_int(&sp->net->ref_count, 1);
   12810 		} else {
   12811 			sp->net = NULL;
   12812 		}
   12813 		sctp_set_prsctp_policy(sp);
   12814 	}
   12815 out_now:
   12816 	return (sp);
   12817 }
   12818 
   12819 
   12820 int
   12821 sctp_sosend(struct socket *so,
   12822             struct sockaddr *addr,
   12823             struct uio *uio,
   12824 #ifdef __Panda__
   12825             pakhandle_type top,
   12826             pakhandle_type icontrol,
   12827 #else
   12828             struct mbuf *top,
   12829             struct mbuf *control,
   12830 #endif
   12831 #if defined(__APPLE__) || defined(__Panda__)
   12832             int flags
   12833 #else
   12834             int flags,
   12835 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
   12836             struct thread *p
   12837 #elif defined(__Windows__)
   12838             PKTHREAD p
   12839 #else
   12840 #if defined(__Userspace__)
   12841             /*
   12842 	     * proc is a dummy in __Userspace__ and will not be passed
   12843 	     * to sctp_lower_sosend
   12844 	     */
   12845 #endif
   12846             struct proc *p
   12847 #endif
   12848 #endif
   12849 )
   12850 {
   12851 #ifdef __Panda__
   12852 	struct mbuf *control = NULL;
   12853 #endif
   12854 #if defined(__APPLE__)
   12855 	struct proc *p = current_proc();
   12856 #endif
   12857 	int error, use_sndinfo = 0;
   12858 	struct sctp_sndrcvinfo sndrcvninfo;
   12859 	struct sockaddr *addr_to_use;
   12860 #if defined(INET) && defined(INET6)
   12861 	struct sockaddr_in sin;
   12862 #endif
   12863 
   12864 #if defined(__APPLE__)
   12865 	SCTP_SOCKET_LOCK(so, 1);
   12866 #endif
   12867 #ifdef __Panda__
   12868 	control = SCTP_HEADER_TO_CHAIN(icontrol);
   12869 #endif
   12870 	if (control) {
   12871 		/* process cmsg snd/rcv info (maybe a assoc-id) */
   12872 		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
   12873 		    sizeof(sndrcvninfo))) {
   12874 			/* got one */
   12875 			use_sndinfo = 1;
   12876 		}
   12877 	}
   12878 	addr_to_use = addr;
   12879 #if defined(INET) && defined(INET6)
   12880 	if ((addr) && (addr->sa_family == AF_INET6)) {
   12881 		struct sockaddr_in6 *sin6;
   12882 
   12883 		sin6 = (struct sockaddr_in6 *)addr;
   12884 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
   12885 			in6_sin6_2_sin(&sin, sin6);
   12886 			addr_to_use = (struct sockaddr *)&sin;
   12887 		}
   12888 	}
   12889 #endif
   12890 	error = sctp_lower_sosend(so, addr_to_use, uio, top,
   12891 #ifdef __Panda__
   12892 				  icontrol,
   12893 #else
   12894 				  control,
   12895 #endif
   12896 				  flags,
   12897 				  use_sndinfo ? &sndrcvninfo: NULL
   12898 #if !(defined(__Panda__) || defined(__Userspace__))
   12899 				  , p
   12900 #endif
   12901 		);
   12902 #if defined(__APPLE__)
   12903 	SCTP_SOCKET_UNLOCK(so, 1);
   12904 #endif
   12905 	return (error);
   12906 }
   12907 
   12908 
   12909 int
   12910 sctp_lower_sosend(struct socket *so,
   12911                   struct sockaddr *addr,
   12912                   struct uio *uio,
   12913 #ifdef __Panda__
   12914                   pakhandle_type i_pak,
   12915                   pakhandle_type i_control,
   12916 #else
   12917                   struct mbuf *i_pak,
   12918                   struct mbuf *control,
   12919 #endif
   12920                   int flags,
   12921                   struct sctp_sndrcvinfo *srcv
   12922 #if !(defined( __Panda__) || defined(__Userspace__))
   12923                   ,
   12924 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
   12925                   struct thread *p
   12926 #elif defined(__Windows__)
   12927                   PKTHREAD p
   12928 #else
   12929                   struct proc *p
   12930 #endif
   12931 #endif
   12932 	)
   12933 {
   12934 	unsigned int sndlen = 0, max_len;
   12935 	int error, len;
   12936 	struct mbuf *top = NULL;
   12937 #ifdef __Panda__
   12938 	struct mbuf *control = NULL;
   12939 #endif
   12940 	int queue_only = 0, queue_only_for_init = 0;
   12941 	int free_cnt_applied = 0;
   12942 	int un_sent;
   12943 	int now_filled = 0;
   12944 	unsigned int inqueue_bytes = 0;
   12945 	struct sctp_block_entry be;
   12946 	struct sctp_inpcb *inp;
   12947 	struct sctp_tcb *stcb = NULL;
   12948 	struct timeval now;
   12949 	struct sctp_nets *net;
   12950 	struct sctp_association *asoc;
   12951 	struct sctp_inpcb *t_inp;
   12952 	int user_marks_eor;
   12953 	int create_lock_applied = 0;
   12954 	int nagle_applies = 0;
   12955 	int some_on_control = 0;
   12956 	int got_all_of_the_send = 0;
   12957 	int hold_tcblock = 0;
   12958 	int non_blocking = 0;
   12959 	uint32_t local_add_more, local_soresv = 0;
   12960 	uint16_t port;
   12961 	uint16_t sinfo_flags;
   12962 	sctp_assoc_t sinfo_assoc_id;
   12963 
   12964 	error = 0;
   12965 	net = NULL;
   12966 	stcb = NULL;
   12967 	asoc = NULL;
   12968 
   12969 #if defined(__APPLE__)
   12970 	sctp_lock_assert(so);
   12971 #endif
   12972 	t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
   12973 	if (inp == NULL) {
   12974 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   12975 		error = EINVAL;
   12976 		if (i_pak) {
   12977 			SCTP_RELEASE_PKT(i_pak);
   12978 		}
   12979 		return (error);
   12980 	}
   12981 	if ((uio == NULL) && (i_pak == NULL)) {
   12982 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   12983 		return (EINVAL);
   12984 	}
   12985 	user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
   12986 	atomic_add_int(&inp->total_sends, 1);
   12987 	if (uio) {
   12988 #if defined(__APPLE__)
   12989 #if defined(APPLE_LEOPARD)
   12990 		if (uio->uio_resid < 0) {
   12991 #else
   12992 		if (uio_resid(uio) < 0) {
   12993 #endif
   12994 #else
   12995 		if (uio->uio_resid < 0) {
   12996 #endif
   12997 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   12998 			return (EINVAL);
   12999 		}
   13000 #if defined(__APPLE__)
   13001 #if defined(APPLE_LEOPARD)
   13002 		sndlen = uio->uio_resid;
   13003 #else
   13004 		sndlen = uio_resid(uio);
   13005 #endif
   13006 #else
   13007 		sndlen = uio->uio_resid;
   13008 #endif
   13009 	} else {
   13010 		top = SCTP_HEADER_TO_CHAIN(i_pak);
   13011 #ifdef __Panda__
   13012 		/*-
   13013 		 * app len indicates the datalen, dgsize for cases
   13014 		 * of SCTP_EOF/ABORT will not have the right len
   13015 		 */
   13016 		sndlen = SCTP_APP_DATA_LEN(i_pak);
   13017 		/*-
   13018 		 * Set the particle len also to zero to match
   13019 		 * up with app len. We only have one particle
   13020 		 * if app len is zero for Panda. This is ensured
   13021 		 * in the socket lib
   13022 		 */
   13023 		if (sndlen == 0) {
   13024 			SCTP_BUF_LEN(top)  = 0;
   13025 		}
   13026 		/*-
   13027 		 * We delink the chain from header, but keep
   13028 		 * the header around as we will need it in
   13029 		 * EAGAIN case
   13030 		 */
   13031 		SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
   13032 #else
   13033 		sndlen = SCTP_HEADER_LEN(i_pak);
   13034 #endif
   13035 	}
   13036 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
   13037 		(void *)addr,
   13038 	        sndlen);
   13039 #ifdef __Panda__
   13040 	if (i_control) {
   13041 		control = SCTP_HEADER_TO_CHAIN(i_control);
   13042 	}
   13043 #endif
   13044 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
   13045 	    (inp->sctp_socket->so_qlimit)) {
   13046 		/* The listener can NOT send */
   13047 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
   13048 		error = ENOTCONN;
   13049 		goto out_unlocked;
   13050 	}
   13051 	/**
   13052 	 * Pre-screen address, if one is given the sin-len
   13053 	 * must be set correctly!
   13054 	 */
   13055 	if (addr) {
   13056 		union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
   13057 		switch (raddr->sa.sa_family) {
   13058 #ifdef INET
   13059 		case AF_INET:
   13060 #ifdef HAVE_SIN_LEN
   13061 			if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
   13062 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13063 				error = EINVAL;
   13064 				goto out_unlocked;
   13065 			}
   13066 #endif
   13067 			port = raddr->sin.sin_port;
   13068 			break;
   13069 #endif
   13070 #ifdef INET6
   13071 		case AF_INET6:
   13072 #ifdef HAVE_SIN6_LEN
   13073 			if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
   13074 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13075 				error = EINVAL;
   13076 				goto out_unlocked;
   13077 			}
   13078 #endif
   13079 			port = raddr->sin6.sin6_port;
   13080 			break;
   13081 #endif
   13082 #if defined(__Userspace__)
   13083 		case AF_CONN:
   13084 #ifdef HAVE_SCONN_LEN
   13085 			if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
   13086 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13087 				error = EINVAL;
   13088 				goto out_unlocked;
   13089 			}
   13090 #endif
   13091 			port = raddr->sconn.sconn_port;
   13092 			break;
   13093 #endif
   13094 		default:
   13095 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
   13096 			error = EAFNOSUPPORT;
   13097 			goto out_unlocked;
   13098 		}
   13099 	} else
   13100 		port = 0;
   13101 
   13102 	if (srcv) {
   13103 		sinfo_flags = srcv->sinfo_flags;
   13104 		sinfo_assoc_id = srcv->sinfo_assoc_id;
   13105 		if (INVALID_SINFO_FLAG(sinfo_flags) ||
   13106 		    PR_SCTP_INVALID_POLICY(sinfo_flags)) {
   13107 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13108 			error = EINVAL;
   13109 			goto out_unlocked;
   13110 		}
   13111 		if (srcv->sinfo_flags)
   13112 			SCTP_STAT_INCR(sctps_sends_with_flags);
   13113 	} else {
   13114 		sinfo_flags = inp->def_send.sinfo_flags;
   13115 		sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
   13116 	}
   13117 	if (sinfo_flags & SCTP_SENDALL) {
   13118 		/* its a sendall */
   13119 		error = sctp_sendall(inp, uio, top, srcv);
   13120 		top = NULL;
   13121 		goto out_unlocked;
   13122 	}
   13123 	if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
   13124 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13125 		error = EINVAL;
   13126 		goto out_unlocked;
   13127 	}
   13128 	/* now we must find the assoc */
   13129 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
   13130 	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
   13131 		SCTP_INP_RLOCK(inp);
   13132 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
   13133 		if (stcb) {
   13134 			SCTP_TCB_LOCK(stcb);
   13135 			hold_tcblock = 1;
   13136 		}
   13137 		SCTP_INP_RUNLOCK(inp);
   13138 	} else if (sinfo_assoc_id) {
   13139 		stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
   13140 	} else if (addr) {
   13141 		/*-
   13142 		 * Since we did not use findep we must
   13143 		 * increment it, and if we don't find a tcb
   13144 		 * decrement it.
   13145 		 */
   13146 		SCTP_INP_WLOCK(inp);
   13147 		SCTP_INP_INCR_REF(inp);
   13148 		SCTP_INP_WUNLOCK(inp);
   13149 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
   13150 		if (stcb == NULL) {
   13151 			SCTP_INP_WLOCK(inp);
   13152 			SCTP_INP_DECR_REF(inp);
   13153 			SCTP_INP_WUNLOCK(inp);
   13154 		} else {
   13155 			hold_tcblock = 1;
   13156 		}
   13157 	}
   13158 	if ((stcb == NULL) && (addr)) {
   13159 		/* Possible implicit send? */
   13160 		SCTP_ASOC_CREATE_LOCK(inp);
   13161 		create_lock_applied = 1;
   13162 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
   13163 		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
   13164 			/* Should I really unlock ? */
   13165 			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13166 			error = EINVAL;
   13167 			goto out_unlocked;
   13168 
   13169 		}
   13170 		if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
   13171 		    (addr->sa_family == AF_INET6)) {
   13172 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13173 			error = EINVAL;
   13174 			goto out_unlocked;
   13175 		}
   13176 		SCTP_INP_WLOCK(inp);
   13177 		SCTP_INP_INCR_REF(inp);
   13178 		SCTP_INP_WUNLOCK(inp);
   13179 		/* With the lock applied look again */
   13180 		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
   13181 		if ((stcb == NULL) && (control != NULL) && (port > 0)) {
   13182 			stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
   13183 		}
   13184 		if (stcb == NULL) {
   13185 			SCTP_INP_WLOCK(inp);
   13186 			SCTP_INP_DECR_REF(inp);
   13187 			SCTP_INP_WUNLOCK(inp);
   13188 		} else {
   13189 			hold_tcblock = 1;
   13190 		}
   13191 		if (error) {
   13192 			goto out_unlocked;
   13193 		}
   13194 		if (t_inp != inp) {
   13195 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
   13196 			error = ENOTCONN;
   13197 			goto out_unlocked;
   13198 		}
   13199 	}
   13200 	if (stcb == NULL) {
   13201 		if (addr == NULL) {
   13202 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
   13203 			error = ENOENT;
   13204 			goto out_unlocked;
   13205 		} else {
   13206 			/* We must go ahead and start the INIT process */
   13207 			uint32_t vrf_id;
   13208 
   13209 			if ((sinfo_flags & SCTP_ABORT) ||
   13210 			    ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
   13211 				/*-
   13212 				 * User asks to abort a non-existant assoc,
   13213 				 * or EOF a non-existant assoc with no data
   13214 				 */
   13215 				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
   13216 				error = ENOENT;
   13217 				goto out_unlocked;
   13218 			}
   13219 			/* get an asoc/stcb struct */
   13220 			vrf_id = inp->def_vrf_id;
   13221 #ifdef INVARIANTS
   13222 			if (create_lock_applied == 0) {
   13223 				panic("Error, should hold create lock and I don't?");
   13224 			}
   13225 #endif
   13226 			stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
   13227 #if !(defined( __Panda__) || defined(__Userspace__))
   13228 					       p
   13229 #else
   13230 					       (struct proc *)NULL
   13231 #endif
   13232 				);
   13233 			if (stcb == NULL) {
   13234 				/* Error is setup for us in the call */
   13235 				goto out_unlocked;
   13236 			}
   13237 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
   13238 				stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
   13239 				/* Set the connected flag so we can queue data */
   13240 				soisconnecting(so);
   13241 			}
   13242 			hold_tcblock = 1;
   13243 			if (create_lock_applied) {
   13244 				SCTP_ASOC_CREATE_UNLOCK(inp);
   13245 				create_lock_applied = 0;
   13246 			} else {
   13247 				SCTP_PRINTF("Huh-3? create lock should have been on??\n");
   13248 			}
   13249 			/* Turn on queue only flag to prevent data from being sent */
   13250 			queue_only = 1;
   13251 			asoc = &stcb->asoc;
   13252 			SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
   13253 			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
   13254 
   13255 			/* initialize authentication params for the assoc */
   13256 			sctp_initialize_auth_params(inp, stcb);
   13257 
   13258 			if (control) {
   13259 				if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
   13260 					sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
   13261 					hold_tcblock = 0;
   13262 					stcb = NULL;
   13263 					goto out_unlocked;
   13264 				}
   13265 			}
   13266 			/* out with the INIT */
   13267 			queue_only_for_init = 1;
   13268 			/*-
   13269 			 * we may want to dig in after this call and adjust the MTU
   13270 			 * value. It defaulted to 1500 (constant) but the ro
   13271 			 * structure may now have an update and thus we may need to
   13272 			 * change it BEFORE we append the message.
   13273 			 */
   13274 		}
   13275 	} else
   13276 		asoc = &stcb->asoc;
   13277 	if (srcv == NULL)
   13278 		srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
   13279 	if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
   13280 		if (addr)
   13281 			net = sctp_findnet(stcb, addr);
   13282 		else
   13283 			net = NULL;
   13284 		if ((net == NULL) ||
   13285 		    ((port != 0) && (port != stcb->rport))) {
   13286 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13287 			error = EINVAL;
   13288 			goto out_unlocked;
   13289 		}
   13290 	} else {
   13291 		if (stcb->asoc.alternate) {
   13292 			net = stcb->asoc.alternate;
   13293 		} else {
   13294 			net = stcb->asoc.primary_destination;
   13295 		}
   13296 	}
   13297 	atomic_add_int(&stcb->total_sends, 1);
   13298 	/* Keep the stcb from being freed under our feet */
   13299 	atomic_add_int(&asoc->refcnt, 1);
   13300 	free_cnt_applied = 1;
   13301 
   13302 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
   13303 		if (sndlen > asoc->smallest_mtu) {
   13304 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
   13305 			error = EMSGSIZE;
   13306 			goto out_unlocked;
   13307 		}
   13308 	}
   13309 #if defined(__Userspace__)
   13310 	if (inp->recv_callback) {
   13311 		non_blocking = 1;
   13312 	}
   13313 #else
   13314 	if (SCTP_SO_IS_NBIO(so)
   13315 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
   13316 	     || (flags & MSG_NBIO)
   13317 #endif
   13318 	    ) {
   13319 		non_blocking = 1;
   13320 	}
   13321 #endif
   13322 	/* would we block? */
   13323 	if (non_blocking) {
   13324 		if (hold_tcblock == 0) {
   13325 			SCTP_TCB_LOCK(stcb);
   13326 			hold_tcblock = 1;
   13327 		}
   13328 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
   13329 		if ((SCTP_SB_LIMIT_SND(so) <  (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
   13330 		    (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
   13331 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
   13332 			if (sndlen > SCTP_SB_LIMIT_SND(so))
   13333 				error = EMSGSIZE;
   13334 			else
   13335 				error = EWOULDBLOCK;
   13336 			goto out_unlocked;
   13337 		}
   13338 		stcb->asoc.sb_send_resv += sndlen;
   13339 		SCTP_TCB_UNLOCK(stcb);
   13340 		hold_tcblock = 0;
   13341 	} else {
   13342 		atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
   13343 	}
   13344 	local_soresv = sndlen;
   13345 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
   13346 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
   13347 		error = ECONNRESET;
   13348 		goto out_unlocked;
   13349 	}
   13350 	if (create_lock_applied) {
   13351 		SCTP_ASOC_CREATE_UNLOCK(inp);
   13352 		create_lock_applied = 0;
   13353 	}
   13354 	if (asoc->stream_reset_outstanding) {
   13355 		/*
   13356 		 * Can't queue any data while stream reset is underway.
   13357 		 */
   13358 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
   13359 		error = EAGAIN;
   13360 		goto out_unlocked;
   13361 	}
   13362 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
   13363 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
   13364 		queue_only = 1;
   13365 	}
   13366 	/* we are now done with all control */
   13367 	if (control) {
   13368 		sctp_m_freem(control);
   13369 		control = NULL;
   13370 	}
   13371 	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
   13372 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
   13373 	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
   13374 	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
   13375 		if (srcv->sinfo_flags & SCTP_ABORT) {
   13376 			;
   13377 		} else {
   13378 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
   13379 			error = ECONNRESET;
   13380 			goto out_unlocked;
   13381 		}
   13382 	}
   13383 	/* Ok, we will attempt a msgsnd :> */
   13384 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
   13385 	if (p) {
   13386 #if defined(__FreeBSD__) && __FreeBSD_version >= 603000
   13387 		p->td_ru.ru_msgsnd++;
   13388 #elif defined(__FreeBSD__) && __FreeBSD_version >= 500000
   13389 		p->td_proc->p_stats->p_ru.ru_msgsnd++;
   13390 #else
   13391 		p->p_stats->p_ru.ru_msgsnd++;
   13392 #endif
   13393 	}
   13394 #endif
   13395 	/* Are we aborting? */
   13396 	if (srcv->sinfo_flags & SCTP_ABORT) {
   13397 		struct mbuf *mm;
   13398 		int tot_demand, tot_out = 0, max_out;
   13399 
   13400 		SCTP_STAT_INCR(sctps_sends_with_abort);
   13401 		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
   13402 		    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
   13403 			/* It has to be up before we abort */
   13404 			/* how big is the user initiated abort? */
   13405 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13406 			error = EINVAL;
   13407 			goto out;
   13408 		}
   13409 		if (hold_tcblock) {
   13410 			SCTP_TCB_UNLOCK(stcb);
   13411 			hold_tcblock = 0;
   13412 		}
   13413 		if (top) {
   13414 			struct mbuf *cntm = NULL;
   13415 
   13416 			mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
   13417 			if (sndlen != 0) {
   13418 				for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
   13419 					tot_out += SCTP_BUF_LEN(cntm);
   13420 				}
   13421 			}
   13422 		} else {
   13423 			/* Must fit in a MTU */
   13424 			tot_out = sndlen;
   13425 			tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
   13426 			if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
   13427 				/* To big */
   13428 				SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
   13429 				error = EMSGSIZE;
   13430 				goto out;
   13431 			}
   13432 			mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
   13433 		}
   13434 		if (mm == NULL) {
   13435 			SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
   13436 			error = ENOMEM;
   13437 			goto out;
   13438 		}
   13439 		max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
   13440 		max_out -= sizeof(struct sctp_abort_msg);
   13441 		if (tot_out > max_out) {
   13442 			tot_out = max_out;
   13443 		}
   13444 		if (mm) {
   13445 			struct sctp_paramhdr *ph;
   13446 
   13447 			/* now move forward the data pointer */
   13448 			ph = mtod(mm, struct sctp_paramhdr *);
   13449 			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
   13450 			ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out);
   13451 			ph++;
   13452 			SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
   13453 			if (top == NULL) {
   13454 #if defined(__APPLE__)
   13455 				SCTP_SOCKET_UNLOCK(so, 0);
   13456 #endif
   13457 				error = uiomove((caddr_t)ph, (int)tot_out, uio);
   13458 #if defined(__APPLE__)
   13459 				SCTP_SOCKET_LOCK(so, 0);
   13460 #endif
   13461 				if (error) {
   13462 					/*-
   13463 					 * Here if we can't get his data we
   13464 					 * still abort we just don't get to
   13465 					 * send the users note :-0
   13466 					 */
   13467 					sctp_m_freem(mm);
   13468 					mm = NULL;
   13469 				}
   13470 			} else {
   13471 				if (sndlen != 0) {
   13472 					SCTP_BUF_NEXT(mm) = top;
   13473 				}
   13474 			}
   13475 		}
   13476 		if (hold_tcblock == 0) {
   13477 			SCTP_TCB_LOCK(stcb);
   13478 		}
   13479 		atomic_add_int(&stcb->asoc.refcnt, -1);
   13480 		free_cnt_applied = 0;
   13481 		/* release this lock, otherwise we hang on ourselves */
   13482 		sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
   13483 		/* now relock the stcb so everything is sane */
   13484 		hold_tcblock = 0;
   13485 		stcb = NULL;
   13486 		/* In this case top is already chained to mm
   13487 		 * avoid double free, since we free it below if
   13488 		 * top != NULL and driver would free it after sending
   13489 		 * the packet out
   13490 		 */
   13491 		if (sndlen != 0) {
   13492 			top = NULL;
   13493 		}
   13494 		goto out_unlocked;
   13495 	}
   13496 	/* Calculate the maximum we can send */
   13497 	inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
   13498 	if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
   13499 		if (non_blocking) {
   13500 			/* we already checked for non-blocking above. */
   13501 			max_len = sndlen;
   13502 		} else {
   13503 			max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
   13504 		}
   13505 	} else {
   13506 		max_len = 0;
   13507 	}
   13508 	if (hold_tcblock) {
   13509 		SCTP_TCB_UNLOCK(stcb);
   13510 		hold_tcblock = 0;
   13511 	}
   13512 	/* Is the stream no. valid? */
   13513 	if (srcv->sinfo_stream >= asoc->streamoutcnt) {
   13514 		/* Invalid stream number */
   13515 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13516 		error = EINVAL;
   13517 		goto out_unlocked;
   13518 	}
   13519 	if (asoc->strmout == NULL) {
   13520 		/* huh? software error */
   13521 		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
   13522 		error = EFAULT;
   13523 		goto out_unlocked;
   13524 	}
   13525 
   13526 	/* Unless E_EOR mode is on, we must make a send FIT in one call. */
   13527 	if ((user_marks_eor == 0) &&
   13528 	    (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
   13529 		/* It will NEVER fit */
   13530 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
   13531 		error = EMSGSIZE;
   13532 		goto out_unlocked;
   13533 	}
   13534 	if ((uio == NULL) && user_marks_eor) {
   13535 		/*-
   13536 		 * We do not support eeor mode for
   13537 		 * sending with mbuf chains (like sendfile).
   13538 		 */
   13539 		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13540 		error = EINVAL;
   13541 		goto out_unlocked;
   13542 	}
   13543 
   13544 	if (user_marks_eor) {
   13545 		local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
   13546 	} else {
   13547 		/*-
   13548 		 * For non-eeor the whole message must fit in
   13549 		 * the socket send buffer.
   13550 		 */
   13551 		local_add_more = sndlen;
   13552 	}
   13553 	len = 0;
   13554 	if (non_blocking) {
   13555 		goto skip_preblock;
   13556 	}
   13557 	if (((max_len <= local_add_more) &&
   13558 	     (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
   13559 	    (max_len == 0) ||
   13560 	    ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
   13561 		/* No room right now ! */
   13562 		SOCKBUF_LOCK(&so->so_snd);
   13563 		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
   13564 		while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
   13565 		       ((stcb->asoc.stream_queue_cnt+stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
   13566 			SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
   13567 			        (unsigned int)SCTP_SB_LIMIT_SND(so),
   13568 			        inqueue_bytes,
   13569 			        local_add_more,
   13570 			        stcb->asoc.stream_queue_cnt,
   13571 			        stcb->asoc.chunks_on_out_queue,
   13572 			        SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
   13573 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
   13574 				sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
   13575 			}
   13576 			be.error = 0;
   13577 #if !defined(__Panda__) && !defined(__Windows__)
   13578 			stcb->block_entry = &be;
   13579 #endif
   13580 			error = sbwait(&so->so_snd);
   13581 			stcb->block_entry = NULL;
   13582 			if (error || so->so_error || be.error) {
   13583 				if (error == 0) {
   13584 					if (so->so_error)
   13585 						error = so->so_error;
   13586 					if (be.error) {
   13587 						error = be.error;
   13588 					}
   13589 				}
   13590 				SOCKBUF_UNLOCK(&so->so_snd);
   13591 				goto out_unlocked;
   13592 			}
   13593 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
   13594 				sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
   13595 				               asoc, stcb->asoc.total_output_queue_size);
   13596 			}
   13597 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
   13598 				goto out_unlocked;
   13599 			}
   13600 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
   13601 		}
   13602 		if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
   13603 			max_len = SCTP_SB_LIMIT_SND(so) -  inqueue_bytes;
   13604 		} else {
   13605 			max_len = 0;
   13606 		}
   13607 		SOCKBUF_UNLOCK(&so->so_snd);
   13608 	}
   13609 
   13610 skip_preblock:
   13611 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
   13612 		goto out_unlocked;
   13613 	}
   13614 #if defined(__APPLE__)
   13615 	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
   13616 #endif
   13617 	/* sndlen covers for mbuf case
   13618 	 * uio_resid covers for the non-mbuf case
   13619 	 * NOTE: uio will be null when top/mbuf is passed
   13620 	 */
   13621 	if (sndlen == 0) {
   13622 		if (srcv->sinfo_flags & SCTP_EOF) {
   13623 			got_all_of_the_send = 1;
   13624 			goto dataless_eof;
   13625 		} else {
   13626 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13627 			error = EINVAL;
   13628 			goto out;
   13629 		}
   13630 	}
   13631 	if (top == NULL) {
   13632 		struct sctp_stream_queue_pending *sp;
   13633 		struct sctp_stream_out *strm;
   13634 		uint32_t sndout;
   13635 
   13636 		SCTP_TCB_SEND_LOCK(stcb);
   13637 		if ((asoc->stream_locked) &&
   13638 		    (asoc->stream_locked_on  != srcv->sinfo_stream)) {
   13639 			SCTP_TCB_SEND_UNLOCK(stcb);
   13640 			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
   13641 			error = EINVAL;
   13642 			goto out;
   13643 		}
   13644 		SCTP_TCB_SEND_UNLOCK(stcb);
   13645 
   13646 		strm = &stcb->asoc.strmout[srcv->sinfo_stream];
   13647 		if (strm->last_msg_incomplete == 0) {
   13648 		do_a_copy_in:
   13649 			sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
   13650 			if ((sp == NULL) || (error)) {
   13651 				goto out;
   13652 			}
   13653 			SCTP_TCB_SEND_LOCK(stcb);
   13654 			if (sp->msg_is_complete) {
   13655 				strm->last_msg_incomplete = 0;
   13656 				asoc->stream_locked = 0;
   13657 			} else {
   13658 				/* Just got locked to this guy in
   13659 				 * case of an interrupt.
   13660 				 */
   13661 				strm->last_msg_incomplete = 1;
   13662 				asoc->stream_locked = 1;
   13663 				asoc->stream_locked_on  = srcv->sinfo_stream;
   13664 				sp->sender_all_done = 0;
   13665 			}
   13666 			sctp_snd_sb_alloc(stcb, sp->length);
   13667 			atomic_add_int(&asoc->stream_queue_cnt, 1);
   13668 			if (srcv->sinfo_flags & SCTP_UNORDERED) {
   13669 				SCTP_STAT_INCR(sctps_sends_with_unord);
   13670 			}
   13671 			TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
   13672 			stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
   13673 			SCTP_TCB_SEND_UNLOCK(stcb);
   13674 		} else {
   13675 			SCTP_TCB_SEND_LOCK(stcb);
   13676 			sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
   13677 			SCTP_TCB_SEND_UNLOCK(stcb);
   13678 			if (sp == NULL) {
   13679 				/* ???? Huh ??? last msg is gone */
   13680 #ifdef INVARIANTS
   13681 				panic("Warning: Last msg marked incomplete, yet nothing left?");
   13682 #else
   13683 				SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
   13684 				strm->last_msg_incomplete = 0;
   13685 #endif
   13686 				goto do_a_copy_in;
   13687 
   13688 			}
   13689 		}
   13690 #if defined(__APPLE__)
   13691 #if defined(APPLE_LEOPARD)
   13692 		while (uio->uio_resid > 0) {
   13693 #else
   13694 		while (uio_resid(uio) > 0) {
   13695 #endif
   13696 #else
   13697 		while (uio->uio_resid > 0) {
   13698 #endif
   13699 			/* How much room do we have? */
   13700 			struct mbuf *new_tail, *mm;
   13701 
   13702 			if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
   13703 				max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
   13704 			else
   13705 				max_len = 0;
   13706 
   13707 			if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
   13708 			    (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
   13709 #if defined(__APPLE__)
   13710 #if defined(APPLE_LEOPARD)
   13711 			    (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
   13712 #else
   13713 			    (uio_resid(uio) && (uio_resid(uio) <= (int)max_len))) {
   13714 #endif
   13715 #else
   13716 			    (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
   13717 #endif
   13718 				sndout = 0;
   13719 				new_tail = NULL;
   13720 				if (hold_tcblock) {
   13721 					SCTP_TCB_UNLOCK(stcb);
   13722 					hold_tcblock = 0;
   13723 				}
   13724 #if defined(__APPLE__)
   13725 				SCTP_SOCKET_UNLOCK(so, 0);
   13726 #endif
   13727 #if defined(__FreeBSD__) && __FreeBSD_version > 602000
   13728 				    mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
   13729 #else
   13730 				    mm = sctp_copy_resume(uio, max_len, &error, &sndout, &new_tail);
   13731 #endif
   13732 #if defined(__APPLE__)
   13733 				SCTP_SOCKET_LOCK(so, 0);
   13734 #endif
   13735 				if ((mm == NULL) || error) {
   13736 					if (mm) {
   13737 						sctp_m_freem(mm);
   13738 					}
   13739 					goto out;
   13740 				}
   13741 				/* Update the mbuf and count */
   13742 				SCTP_TCB_SEND_LOCK(stcb);
   13743 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
   13744 					/* we need to get out.
   13745 					 * Peer probably aborted.
   13746 					 */
   13747 					sctp_m_freem(mm);
   13748 					if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
   13749 						SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
   13750 						error = ECONNRESET;
   13751 					}
   13752 					SCTP_TCB_SEND_UNLOCK(stcb);
   13753 					goto out;
   13754 				}
   13755 				if (sp->tail_mbuf) {
   13756 					/* tack it to the end */
   13757 					SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
   13758 					sp->tail_mbuf = new_tail;
   13759 				} else {
   13760 					/* A stolen mbuf */
   13761 					sp->data = mm;
   13762 					sp->tail_mbuf = new_tail;
   13763 				}
   13764 				sctp_snd_sb_alloc(stcb, sndout);
   13765 				atomic_add_int(&sp->length,sndout);
   13766 				len += sndout;
   13767 
   13768 				/* Did we reach EOR? */
   13769 #if defined(__APPLE__)
   13770 #if defined(APPLE_LEOPARD)
   13771 				if ((uio->uio_resid == 0) &&
   13772 #else
   13773 				if ((uio_resid(uio) == 0) &&
   13774 #endif
   13775 #else
   13776 				if ((uio->uio_resid == 0) &&
   13777 #endif
   13778 				    ((user_marks_eor == 0) ||
   13779 				     (srcv->sinfo_flags & SCTP_EOF) ||
   13780 				     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
   13781 					sp->msg_is_complete = 1;
   13782 				} else {
   13783 					sp->msg_is_complete = 0;
   13784 				}
   13785 				SCTP_TCB_SEND_UNLOCK(stcb);
   13786 			}
   13787 #if defined(__APPLE__)
   13788 #if defined(APPLE_LEOPARD)
   13789 			if (uio->uio_resid == 0) {
   13790 #else
   13791 			if (uio_resid(uio) == 0) {
   13792 #endif
   13793 #else
   13794 			if (uio->uio_resid == 0) {
   13795 #endif
   13796 				/* got it all? */
   13797 				continue;
   13798 			}
   13799 			/* PR-SCTP? */
   13800 			if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
   13801 				/* This is ugly but we must assure locking order */
   13802 				if (hold_tcblock == 0) {
   13803 					SCTP_TCB_LOCK(stcb);
   13804 					hold_tcblock = 1;
   13805 				}
   13806 				sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
   13807 				inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
   13808 				if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
   13809 					max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
   13810 				else
   13811 					max_len = 0;
   13812 				if (max_len > 0) {
   13813 					continue;
   13814 				}
   13815 				SCTP_TCB_UNLOCK(stcb);
   13816 				hold_tcblock = 0;
   13817 			}
   13818 			/* wait for space now */
   13819 			if (non_blocking) {
   13820 				/* Non-blocking io in place out */
   13821 				goto skip_out_eof;
   13822 			}
   13823 			/* What about the INIT, send it maybe */
   13824 			if (queue_only_for_init) {
   13825 				if (hold_tcblock == 0) {
   13826 					SCTP_TCB_LOCK(stcb);
   13827 					hold_tcblock = 1;
   13828 				}
   13829 				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
   13830 					/* a collision took us forward? */
   13831 					queue_only = 0;
   13832 				} else {
   13833 					sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
   13834 					SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
   13835 					queue_only = 1;
   13836 				}
   13837 			}
   13838 			if ((net->flight_size > net->cwnd) &&
   13839 			    (asoc->sctp_cmt_on_off == 0)) {
   13840 				SCTP_STAT_INCR(sctps_send_cwnd_avoid);
   13841 				queue_only = 1;
   13842 			} else if (asoc->ifp_had_enobuf) {
   13843 				SCTP_STAT_INCR(sctps_ifnomemqueued);
   13844 				if (net->flight_size > (2 * net->mtu)) {
   13845 					queue_only = 1;
   13846 				}
   13847 				asoc->ifp_had_enobuf = 0;
   13848 			}
   13849 			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
   13850 			           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
   13851 			if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
   13852 			    (stcb->asoc.total_flight > 0) &&
   13853 			    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
   13854 			    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
   13855 
   13856 				/*-
   13857 				 * Ok, Nagle is set on and we have data outstanding.
   13858 				 * Don't send anything and let SACKs drive out the
   13859 				 * data unless wen have a "full" segment to send.
   13860 				 */
   13861 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
   13862 					sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
   13863 				}
   13864 				SCTP_STAT_INCR(sctps_naglequeued);
   13865 				nagle_applies = 1;
   13866 			} else {
   13867 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
   13868 					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
   13869 						sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
   13870 				}
   13871 				SCTP_STAT_INCR(sctps_naglesent);
   13872 				nagle_applies = 0;
   13873 			}
   13874 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
   13875 
   13876 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
   13877 					       nagle_applies, un_sent);
   13878 				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
   13879 					       stcb->asoc.total_flight,
   13880 					       stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
   13881 			}
   13882 			if (queue_only_for_init)
   13883 				queue_only_for_init = 0;
   13884 			if ((queue_only == 0) && (nagle_applies == 0)) {
   13885 				/*-
   13886 				 * need to start chunk output
   13887 				 * before blocking.. note that if
   13888 				 * a lock is already applied, then
   13889 				 * the input via the net is happening
   13890 				 * and I don't need to start output :-D
   13891 				 */
   13892 				if (hold_tcblock == 0) {
   13893 					if (SCTP_TCB_TRYLOCK(stcb)) {
   13894 						hold_tcblock = 1;
   13895 						sctp_chunk_output(inp,
   13896 								  stcb,
   13897 								  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
   13898 					}
   13899 				} else {
   13900 					sctp_chunk_output(inp,
   13901 							  stcb,
   13902 							  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
   13903 				}
   13904 				if (hold_tcblock == 1) {
   13905 					SCTP_TCB_UNLOCK(stcb);
   13906 					hold_tcblock = 0;
   13907 				}
   13908 			}
   13909 			SOCKBUF_LOCK(&so->so_snd);
   13910 			/*-
   13911 			 * This is a bit strange, but I think it will
   13912 			 * work. The total_output_queue_size is locked and
   13913 			 * protected by the TCB_LOCK, which we just released.
   13914 			 * There is a race that can occur between releasing it
   13915 			 * above, and me getting the socket lock, where sacks
   13916 			 * come in but we have not put the SB_WAIT on the
   13917 			 * so_snd buffer to get the wakeup. After the LOCK
   13918 			 * is applied the sack_processing will also need to
   13919 			 * LOCK the so->so_snd to do the actual sowwakeup(). So
   13920 			 * once we have the socket buffer lock if we recheck the
   13921 			 * size we KNOW we will get to sleep safely with the
   13922 			 * wakeup flag in place.
   13923 			 */
   13924 			if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
   13925 						      min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
   13926 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
   13927 #if defined(__APPLE__)
   13928 #if defined(APPLE_LEOPARD)
   13929 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
   13930 						       asoc, uio->uio_resid);
   13931 #else
   13932 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
   13933 						       asoc, uio_resid(uio));
   13934 #endif
   13935 #else
   13936 					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
   13937 						       asoc, uio->uio_resid);
   13938 #endif
   13939 				}
   13940 				be.error = 0;
   13941 #if !defined(__Panda__) && !defined(__Windows__)
   13942 				stcb->block_entry = &be;
   13943 #endif
   13944 #if defined(__APPLE__)
   13945 				sbunlock(&so->so_snd, 1);
   13946 #endif
   13947 				error = sbwait(&so->so_snd);
   13948 				stcb->block_entry = NULL;
   13949 
   13950 				if (error || so->so_error || be.error) {
   13951 					if (error == 0) {
   13952 						if (so->so_error)
   13953 							error = so->so_error;
   13954 						if (be.error) {
   13955 							error = be.error;
   13956 						}
   13957 					}
   13958 					SOCKBUF_UNLOCK(&so->so_snd);
   13959 					goto out_unlocked;
   13960 				}
   13961 
   13962 #if defined(__APPLE__)
   13963 				error = sblock(&so->so_snd, SBLOCKWAIT(flags));
   13964 #endif
   13965 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
   13966 					sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
   13967 						       asoc, stcb->asoc.total_output_queue_size);
   13968 				}
   13969 			}
   13970 			SOCKBUF_UNLOCK(&so->so_snd);
   13971 			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
   13972 				goto out_unlocked;
   13973 			}
   13974 		}
   13975 		SCTP_TCB_SEND_LOCK(stcb);
   13976 		if (sp) {
   13977 			if (sp->msg_is_complete == 0) {
   13978 				strm->last_msg_incomplete = 1;
   13979 				asoc->stream_locked = 1;
   13980 				asoc->stream_locked_on  = srcv->sinfo_stream;
   13981 			} else {
   13982 				sp->sender_all_done = 1;
   13983 				strm->last_msg_incomplete = 0;
   13984 				asoc->stream_locked = 0;
   13985 			}
   13986 		} else {
   13987 			SCTP_PRINTF("Huh no sp TSNH?\n");
   13988 			strm->last_msg_incomplete = 0;
   13989 			asoc->stream_locked = 0;
   13990 		}
   13991 		SCTP_TCB_SEND_UNLOCK(stcb);
   13992 #if defined(__APPLE__)
   13993 #if defined(APPLE_LEOPARD)
   13994 		if (uio->uio_resid == 0) {
   13995 #else
   13996 		if (uio_resid(uio) == 0) {
   13997 #endif
   13998 #else
   13999 		if (uio->uio_resid == 0) {
   14000 #endif
   14001 			got_all_of_the_send = 1;
   14002 		}
   14003 	} else {
   14004 		/* We send in a 0, since we do NOT have any locks */
   14005 		error = sctp_msg_append(stcb, net, top, srcv, 0);
   14006 		top = NULL;
   14007 		if (srcv->sinfo_flags & SCTP_EOF) {
   14008 			/*
   14009 			 * This should only happen for Panda for the mbuf
   14010 			 * send case, which does NOT yet support EEOR mode.
   14011 			 * Thus, we can just set this flag to do the proper
   14012 			 * EOF handling.
   14013 			 */
   14014 			got_all_of_the_send = 1;
   14015 		}
   14016 	}
   14017 	if (error) {
   14018 		goto out;
   14019 	}
   14020 dataless_eof:
   14021 	/* EOF thing ? */
   14022 	if ((srcv->sinfo_flags & SCTP_EOF) &&
   14023 	    (got_all_of_the_send == 1)) {
   14024 		int cnt;
   14025 		SCTP_STAT_INCR(sctps_sends_with_eof);
   14026 		error = 0;
   14027 		if (hold_tcblock == 0) {
   14028 			SCTP_TCB_LOCK(stcb);
   14029 			hold_tcblock = 1;
   14030 		}
   14031 		cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
   14032 		if (TAILQ_EMPTY(&asoc->send_queue) &&
   14033 		    TAILQ_EMPTY(&asoc->sent_queue) &&
   14034 		    (cnt == 0)) {
   14035 			if (asoc->locked_on_sending) {
   14036 				goto abort_anyway;
   14037 			}
   14038 			/* there is nothing queued to send, so I'm done... */
   14039 			if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
   14040 			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
   14041 			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
   14042 				struct sctp_nets *netp;
   14043 
   14044 				/* only send SHUTDOWN the first time through */
   14045 				if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
   14046 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   14047 				}
   14048 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
   14049 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
   14050 				sctp_stop_timers_for_shutdown(stcb);
   14051 				if (stcb->asoc.alternate) {
   14052 					netp = stcb->asoc.alternate;
   14053 				} else {
   14054 					netp = stcb->asoc.primary_destination;
   14055 				}
   14056 				sctp_send_shutdown(stcb, netp);
   14057 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
   14058 				                 netp);
   14059 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
   14060 				                 asoc->primary_destination);
   14061 			}
   14062 		} else {
   14063 			/*-
   14064 			 * we still got (or just got) data to send, so set
   14065 			 * SHUTDOWN_PENDING
   14066 			 */
   14067 			/*-
   14068 			 * XXX sockets draft says that SCTP_EOF should be
   14069 			 * sent with no data.  currently, we will allow user
   14070 			 * data to be sent first and move to
   14071 			 * SHUTDOWN-PENDING
   14072 			 */
   14073 			if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
   14074 			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
   14075 			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
   14076 				if (hold_tcblock == 0) {
   14077 					SCTP_TCB_LOCK(stcb);
   14078 					hold_tcblock = 1;
   14079 				}
   14080 				if (asoc->locked_on_sending) {
   14081 					/* Locked to send out the data */
   14082 					struct sctp_stream_queue_pending *sp;
   14083 					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
   14084 					if (sp) {
   14085 						if ((sp->length == 0) && (sp->msg_is_complete == 0))
   14086 							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
   14087 					}
   14088 				}
   14089 				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
   14090 				if (TAILQ_EMPTY(&asoc->send_queue) &&
   14091 				    TAILQ_EMPTY(&asoc->sent_queue) &&
   14092 				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
   14093 				abort_anyway:
   14094 					if (free_cnt_applied) {
   14095 						atomic_add_int(&stcb->asoc.refcnt, -1);
   14096 						free_cnt_applied = 0;
   14097 					}
   14098 					sctp_abort_an_association(stcb->sctp_ep, stcb,
   14099 					                          NULL, SCTP_SO_LOCKED);
   14100 					/* now relock the stcb so everything is sane */
   14101 					hold_tcblock = 0;
   14102 					stcb = NULL;
   14103 					goto out;
   14104 				}
   14105 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
   14106 				                 asoc->primary_destination);
   14107 				sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
   14108 			}
   14109 		}
   14110 	}
   14111 skip_out_eof:
   14112 	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
   14113 		some_on_control = 1;
   14114 	}
   14115 	if (queue_only_for_init) {
   14116 		if (hold_tcblock == 0) {
   14117 			SCTP_TCB_LOCK(stcb);
   14118 			hold_tcblock = 1;
   14119 		}
   14120 		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
   14121 			/* a collision took us forward? */
   14122 			queue_only = 0;
   14123 		} else {
   14124 			sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
   14125 			SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
   14126 			queue_only = 1;
   14127 		}
   14128 	}
   14129 	if ((net->flight_size > net->cwnd) &&
   14130 	    (stcb->asoc.sctp_cmt_on_off == 0)) {
   14131 		SCTP_STAT_INCR(sctps_send_cwnd_avoid);
   14132 		queue_only = 1;
   14133 	} else if (asoc->ifp_had_enobuf) {
   14134 		SCTP_STAT_INCR(sctps_ifnomemqueued);
   14135 		if (net->flight_size > (2 * net->mtu)) {
   14136 			queue_only = 1;
   14137 		}
   14138 		asoc->ifp_had_enobuf = 0;
   14139 	}
   14140 	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
   14141 	           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
   14142 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
   14143 	    (stcb->asoc.total_flight > 0) &&
   14144 	    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
   14145 	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
   14146 		/*-
   14147 		 * Ok, Nagle is set on and we have data outstanding.
   14148 		 * Don't send anything and let SACKs drive out the
   14149 		 * data unless wen have a "full" segment to send.
   14150 		 */
   14151 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
   14152 			sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
   14153 		}
   14154 		SCTP_STAT_INCR(sctps_naglequeued);
   14155 		nagle_applies = 1;
   14156 	} else {
   14157 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
   14158 			if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
   14159 				sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
   14160 		}
   14161 		SCTP_STAT_INCR(sctps_naglesent);
   14162 		nagle_applies = 0;
   14163 	}
   14164 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
   14165 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
   14166 		               nagle_applies, un_sent);
   14167 		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
   14168 		               stcb->asoc.total_flight,
   14169 		               stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
   14170 	}
   14171 	if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
   14172 		/* we can attempt to send too. */
   14173 		if (hold_tcblock == 0) {
   14174 			/* If there is activity recv'ing sacks no need to send */
   14175 			if (SCTP_TCB_TRYLOCK(stcb)) {
   14176 				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
   14177 				hold_tcblock = 1;
   14178 			}
   14179 		} else {
   14180 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
   14181 		}
   14182 	} else if ((queue_only == 0) &&
   14183 	           (stcb->asoc.peers_rwnd == 0) &&
   14184 	           (stcb->asoc.total_flight == 0)) {
   14185 		/* We get to have a probe outstanding */
   14186 		if (hold_tcblock == 0) {
   14187 			hold_tcblock = 1;
   14188 			SCTP_TCB_LOCK(stcb);
   14189 		}
   14190 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
   14191 	} else if (some_on_control) {
   14192 		int num_out, reason, frag_point;
   14193 
   14194 		/* Here we do control only */
   14195 		if (hold_tcblock == 0) {
   14196 			hold_tcblock = 1;
   14197 			SCTP_TCB_LOCK(stcb);
   14198 		}
   14199 		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
   14200 		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
   14201 		                            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
   14202 	}
   14203 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
   14204 	        queue_only, stcb->asoc.peers_rwnd, un_sent,
   14205 		stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
   14206 	        stcb->asoc.total_output_queue_size, error);
   14207 
   14208 out:
   14209 #if defined(__APPLE__)
   14210 	sbunlock(&so->so_snd, 1);
   14211 #endif
   14212 out_unlocked:
   14213 
   14214 	if (local_soresv && stcb) {
   14215 		atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
   14216 	}
   14217 	if (create_lock_applied) {
   14218 		SCTP_ASOC_CREATE_UNLOCK(inp);
   14219 	}
   14220 	if ((stcb) && hold_tcblock) {
   14221 		SCTP_TCB_UNLOCK(stcb);
   14222 	}
   14223 	if (stcb && free_cnt_applied) {
   14224 		atomic_add_int(&stcb->asoc.refcnt, -1);
   14225 	}
   14226 #ifdef INVARIANTS
   14227 #if !defined(__APPLE__)
   14228 	if (stcb) {
   14229 		if (mtx_owned(&stcb->tcb_mtx)) {
   14230 			panic("Leaving with tcb mtx owned?");
   14231 		}
   14232 		if (mtx_owned(&stcb->tcb_send_mtx)) {
   14233 			panic("Leaving with tcb send mtx owned?");
   14234 		}
   14235 	}
   14236 #endif
   14237 #endif
   14238 #ifdef __Panda__
   14239 	/*
   14240 	 * Handle the EAGAIN/ENOMEM cases to reattach the pak header
   14241 	 * to particle when pak is passed in, so that caller
   14242 	 * can try again with this pak
   14243 	 *
   14244 	 * NOTE: For other cases, including success case,
   14245 	 * we simply want to return the header back to free
   14246 	 * pool
   14247 	 */
   14248 	if (top) {
   14249 		if ((error == EAGAIN) || (error == ENOMEM)) {
   14250 			SCTP_ATTACH_CHAIN(i_pak, top, sndlen);
   14251 			top = NULL;
   14252 		} else {
   14253 			(void)SCTP_RELEASE_HEADER(i_pak);
   14254 		}
   14255 	} else {
   14256 		/* This is to handle cases when top has
   14257 		 * been reset to NULL but pak might not
   14258 		 * be freed
   14259 		 */
   14260 		if (i_pak) {
   14261 			(void)SCTP_RELEASE_HEADER(i_pak);
   14262 		}
   14263 	}
   14264 #endif
   14265 #ifdef INVARIANTS
   14266 	if (inp) {
   14267 		sctp_validate_no_locks(inp);
   14268 	} else {
   14269 		SCTP_PRINTF("Warning - inp is NULL so cant validate locks\n");
   14270 	}
   14271 #endif
   14272 	if (top) {
   14273 		sctp_m_freem(top);
   14274 	}
   14275 	if (control) {
   14276 		sctp_m_freem(control);
   14277 	}
   14278 	return (error);
   14279 }
   14280 
   14281 
   14282 /*
   14283  * generate an AUTHentication chunk, if required
   14284  */
   14285 struct mbuf *
   14286 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
   14287     struct sctp_auth_chunk **auth_ret, uint32_t * offset,
   14288     struct sctp_tcb *stcb, uint8_t chunk)
   14289 {
   14290 	struct mbuf *m_auth;
   14291 	struct sctp_auth_chunk *auth;
   14292 	int chunk_len;
   14293 	struct mbuf *cn;
   14294 
   14295 	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
   14296 	    (stcb == NULL))
   14297 		return (m);
   14298 
   14299 	/* sysctl disabled auth? */
   14300 	if (SCTP_BASE_SYSCTL(sctp_auth_disable))
   14301 		return (m);
   14302 
   14303 	/* peer doesn't do auth... */
   14304 	if (!stcb->asoc.peer_supports_auth) {
   14305 		return (m);
   14306 	}
   14307 	/* does the requested chunk require auth? */
   14308 	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
   14309 		return (m);
   14310 	}
   14311 	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
   14312 	if (m_auth == NULL) {
   14313 		/* no mbuf's */
   14314 		return (m);
   14315 	}
   14316 	/* reserve some space if this will be the first mbuf */
   14317 	if (m == NULL)
   14318 		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
   14319 	/* fill in the AUTH chunk details */
   14320 	auth = mtod(m_auth, struct sctp_auth_chunk *);
   14321 	bzero(auth, sizeof(*auth));
   14322 	auth->ch.chunk_type = SCTP_AUTHENTICATION;
   14323 	auth->ch.chunk_flags = 0;
   14324 	chunk_len = sizeof(*auth) +
   14325 	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
   14326 	auth->ch.chunk_length = htons(chunk_len);
   14327 	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
   14328 	/* key id and hmac digest will be computed and filled in upon send */
   14329 
   14330 	/* save the offset where the auth was inserted into the chain */
   14331 	*offset = 0;
   14332 	for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
   14333 		*offset += SCTP_BUF_LEN(cn);
   14334 	}
   14335 
   14336 	/* update length and return pointer to the auth chunk */
   14337 	SCTP_BUF_LEN(m_auth) = chunk_len;
   14338 	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
   14339 	if (auth_ret != NULL)
   14340 		*auth_ret = auth;
   14341 
   14342 	return (m);
   14343 }
   14344 
   14345 #if defined(__FreeBSD__)  || defined(__APPLE__)
   14346 #ifdef INET6
   14347 int
   14348 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
   14349 {
   14350 	struct nd_prefix *pfx = NULL;
   14351 	struct nd_pfxrouter *pfxrtr = NULL;
   14352 	struct sockaddr_in6 gw6;
   14353 
   14354 	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
   14355 		return (0);
   14356 
   14357 	/* get prefix entry of address */
   14358 	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
   14359 		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
   14360 			continue;
   14361 		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
   14362 		    &src6->sin6_addr, &pfx->ndpr_mask))
   14363 			break;
   14364 	}
   14365 	/* no prefix entry in the prefix list */
   14366 	if (pfx == NULL) {
   14367 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
   14368 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
   14369 		return (0);
   14370 	}
   14371 
   14372 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
   14373 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
   14374 
   14375 	/* search installed gateway from prefix entry */
   14376 	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
   14377 		memset(&gw6, 0, sizeof(struct sockaddr_in6));
   14378 		gw6.sin6_family = AF_INET6;
   14379 #ifdef HAVE_SIN6_LEN
   14380 		gw6.sin6_len = sizeof(struct sockaddr_in6);
   14381 #endif
   14382 		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
   14383 		    sizeof(struct in6_addr));
   14384 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
   14385 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
   14386 		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
   14387 		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
   14388 		if (sctp_cmpaddr((struct sockaddr *)&gw6,
   14389 				ro->ro_rt->rt_gateway)) {
   14390 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
   14391 			return (1);
   14392 		}
   14393 	}
   14394 	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
   14395 	return (0);
   14396 }
   14397 #endif
   14398 
   14399 int
   14400 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
   14401 {
   14402 #ifdef INET
   14403 	struct sockaddr_in *sin, *mask;
   14404 	struct ifaddr *ifa;
   14405 	struct in_addr srcnetaddr, gwnetaddr;
   14406 
   14407 	if (ro == NULL || ro->ro_rt == NULL ||
   14408 	    sifa->address.sa.sa_family != AF_INET) {
   14409 		return (0);
   14410 	}
   14411 	ifa = (struct ifaddr *)sifa->ifa;
   14412 	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
   14413 	sin = (struct sockaddr_in *)&sifa->address.sin;
   14414 	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
   14415 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
   14416 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
   14417 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
   14418 
   14419 	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
   14420 	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
   14421 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
   14422 	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
   14423 	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
   14424 	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
   14425 		return (1);
   14426 	}
   14427 #endif
   14428 	return (0);
   14429 }
   14430 #elif defined(__Userspace__)
   14431 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
   14432 int
   14433 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
   14434 {
   14435     return (0);
   14436 }
   14437 int
   14438 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
   14439 {
   14440     return (0);
   14441 }
   14442 
   14443 #endif
   14444