1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33 #ifdef __FreeBSD__ 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 271230 2014-09-07 18:05:37Z tuexen $"); 36 #endif 37 38 #include <netinet/sctp_os.h> 39 #ifdef __FreeBSD__ 40 #include <sys/proc.h> 41 #endif 42 #include <netinet/sctp_var.h> 43 #include <netinet/sctp_sysctl.h> 44 #include <netinet/sctp_header.h> 45 #include <netinet/sctp_pcb.h> 46 #include <netinet/sctputil.h> 47 #include <netinet/sctp_output.h> 48 #include <netinet/sctp_uio.h> 49 #include <netinet/sctputil.h> 50 #include <netinet/sctp_auth.h> 51 #include <netinet/sctp_timer.h> 52 #include <netinet/sctp_asconf.h> 53 #include <netinet/sctp_indata.h> 54 #include <netinet/sctp_bsd_addr.h> 55 #include <netinet/sctp_input.h> 56 #include <netinet/sctp_crc32.h> 57 #if defined(__Userspace_os_Linux) 58 #define __FAVOR_BSD /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */ 59 #endif 60 #if defined(INET) || defined(INET6) 61 #if !defined(__Userspace_os_Windows) 62 #include <netinet/udp.h> 63 #endif 64 #endif 65 #if defined(__APPLE__) 66 #include <netinet/in.h> 67 #endif 68 #if defined(__FreeBSD__) 69 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000 70 #include <netinet/udp_var.h> 71 #endif 72 #include <machine/in_cksum.h> 73 #endif 74 #if defined(__Userspace__) && defined(INET6) 75 #include <netinet6/sctp6_var.h> 76 #endif 77 78 #if defined(__APPLE__) 79 #define APPLE_FILE_NO 3 80 #endif 81 82 #if defined(__APPLE__) 83 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)) 84 #define SCTP_MAX_LINKHDR 16 85 #endif 86 #endif 87 88 #define SCTP_MAX_GAPS_INARRAY 4 89 struct sack_track { 90 uint8_t right_edge; /* mergable on the right edge */ 91 uint8_t left_edge; /* mergable on the left edge */ 92 uint8_t num_entries; 93 uint8_t spare; 94 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY]; 95 }; 96 97 struct sack_track sack_array[256] = { 98 {0, 0, 0, 0, /* 0x00 */ 99 {{0, 0}, 100 {0, 0}, 101 {0, 0}, 102 {0, 0} 103 } 104 }, 105 
{1, 0, 1, 0, /* 0x01 */ 106 {{0, 0}, 107 {0, 0}, 108 {0, 0}, 109 {0, 0} 110 } 111 }, 112 {0, 0, 1, 0, /* 0x02 */ 113 {{1, 1}, 114 {0, 0}, 115 {0, 0}, 116 {0, 0} 117 } 118 }, 119 {1, 0, 1, 0, /* 0x03 */ 120 {{0, 1}, 121 {0, 0}, 122 {0, 0}, 123 {0, 0} 124 } 125 }, 126 {0, 0, 1, 0, /* 0x04 */ 127 {{2, 2}, 128 {0, 0}, 129 {0, 0}, 130 {0, 0} 131 } 132 }, 133 {1, 0, 2, 0, /* 0x05 */ 134 {{0, 0}, 135 {2, 2}, 136 {0, 0}, 137 {0, 0} 138 } 139 }, 140 {0, 0, 1, 0, /* 0x06 */ 141 {{1, 2}, 142 {0, 0}, 143 {0, 0}, 144 {0, 0} 145 } 146 }, 147 {1, 0, 1, 0, /* 0x07 */ 148 {{0, 2}, 149 {0, 0}, 150 {0, 0}, 151 {0, 0} 152 } 153 }, 154 {0, 0, 1, 0, /* 0x08 */ 155 {{3, 3}, 156 {0, 0}, 157 {0, 0}, 158 {0, 0} 159 } 160 }, 161 {1, 0, 2, 0, /* 0x09 */ 162 {{0, 0}, 163 {3, 3}, 164 {0, 0}, 165 {0, 0} 166 } 167 }, 168 {0, 0, 2, 0, /* 0x0a */ 169 {{1, 1}, 170 {3, 3}, 171 {0, 0}, 172 {0, 0} 173 } 174 }, 175 {1, 0, 2, 0, /* 0x0b */ 176 {{0, 1}, 177 {3, 3}, 178 {0, 0}, 179 {0, 0} 180 } 181 }, 182 {0, 0, 1, 0, /* 0x0c */ 183 {{2, 3}, 184 {0, 0}, 185 {0, 0}, 186 {0, 0} 187 } 188 }, 189 {1, 0, 2, 0, /* 0x0d */ 190 {{0, 0}, 191 {2, 3}, 192 {0, 0}, 193 {0, 0} 194 } 195 }, 196 {0, 0, 1, 0, /* 0x0e */ 197 {{1, 3}, 198 {0, 0}, 199 {0, 0}, 200 {0, 0} 201 } 202 }, 203 {1, 0, 1, 0, /* 0x0f */ 204 {{0, 3}, 205 {0, 0}, 206 {0, 0}, 207 {0, 0} 208 } 209 }, 210 {0, 0, 1, 0, /* 0x10 */ 211 {{4, 4}, 212 {0, 0}, 213 {0, 0}, 214 {0, 0} 215 } 216 }, 217 {1, 0, 2, 0, /* 0x11 */ 218 {{0, 0}, 219 {4, 4}, 220 {0, 0}, 221 {0, 0} 222 } 223 }, 224 {0, 0, 2, 0, /* 0x12 */ 225 {{1, 1}, 226 {4, 4}, 227 {0, 0}, 228 {0, 0} 229 } 230 }, 231 {1, 0, 2, 0, /* 0x13 */ 232 {{0, 1}, 233 {4, 4}, 234 {0, 0}, 235 {0, 0} 236 } 237 }, 238 {0, 0, 2, 0, /* 0x14 */ 239 {{2, 2}, 240 {4, 4}, 241 {0, 0}, 242 {0, 0} 243 } 244 }, 245 {1, 0, 3, 0, /* 0x15 */ 246 {{0, 0}, 247 {2, 2}, 248 {4, 4}, 249 {0, 0} 250 } 251 }, 252 {0, 0, 2, 0, /* 0x16 */ 253 {{1, 2}, 254 {4, 4}, 255 {0, 0}, 256 {0, 0} 257 } 258 }, 259 {1, 0, 2, 0, /* 0x17 */ 260 {{0, 2}, 261 
{4, 4}, 262 {0, 0}, 263 {0, 0} 264 } 265 }, 266 {0, 0, 1, 0, /* 0x18 */ 267 {{3, 4}, 268 {0, 0}, 269 {0, 0}, 270 {0, 0} 271 } 272 }, 273 {1, 0, 2, 0, /* 0x19 */ 274 {{0, 0}, 275 {3, 4}, 276 {0, 0}, 277 {0, 0} 278 } 279 }, 280 {0, 0, 2, 0, /* 0x1a */ 281 {{1, 1}, 282 {3, 4}, 283 {0, 0}, 284 {0, 0} 285 } 286 }, 287 {1, 0, 2, 0, /* 0x1b */ 288 {{0, 1}, 289 {3, 4}, 290 {0, 0}, 291 {0, 0} 292 } 293 }, 294 {0, 0, 1, 0, /* 0x1c */ 295 {{2, 4}, 296 {0, 0}, 297 {0, 0}, 298 {0, 0} 299 } 300 }, 301 {1, 0, 2, 0, /* 0x1d */ 302 {{0, 0}, 303 {2, 4}, 304 {0, 0}, 305 {0, 0} 306 } 307 }, 308 {0, 0, 1, 0, /* 0x1e */ 309 {{1, 4}, 310 {0, 0}, 311 {0, 0}, 312 {0, 0} 313 } 314 }, 315 {1, 0, 1, 0, /* 0x1f */ 316 {{0, 4}, 317 {0, 0}, 318 {0, 0}, 319 {0, 0} 320 } 321 }, 322 {0, 0, 1, 0, /* 0x20 */ 323 {{5, 5}, 324 {0, 0}, 325 {0, 0}, 326 {0, 0} 327 } 328 }, 329 {1, 0, 2, 0, /* 0x21 */ 330 {{0, 0}, 331 {5, 5}, 332 {0, 0}, 333 {0, 0} 334 } 335 }, 336 {0, 0, 2, 0, /* 0x22 */ 337 {{1, 1}, 338 {5, 5}, 339 {0, 0}, 340 {0, 0} 341 } 342 }, 343 {1, 0, 2, 0, /* 0x23 */ 344 {{0, 1}, 345 {5, 5}, 346 {0, 0}, 347 {0, 0} 348 } 349 }, 350 {0, 0, 2, 0, /* 0x24 */ 351 {{2, 2}, 352 {5, 5}, 353 {0, 0}, 354 {0, 0} 355 } 356 }, 357 {1, 0, 3, 0, /* 0x25 */ 358 {{0, 0}, 359 {2, 2}, 360 {5, 5}, 361 {0, 0} 362 } 363 }, 364 {0, 0, 2, 0, /* 0x26 */ 365 {{1, 2}, 366 {5, 5}, 367 {0, 0}, 368 {0, 0} 369 } 370 }, 371 {1, 0, 2, 0, /* 0x27 */ 372 {{0, 2}, 373 {5, 5}, 374 {0, 0}, 375 {0, 0} 376 } 377 }, 378 {0, 0, 2, 0, /* 0x28 */ 379 {{3, 3}, 380 {5, 5}, 381 {0, 0}, 382 {0, 0} 383 } 384 }, 385 {1, 0, 3, 0, /* 0x29 */ 386 {{0, 0}, 387 {3, 3}, 388 {5, 5}, 389 {0, 0} 390 } 391 }, 392 {0, 0, 3, 0, /* 0x2a */ 393 {{1, 1}, 394 {3, 3}, 395 {5, 5}, 396 {0, 0} 397 } 398 }, 399 {1, 0, 3, 0, /* 0x2b */ 400 {{0, 1}, 401 {3, 3}, 402 {5, 5}, 403 {0, 0} 404 } 405 }, 406 {0, 0, 2, 0, /* 0x2c */ 407 {{2, 3}, 408 {5, 5}, 409 {0, 0}, 410 {0, 0} 411 } 412 }, 413 {1, 0, 3, 0, /* 0x2d */ 414 {{0, 0}, 415 {2, 3}, 416 {5, 5}, 417 {0, 0} 418 } 419 
}, 420 {0, 0, 2, 0, /* 0x2e */ 421 {{1, 3}, 422 {5, 5}, 423 {0, 0}, 424 {0, 0} 425 } 426 }, 427 {1, 0, 2, 0, /* 0x2f */ 428 {{0, 3}, 429 {5, 5}, 430 {0, 0}, 431 {0, 0} 432 } 433 }, 434 {0, 0, 1, 0, /* 0x30 */ 435 {{4, 5}, 436 {0, 0}, 437 {0, 0}, 438 {0, 0} 439 } 440 }, 441 {1, 0, 2, 0, /* 0x31 */ 442 {{0, 0}, 443 {4, 5}, 444 {0, 0}, 445 {0, 0} 446 } 447 }, 448 {0, 0, 2, 0, /* 0x32 */ 449 {{1, 1}, 450 {4, 5}, 451 {0, 0}, 452 {0, 0} 453 } 454 }, 455 {1, 0, 2, 0, /* 0x33 */ 456 {{0, 1}, 457 {4, 5}, 458 {0, 0}, 459 {0, 0} 460 } 461 }, 462 {0, 0, 2, 0, /* 0x34 */ 463 {{2, 2}, 464 {4, 5}, 465 {0, 0}, 466 {0, 0} 467 } 468 }, 469 {1, 0, 3, 0, /* 0x35 */ 470 {{0, 0}, 471 {2, 2}, 472 {4, 5}, 473 {0, 0} 474 } 475 }, 476 {0, 0, 2, 0, /* 0x36 */ 477 {{1, 2}, 478 {4, 5}, 479 {0, 0}, 480 {0, 0} 481 } 482 }, 483 {1, 0, 2, 0, /* 0x37 */ 484 {{0, 2}, 485 {4, 5}, 486 {0, 0}, 487 {0, 0} 488 } 489 }, 490 {0, 0, 1, 0, /* 0x38 */ 491 {{3, 5}, 492 {0, 0}, 493 {0, 0}, 494 {0, 0} 495 } 496 }, 497 {1, 0, 2, 0, /* 0x39 */ 498 {{0, 0}, 499 {3, 5}, 500 {0, 0}, 501 {0, 0} 502 } 503 }, 504 {0, 0, 2, 0, /* 0x3a */ 505 {{1, 1}, 506 {3, 5}, 507 {0, 0}, 508 {0, 0} 509 } 510 }, 511 {1, 0, 2, 0, /* 0x3b */ 512 {{0, 1}, 513 {3, 5}, 514 {0, 0}, 515 {0, 0} 516 } 517 }, 518 {0, 0, 1, 0, /* 0x3c */ 519 {{2, 5}, 520 {0, 0}, 521 {0, 0}, 522 {0, 0} 523 } 524 }, 525 {1, 0, 2, 0, /* 0x3d */ 526 {{0, 0}, 527 {2, 5}, 528 {0, 0}, 529 {0, 0} 530 } 531 }, 532 {0, 0, 1, 0, /* 0x3e */ 533 {{1, 5}, 534 {0, 0}, 535 {0, 0}, 536 {0, 0} 537 } 538 }, 539 {1, 0, 1, 0, /* 0x3f */ 540 {{0, 5}, 541 {0, 0}, 542 {0, 0}, 543 {0, 0} 544 } 545 }, 546 {0, 0, 1, 0, /* 0x40 */ 547 {{6, 6}, 548 {0, 0}, 549 {0, 0}, 550 {0, 0} 551 } 552 }, 553 {1, 0, 2, 0, /* 0x41 */ 554 {{0, 0}, 555 {6, 6}, 556 {0, 0}, 557 {0, 0} 558 } 559 }, 560 {0, 0, 2, 0, /* 0x42 */ 561 {{1, 1}, 562 {6, 6}, 563 {0, 0}, 564 {0, 0} 565 } 566 }, 567 {1, 0, 2, 0, /* 0x43 */ 568 {{0, 1}, 569 {6, 6}, 570 {0, 0}, 571 {0, 0} 572 } 573 }, 574 {0, 0, 2, 0, /* 0x44 */ 575 {{2, 
2}, 576 {6, 6}, 577 {0, 0}, 578 {0, 0} 579 } 580 }, 581 {1, 0, 3, 0, /* 0x45 */ 582 {{0, 0}, 583 {2, 2}, 584 {6, 6}, 585 {0, 0} 586 } 587 }, 588 {0, 0, 2, 0, /* 0x46 */ 589 {{1, 2}, 590 {6, 6}, 591 {0, 0}, 592 {0, 0} 593 } 594 }, 595 {1, 0, 2, 0, /* 0x47 */ 596 {{0, 2}, 597 {6, 6}, 598 {0, 0}, 599 {0, 0} 600 } 601 }, 602 {0, 0, 2, 0, /* 0x48 */ 603 {{3, 3}, 604 {6, 6}, 605 {0, 0}, 606 {0, 0} 607 } 608 }, 609 {1, 0, 3, 0, /* 0x49 */ 610 {{0, 0}, 611 {3, 3}, 612 {6, 6}, 613 {0, 0} 614 } 615 }, 616 {0, 0, 3, 0, /* 0x4a */ 617 {{1, 1}, 618 {3, 3}, 619 {6, 6}, 620 {0, 0} 621 } 622 }, 623 {1, 0, 3, 0, /* 0x4b */ 624 {{0, 1}, 625 {3, 3}, 626 {6, 6}, 627 {0, 0} 628 } 629 }, 630 {0, 0, 2, 0, /* 0x4c */ 631 {{2, 3}, 632 {6, 6}, 633 {0, 0}, 634 {0, 0} 635 } 636 }, 637 {1, 0, 3, 0, /* 0x4d */ 638 {{0, 0}, 639 {2, 3}, 640 {6, 6}, 641 {0, 0} 642 } 643 }, 644 {0, 0, 2, 0, /* 0x4e */ 645 {{1, 3}, 646 {6, 6}, 647 {0, 0}, 648 {0, 0} 649 } 650 }, 651 {1, 0, 2, 0, /* 0x4f */ 652 {{0, 3}, 653 {6, 6}, 654 {0, 0}, 655 {0, 0} 656 } 657 }, 658 {0, 0, 2, 0, /* 0x50 */ 659 {{4, 4}, 660 {6, 6}, 661 {0, 0}, 662 {0, 0} 663 } 664 }, 665 {1, 0, 3, 0, /* 0x51 */ 666 {{0, 0}, 667 {4, 4}, 668 {6, 6}, 669 {0, 0} 670 } 671 }, 672 {0, 0, 3, 0, /* 0x52 */ 673 {{1, 1}, 674 {4, 4}, 675 {6, 6}, 676 {0, 0} 677 } 678 }, 679 {1, 0, 3, 0, /* 0x53 */ 680 {{0, 1}, 681 {4, 4}, 682 {6, 6}, 683 {0, 0} 684 } 685 }, 686 {0, 0, 3, 0, /* 0x54 */ 687 {{2, 2}, 688 {4, 4}, 689 {6, 6}, 690 {0, 0} 691 } 692 }, 693 {1, 0, 4, 0, /* 0x55 */ 694 {{0, 0}, 695 {2, 2}, 696 {4, 4}, 697 {6, 6} 698 } 699 }, 700 {0, 0, 3, 0, /* 0x56 */ 701 {{1, 2}, 702 {4, 4}, 703 {6, 6}, 704 {0, 0} 705 } 706 }, 707 {1, 0, 3, 0, /* 0x57 */ 708 {{0, 2}, 709 {4, 4}, 710 {6, 6}, 711 {0, 0} 712 } 713 }, 714 {0, 0, 2, 0, /* 0x58 */ 715 {{3, 4}, 716 {6, 6}, 717 {0, 0}, 718 {0, 0} 719 } 720 }, 721 {1, 0, 3, 0, /* 0x59 */ 722 {{0, 0}, 723 {3, 4}, 724 {6, 6}, 725 {0, 0} 726 } 727 }, 728 {0, 0, 3, 0, /* 0x5a */ 729 {{1, 1}, 730 {3, 4}, 731 {6, 6}, 732 {0, 0} 
733 } 734 }, 735 {1, 0, 3, 0, /* 0x5b */ 736 {{0, 1}, 737 {3, 4}, 738 {6, 6}, 739 {0, 0} 740 } 741 }, 742 {0, 0, 2, 0, /* 0x5c */ 743 {{2, 4}, 744 {6, 6}, 745 {0, 0}, 746 {0, 0} 747 } 748 }, 749 {1, 0, 3, 0, /* 0x5d */ 750 {{0, 0}, 751 {2, 4}, 752 {6, 6}, 753 {0, 0} 754 } 755 }, 756 {0, 0, 2, 0, /* 0x5e */ 757 {{1, 4}, 758 {6, 6}, 759 {0, 0}, 760 {0, 0} 761 } 762 }, 763 {1, 0, 2, 0, /* 0x5f */ 764 {{0, 4}, 765 {6, 6}, 766 {0, 0}, 767 {0, 0} 768 } 769 }, 770 {0, 0, 1, 0, /* 0x60 */ 771 {{5, 6}, 772 {0, 0}, 773 {0, 0}, 774 {0, 0} 775 } 776 }, 777 {1, 0, 2, 0, /* 0x61 */ 778 {{0, 0}, 779 {5, 6}, 780 {0, 0}, 781 {0, 0} 782 } 783 }, 784 {0, 0, 2, 0, /* 0x62 */ 785 {{1, 1}, 786 {5, 6}, 787 {0, 0}, 788 {0, 0} 789 } 790 }, 791 {1, 0, 2, 0, /* 0x63 */ 792 {{0, 1}, 793 {5, 6}, 794 {0, 0}, 795 {0, 0} 796 } 797 }, 798 {0, 0, 2, 0, /* 0x64 */ 799 {{2, 2}, 800 {5, 6}, 801 {0, 0}, 802 {0, 0} 803 } 804 }, 805 {1, 0, 3, 0, /* 0x65 */ 806 {{0, 0}, 807 {2, 2}, 808 {5, 6}, 809 {0, 0} 810 } 811 }, 812 {0, 0, 2, 0, /* 0x66 */ 813 {{1, 2}, 814 {5, 6}, 815 {0, 0}, 816 {0, 0} 817 } 818 }, 819 {1, 0, 2, 0, /* 0x67 */ 820 {{0, 2}, 821 {5, 6}, 822 {0, 0}, 823 {0, 0} 824 } 825 }, 826 {0, 0, 2, 0, /* 0x68 */ 827 {{3, 3}, 828 {5, 6}, 829 {0, 0}, 830 {0, 0} 831 } 832 }, 833 {1, 0, 3, 0, /* 0x69 */ 834 {{0, 0}, 835 {3, 3}, 836 {5, 6}, 837 {0, 0} 838 } 839 }, 840 {0, 0, 3, 0, /* 0x6a */ 841 {{1, 1}, 842 {3, 3}, 843 {5, 6}, 844 {0, 0} 845 } 846 }, 847 {1, 0, 3, 0, /* 0x6b */ 848 {{0, 1}, 849 {3, 3}, 850 {5, 6}, 851 {0, 0} 852 } 853 }, 854 {0, 0, 2, 0, /* 0x6c */ 855 {{2, 3}, 856 {5, 6}, 857 {0, 0}, 858 {0, 0} 859 } 860 }, 861 {1, 0, 3, 0, /* 0x6d */ 862 {{0, 0}, 863 {2, 3}, 864 {5, 6}, 865 {0, 0} 866 } 867 }, 868 {0, 0, 2, 0, /* 0x6e */ 869 {{1, 3}, 870 {5, 6}, 871 {0, 0}, 872 {0, 0} 873 } 874 }, 875 {1, 0, 2, 0, /* 0x6f */ 876 {{0, 3}, 877 {5, 6}, 878 {0, 0}, 879 {0, 0} 880 } 881 }, 882 {0, 0, 1, 0, /* 0x70 */ 883 {{4, 6}, 884 {0, 0}, 885 {0, 0}, 886 {0, 0} 887 } 888 }, 889 {1, 0, 2, 0, /* 0x71 */ 
890 {{0, 0}, 891 {4, 6}, 892 {0, 0}, 893 {0, 0} 894 } 895 }, 896 {0, 0, 2, 0, /* 0x72 */ 897 {{1, 1}, 898 {4, 6}, 899 {0, 0}, 900 {0, 0} 901 } 902 }, 903 {1, 0, 2, 0, /* 0x73 */ 904 {{0, 1}, 905 {4, 6}, 906 {0, 0}, 907 {0, 0} 908 } 909 }, 910 {0, 0, 2, 0, /* 0x74 */ 911 {{2, 2}, 912 {4, 6}, 913 {0, 0}, 914 {0, 0} 915 } 916 }, 917 {1, 0, 3, 0, /* 0x75 */ 918 {{0, 0}, 919 {2, 2}, 920 {4, 6}, 921 {0, 0} 922 } 923 }, 924 {0, 0, 2, 0, /* 0x76 */ 925 {{1, 2}, 926 {4, 6}, 927 {0, 0}, 928 {0, 0} 929 } 930 }, 931 {1, 0, 2, 0, /* 0x77 */ 932 {{0, 2}, 933 {4, 6}, 934 {0, 0}, 935 {0, 0} 936 } 937 }, 938 {0, 0, 1, 0, /* 0x78 */ 939 {{3, 6}, 940 {0, 0}, 941 {0, 0}, 942 {0, 0} 943 } 944 }, 945 {1, 0, 2, 0, /* 0x79 */ 946 {{0, 0}, 947 {3, 6}, 948 {0, 0}, 949 {0, 0} 950 } 951 }, 952 {0, 0, 2, 0, /* 0x7a */ 953 {{1, 1}, 954 {3, 6}, 955 {0, 0}, 956 {0, 0} 957 } 958 }, 959 {1, 0, 2, 0, /* 0x7b */ 960 {{0, 1}, 961 {3, 6}, 962 {0, 0}, 963 {0, 0} 964 } 965 }, 966 {0, 0, 1, 0, /* 0x7c */ 967 {{2, 6}, 968 {0, 0}, 969 {0, 0}, 970 {0, 0} 971 } 972 }, 973 {1, 0, 2, 0, /* 0x7d */ 974 {{0, 0}, 975 {2, 6}, 976 {0, 0}, 977 {0, 0} 978 } 979 }, 980 {0, 0, 1, 0, /* 0x7e */ 981 {{1, 6}, 982 {0, 0}, 983 {0, 0}, 984 {0, 0} 985 } 986 }, 987 {1, 0, 1, 0, /* 0x7f */ 988 {{0, 6}, 989 {0, 0}, 990 {0, 0}, 991 {0, 0} 992 } 993 }, 994 {0, 1, 1, 0, /* 0x80 */ 995 {{7, 7}, 996 {0, 0}, 997 {0, 0}, 998 {0, 0} 999 } 1000 }, 1001 {1, 1, 2, 0, /* 0x81 */ 1002 {{0, 0}, 1003 {7, 7}, 1004 {0, 0}, 1005 {0, 0} 1006 } 1007 }, 1008 {0, 1, 2, 0, /* 0x82 */ 1009 {{1, 1}, 1010 {7, 7}, 1011 {0, 0}, 1012 {0, 0} 1013 } 1014 }, 1015 {1, 1, 2, 0, /* 0x83 */ 1016 {{0, 1}, 1017 {7, 7}, 1018 {0, 0}, 1019 {0, 0} 1020 } 1021 }, 1022 {0, 1, 2, 0, /* 0x84 */ 1023 {{2, 2}, 1024 {7, 7}, 1025 {0, 0}, 1026 {0, 0} 1027 } 1028 }, 1029 {1, 1, 3, 0, /* 0x85 */ 1030 {{0, 0}, 1031 {2, 2}, 1032 {7, 7}, 1033 {0, 0} 1034 } 1035 }, 1036 {0, 1, 2, 0, /* 0x86 */ 1037 {{1, 2}, 1038 {7, 7}, 1039 {0, 0}, 1040 {0, 0} 1041 } 1042 }, 1043 {1, 1, 2, 0, /* 0x87 
*/ 1044 {{0, 2}, 1045 {7, 7}, 1046 {0, 0}, 1047 {0, 0} 1048 } 1049 }, 1050 {0, 1, 2, 0, /* 0x88 */ 1051 {{3, 3}, 1052 {7, 7}, 1053 {0, 0}, 1054 {0, 0} 1055 } 1056 }, 1057 {1, 1, 3, 0, /* 0x89 */ 1058 {{0, 0}, 1059 {3, 3}, 1060 {7, 7}, 1061 {0, 0} 1062 } 1063 }, 1064 {0, 1, 3, 0, /* 0x8a */ 1065 {{1, 1}, 1066 {3, 3}, 1067 {7, 7}, 1068 {0, 0} 1069 } 1070 }, 1071 {1, 1, 3, 0, /* 0x8b */ 1072 {{0, 1}, 1073 {3, 3}, 1074 {7, 7}, 1075 {0, 0} 1076 } 1077 }, 1078 {0, 1, 2, 0, /* 0x8c */ 1079 {{2, 3}, 1080 {7, 7}, 1081 {0, 0}, 1082 {0, 0} 1083 } 1084 }, 1085 {1, 1, 3, 0, /* 0x8d */ 1086 {{0, 0}, 1087 {2, 3}, 1088 {7, 7}, 1089 {0, 0} 1090 } 1091 }, 1092 {0, 1, 2, 0, /* 0x8e */ 1093 {{1, 3}, 1094 {7, 7}, 1095 {0, 0}, 1096 {0, 0} 1097 } 1098 }, 1099 {1, 1, 2, 0, /* 0x8f */ 1100 {{0, 3}, 1101 {7, 7}, 1102 {0, 0}, 1103 {0, 0} 1104 } 1105 }, 1106 {0, 1, 2, 0, /* 0x90 */ 1107 {{4, 4}, 1108 {7, 7}, 1109 {0, 0}, 1110 {0, 0} 1111 } 1112 }, 1113 {1, 1, 3, 0, /* 0x91 */ 1114 {{0, 0}, 1115 {4, 4}, 1116 {7, 7}, 1117 {0, 0} 1118 } 1119 }, 1120 {0, 1, 3, 0, /* 0x92 */ 1121 {{1, 1}, 1122 {4, 4}, 1123 {7, 7}, 1124 {0, 0} 1125 } 1126 }, 1127 {1, 1, 3, 0, /* 0x93 */ 1128 {{0, 1}, 1129 {4, 4}, 1130 {7, 7}, 1131 {0, 0} 1132 } 1133 }, 1134 {0, 1, 3, 0, /* 0x94 */ 1135 {{2, 2}, 1136 {4, 4}, 1137 {7, 7}, 1138 {0, 0} 1139 } 1140 }, 1141 {1, 1, 4, 0, /* 0x95 */ 1142 {{0, 0}, 1143 {2, 2}, 1144 {4, 4}, 1145 {7, 7} 1146 } 1147 }, 1148 {0, 1, 3, 0, /* 0x96 */ 1149 {{1, 2}, 1150 {4, 4}, 1151 {7, 7}, 1152 {0, 0} 1153 } 1154 }, 1155 {1, 1, 3, 0, /* 0x97 */ 1156 {{0, 2}, 1157 {4, 4}, 1158 {7, 7}, 1159 {0, 0} 1160 } 1161 }, 1162 {0, 1, 2, 0, /* 0x98 */ 1163 {{3, 4}, 1164 {7, 7}, 1165 {0, 0}, 1166 {0, 0} 1167 } 1168 }, 1169 {1, 1, 3, 0, /* 0x99 */ 1170 {{0, 0}, 1171 {3, 4}, 1172 {7, 7}, 1173 {0, 0} 1174 } 1175 }, 1176 {0, 1, 3, 0, /* 0x9a */ 1177 {{1, 1}, 1178 {3, 4}, 1179 {7, 7}, 1180 {0, 0} 1181 } 1182 }, 1183 {1, 1, 3, 0, /* 0x9b */ 1184 {{0, 1}, 1185 {3, 4}, 1186 {7, 7}, 1187 {0, 0} 1188 } 1189 }, 1190 {0, 
1, 2, 0, /* 0x9c */ 1191 {{2, 4}, 1192 {7, 7}, 1193 {0, 0}, 1194 {0, 0} 1195 } 1196 }, 1197 {1, 1, 3, 0, /* 0x9d */ 1198 {{0, 0}, 1199 {2, 4}, 1200 {7, 7}, 1201 {0, 0} 1202 } 1203 }, 1204 {0, 1, 2, 0, /* 0x9e */ 1205 {{1, 4}, 1206 {7, 7}, 1207 {0, 0}, 1208 {0, 0} 1209 } 1210 }, 1211 {1, 1, 2, 0, /* 0x9f */ 1212 {{0, 4}, 1213 {7, 7}, 1214 {0, 0}, 1215 {0, 0} 1216 } 1217 }, 1218 {0, 1, 2, 0, /* 0xa0 */ 1219 {{5, 5}, 1220 {7, 7}, 1221 {0, 0}, 1222 {0, 0} 1223 } 1224 }, 1225 {1, 1, 3, 0, /* 0xa1 */ 1226 {{0, 0}, 1227 {5, 5}, 1228 {7, 7}, 1229 {0, 0} 1230 } 1231 }, 1232 {0, 1, 3, 0, /* 0xa2 */ 1233 {{1, 1}, 1234 {5, 5}, 1235 {7, 7}, 1236 {0, 0} 1237 } 1238 }, 1239 {1, 1, 3, 0, /* 0xa3 */ 1240 {{0, 1}, 1241 {5, 5}, 1242 {7, 7}, 1243 {0, 0} 1244 } 1245 }, 1246 {0, 1, 3, 0, /* 0xa4 */ 1247 {{2, 2}, 1248 {5, 5}, 1249 {7, 7}, 1250 {0, 0} 1251 } 1252 }, 1253 {1, 1, 4, 0, /* 0xa5 */ 1254 {{0, 0}, 1255 {2, 2}, 1256 {5, 5}, 1257 {7, 7} 1258 } 1259 }, 1260 {0, 1, 3, 0, /* 0xa6 */ 1261 {{1, 2}, 1262 {5, 5}, 1263 {7, 7}, 1264 {0, 0} 1265 } 1266 }, 1267 {1, 1, 3, 0, /* 0xa7 */ 1268 {{0, 2}, 1269 {5, 5}, 1270 {7, 7}, 1271 {0, 0} 1272 } 1273 }, 1274 {0, 1, 3, 0, /* 0xa8 */ 1275 {{3, 3}, 1276 {5, 5}, 1277 {7, 7}, 1278 {0, 0} 1279 } 1280 }, 1281 {1, 1, 4, 0, /* 0xa9 */ 1282 {{0, 0}, 1283 {3, 3}, 1284 {5, 5}, 1285 {7, 7} 1286 } 1287 }, 1288 {0, 1, 4, 0, /* 0xaa */ 1289 {{1, 1}, 1290 {3, 3}, 1291 {5, 5}, 1292 {7, 7} 1293 } 1294 }, 1295 {1, 1, 4, 0, /* 0xab */ 1296 {{0, 1}, 1297 {3, 3}, 1298 {5, 5}, 1299 {7, 7} 1300 } 1301 }, 1302 {0, 1, 3, 0, /* 0xac */ 1303 {{2, 3}, 1304 {5, 5}, 1305 {7, 7}, 1306 {0, 0} 1307 } 1308 }, 1309 {1, 1, 4, 0, /* 0xad */ 1310 {{0, 0}, 1311 {2, 3}, 1312 {5, 5}, 1313 {7, 7} 1314 } 1315 }, 1316 {0, 1, 3, 0, /* 0xae */ 1317 {{1, 3}, 1318 {5, 5}, 1319 {7, 7}, 1320 {0, 0} 1321 } 1322 }, 1323 {1, 1, 3, 0, /* 0xaf */ 1324 {{0, 3}, 1325 {5, 5}, 1326 {7, 7}, 1327 {0, 0} 1328 } 1329 }, 1330 {0, 1, 2, 0, /* 0xb0 */ 1331 {{4, 5}, 1332 {7, 7}, 1333 {0, 0}, 1334 {0, 0} 1335 } 
1336 }, 1337 {1, 1, 3, 0, /* 0xb1 */ 1338 {{0, 0}, 1339 {4, 5}, 1340 {7, 7}, 1341 {0, 0} 1342 } 1343 }, 1344 {0, 1, 3, 0, /* 0xb2 */ 1345 {{1, 1}, 1346 {4, 5}, 1347 {7, 7}, 1348 {0, 0} 1349 } 1350 }, 1351 {1, 1, 3, 0, /* 0xb3 */ 1352 {{0, 1}, 1353 {4, 5}, 1354 {7, 7}, 1355 {0, 0} 1356 } 1357 }, 1358 {0, 1, 3, 0, /* 0xb4 */ 1359 {{2, 2}, 1360 {4, 5}, 1361 {7, 7}, 1362 {0, 0} 1363 } 1364 }, 1365 {1, 1, 4, 0, /* 0xb5 */ 1366 {{0, 0}, 1367 {2, 2}, 1368 {4, 5}, 1369 {7, 7} 1370 } 1371 }, 1372 {0, 1, 3, 0, /* 0xb6 */ 1373 {{1, 2}, 1374 {4, 5}, 1375 {7, 7}, 1376 {0, 0} 1377 } 1378 }, 1379 {1, 1, 3, 0, /* 0xb7 */ 1380 {{0, 2}, 1381 {4, 5}, 1382 {7, 7}, 1383 {0, 0} 1384 } 1385 }, 1386 {0, 1, 2, 0, /* 0xb8 */ 1387 {{3, 5}, 1388 {7, 7}, 1389 {0, 0}, 1390 {0, 0} 1391 } 1392 }, 1393 {1, 1, 3, 0, /* 0xb9 */ 1394 {{0, 0}, 1395 {3, 5}, 1396 {7, 7}, 1397 {0, 0} 1398 } 1399 }, 1400 {0, 1, 3, 0, /* 0xba */ 1401 {{1, 1}, 1402 {3, 5}, 1403 {7, 7}, 1404 {0, 0} 1405 } 1406 }, 1407 {1, 1, 3, 0, /* 0xbb */ 1408 {{0, 1}, 1409 {3, 5}, 1410 {7, 7}, 1411 {0, 0} 1412 } 1413 }, 1414 {0, 1, 2, 0, /* 0xbc */ 1415 {{2, 5}, 1416 {7, 7}, 1417 {0, 0}, 1418 {0, 0} 1419 } 1420 }, 1421 {1, 1, 3, 0, /* 0xbd */ 1422 {{0, 0}, 1423 {2, 5}, 1424 {7, 7}, 1425 {0, 0} 1426 } 1427 }, 1428 {0, 1, 2, 0, /* 0xbe */ 1429 {{1, 5}, 1430 {7, 7}, 1431 {0, 0}, 1432 {0, 0} 1433 } 1434 }, 1435 {1, 1, 2, 0, /* 0xbf */ 1436 {{0, 5}, 1437 {7, 7}, 1438 {0, 0}, 1439 {0, 0} 1440 } 1441 }, 1442 {0, 1, 1, 0, /* 0xc0 */ 1443 {{6, 7}, 1444 {0, 0}, 1445 {0, 0}, 1446 {0, 0} 1447 } 1448 }, 1449 {1, 1, 2, 0, /* 0xc1 */ 1450 {{0, 0}, 1451 {6, 7}, 1452 {0, 0}, 1453 {0, 0} 1454 } 1455 }, 1456 {0, 1, 2, 0, /* 0xc2 */ 1457 {{1, 1}, 1458 {6, 7}, 1459 {0, 0}, 1460 {0, 0} 1461 } 1462 }, 1463 {1, 1, 2, 0, /* 0xc3 */ 1464 {{0, 1}, 1465 {6, 7}, 1466 {0, 0}, 1467 {0, 0} 1468 } 1469 }, 1470 {0, 1, 2, 0, /* 0xc4 */ 1471 {{2, 2}, 1472 {6, 7}, 1473 {0, 0}, 1474 {0, 0} 1475 } 1476 }, 1477 {1, 1, 3, 0, /* 0xc5 */ 1478 {{0, 0}, 1479 {2, 2}, 1480 {6, 7}, 
1481 {0, 0} 1482 } 1483 }, 1484 {0, 1, 2, 0, /* 0xc6 */ 1485 {{1, 2}, 1486 {6, 7}, 1487 {0, 0}, 1488 {0, 0} 1489 } 1490 }, 1491 {1, 1, 2, 0, /* 0xc7 */ 1492 {{0, 2}, 1493 {6, 7}, 1494 {0, 0}, 1495 {0, 0} 1496 } 1497 }, 1498 {0, 1, 2, 0, /* 0xc8 */ 1499 {{3, 3}, 1500 {6, 7}, 1501 {0, 0}, 1502 {0, 0} 1503 } 1504 }, 1505 {1, 1, 3, 0, /* 0xc9 */ 1506 {{0, 0}, 1507 {3, 3}, 1508 {6, 7}, 1509 {0, 0} 1510 } 1511 }, 1512 {0, 1, 3, 0, /* 0xca */ 1513 {{1, 1}, 1514 {3, 3}, 1515 {6, 7}, 1516 {0, 0} 1517 } 1518 }, 1519 {1, 1, 3, 0, /* 0xcb */ 1520 {{0, 1}, 1521 {3, 3}, 1522 {6, 7}, 1523 {0, 0} 1524 } 1525 }, 1526 {0, 1, 2, 0, /* 0xcc */ 1527 {{2, 3}, 1528 {6, 7}, 1529 {0, 0}, 1530 {0, 0} 1531 } 1532 }, 1533 {1, 1, 3, 0, /* 0xcd */ 1534 {{0, 0}, 1535 {2, 3}, 1536 {6, 7}, 1537 {0, 0} 1538 } 1539 }, 1540 {0, 1, 2, 0, /* 0xce */ 1541 {{1, 3}, 1542 {6, 7}, 1543 {0, 0}, 1544 {0, 0} 1545 } 1546 }, 1547 {1, 1, 2, 0, /* 0xcf */ 1548 {{0, 3}, 1549 {6, 7}, 1550 {0, 0}, 1551 {0, 0} 1552 } 1553 }, 1554 {0, 1, 2, 0, /* 0xd0 */ 1555 {{4, 4}, 1556 {6, 7}, 1557 {0, 0}, 1558 {0, 0} 1559 } 1560 }, 1561 {1, 1, 3, 0, /* 0xd1 */ 1562 {{0, 0}, 1563 {4, 4}, 1564 {6, 7}, 1565 {0, 0} 1566 } 1567 }, 1568 {0, 1, 3, 0, /* 0xd2 */ 1569 {{1, 1}, 1570 {4, 4}, 1571 {6, 7}, 1572 {0, 0} 1573 } 1574 }, 1575 {1, 1, 3, 0, /* 0xd3 */ 1576 {{0, 1}, 1577 {4, 4}, 1578 {6, 7}, 1579 {0, 0} 1580 } 1581 }, 1582 {0, 1, 3, 0, /* 0xd4 */ 1583 {{2, 2}, 1584 {4, 4}, 1585 {6, 7}, 1586 {0, 0} 1587 } 1588 }, 1589 {1, 1, 4, 0, /* 0xd5 */ 1590 {{0, 0}, 1591 {2, 2}, 1592 {4, 4}, 1593 {6, 7} 1594 } 1595 }, 1596 {0, 1, 3, 0, /* 0xd6 */ 1597 {{1, 2}, 1598 {4, 4}, 1599 {6, 7}, 1600 {0, 0} 1601 } 1602 }, 1603 {1, 1, 3, 0, /* 0xd7 */ 1604 {{0, 2}, 1605 {4, 4}, 1606 {6, 7}, 1607 {0, 0} 1608 } 1609 }, 1610 {0, 1, 2, 0, /* 0xd8 */ 1611 {{3, 4}, 1612 {6, 7}, 1613 {0, 0}, 1614 {0, 0} 1615 } 1616 }, 1617 {1, 1, 3, 0, /* 0xd9 */ 1618 {{0, 0}, 1619 {3, 4}, 1620 {6, 7}, 1621 {0, 0} 1622 } 1623 }, 1624 {0, 1, 3, 0, /* 0xda */ 1625 {{1, 1}, 1626 {3, 
4}, 1627 {6, 7}, 1628 {0, 0} 1629 } 1630 }, 1631 {1, 1, 3, 0, /* 0xdb */ 1632 {{0, 1}, 1633 {3, 4}, 1634 {6, 7}, 1635 {0, 0} 1636 } 1637 }, 1638 {0, 1, 2, 0, /* 0xdc */ 1639 {{2, 4}, 1640 {6, 7}, 1641 {0, 0}, 1642 {0, 0} 1643 } 1644 }, 1645 {1, 1, 3, 0, /* 0xdd */ 1646 {{0, 0}, 1647 {2, 4}, 1648 {6, 7}, 1649 {0, 0} 1650 } 1651 }, 1652 {0, 1, 2, 0, /* 0xde */ 1653 {{1, 4}, 1654 {6, 7}, 1655 {0, 0}, 1656 {0, 0} 1657 } 1658 }, 1659 {1, 1, 2, 0, /* 0xdf */ 1660 {{0, 4}, 1661 {6, 7}, 1662 {0, 0}, 1663 {0, 0} 1664 } 1665 }, 1666 {0, 1, 1, 0, /* 0xe0 */ 1667 {{5, 7}, 1668 {0, 0}, 1669 {0, 0}, 1670 {0, 0} 1671 } 1672 }, 1673 {1, 1, 2, 0, /* 0xe1 */ 1674 {{0, 0}, 1675 {5, 7}, 1676 {0, 0}, 1677 {0, 0} 1678 } 1679 }, 1680 {0, 1, 2, 0, /* 0xe2 */ 1681 {{1, 1}, 1682 {5, 7}, 1683 {0, 0}, 1684 {0, 0} 1685 } 1686 }, 1687 {1, 1, 2, 0, /* 0xe3 */ 1688 {{0, 1}, 1689 {5, 7}, 1690 {0, 0}, 1691 {0, 0} 1692 } 1693 }, 1694 {0, 1, 2, 0, /* 0xe4 */ 1695 {{2, 2}, 1696 {5, 7}, 1697 {0, 0}, 1698 {0, 0} 1699 } 1700 }, 1701 {1, 1, 3, 0, /* 0xe5 */ 1702 {{0, 0}, 1703 {2, 2}, 1704 {5, 7}, 1705 {0, 0} 1706 } 1707 }, 1708 {0, 1, 2, 0, /* 0xe6 */ 1709 {{1, 2}, 1710 {5, 7}, 1711 {0, 0}, 1712 {0, 0} 1713 } 1714 }, 1715 {1, 1, 2, 0, /* 0xe7 */ 1716 {{0, 2}, 1717 {5, 7}, 1718 {0, 0}, 1719 {0, 0} 1720 } 1721 }, 1722 {0, 1, 2, 0, /* 0xe8 */ 1723 {{3, 3}, 1724 {5, 7}, 1725 {0, 0}, 1726 {0, 0} 1727 } 1728 }, 1729 {1, 1, 3, 0, /* 0xe9 */ 1730 {{0, 0}, 1731 {3, 3}, 1732 {5, 7}, 1733 {0, 0} 1734 } 1735 }, 1736 {0, 1, 3, 0, /* 0xea */ 1737 {{1, 1}, 1738 {3, 3}, 1739 {5, 7}, 1740 {0, 0} 1741 } 1742 }, 1743 {1, 1, 3, 0, /* 0xeb */ 1744 {{0, 1}, 1745 {3, 3}, 1746 {5, 7}, 1747 {0, 0} 1748 } 1749 }, 1750 {0, 1, 2, 0, /* 0xec */ 1751 {{2, 3}, 1752 {5, 7}, 1753 {0, 0}, 1754 {0, 0} 1755 } 1756 }, 1757 {1, 1, 3, 0, /* 0xed */ 1758 {{0, 0}, 1759 {2, 3}, 1760 {5, 7}, 1761 {0, 0} 1762 } 1763 }, 1764 {0, 1, 2, 0, /* 0xee */ 1765 {{1, 3}, 1766 {5, 7}, 1767 {0, 0}, 1768 {0, 0} 1769 } 1770 }, 1771 {1, 1, 2, 0, /* 0xef */ 1772 
{{0, 3}, 1773 {5, 7}, 1774 {0, 0}, 1775 {0, 0} 1776 } 1777 }, 1778 {0, 1, 1, 0, /* 0xf0 */ 1779 {{4, 7}, 1780 {0, 0}, 1781 {0, 0}, 1782 {0, 0} 1783 } 1784 }, 1785 {1, 1, 2, 0, /* 0xf1 */ 1786 {{0, 0}, 1787 {4, 7}, 1788 {0, 0}, 1789 {0, 0} 1790 } 1791 }, 1792 {0, 1, 2, 0, /* 0xf2 */ 1793 {{1, 1}, 1794 {4, 7}, 1795 {0, 0}, 1796 {0, 0} 1797 } 1798 }, 1799 {1, 1, 2, 0, /* 0xf3 */ 1800 {{0, 1}, 1801 {4, 7}, 1802 {0, 0}, 1803 {0, 0} 1804 } 1805 }, 1806 {0, 1, 2, 0, /* 0xf4 */ 1807 {{2, 2}, 1808 {4, 7}, 1809 {0, 0}, 1810 {0, 0} 1811 } 1812 }, 1813 {1, 1, 3, 0, /* 0xf5 */ 1814 {{0, 0}, 1815 {2, 2}, 1816 {4, 7}, 1817 {0, 0} 1818 } 1819 }, 1820 {0, 1, 2, 0, /* 0xf6 */ 1821 {{1, 2}, 1822 {4, 7}, 1823 {0, 0}, 1824 {0, 0} 1825 } 1826 }, 1827 {1, 1, 2, 0, /* 0xf7 */ 1828 {{0, 2}, 1829 {4, 7}, 1830 {0, 0}, 1831 {0, 0} 1832 } 1833 }, 1834 {0, 1, 1, 0, /* 0xf8 */ 1835 {{3, 7}, 1836 {0, 0}, 1837 {0, 0}, 1838 {0, 0} 1839 } 1840 }, 1841 {1, 1, 2, 0, /* 0xf9 */ 1842 {{0, 0}, 1843 {3, 7}, 1844 {0, 0}, 1845 {0, 0} 1846 } 1847 }, 1848 {0, 1, 2, 0, /* 0xfa */ 1849 {{1, 1}, 1850 {3, 7}, 1851 {0, 0}, 1852 {0, 0} 1853 } 1854 }, 1855 {1, 1, 2, 0, /* 0xfb */ 1856 {{0, 1}, 1857 {3, 7}, 1858 {0, 0}, 1859 {0, 0} 1860 } 1861 }, 1862 {0, 1, 1, 0, /* 0xfc */ 1863 {{2, 7}, 1864 {0, 0}, 1865 {0, 0}, 1866 {0, 0} 1867 } 1868 }, 1869 {1, 1, 2, 0, /* 0xfd */ 1870 {{0, 0}, 1871 {2, 7}, 1872 {0, 0}, 1873 {0, 0} 1874 } 1875 }, 1876 {0, 1, 1, 0, /* 0xfe */ 1877 {{1, 7}, 1878 {0, 0}, 1879 {0, 0}, 1880 {0, 0} 1881 } 1882 }, 1883 {1, 1, 1, 0, /* 0xff */ 1884 {{0, 7}, 1885 {0, 0}, 1886 {0, 0}, 1887 {0, 0} 1888 } 1889 } 1890 }; 1891 1892 1893 int 1894 sctp_is_address_in_scope(struct sctp_ifa *ifa, 1895 struct sctp_scoping *scope, 1896 int do_update) 1897 { 1898 if ((scope->loopback_scope == 0) && 1899 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) { 1900 /* 1901 * skip loopback if not in scope * 1902 */ 1903 return (0); 1904 } 1905 switch (ifa->address.sa.sa_family) { 1906 #ifdef INET 1907 case AF_INET: 1908 if 
(scope->ipv4_addr_legal) { 1909 struct sockaddr_in *sin; 1910 1911 sin = &ifa->address.sin; 1912 if (sin->sin_addr.s_addr == 0) { 1913 /* not in scope , unspecified */ 1914 return (0); 1915 } 1916 if ((scope->ipv4_local_scope == 0) && 1917 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1918 /* private address not in scope */ 1919 return (0); 1920 } 1921 } else { 1922 return (0); 1923 } 1924 break; 1925 #endif 1926 #ifdef INET6 1927 case AF_INET6: 1928 if (scope->ipv6_addr_legal) { 1929 struct sockaddr_in6 *sin6; 1930 1931 #if !defined(__Panda__) 1932 /* Must update the flags, bummer, which 1933 * means any IFA locks must now be applied HERE <-> 1934 */ 1935 if (do_update) { 1936 sctp_gather_internal_ifa_flags(ifa); 1937 } 1938 #endif 1939 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 1940 return (0); 1941 } 1942 /* ok to use deprecated addresses? */ 1943 sin6 = &ifa->address.sin6; 1944 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1945 /* skip unspecifed addresses */ 1946 return (0); 1947 } 1948 if ( /* (local_scope == 0) && */ 1949 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) { 1950 return (0); 1951 } 1952 if ((scope->site_scope == 0) && 1953 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1954 return (0); 1955 } 1956 } else { 1957 return (0); 1958 } 1959 break; 1960 #endif 1961 #if defined(__Userspace__) 1962 case AF_CONN: 1963 if (!scope->conn_addr_legal) { 1964 return (0); 1965 } 1966 break; 1967 #endif 1968 default: 1969 return (0); 1970 } 1971 return (1); 1972 } 1973 1974 static struct mbuf * 1975 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len) 1976 { 1977 #if defined(INET) || defined(INET6) 1978 struct sctp_paramhdr *parmh; 1979 struct mbuf *mret; 1980 uint16_t plen; 1981 #endif 1982 1983 switch (ifa->address.sa.sa_family) { 1984 #ifdef INET 1985 case AF_INET: 1986 plen = (uint16_t)sizeof(struct sctp_ipv4addr_param); 1987 break; 1988 #endif 1989 #ifdef INET6 1990 case AF_INET6: 1991 plen = (uint16_t)sizeof(struct 
sctp_ipv6addr_param); 1992 break; 1993 #endif 1994 default: 1995 return (m); 1996 } 1997 #if defined(INET) || defined(INET6) 1998 if (M_TRAILINGSPACE(m) >= plen) { 1999 /* easy side we just drop it on the end */ 2000 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m))); 2001 mret = m; 2002 } else { 2003 /* Need more space */ 2004 mret = m; 2005 while (SCTP_BUF_NEXT(mret) != NULL) { 2006 mret = SCTP_BUF_NEXT(mret); 2007 } 2008 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA); 2009 if (SCTP_BUF_NEXT(mret) == NULL) { 2010 /* We are hosed, can't add more addresses */ 2011 return (m); 2012 } 2013 mret = SCTP_BUF_NEXT(mret); 2014 parmh = mtod(mret, struct sctp_paramhdr *); 2015 } 2016 /* now add the parameter */ 2017 switch (ifa->address.sa.sa_family) { 2018 #ifdef INET 2019 case AF_INET: 2020 { 2021 struct sctp_ipv4addr_param *ipv4p; 2022 struct sockaddr_in *sin; 2023 2024 sin = &ifa->address.sin; 2025 ipv4p = (struct sctp_ipv4addr_param *)parmh; 2026 parmh->param_type = htons(SCTP_IPV4_ADDRESS); 2027 parmh->param_length = htons(plen); 2028 ipv4p->addr = sin->sin_addr.s_addr; 2029 SCTP_BUF_LEN(mret) += plen; 2030 break; 2031 } 2032 #endif 2033 #ifdef INET6 2034 case AF_INET6: 2035 { 2036 struct sctp_ipv6addr_param *ipv6p; 2037 struct sockaddr_in6 *sin6; 2038 2039 sin6 = &ifa->address.sin6; 2040 ipv6p = (struct sctp_ipv6addr_param *)parmh; 2041 parmh->param_type = htons(SCTP_IPV6_ADDRESS); 2042 parmh->param_length = htons(plen); 2043 memcpy(ipv6p->addr, &sin6->sin6_addr, 2044 sizeof(ipv6p->addr)); 2045 #if defined(SCTP_EMBEDDED_V6_SCOPE) 2046 /* clear embedded scope in the address */ 2047 in6_clearscope((struct in6_addr *)ipv6p->addr); 2048 #endif 2049 SCTP_BUF_LEN(mret) += plen; 2050 break; 2051 } 2052 #endif 2053 default: 2054 return (m); 2055 } 2056 if (len != NULL) { 2057 *len += plen; 2058 } 2059 return (mret); 2060 #endif 2061 } 2062 2063 2064 struct mbuf * 2065 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct 
                            sctp_tcb *stcb,
                            struct sctp_scoping *scope,
                            struct mbuf *m_at, int cnt_inits_to,
                            uint16_t *padding_len, uint16_t *chunk_len)
{
    /*
     * Body of the INIT/INIT-ACK address-list builder (the definition's
     * head lies above this view).  Appends each usable local address to
     * the chunk in m_at via sctp_add_addr_to_mbuf(), honouring scope
     * rules, jail (prison) restrictions and SCTP_ADDRESS_LIMIT, and
     * returns the (possibly extended) mbuf chain tail.  Runs under the
     * address-table read lock for the whole walk.
     */
    struct sctp_vrf *vrf = NULL;
    int cnt, limit_out = 0, total_count;
    uint32_t vrf_id;

    vrf_id = inp->def_vrf_id;
    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL) {
        /* Unknown VRF: nothing to add; drop the lock and return as-is. */
        SCTP_IPI_ADDR_RUNLOCK();
        return (m_at);
    }
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        struct sctp_ifa *sctp_ifap;
        struct sctp_ifn *sctp_ifnp;

        cnt = cnt_inits_to;
        if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
            /* Too many addresses to count exactly; cap and limit output. */
            limit_out = 1;
            cnt = SCTP_ADDRESS_LIMIT;
            goto skip_count;
        }
        /* Pass 1: count qualifying addresses across all interfaces. */
        LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
            if ((scope->loopback_scope == 0) &&
                SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                /*
                 * Skip loopback devices if loopback_scope
                 * not set
                 */
                continue;
            }
            LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
                /* Jailed endpoints may only list addresses of the jail. */
                if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
                    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
                                      &sctp_ifap->address.sin.sin_addr) != 0)) {
                    continue;
                }
#endif
#ifdef INET6
                if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
                    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
                                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
                    continue;
                }
#endif
#endif
                if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
                    continue;
                }
#if defined(__Userspace__)
                if (sctp_ifap->address.sa.sa_family == AF_CONN) {
                    continue;
                }
#endif
                if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
                    continue;
                }
                cnt++;
                if (cnt > SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
            if (cnt > SCTP_ADDRESS_LIMIT) {
                break;
            }
        }
 skip_count:
        if (cnt > 1) {
            /* Pass 2: actually emit the addresses into the chunk. */
            total_count = 0;
            LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
                cnt = 0;
                if ((scope->loopback_scope == 0) &&
                    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                    /*
                     * Skip loopback devices if
                     * loopback_scope not set
                     */
                    continue;
                }
                LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
                    if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
                        (prison_check_ip4(inp->ip_inp.inp.inp_cred,
                                          &sctp_ifap->address.sin.sin_addr) != 0)) {
                        continue;
                    }
#endif
#ifdef INET6
                    if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
                        (prison_check_ip6(inp->ip_inp.inp.inp_cred,
                                          &sctp_ifap->address.sin6.sin6_addr) != 0)) {
                        continue;
                    }
#endif
#endif
                    if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
                        continue;
                    }
#if defined(__Userspace__)
                    if (sctp_ifap->address.sa.sa_family == AF_CONN) {
                        continue;
                    }
#endif
                    if (sctp_is_address_in_scope(sctp_ifap,
                                                 scope, 0) == 0) {
                        continue;
                    }
                    /* Flush any pending padding before appending a parameter. */
                    if ((chunk_len != NULL) &&
                        (padding_len != NULL) &&
                        (*padding_len > 0)) {
                        memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
                        SCTP_BUF_LEN(m_at) += *padding_len;
                        *chunk_len += *padding_len;
                        *padding_len = 0;
                    }
                    m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
                    if (limit_out) {
                        cnt++;
                        total_count++;
                        if (cnt >= 2) {
                            /* two from each address */
                            break;
                        }
                        if (total_count > SCTP_ADDRESS_LIMIT) {
                            /* No more addresses */
                            break;
                        }
                    }
                }
            }
        }
    } else {
        struct sctp_laddr *laddr;

        cnt = cnt_inits_to;
        /* First, how many ? */
        LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
            if (laddr->ifa == NULL) {
                continue;
            }
            if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
                /* Address being deleted by the system, dont
                 * list.
                 */
                continue;
            if (laddr->action == SCTP_DEL_IP_ADDRESS) {
                /* Address being deleted on this ep
                 * don't list.
                 */
                continue;
            }
#if defined(__Userspace__)
            if (laddr->ifa->address.sa.sa_family == AF_CONN) {
                continue;
            }
#endif
            if (sctp_is_address_in_scope(laddr->ifa,
                                         scope, 1) == 0) {
                continue;
            }
            cnt++;
        }
        /*
         * To get through a NAT we only list addresses if we have
         * more than one. That way if you just bind a single address
         * we let the source of the init dictate our address.
         */
        if (cnt > 1) {
            cnt = cnt_inits_to;
            LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
                if (laddr->ifa == NULL) {
                    continue;
                }
                if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
                    continue;
                }
#if defined(__Userspace__)
                if (laddr->ifa->address.sa.sa_family == AF_CONN) {
                    continue;
                }
#endif
                if (sctp_is_address_in_scope(laddr->ifa,
                                             scope, 0) == 0) {
                    continue;
                }
                /* Flush any pending padding before appending a parameter. */
                if ((chunk_len != NULL) &&
                    (padding_len != NULL) &&
                    (*padding_len > 0)) {
                    memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
                    SCTP_BUF_LEN(m_at) += *padding_len;
                    *chunk_len += *padding_len;
                    *padding_len = 0;
                }
                m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
                cnt++;
                if (cnt >= SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (m_at);
}

/*
 * Decide whether ifa is a "preferred" source for a destination of the
 * given scope/family: same scope or higher only (see table below).
 * Returns ifa when preferred, NULL otherwise.  Does not take a reference.
 */
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
                           uint8_t dest_is_loop,
                           uint8_t dest_is_priv,
                           sa_family_t fam)
{
    uint8_t dest_is_global = 0;
    /* dest_is_priv is true if destination is a private address */
    /* dest_is_loop is true if destination is a loopback addresses */

    /**
     * Here we determine if its a preferred address.
     * A preferred address
     * means it is the same scope or higher scope then the destination.
     * L = loopback, P = private, G = global
     * -----------------------------------------
     *    src    |  dest | result
     * ----------------------------------------
     *     L     |    L  | yes
     * -----------------------------------------
     *     P     |    L  | yes-v4 no-v6
     * -----------------------------------------
     *     G     |    L  | yes-v4 no-v6
     * -----------------------------------------
     *     L     |    P  | no
     * -----------------------------------------
     *     P     |    P  | yes
     * -----------------------------------------
     *     G     |    P  | no
     * -----------------------------------------
     *     L     |    G  | no
     * -----------------------------------------
     *     P     |    G  | no
     * -----------------------------------------
     *     G     |    G  | yes
     * -----------------------------------------
     */

    if (ifa->address.sa.sa_family != fam) {
        /* forget mis-matched family */
        return (NULL);
    }
    /* Destination is global when it is neither private nor loopback. */
    if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
        dest_is_global = 1;
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
    /* Ok the address may be ok */
#ifdef INET6
    if (fam == AF_INET6) {
        /* ok to use deprecated addresses? no lets not! */
        if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
            SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
            return (NULL);
        }
        /* v6-specific rows of the table: P->L and G->L are "no" for v6. */
        if (ifa->src_is_priv && !ifa->src_is_loop) {
            if (dest_is_loop) {
                SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
                return (NULL);
            }
        }
        if (ifa->src_is_glob) {
            if (dest_is_loop) {
                SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
                return (NULL);
            }
        }
    }
#endif
    /* Now that we know what is what, implement or table
     * this could in theory be done slicker (it used to be), but this
     * is straightforward and easier to validate :-)
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
            ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
            dest_is_loop, dest_is_priv, dest_is_global);

    if ((ifa->src_is_loop) && (dest_is_priv)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
        return (NULL);
    }
    if ((ifa->src_is_glob) && (dest_is_priv)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
        return (NULL);
    }
    if ((ifa->src_is_loop) && (dest_is_global)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
        return (NULL);
    }
    if ((ifa->src_is_priv) && (dest_is_global)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
    /* its a preferred address */
    return (ifa);
}

/*
 * Decide whether ifa is an "acceptable" source: like the preferred test
 * but relaxed to tolerate NAT, so private<->global combinations pass
 * (marked "May not work" in the table below).  Returns ifa or NULL.
 */
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
                            uint8_t dest_is_loop,
                            uint8_t dest_is_priv,
                            sa_family_t fam)
{
    uint8_t dest_is_global = 0;

    /**
     * Here we determine if its a acceptable address. A acceptable
     * address means it is the same scope or higher scope but we can
     * allow for NAT which means its ok to have a global dest and a
     * private src.
     *
     * L = loopback, P = private, G = global
     * -----------------------------------------
     *  src   |  dest | result
     * -----------------------------------------
     *   L    |   L   | yes
     * -----------------------------------------
     *   P    |   L   | yes-v4 no-v6
     * -----------------------------------------
     *   G    |   L   | yes
     * -----------------------------------------
     *   L    |   P   | no
     * -----------------------------------------
     *   P    |   P   | yes
     * -----------------------------------------
     *   G    |   P   | yes - May not work
     * -----------------------------------------
     *   L    |   G   | no
     * -----------------------------------------
     *   P    |   G   | yes - May not work
     * -----------------------------------------
     *   G    |   G   | yes
     * -----------------------------------------
     */

    if (ifa->address.sa.sa_family != fam) {
        /* forget non matching family */
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
                ifa->address.sa.sa_family, fam);
        return (NULL);
    }
    /* Ok the address may be ok */
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
            dest_is_loop, dest_is_priv);
    /* Destination is global when it is neither private nor loopback. */
    if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
        dest_is_global = 1;
    }
#ifdef INET6
    if (fam == AF_INET6) {
        /* ok to use deprecated addresses? */
        if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
            return (NULL);
        }
        if (ifa->src_is_priv) {
            /* Special case, linklocal to loop */
            if (dest_is_loop)
                return (NULL);
        }
    }
#endif
    /*
     * Now that we know what is what, implement our table.
     * This could in theory be done slicker (it used to be), but this
     * is straightforward and easier to validate :-)
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
            ifa->src_is_loop,
            dest_is_priv);
    if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
            ifa->src_is_loop,
            dest_is_global);
    if ((ifa->src_is_loop == 1) && (dest_is_global)) {
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
    /* its an acceptable address */
    return (ifa);
}

/*
 * Return 1 when ifa is on the association's restricted-address list
 * (addresses not yet usable as a source for this association), else 0.
 * With no TCB there can be no restrictions.
 */
int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
    struct sctp_laddr *laddr;

    if (stcb == NULL) {
        /* There are no restrictions, no TCB :-) */
        return (0);
    }
    LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
        if (laddr->ifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
                    __FUNCTION__);
            continue;
        }
        if (laddr->ifa == ifa) {
            /* Yes it is on the list */
            return (1);
        }
    }
    return (0);
}


/*
 * Return 1 when ifa is bound to the endpoint, i.e. present on the
 * endpoint's address list with no pending (asconf) action, else 0.
 * Comparison is by ifa pointer identity.
 */
int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
    struct sctp_laddr *laddr;

    if (ifa == NULL)
        return (0);
    LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
        if (laddr->ifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
                    __FUNCTION__);
            continue;
        }
        if ((laddr->ifa == ifa) && laddr->action == 0)
            /* same pointer */
            return (1);
    }
    return (0);
}



/*
 * Pick a source address for a bound-specific endpoint (no association):
 * first a preferred address on the emitting interface that is bound to
 * the endpoint, then round-robin over the bound list for a preferred
 * address, then for an acceptable one.  NULL when nothing bound fits.
 * On success the returned ifa's refcount has been incremented.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
                              sctp_route_t *ro,
                              uint32_t vrf_id,
                              int non_asoc_addr_ok,
                              uint8_t dest_is_priv,
                              uint8_t dest_is_loop,
                              sa_family_t fam)
{
    struct sctp_laddr *laddr, *starting_point;
    void *ifn;
    int resettotop = 0;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    struct sctp_vrf *vrf;
    uint32_t ifn_index;

    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    sctp_ifn = sctp_find_ifn(ifn, ifn_index);
    /*
     * first question, is the ifn we will emit on in our list, if so, we
     * want such an address. Note that we first looked for a
     * preferred address.
     */
    if (sctp_ifn) {
        /* is a preferred one on the interface we route out? */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
            /* Jailed endpoints may only use addresses of the jail. */
            if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
                (prison_check_ip4(inp->ip_inp.inp.inp_cred,
                                  &sctp_ifa->address.sin.sin_addr) != 0)) {
                continue;
            }
#endif
#ifdef INET6
            if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
                (prison_check_ip6(inp->ip_inp.inp.inp_cred,
                                  &sctp_ifa->address.sin6.sin6_addr) != 0)) {
                continue;
            }
#endif
#endif
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
                (non_asoc_addr_ok == 0))
                continue;
            sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
                                              dest_is_loop,
                                              dest_is_priv, fam);
            if (sifa == NULL)
                continue;
            if (sctp_is_addr_in_ep(inp, sifa)) {
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
    }
    /*
     * ok, now we now need to find one on the list of the addresses.
     * We can't get one on the emitting interface so let's find first
     * a preferred one. If not that an acceptable one otherwise...
     * we return NULL.
     */
    starting_point = inp->next_addr_touse;
 once_again:
    if (inp->next_addr_touse == NULL) {
        inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
        resettotop = 1;
    }
    /* Round-robin from next_addr_touse looking for a preferred address. */
    for (laddr = inp->next_addr_touse; laddr;
         laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
                                          dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (resettotop == 0) {
        /* Wrap once to cover entries before the starting point. */
        inp->next_addr_touse = NULL;
        goto once_again;
    }

    inp->next_addr_touse = starting_point;
    resettotop = 0;
 once_again_too:
    if (inp->next_addr_touse == NULL) {
        inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
        resettotop = 1;
    }

    /* ok, what about an acceptable address in the inp */
    for (laddr = inp->next_addr_touse; laddr;
         laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
                                           dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (resettotop == 0) {
        inp->next_addr_touse = NULL;
        goto once_again_too;
    }

    /*
     * no address bound can be a source for the destination we are in
     * trouble
     */
    return (NULL);
}



/*
 * Pick a source address for a bound-specific endpoint when an
 * association exists: like the inp variant, but additionally honours
 * the association's restricted/pending address lists and round-robins
 * via asoc.last_used_address.  Returned ifa's refcount is incremented.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
                               struct sctp_tcb *stcb,
                               sctp_route_t *ro,
                               uint32_t vrf_id,
                               uint8_t dest_is_priv,
                               uint8_t
                               dest_is_loop,
                               int non_asoc_addr_ok,
                               sa_family_t fam)
{
    struct sctp_laddr *laddr, *starting_point;
    void *ifn;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    uint8_t start_at_beginning = 0;
    struct sctp_vrf *vrf;
    uint32_t ifn_index;

    /*
     * first question, is the ifn we will emit on in our list, if so, we
     * want that one.
     */
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    sctp_ifn = sctp_find_ifn(ifn, ifn_index);

    /*
     * first question, is the ifn we will emit on in our list?  If so,
     * we want that one. First we look for a preferred. Second, we go
     * for an acceptable.
     */
    if (sctp_ifn) {
        /* first try for a preferred address on the ep */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
            /* Jailed endpoints may only use addresses of the jail. */
            if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
                (prison_check_ip4(inp->ip_inp.inp.inp_cred,
                                  &sctp_ifa->address.sin.sin_addr) != 0)) {
                continue;
            }
#endif
#ifdef INET6
            if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
                (prison_check_ip6(inp->ip_inp.inp.inp_cred,
                                  &sctp_ifa->address.sin6.sin6_addr) != 0)) {
                continue;
            }
#endif
#endif
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
                continue;
            if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
                sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
                if (sifa == NULL)
                    continue;
                /*
                 * Restricted addresses are unusable; with
                 * non_asoc_addr_ok they are still unusable unless
                 * merely pending (ASCONF add in flight).
                 */
                if (((non_asoc_addr_ok == 0) &&
                     (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                     (sctp_is_addr_restricted(stcb, sifa)) &&
                     (!sctp_is_addr_pending(stcb, sifa)))) {
                    /* on the no-no list */
                    continue;
                }
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
        /* next try for an acceptable address on the ep */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
            if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
                (prison_check_ip4(inp->ip_inp.inp.inp_cred,
                                  &sctp_ifa->address.sin.sin_addr) != 0)) {
                continue;
            }
#endif
#ifdef INET6
            if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
                (prison_check_ip6(inp->ip_inp.inp.inp_cred,
                                  &sctp_ifa->address.sin6.sin6_addr) != 0)) {
                continue;
            }
#endif
#endif
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
                continue;
            if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
                sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
                if (sifa == NULL)
                    continue;
                if (((non_asoc_addr_ok == 0) &&
                     (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                     (sctp_is_addr_restricted(stcb, sifa)) &&
                     (!sctp_is_addr_pending(stcb, sifa)))) {
                    /* on the no-no list */
                    continue;
                }
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }

    }
    /*
     * if we can't find one like that then we must look at all
     * addresses bound to pick one at first preferable then
     * secondly acceptable.
     */
    starting_point = stcb->asoc.last_used_address;
 sctp_from_the_top:
    if (stcb->asoc.last_used_address == NULL) {
        start_at_beginning = 1;
        stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
    }
    /* search beginning with the last used address */
    for (laddr = stcb->asoc.last_used_address; laddr;
         laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        if (((non_asoc_addr_ok == 0) &&
             (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
             (sctp_is_addr_restricted(stcb, sifa)) &&
             (!sctp_is_addr_pending(stcb, sifa)))) {
            /* on the no-no list */
            continue;
        }
        stcb->asoc.last_used_address = laddr;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (start_at_beginning == 0) {
        /* Wrap once to cover entries before last_used_address. */
        stcb->asoc.last_used_address = NULL;
        goto sctp_from_the_top;
    }
    /* now try for any higher scope than the destination */
    stcb->asoc.last_used_address = starting_point;
    start_at_beginning = 0;
 sctp_from_the_top2:
    if (stcb->asoc.last_used_address == NULL) {
        start_at_beginning = 1;
        stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
    }
    /* search beginning with the last used address */
    for (laddr = stcb->asoc.last_used_address; laddr;
         laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
                                           dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        if (((non_asoc_addr_ok == 0) &&
             (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
             (sctp_is_addr_restricted(stcb, sifa)) &&
             (!sctp_is_addr_pending(stcb, sifa)))) {
            /* on the no-no list */
            continue;
        }
        stcb->asoc.last_used_address = laddr;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (start_at_beginning == 0) {
        /* Wrap once to cover entries before last_used_address. */
        stcb->asoc.last_used_address = NULL;
        goto sctp_from_the_top2;
    }
    return (NULL);
}

/*
 * From ifn's address list, return the addr_wanted-th (0-based) address
 * that is a "preferred" source for the destination and passes the jail,
 * defer-use, v6-scope, mobility next-hop and association scope/restriction
 * filters.  Returns NULL when fewer eligible addresses exist.
 * NOTE: no reference is taken here; callers bump the refcount.
 */
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
#if defined(__FreeBSD__)
                                                 struct sctp_inpcb *inp,
#else
                                                 struct sctp_inpcb *inp SCTP_UNUSED,
#endif
                                                 struct sctp_tcb *stcb,
                                                 int non_asoc_addr_ok,
                                                 uint8_t dest_is_loop,
                                                 uint8_t dest_is_priv,
                                                 int addr_wanted,
                                                 sa_family_t fam,
                                                 sctp_route_t *ro
                                                 )
{
    struct sctp_ifa *ifa, *sifa;
    int num_eligible_addr = 0;
#ifdef INET6
#ifdef SCTP_EMBEDDED_V6_SCOPE
    struct sockaddr_in6 sin6, lsa6;

    if (fam == AF_INET6) {
        /* Recover the destination's embedded scope id for comparison. */
        memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
#ifdef SCTP_KAME
        (void)sa6_recoverscope(&sin6);
#else
        (void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
#endif /* SCTP_KAME */
    }
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif /* INET6 */
    LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__)
#ifdef INET
        /* Jailed endpoints may only use addresses of the jail. */
        if ((ifa->address.sa.sa_family == AF_INET) &&
            (prison_check_ip4(inp->ip_inp.inp.inp_cred,
                              &ifa->address.sin.sin_addr) != 0)) {
            continue;
        }
#endif
#ifdef INET6
        if ((ifa->address.sa.sa_family == AF_INET6) &&
            (prison_check_ip6(inp->ip_inp.inp.inp_cred,
                              &ifa->address.sin6.sin6_addr) != 0)) {
            continue;
        }
#endif
#endif
        if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0))
            continue;
        sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
                                          dest_is_priv, fam);
        if (sifa == NULL)
            continue;
#ifdef INET6
        if (fam == AF_INET6 &&
            dest_is_loop &&
            sifa->src_is_loop && sifa->src_is_priv) {
            /* don't allow fe80::1 to be a src on loop ::1, we don't list it
             * to the peer so we will get an abort.
             */
            continue;
        }
#ifdef SCTP_EMBEDDED_V6_SCOPE
        if (fam == AF_INET6 &&
            IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
            IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
            /* link-local <-> link-local must belong to the same scope. */
            memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
#ifdef SCTP_KAME
            (void)sa6_recoverscope(&lsa6);
#else
            (void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
#endif /* SCTP_KAME */
            if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
                continue;
            }
        }
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif /* INET6 */

#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
        /* Check if the IPv6 address matches to next-hop.
           In the mobile case, old IPv6 address may be not deleted
           from the interface. Then, the interface has previous and
           new addresses. We should use one corresponding to the
           next-hop.
(by micchie) 2925 */ 2926 #ifdef INET6 2927 if (stcb && fam == AF_INET6 && 2928 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { 2929 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro) 2930 == 0) { 2931 continue; 2932 } 2933 } 2934 #endif 2935 #ifdef INET 2936 /* Avoid topologically incorrect IPv4 address */ 2937 if (stcb && fam == AF_INET && 2938 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { 2939 if (sctp_v4src_match_nexthop(sifa, ro) == 0) { 2940 continue; 2941 } 2942 } 2943 #endif 2944 #endif 2945 if (stcb) { 2946 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { 2947 continue; 2948 } 2949 if (((non_asoc_addr_ok == 0) && 2950 (sctp_is_addr_restricted(stcb, sifa))) || 2951 (non_asoc_addr_ok && 2952 (sctp_is_addr_restricted(stcb, sifa)) && 2953 (!sctp_is_addr_pending(stcb, sifa)))) { 2954 /* 2955 * It is restricted for some reason.. 2956 * probably not yet added. 2957 */ 2958 continue; 2959 } 2960 } 2961 if (num_eligible_addr >= addr_wanted) { 2962 return (sifa); 2963 } 2964 num_eligible_addr++; 2965 } 2966 return (NULL); 2967 } 2968 2969 2970 static int 2971 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn, 2972 #if defined(__FreeBSD__) 2973 struct sctp_inpcb *inp, 2974 #else 2975 struct sctp_inpcb *inp SCTP_UNUSED, 2976 #endif 2977 struct sctp_tcb *stcb, 2978 int non_asoc_addr_ok, 2979 uint8_t dest_is_loop, 2980 uint8_t dest_is_priv, 2981 sa_family_t fam) 2982 { 2983 struct sctp_ifa *ifa, *sifa; 2984 int num_eligible_addr = 0; 2985 2986 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { 2987 #if defined(__FreeBSD__) 2988 #ifdef INET 2989 if ((ifa->address.sa.sa_family == AF_INET) && 2990 (prison_check_ip4(inp->ip_inp.inp.inp_cred, 2991 &ifa->address.sin.sin_addr) != 0)) { 2992 continue; 2993 } 2994 #endif 2995 #ifdef INET6 2996 if ((ifa->address.sa.sa_family == AF_INET6) && 2997 (stcb != NULL) && 2998 (prison_check_ip6(inp->ip_inp.inp.inp_cred, 2999 &ifa->address.sin6.sin6_addr) != 0)) { 3000 continue; 3001 } 
3002 #endif 3003 #endif 3004 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 3005 (non_asoc_addr_ok == 0)) { 3006 continue; 3007 } 3008 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, 3009 dest_is_priv, fam); 3010 if (sifa == NULL) { 3011 continue; 3012 } 3013 if (stcb) { 3014 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { 3015 continue; 3016 } 3017 if (((non_asoc_addr_ok == 0) && 3018 (sctp_is_addr_restricted(stcb, sifa))) || 3019 (non_asoc_addr_ok && 3020 (sctp_is_addr_restricted(stcb, sifa)) && 3021 (!sctp_is_addr_pending(stcb, sifa)))) { 3022 /* 3023 * It is restricted for some reason.. 3024 * probably not yet added. 3025 */ 3026 continue; 3027 } 3028 } 3029 num_eligible_addr++; 3030 } 3031 return (num_eligible_addr); 3032 } 3033 3034 static struct sctp_ifa * 3035 sctp_choose_boundall(struct sctp_inpcb *inp, 3036 struct sctp_tcb *stcb, 3037 struct sctp_nets *net, 3038 sctp_route_t *ro, 3039 uint32_t vrf_id, 3040 uint8_t dest_is_priv, 3041 uint8_t dest_is_loop, 3042 int non_asoc_addr_ok, 3043 sa_family_t fam) 3044 { 3045 int cur_addr_num = 0, num_preferred = 0; 3046 void *ifn; 3047 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn; 3048 struct sctp_ifa *sctp_ifa, *sifa; 3049 uint32_t ifn_index; 3050 struct sctp_vrf *vrf; 3051 #ifdef INET 3052 int retried = 0; 3053 #endif 3054 3055 /*- 3056 * For boundall we can use any address in the association. 3057 * If non_asoc_addr_ok is set we can use any address (at least in 3058 * theory). So we look for preferred addresses first. If we find one, 3059 * we use it. Otherwise we next try to get an address on the 3060 * interface, which we should be able to do (unless non_asoc_addr_ok 3061 * is false and we are routed out that way). In these cases where we 3062 * can't use the address of the interface we go through all the 3063 * ifn's looking for an address we can use and fill that in. 
Punting 3064 * means we send back address 0, which will probably cause problems 3065 * actually since then IP will fill in the address of the route ifn, 3066 * which means we probably already rejected it.. i.e. here comes an 3067 * abort :-<. 3068 */ 3069 vrf = sctp_find_vrf(vrf_id); 3070 if (vrf == NULL) 3071 return (NULL); 3072 3073 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 3074 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 3075 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index); 3076 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index); 3077 if (sctp_ifn == NULL) { 3078 /* ?? We don't have this guy ?? */ 3079 SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n"); 3080 goto bound_all_plan_b; 3081 } 3082 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n", 3083 ifn_index, sctp_ifn->ifn_name); 3084 3085 if (net) { 3086 cur_addr_num = net->indx_of_eligible_next_to_use; 3087 } 3088 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, 3089 inp, stcb, 3090 non_asoc_addr_ok, 3091 dest_is_loop, 3092 dest_is_priv, fam); 3093 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n", 3094 num_preferred, sctp_ifn->ifn_name); 3095 if (num_preferred == 0) { 3096 /* 3097 * no eligible addresses, we must use some other interface 3098 * address if we can find one. 3099 */ 3100 goto bound_all_plan_b; 3101 } 3102 /* 3103 * Ok we have num_eligible_addr set with how many we can use, this 3104 * may vary from call to call due to addresses being deprecated 3105 * etc.. 3106 */ 3107 if (cur_addr_num >= num_preferred) { 3108 cur_addr_num = 0; 3109 } 3110 /* 3111 * select the nth address from the list (where cur_addr_num is the 3112 * nth) and 0 is the first one, 1 is the second one etc... 
3113 */ 3114 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num); 3115 3116 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop, 3117 dest_is_priv, cur_addr_num, fam, ro); 3118 3119 /* if sctp_ifa is NULL something changed??, fall to plan b. */ 3120 if (sctp_ifa) { 3121 atomic_add_int(&sctp_ifa->refcount, 1); 3122 if (net) { 3123 /* save off where the next one we will want */ 3124 net->indx_of_eligible_next_to_use = cur_addr_num + 1; 3125 } 3126 return (sctp_ifa); 3127 } 3128 /* 3129 * plan_b: Look at all interfaces and find a preferred address. If 3130 * no preferred fall through to plan_c. 3131 */ 3132 bound_all_plan_b: 3133 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n"); 3134 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 3135 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n", 3136 sctp_ifn->ifn_name); 3137 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 3138 /* wrong base scope */ 3139 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n"); 3140 continue; 3141 } 3142 if ((sctp_ifn == looked_at) && looked_at) { 3143 /* already looked at this guy */ 3144 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n"); 3145 continue; 3146 } 3147 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, 3148 dest_is_loop, dest_is_priv, fam); 3149 SCTPDBG(SCTP_DEBUG_OUTPUT2, 3150 "Found ifn:%p %d preferred source addresses\n", 3151 ifn, num_preferred); 3152 if (num_preferred == 0) { 3153 /* None on this interface. */ 3154 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n"); 3155 continue; 3156 } 3157 SCTPDBG(SCTP_DEBUG_OUTPUT2, 3158 "num preferred:%d on interface:%p cur_addr_num:%d\n", 3159 num_preferred, (void *)sctp_ifn, cur_addr_num); 3160 3161 /* 3162 * Ok we have num_eligible_addr set with how many we can 3163 * use, this may vary from call to call due to addresses 3164 * being deprecated etc.. 
3165 */ 3166 if (cur_addr_num >= num_preferred) { 3167 cur_addr_num = 0; 3168 } 3169 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop, 3170 dest_is_priv, cur_addr_num, fam, ro); 3171 if (sifa == NULL) 3172 continue; 3173 if (net) { 3174 net->indx_of_eligible_next_to_use = cur_addr_num + 1; 3175 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n", 3176 cur_addr_num); 3177 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:"); 3178 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); 3179 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:"); 3180 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa); 3181 } 3182 atomic_add_int(&sifa->refcount, 1); 3183 return (sifa); 3184 } 3185 #ifdef INET 3186 again_with_private_addresses_allowed: 3187 #endif 3188 /* plan_c: do we have an acceptable address on the emit interface */ 3189 sifa = NULL; 3190 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n"); 3191 if (emit_ifn == NULL) { 3192 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n"); 3193 goto plan_d; 3194 } 3195 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) { 3196 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa); 3197 #if defined(__FreeBSD__) 3198 #ifdef INET 3199 if ((sctp_ifa->address.sa.sa_family == AF_INET) && 3200 (prison_check_ip4(inp->ip_inp.inp.inp_cred, 3201 &sctp_ifa->address.sin.sin_addr) != 0)) { 3202 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n"); 3203 continue; 3204 } 3205 #endif 3206 #ifdef INET6 3207 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && 3208 (prison_check_ip6(inp->ip_inp.inp.inp_cred, 3209 &sctp_ifa->address.sin6.sin6_addr) != 0)) { 3210 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n"); 3211 continue; 3212 } 3213 #endif 3214 #endif 3215 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 3216 (non_asoc_addr_ok == 0)) { 3217 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n"); 3218 continue; 3219 } 3220 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, 3221 dest_is_priv, fam); 3222 if (sifa 
== NULL) { 3223 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n"); 3224 continue; 3225 } 3226 if (stcb) { 3227 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { 3228 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n"); 3229 sifa = NULL; 3230 continue; 3231 } 3232 if (((non_asoc_addr_ok == 0) && 3233 (sctp_is_addr_restricted(stcb, sifa))) || 3234 (non_asoc_addr_ok && 3235 (sctp_is_addr_restricted(stcb, sifa)) && 3236 (!sctp_is_addr_pending(stcb, sifa)))) { 3237 /* 3238 * It is restricted for some 3239 * reason.. probably not yet added. 3240 */ 3241 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its resticted\n"); 3242 sifa = NULL; 3243 continue; 3244 } 3245 } else { 3246 SCTP_PRINTF("Stcb is null - no print\n"); 3247 } 3248 atomic_add_int(&sifa->refcount, 1); 3249 goto out; 3250 } 3251 plan_d: 3252 /* 3253 * plan_d: We are in trouble. No preferred address on the emit 3254 * interface. And not even a preferred address on all interfaces. 3255 * Go out and see if we can find an acceptable address somewhere 3256 * amongst all interfaces. 
3257 */ 3258 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at); 3259 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 3260 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 3261 /* wrong base scope */ 3262 continue; 3263 } 3264 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 3265 #if defined(__FreeBSD__) 3266 #ifdef INET 3267 if ((sctp_ifa->address.sa.sa_family == AF_INET) && 3268 (prison_check_ip4(inp->ip_inp.inp.inp_cred, 3269 &sctp_ifa->address.sin.sin_addr) != 0)) { 3270 continue; 3271 } 3272 #endif 3273 #ifdef INET6 3274 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && 3275 (prison_check_ip6(inp->ip_inp.inp.inp_cred, 3276 &sctp_ifa->address.sin6.sin6_addr) != 0)) { 3277 continue; 3278 } 3279 #endif 3280 #endif 3281 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 3282 (non_asoc_addr_ok == 0)) 3283 continue; 3284 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, 3285 dest_is_loop, 3286 dest_is_priv, fam); 3287 if (sifa == NULL) 3288 continue; 3289 if (stcb) { 3290 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { 3291 sifa = NULL; 3292 continue; 3293 } 3294 if (((non_asoc_addr_ok == 0) && 3295 (sctp_is_addr_restricted(stcb, sifa))) || 3296 (non_asoc_addr_ok && 3297 (sctp_is_addr_restricted(stcb, sifa)) && 3298 (!sctp_is_addr_pending(stcb, sifa)))) { 3299 /* 3300 * It is restricted for some 3301 * reason.. probably not yet added. 
3302 */ 3303 sifa = NULL; 3304 continue; 3305 } 3306 } 3307 goto out; 3308 } 3309 } 3310 #ifdef INET 3311 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) { 3312 stcb->asoc.scope.ipv4_local_scope = 1; 3313 retried = 1; 3314 goto again_with_private_addresses_allowed; 3315 } else if (retried == 1) { 3316 stcb->asoc.scope.ipv4_local_scope = 0; 3317 } 3318 #endif 3319 out: 3320 #ifdef INET 3321 if (sifa) { 3322 if (retried == 1) { 3323 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 3324 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 3325 /* wrong base scope */ 3326 continue; 3327 } 3328 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 3329 struct sctp_ifa *tmp_sifa; 3330 3331 #if defined(__FreeBSD__) 3332 #ifdef INET 3333 if ((sctp_ifa->address.sa.sa_family == AF_INET) && 3334 (prison_check_ip4(inp->ip_inp.inp.inp_cred, 3335 &sctp_ifa->address.sin.sin_addr) != 0)) { 3336 continue; 3337 } 3338 #endif 3339 #ifdef INET6 3340 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && 3341 (prison_check_ip6(inp->ip_inp.inp.inp_cred, 3342 &sctp_ifa->address.sin6.sin6_addr) != 0)) { 3343 continue; 3344 } 3345 #endif 3346 #endif 3347 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 3348 (non_asoc_addr_ok == 0)) 3349 continue; 3350 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, 3351 dest_is_loop, 3352 dest_is_priv, fam); 3353 if (tmp_sifa == NULL) { 3354 continue; 3355 } 3356 if (tmp_sifa == sifa) { 3357 continue; 3358 } 3359 if (stcb) { 3360 if (sctp_is_address_in_scope(tmp_sifa, 3361 &stcb->asoc.scope, 0) == 0) { 3362 continue; 3363 } 3364 if (((non_asoc_addr_ok == 0) && 3365 (sctp_is_addr_restricted(stcb, tmp_sifa))) || 3366 (non_asoc_addr_ok && 3367 (sctp_is_addr_restricted(stcb, tmp_sifa)) && 3368 (!sctp_is_addr_pending(stcb, tmp_sifa)))) { 3369 /* 3370 * It is restricted for some 3371 * reason.. probably not yet added. 
3372 */ 3373 continue; 3374 } 3375 } 3376 if ((tmp_sifa->address.sin.sin_family == AF_INET) && 3377 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) { 3378 sctp_add_local_addr_restricted(stcb, tmp_sifa); 3379 } 3380 } 3381 } 3382 } 3383 atomic_add_int(&sifa->refcount, 1); 3384 } 3385 #endif 3386 return (sifa); 3387 } 3388 3389 3390 3391 /* tcb may be NULL */ 3392 struct sctp_ifa * 3393 sctp_source_address_selection(struct sctp_inpcb *inp, 3394 struct sctp_tcb *stcb, 3395 sctp_route_t *ro, 3396 struct sctp_nets *net, 3397 int non_asoc_addr_ok, uint32_t vrf_id) 3398 { 3399 struct sctp_ifa *answer; 3400 uint8_t dest_is_priv, dest_is_loop; 3401 sa_family_t fam; 3402 #ifdef INET 3403 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst; 3404 #endif 3405 #ifdef INET6 3406 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst; 3407 #endif 3408 3409 /** 3410 * Rules: - Find the route if needed, cache if I can. - Look at 3411 * interface address in route, Is it in the bound list. If so we 3412 * have the best source. - If not we must rotate amongst the 3413 * addresses. 3414 * 3415 * Cavets and issues 3416 * 3417 * Do we need to pay attention to scope. We can have a private address 3418 * or a global address we are sourcing or sending to. So if we draw 3419 * it out 3420 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3421 * For V4 3422 * ------------------------------------------ 3423 * source * dest * result 3424 * ----------------------------------------- 3425 * <a> Private * Global * NAT 3426 * ----------------------------------------- 3427 * <b> Private * Private * No problem 3428 * ----------------------------------------- 3429 * <c> Global * Private * Huh, How will this work? 
3430 * ----------------------------------------- 3431 * <d> Global * Global * No Problem 3432 *------------------------------------------ 3433 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3434 * For V6 3435 *------------------------------------------ 3436 * source * dest * result 3437 * ----------------------------------------- 3438 * <a> Linklocal * Global * 3439 * ----------------------------------------- 3440 * <b> Linklocal * Linklocal * No problem 3441 * ----------------------------------------- 3442 * <c> Global * Linklocal * Huh, How will this work? 3443 * ----------------------------------------- 3444 * <d> Global * Global * No Problem 3445 *------------------------------------------ 3446 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3447 * 3448 * And then we add to that what happens if there are multiple addresses 3449 * assigned to an interface. Remember the ifa on a ifn is a linked 3450 * list of addresses. So one interface can have more than one IP 3451 * address. What happens if we have both a private and a global 3452 * address? Do we then use context of destination to sort out which 3453 * one is best? And what about NAT's sending P->G may get you a NAT 3454 * translation, or should you select the G thats on the interface in 3455 * preference. 3456 * 3457 * Decisions: 3458 * 3459 * - count the number of addresses on the interface. 3460 * - if it is one, no problem except case <c>. 3461 * For <a> we will assume a NAT out there. 3462 * - if there are more than one, then we need to worry about scope P 3463 * or G. We should prefer G -> G and P -> P if possible. 3464 * Then as a secondary fall back to mixed types G->P being a last 3465 * ditch one. 3466 * - The above all works for bound all, but bound specific we need to 3467 * use the same concept but instead only consider the bound 3468 * addresses. If the bound set is NOT assigned to the interface then 3469 * we must use rotation amongst the bound addresses.. 
3470 */ 3471 if (ro->ro_rt == NULL) { 3472 /* 3473 * Need a route to cache. 3474 */ 3475 SCTP_RTALLOC(ro, vrf_id); 3476 } 3477 if (ro->ro_rt == NULL) { 3478 return (NULL); 3479 } 3480 fam = ro->ro_dst.sa_family; 3481 dest_is_priv = dest_is_loop = 0; 3482 /* Setup our scopes for the destination */ 3483 switch (fam) { 3484 #ifdef INET 3485 case AF_INET: 3486 /* Scope based on outbound address */ 3487 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) { 3488 dest_is_loop = 1; 3489 if (net != NULL) { 3490 /* mark it as local */ 3491 net->addr_is_local = 1; 3492 } 3493 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) { 3494 dest_is_priv = 1; 3495 } 3496 break; 3497 #endif 3498 #ifdef INET6 3499 case AF_INET6: 3500 /* Scope based on outbound address */ 3501 #if defined(__Userspace_os_Windows) 3502 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) { 3503 #else 3504 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) || 3505 SCTP_ROUTE_IS_REAL_LOOP(ro)) { 3506 #endif 3507 /* 3508 * If the address is a loopback address, which 3509 * consists of "::1" OR "fe80::1%lo0", we are loopback 3510 * scope. But we don't use dest_is_priv (link local 3511 * addresses). 
3512 */ 3513 dest_is_loop = 1; 3514 if (net != NULL) { 3515 /* mark it as local */ 3516 net->addr_is_local = 1; 3517 } 3518 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) { 3519 dest_is_priv = 1; 3520 } 3521 break; 3522 #endif 3523 } 3524 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:"); 3525 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst); 3526 SCTP_IPI_ADDR_RLOCK(); 3527 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3528 /* 3529 * Bound all case 3530 */ 3531 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id, 3532 dest_is_priv, dest_is_loop, 3533 non_asoc_addr_ok, fam); 3534 SCTP_IPI_ADDR_RUNLOCK(); 3535 return (answer); 3536 } 3537 /* 3538 * Subset bound case 3539 */ 3540 if (stcb) { 3541 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro, 3542 vrf_id, dest_is_priv, 3543 dest_is_loop, 3544 non_asoc_addr_ok, fam); 3545 } else { 3546 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id, 3547 non_asoc_addr_ok, 3548 dest_is_priv, 3549 dest_is_loop, fam); 3550 } 3551 SCTP_IPI_ADDR_RUNLOCK(); 3552 return (answer); 3553 } 3554 3555 static int 3556 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize) 3557 { 3558 #if defined(__Userspace_os_Windows) 3559 WSACMSGHDR cmh; 3560 #else 3561 struct cmsghdr cmh; 3562 #endif 3563 int tlen, at, found; 3564 struct sctp_sndinfo sndinfo; 3565 struct sctp_prinfo prinfo; 3566 struct sctp_authinfo authinfo; 3567 3568 tlen = SCTP_BUF_LEN(control); 3569 at = 0; 3570 found = 0; 3571 /* 3572 * Independent of how many mbufs, find the c_type inside the control 3573 * structure and copy out the data. 3574 */ 3575 while (at < tlen) { 3576 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) { 3577 /* There is not enough room for one more. */ 3578 return (found); 3579 } 3580 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh); 3581 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) { 3582 /* We dont't have a complete CMSG header. 
*/ 3583 return (found); 3584 } 3585 if (((int)cmh.cmsg_len + at) > tlen) { 3586 /* We don't have the complete CMSG. */ 3587 return (found); 3588 } 3589 if ((cmh.cmsg_level == IPPROTO_SCTP) && 3590 ((c_type == cmh.cmsg_type) || 3591 ((c_type == SCTP_SNDRCV) && 3592 ((cmh.cmsg_type == SCTP_SNDINFO) || 3593 (cmh.cmsg_type == SCTP_PRINFO) || 3594 (cmh.cmsg_type == SCTP_AUTHINFO))))) { 3595 if (c_type == cmh.cmsg_type) { 3596 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) { 3597 return (found); 3598 } 3599 /* It is exactly what we want. Copy it out. */ 3600 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data); 3601 return (1); 3602 } else { 3603 struct sctp_sndrcvinfo *sndrcvinfo; 3604 3605 sndrcvinfo = (struct sctp_sndrcvinfo *)data; 3606 if (found == 0) { 3607 if (cpsize < sizeof(struct sctp_sndrcvinfo)) { 3608 return (found); 3609 } 3610 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo)); 3611 } 3612 switch (cmh.cmsg_type) { 3613 case SCTP_SNDINFO: 3614 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) { 3615 return (found); 3616 } 3617 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo); 3618 sndrcvinfo->sinfo_stream = sndinfo.snd_sid; 3619 sndrcvinfo->sinfo_flags = sndinfo.snd_flags; 3620 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid; 3621 sndrcvinfo->sinfo_context = sndinfo.snd_context; 3622 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id; 3623 break; 3624 case SCTP_PRINFO: 3625 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) { 3626 return (found); 3627 } 3628 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo); 3629 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) { 3630 sndrcvinfo->sinfo_timetolive = prinfo.pr_value; 3631 } else { 3632 sndrcvinfo->sinfo_timetolive = 0; 3633 } 3634 sndrcvinfo->sinfo_flags |= prinfo.pr_policy; 3635 break; 3636 case SCTP_AUTHINFO: 3637 if 
((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) { 3638 return (found); 3639 } 3640 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo); 3641 sndrcvinfo->sinfo_keynumber_valid = 1; 3642 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber; 3643 break; 3644 default: 3645 return (found); 3646 } 3647 found = 1; 3648 } 3649 } 3650 at += CMSG_ALIGN(cmh.cmsg_len); 3651 } 3652 return (found); 3653 } 3654 3655 static int 3656 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error) 3657 { 3658 #if defined(__Userspace_os_Windows) 3659 WSACMSGHDR cmh; 3660 #else 3661 struct cmsghdr cmh; 3662 #endif 3663 int tlen, at; 3664 struct sctp_initmsg initmsg; 3665 #ifdef INET 3666 struct sockaddr_in sin; 3667 #endif 3668 #ifdef INET6 3669 struct sockaddr_in6 sin6; 3670 #endif 3671 3672 tlen = SCTP_BUF_LEN(control); 3673 at = 0; 3674 while (at < tlen) { 3675 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) { 3676 /* There is not enough room for one more. */ 3677 *error = EINVAL; 3678 return (1); 3679 } 3680 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh); 3681 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) { 3682 /* We dont't have a complete CMSG header. */ 3683 *error = EINVAL; 3684 return (1); 3685 } 3686 if (((int)cmh.cmsg_len + at) > tlen) { 3687 /* We don't have the complete CMSG. 
*/ 3688 *error = EINVAL; 3689 return (1); 3690 } 3691 if (cmh.cmsg_level == IPPROTO_SCTP) { 3692 switch (cmh.cmsg_type) { 3693 case SCTP_INIT: 3694 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) { 3695 *error = EINVAL; 3696 return (1); 3697 } 3698 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg); 3699 if (initmsg.sinit_max_attempts) 3700 stcb->asoc.max_init_times = initmsg.sinit_max_attempts; 3701 if (initmsg.sinit_num_ostreams) 3702 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams; 3703 if (initmsg.sinit_max_instreams) 3704 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams; 3705 if (initmsg.sinit_max_init_timeo) 3706 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo; 3707 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) { 3708 struct sctp_stream_out *tmp_str; 3709 unsigned int i; 3710 #if defined(SCTP_DETAILED_STR_STATS) 3711 int j; 3712 #endif 3713 3714 /* Default is NOT correct */ 3715 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n", 3716 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams); 3717 SCTP_TCB_UNLOCK(stcb); 3718 SCTP_MALLOC(tmp_str, 3719 struct sctp_stream_out *, 3720 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)), 3721 SCTP_M_STRMO); 3722 SCTP_TCB_LOCK(stcb); 3723 if (tmp_str != NULL) { 3724 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO); 3725 stcb->asoc.strmout = tmp_str; 3726 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams; 3727 } else { 3728 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt; 3729 } 3730 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3731 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 3732 stcb->asoc.strmout[i].chunks_on_queues = 0; 3733 stcb->asoc.strmout[i].next_sequence_send = 0; 3734 #if defined(SCTP_DETAILED_STR_STATS) 3735 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 3736 stcb->asoc.strmout[i].abandoned_sent[j] = 0; 3737 
stcb->asoc.strmout[i].abandoned_unsent[j] = 0; 3738 } 3739 #else 3740 stcb->asoc.strmout[i].abandoned_sent[0] = 0; 3741 stcb->asoc.strmout[i].abandoned_unsent[0] = 0; 3742 #endif 3743 stcb->asoc.strmout[i].stream_no = i; 3744 stcb->asoc.strmout[i].last_msg_incomplete = 0; 3745 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL); 3746 } 3747 } 3748 break; 3749 #ifdef INET 3750 case SCTP_DSTADDRV4: 3751 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) { 3752 *error = EINVAL; 3753 return (1); 3754 } 3755 memset(&sin, 0, sizeof(struct sockaddr_in)); 3756 sin.sin_family = AF_INET; 3757 #ifdef HAVE_SIN_LEN 3758 sin.sin_len = sizeof(struct sockaddr_in); 3759 #endif 3760 sin.sin_port = stcb->rport; 3761 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr); 3762 if ((sin.sin_addr.s_addr == INADDR_ANY) || 3763 (sin.sin_addr.s_addr == INADDR_BROADCAST) || 3764 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) { 3765 *error = EINVAL; 3766 return (1); 3767 } 3768 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, 3769 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 3770 *error = ENOBUFS; 3771 return (1); 3772 } 3773 break; 3774 #endif 3775 #ifdef INET6 3776 case SCTP_DSTADDRV6: 3777 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) { 3778 *error = EINVAL; 3779 return (1); 3780 } 3781 memset(&sin6, 0, sizeof(struct sockaddr_in6)); 3782 sin6.sin6_family = AF_INET6; 3783 #ifdef HAVE_SIN6_LEN 3784 sin6.sin6_len = sizeof(struct sockaddr_in6); 3785 #endif 3786 sin6.sin6_port = stcb->rport; 3787 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr); 3788 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) || 3789 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) { 3790 *error = EINVAL; 3791 return (1); 3792 } 3793 #ifdef INET 3794 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) { 3795 in6_sin6_2_sin(&sin, &sin6); 3796 if 
((sin.sin_addr.s_addr == INADDR_ANY) || 3797 (sin.sin_addr.s_addr == INADDR_BROADCAST) || 3798 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) { 3799 *error = EINVAL; 3800 return (1); 3801 } 3802 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, 3803 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 3804 *error = ENOBUFS; 3805 return (1); 3806 } 3807 } else 3808 #endif 3809 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, 3810 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 3811 *error = ENOBUFS; 3812 return (1); 3813 } 3814 break; 3815 #endif 3816 default: 3817 break; 3818 } 3819 } 3820 at += CMSG_ALIGN(cmh.cmsg_len); 3821 } 3822 return (0); 3823 } 3824 3825 static struct sctp_tcb * 3826 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p, 3827 uint16_t port, 3828 struct mbuf *control, 3829 struct sctp_nets **net_p, 3830 int *error) 3831 { 3832 #if defined(__Userspace_os_Windows) 3833 WSACMSGHDR cmh; 3834 #else 3835 struct cmsghdr cmh; 3836 #endif 3837 int tlen, at; 3838 struct sctp_tcb *stcb; 3839 struct sockaddr *addr; 3840 #ifdef INET 3841 struct sockaddr_in sin; 3842 #endif 3843 #ifdef INET6 3844 struct sockaddr_in6 sin6; 3845 #endif 3846 3847 tlen = SCTP_BUF_LEN(control); 3848 at = 0; 3849 while (at < tlen) { 3850 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) { 3851 /* There is not enough room for one more. */ 3852 *error = EINVAL; 3853 return (NULL); 3854 } 3855 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh); 3856 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) { 3857 /* We dont't have a complete CMSG header. */ 3858 *error = EINVAL; 3859 return (NULL); 3860 } 3861 if (((int)cmh.cmsg_len + at) > tlen) { 3862 /* We don't have the complete CMSG. 
*/ 3863 *error = EINVAL; 3864 return (NULL); 3865 } 3866 if (cmh.cmsg_level == IPPROTO_SCTP) { 3867 switch (cmh.cmsg_type) { 3868 #ifdef INET 3869 case SCTP_DSTADDRV4: 3870 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) { 3871 *error = EINVAL; 3872 return (NULL); 3873 } 3874 memset(&sin, 0, sizeof(struct sockaddr_in)); 3875 sin.sin_family = AF_INET; 3876 #ifdef HAVE_SIN_LEN 3877 sin.sin_len = sizeof(struct sockaddr_in); 3878 #endif 3879 sin.sin_port = port; 3880 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr); 3881 addr = (struct sockaddr *)&sin; 3882 break; 3883 #endif 3884 #ifdef INET6 3885 case SCTP_DSTADDRV6: 3886 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) { 3887 *error = EINVAL; 3888 return (NULL); 3889 } 3890 memset(&sin6, 0, sizeof(struct sockaddr_in6)); 3891 sin6.sin6_family = AF_INET6; 3892 #ifdef HAVE_SIN6_LEN 3893 sin6.sin6_len = sizeof(struct sockaddr_in6); 3894 #endif 3895 sin6.sin6_port = port; 3896 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr); 3897 #ifdef INET 3898 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) { 3899 in6_sin6_2_sin(&sin, &sin6); 3900 addr = (struct sockaddr *)&sin; 3901 } else 3902 #endif 3903 addr = (struct sockaddr *)&sin6; 3904 break; 3905 #endif 3906 default: 3907 addr = NULL; 3908 break; 3909 } 3910 if (addr) { 3911 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL); 3912 if (stcb != NULL) { 3913 return (stcb); 3914 } 3915 } 3916 } 3917 at += CMSG_ALIGN(cmh.cmsg_len); 3918 } 3919 return (NULL); 3920 } 3921 3922 static struct mbuf * 3923 sctp_add_cookie(struct mbuf *init, int init_offset, 3924 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature) 3925 { 3926 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret; 3927 struct sctp_state_cookie *stc; 3928 struct sctp_paramhdr *ph; 3929 uint8_t *foo; 
3930 int sig_offset; 3931 uint16_t cookie_sz; 3932 3933 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) + 3934 sizeof(struct sctp_paramhdr)), 0, 3935 M_NOWAIT, 1, MT_DATA); 3936 if (mret == NULL) { 3937 return (NULL); 3938 } 3939 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT); 3940 if (copy_init == NULL) { 3941 sctp_m_freem(mret); 3942 return (NULL); 3943 } 3944 #ifdef SCTP_MBUF_LOGGING 3945 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 3946 struct mbuf *mat; 3947 3948 for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) { 3949 if (SCTP_BUF_IS_EXTENDED(mat)) { 3950 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 3951 } 3952 } 3953 } 3954 #endif 3955 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL, 3956 M_NOWAIT); 3957 if (copy_initack == NULL) { 3958 sctp_m_freem(mret); 3959 sctp_m_freem(copy_init); 3960 return (NULL); 3961 } 3962 #ifdef SCTP_MBUF_LOGGING 3963 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 3964 struct mbuf *mat; 3965 3966 for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) { 3967 if (SCTP_BUF_IS_EXTENDED(mat)) { 3968 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 3969 } 3970 } 3971 } 3972 #endif 3973 /* easy side we just drop it on the end */ 3974 ph = mtod(mret, struct sctp_paramhdr *); 3975 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) + 3976 sizeof(struct sctp_paramhdr); 3977 stc = (struct sctp_state_cookie *)((caddr_t)ph + 3978 sizeof(struct sctp_paramhdr)); 3979 ph->param_type = htons(SCTP_STATE_COOKIE); 3980 ph->param_length = 0; /* fill in at the end */ 3981 /* Fill in the stc cookie data */ 3982 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie)); 3983 3984 /* tack the INIT and then the INIT-ACK onto the chain */ 3985 cookie_sz = 0; 3986 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3987 cookie_sz += SCTP_BUF_LEN(m_at); 3988 if (SCTP_BUF_NEXT(m_at) == NULL) { 3989 SCTP_BUF_NEXT(m_at) = copy_init; 3990 break; 3991 } 3992 } 3993 for (m_at = 
copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3994 cookie_sz += SCTP_BUF_LEN(m_at); 3995 if (SCTP_BUF_NEXT(m_at) == NULL) { 3996 SCTP_BUF_NEXT(m_at) = copy_initack; 3997 break; 3998 } 3999 } 4000 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 4001 cookie_sz += SCTP_BUF_LEN(m_at); 4002 if (SCTP_BUF_NEXT(m_at) == NULL) { 4003 break; 4004 } 4005 } 4006 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA); 4007 if (sig == NULL) { 4008 /* no space, so free the entire chain */ 4009 sctp_m_freem(mret); 4010 return (NULL); 4011 } 4012 SCTP_BUF_LEN(sig) = 0; 4013 SCTP_BUF_NEXT(m_at) = sig; 4014 sig_offset = 0; 4015 foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset); 4016 memset(foo, 0, SCTP_SIGNATURE_SIZE); 4017 *signature = foo; 4018 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE; 4019 cookie_sz += SCTP_SIGNATURE_SIZE; 4020 ph->param_length = htons(cookie_sz); 4021 return (mret); 4022 } 4023 4024 4025 static uint8_t 4026 sctp_get_ect(struct sctp_tcb *stcb) 4027 { 4028 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) { 4029 return (SCTP_ECT0_BIT); 4030 } else { 4031 return (0); 4032 } 4033 } 4034 4035 #if defined(INET) || defined(INET6) 4036 static void 4037 sctp_handle_no_route(struct sctp_tcb *stcb, 4038 struct sctp_nets *net, 4039 int so_locked) 4040 { 4041 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n"); 4042 4043 if (net) { 4044 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was "); 4045 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa); 4046 if (net->dest_state & SCTP_ADDR_CONFIRMED) { 4047 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) { 4048 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net); 4049 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 4050 stcb, 0, 4051 (void *)net, 4052 so_locked); 4053 net->dest_state &= ~SCTP_ADDR_REACHABLE; 4054 net->dest_state &= ~SCTP_ADDR_PF; 4055 } 4056 } 4057 if (stcb) { 4058 if (net == stcb->asoc.primary_destination) { 4059 /* need a new primary 
*/ 4060 struct sctp_nets *alt; 4061 4062 alt = sctp_find_alternate_net(stcb, net, 0); 4063 if (alt != net) { 4064 if (stcb->asoc.alternate) { 4065 sctp_free_remote_addr(stcb->asoc.alternate); 4066 } 4067 stcb->asoc.alternate = alt; 4068 atomic_add_int(&stcb->asoc.alternate->ref_count, 1); 4069 if (net->ro._s_addr) { 4070 sctp_free_ifa(net->ro._s_addr); 4071 net->ro._s_addr = NULL; 4072 } 4073 net->src_addr_selected = 0; 4074 } 4075 } 4076 } 4077 } 4078 } 4079 #endif 4080 4081 static int 4082 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, 4083 struct sctp_tcb *stcb, /* may be NULL */ 4084 struct sctp_nets *net, 4085 struct sockaddr *to, 4086 struct mbuf *m, 4087 uint32_t auth_offset, 4088 struct sctp_auth_chunk *auth, 4089 uint16_t auth_keyid, 4090 int nofragment_flag, 4091 int ecn_ok, 4092 int out_of_asoc_ok, 4093 uint16_t src_port, 4094 uint16_t dest_port, 4095 uint32_t v_tag, 4096 uint16_t port, 4097 union sctp_sockstore *over_addr, 4098 #if defined(__FreeBSD__) 4099 uint8_t use_mflowid, uint32_t mflowid, 4100 #endif 4101 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4102 int so_locked SCTP_UNUSED 4103 #else 4104 int so_locked 4105 #endif 4106 ) 4107 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */ 4108 { 4109 /** 4110 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header 4111 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure: 4112 * - fill in the HMAC digest of any AUTH chunk in the packet. 4113 * - calculate and fill in the SCTP checksum. 4114 * - prepend an IP address header. 4115 * - if boundall use INADDR_ANY. 4116 * - if boundspecific do source address selection. 4117 * - set fragmentation option for ipV4. 4118 * - On return from IP output, check/adjust mtu size of output 4119 * interface and smallest_mtu size as well. 
4120 */ 4121 /* Will need ifdefs around this */ 4122 #ifdef __Panda__ 4123 pakhandle_type o_pak; 4124 #endif 4125 struct mbuf *newm; 4126 struct sctphdr *sctphdr; 4127 int packet_length; 4128 int ret; 4129 #if defined(INET) || defined(INET6) 4130 uint32_t vrf_id; 4131 #endif 4132 #if defined(INET) || defined(INET6) 4133 #if !defined(__Panda__) 4134 struct mbuf *o_pak; 4135 #endif 4136 sctp_route_t *ro = NULL; 4137 struct udphdr *udp = NULL; 4138 #endif 4139 uint8_t tos_value; 4140 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4141 struct socket *so = NULL; 4142 #endif 4143 4144 #if defined(__APPLE__) 4145 if (so_locked) { 4146 sctp_lock_assert(SCTP_INP_SO(inp)); 4147 SCTP_TCB_LOCK_ASSERT(stcb); 4148 } else { 4149 sctp_unlock_assert(SCTP_INP_SO(inp)); 4150 } 4151 #endif 4152 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) { 4153 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); 4154 sctp_m_freem(m); 4155 return (EFAULT); 4156 } 4157 #if defined(INET) || defined(INET6) 4158 if (stcb) { 4159 vrf_id = stcb->asoc.vrf_id; 4160 } else { 4161 vrf_id = inp->def_vrf_id; 4162 } 4163 #endif 4164 /* fill in the HMAC digest for any AUTH chunk in the packet */ 4165 if ((auth != NULL) && (stcb != NULL)) { 4166 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid); 4167 } 4168 4169 if (net) { 4170 tos_value = net->dscp; 4171 } else if (stcb) { 4172 tos_value = stcb->asoc.default_dscp; 4173 } else { 4174 tos_value = inp->sctp_ep.default_dscp; 4175 } 4176 4177 switch (to->sa_family) { 4178 #ifdef INET 4179 case AF_INET: 4180 { 4181 struct ip *ip = NULL; 4182 sctp_route_t iproute; 4183 int len; 4184 4185 len = sizeof(struct ip) + sizeof(struct sctphdr); 4186 if (port) { 4187 len += sizeof(struct udphdr); 4188 } 4189 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); 4190 if (newm == NULL) { 4191 sctp_m_freem(m); 4192 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 4193 return (ENOMEM); 4194 } 4195 
SCTP_ALIGN_TO_END(newm, len); 4196 SCTP_BUF_LEN(newm) = len; 4197 SCTP_BUF_NEXT(newm) = m; 4198 m = newm; 4199 #if defined(__FreeBSD__) 4200 if (net != NULL) { 4201 #ifdef INVARIANTS 4202 if (net->flowidset == 0) { 4203 panic("Flow ID not set"); 4204 } 4205 #endif 4206 m->m_pkthdr.flowid = net->flowid; 4207 m->m_flags |= M_FLOWID; 4208 } else { 4209 if (use_mflowid != 0) { 4210 m->m_pkthdr.flowid = mflowid; 4211 m->m_flags |= M_FLOWID; 4212 } 4213 } 4214 #endif 4215 packet_length = sctp_calculate_len(m); 4216 ip = mtod(m, struct ip *); 4217 ip->ip_v = IPVERSION; 4218 ip->ip_hl = (sizeof(struct ip) >> 2); 4219 if (tos_value == 0) { 4220 /* 4221 * This means especially, that it is not set at the 4222 * SCTP layer. So use the value from the IP layer. 4223 */ 4224 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) 4225 tos_value = inp->ip_inp.inp.inp_ip_tos; 4226 #else 4227 tos_value = inp->inp_ip_tos; 4228 #endif 4229 } 4230 tos_value &= 0xfc; 4231 if (ecn_ok) { 4232 tos_value |= sctp_get_ect(stcb); 4233 } 4234 if ((nofragment_flag) && (port == 0)) { 4235 #if defined(__FreeBSD__) 4236 #if __FreeBSD_version >= 1000000 4237 ip->ip_off = htons(IP_DF); 4238 #else 4239 ip->ip_off = IP_DF; 4240 #endif 4241 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace__) 4242 ip->ip_off = IP_DF; 4243 #else 4244 ip->ip_off = htons(IP_DF); 4245 #endif 4246 } else { 4247 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000 4248 ip->ip_off = htons(0); 4249 #else 4250 ip->ip_off = 0; 4251 #endif 4252 } 4253 #if defined(__FreeBSD__) 4254 /* FreeBSD has a function for ip_id's */ 4255 ip->ip_id = ip_newid(); 4256 #elif defined(RANDOM_IP_ID) 4257 /* Apple has RANDOM_IP_ID switch */ 4258 ip->ip_id = htons(ip_randomid()); 4259 #elif defined(__Userspace__) 4260 ip->ip_id = htons(SCTP_IP_ID(inp)++); 4261 #else 4262 ip->ip_id = SCTP_IP_ID(inp)++; 4263 #endif 4264 4265 #if defined(__FreeBSD__) || 
defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) 4266 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl; 4267 #else 4268 ip->ip_ttl = inp->inp_ip_ttl; 4269 #endif 4270 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000 4271 ip->ip_len = htons(packet_length); 4272 #else 4273 ip->ip_len = packet_length; 4274 #endif 4275 ip->ip_tos = tos_value; 4276 if (port) { 4277 ip->ip_p = IPPROTO_UDP; 4278 } else { 4279 ip->ip_p = IPPROTO_SCTP; 4280 } 4281 ip->ip_sum = 0; 4282 if (net == NULL) { 4283 ro = &iproute; 4284 memset(&iproute, 0, sizeof(iproute)); 4285 #ifdef HAVE_SA_LEN 4286 memcpy(&ro->ro_dst, to, to->sa_len); 4287 #else 4288 memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in)); 4289 #endif 4290 } else { 4291 ro = (sctp_route_t *)&net->ro; 4292 } 4293 /* Now the address selection part */ 4294 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr; 4295 4296 /* call the routine to select the src address */ 4297 if (net && out_of_asoc_ok == 0) { 4298 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) { 4299 sctp_free_ifa(net->ro._s_addr); 4300 net->ro._s_addr = NULL; 4301 net->src_addr_selected = 0; 4302 if (ro->ro_rt) { 4303 RTFREE(ro->ro_rt); 4304 ro->ro_rt = NULL; 4305 } 4306 } 4307 if (net->src_addr_selected == 0) { 4308 /* Cache the source address */ 4309 net->ro._s_addr = sctp_source_address_selection(inp,stcb, 4310 ro, net, 0, 4311 vrf_id); 4312 net->src_addr_selected = 1; 4313 } 4314 if (net->ro._s_addr == NULL) { 4315 /* No route to host */ 4316 net->src_addr_selected = 0; 4317 sctp_handle_no_route(stcb, net, so_locked); 4318 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 4319 sctp_m_freem(m); 4320 return (EHOSTUNREACH); 4321 } 4322 ip->ip_src = net->ro._s_addr->address.sin.sin_addr; 4323 } else { 4324 if (over_addr == NULL) { 4325 struct sctp_ifa *_lsrc; 4326 4327 _lsrc = sctp_source_address_selection(inp, stcb, ro, 4328 net, 
4329 out_of_asoc_ok, 4330 vrf_id); 4331 if (_lsrc == NULL) { 4332 sctp_handle_no_route(stcb, net, so_locked); 4333 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 4334 sctp_m_freem(m); 4335 return (EHOSTUNREACH); 4336 } 4337 ip->ip_src = _lsrc->address.sin.sin_addr; 4338 sctp_free_ifa(_lsrc); 4339 } else { 4340 ip->ip_src = over_addr->sin.sin_addr; 4341 SCTP_RTALLOC(ro, vrf_id); 4342 } 4343 } 4344 if (port) { 4345 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { 4346 sctp_handle_no_route(stcb, net, so_locked); 4347 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 4348 sctp_m_freem(m); 4349 return (EHOSTUNREACH); 4350 } 4351 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 4352 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); 4353 udp->uh_dport = port; 4354 udp->uh_ulen = htons(packet_length - sizeof(struct ip)); 4355 #if !defined(__Windows__) && !defined(__Userspace__) 4356 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) 4357 if (V_udp_cksum) { 4358 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); 4359 } else { 4360 udp->uh_sum = 0; 4361 } 4362 #else 4363 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); 4364 #endif 4365 #else 4366 udp->uh_sum = 0; 4367 #endif 4368 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr)); 4369 } else { 4370 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip)); 4371 } 4372 4373 sctphdr->src_port = src_port; 4374 sctphdr->dest_port = dest_port; 4375 sctphdr->v_tag = v_tag; 4376 sctphdr->checksum = 0; 4377 4378 /* 4379 * If source address selection fails and we find no route 4380 * then the ip_output should fail as well with a 4381 * NO_ROUTE_TO_HOST type error. 
We probably should catch 4382 * that somewhere and abort the association right away 4383 * (assuming this is an INIT being sent). 4384 */ 4385 if (ro->ro_rt == NULL) { 4386 /* 4387 * src addr selection failed to find a route (or 4388 * valid source addr), so we can't get there from 4389 * here (yet)! 4390 */ 4391 sctp_handle_no_route(stcb, net, so_locked); 4392 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 4393 sctp_m_freem(m); 4394 return (EHOSTUNREACH); 4395 } 4396 if (ro != &iproute) { 4397 memcpy(&iproute, ro, sizeof(*ro)); 4398 } 4399 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n", 4400 (uint32_t) (ntohl(ip->ip_src.s_addr))); 4401 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n", 4402 (uint32_t)(ntohl(ip->ip_dst.s_addr))); 4403 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n", 4404 (void *)ro->ro_rt); 4405 4406 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 4407 /* failed to prepend data, give up */ 4408 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 4409 sctp_m_freem(m); 4410 return (ENOMEM); 4411 } 4412 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 4413 if (port) { 4414 #if defined(SCTP_WITH_NO_CSUM) 4415 SCTP_STAT_INCR(sctps_sendnocrc); 4416 #else 4417 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr)); 4418 SCTP_STAT_INCR(sctps_sendswcrc); 4419 #endif 4420 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) 4421 if (V_udp_cksum) { 4422 SCTP_ENABLE_UDP_CSUM(o_pak); 4423 } 4424 #else 4425 SCTP_ENABLE_UDP_CSUM(o_pak); 4426 #endif 4427 } else { 4428 #if defined(SCTP_WITH_NO_CSUM) 4429 SCTP_STAT_INCR(sctps_sendnocrc); 4430 #else 4431 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000 4432 m->m_pkthdr.csum_flags = CSUM_SCTP; 4433 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); 4434 SCTP_STAT_INCR(sctps_sendhwcrc); 4435 #else 4436 if 
(!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && 4437 (stcb) && (stcb->asoc.scope.loopback_scope))) { 4438 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip)); 4439 SCTP_STAT_INCR(sctps_sendswcrc); 4440 } else { 4441 SCTP_STAT_INCR(sctps_sendnocrc); 4442 } 4443 #endif 4444 #endif 4445 } 4446 #ifdef SCTP_PACKET_LOGGING 4447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) 4448 sctp_packet_log(o_pak); 4449 #endif 4450 /* send it out. table id is taken from stcb */ 4451 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4452 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { 4453 so = SCTP_INP_SO(inp); 4454 SCTP_SOCKET_UNLOCK(so, 0); 4455 } 4456 #endif 4457 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id); 4458 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4459 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { 4460 atomic_add_int(&stcb->asoc.refcnt, 1); 4461 SCTP_TCB_UNLOCK(stcb); 4462 SCTP_SOCKET_LOCK(so, 0); 4463 SCTP_TCB_LOCK(stcb); 4464 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4465 } 4466 #endif 4467 SCTP_STAT_INCR(sctps_sendpackets); 4468 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 4469 if (ret) 4470 SCTP_STAT_INCR(sctps_senderrors); 4471 4472 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret); 4473 if (net == NULL) { 4474 /* free tempy routes */ 4475 #if defined(__FreeBSD__) && __FreeBSD_version > 901000 4476 RO_RTFREE(ro); 4477 #else 4478 if (ro->ro_rt) { 4479 RTFREE(ro->ro_rt); 4480 ro->ro_rt = NULL; 4481 } 4482 #endif 4483 } else { 4484 /* PMTU check versus smallest asoc MTU goes here */ 4485 if ((ro->ro_rt != NULL) && 4486 (net->ro._s_addr)) { 4487 uint32_t mtu; 4488 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); 4489 if (net->port) { 4490 mtu -= sizeof(struct udphdr); 4491 } 4492 if (mtu && (stcb->asoc.smallest_mtu > mtu)) { 4493 sctp_mtu_size_reset(inp, &stcb->asoc, mtu); 4494 net->mtu = mtu; 4495 } 4496 } else if (ro->ro_rt == NULL) { 4497 /* 
route was freed */ 4498 if (net->ro._s_addr && 4499 net->src_addr_selected) { 4500 sctp_free_ifa(net->ro._s_addr); 4501 net->ro._s_addr = NULL; 4502 } 4503 net->src_addr_selected = 0; 4504 } 4505 } 4506 return (ret); 4507 } 4508 #endif 4509 #ifdef INET6 4510 case AF_INET6: 4511 { 4512 uint32_t flowlabel, flowinfo; 4513 struct ip6_hdr *ip6h; 4514 struct route_in6 ip6route; 4515 #if !(defined(__Panda__) || defined(__Userspace__)) 4516 struct ifnet *ifp; 4517 #endif 4518 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp; 4519 int prev_scope = 0; 4520 #ifdef SCTP_EMBEDDED_V6_SCOPE 4521 struct sockaddr_in6 lsa6_storage; 4522 int error; 4523 #endif 4524 u_short prev_port = 0; 4525 int len; 4526 4527 if (net) { 4528 flowlabel = net->flowlabel; 4529 } else if (stcb) { 4530 flowlabel = stcb->asoc.default_flowlabel; 4531 } else { 4532 flowlabel = inp->sctp_ep.default_flowlabel; 4533 } 4534 if (flowlabel == 0) { 4535 /* 4536 * This means especially, that it is not set at the 4537 * SCTP layer. So use the value from the IP layer. 
4538 */ 4539 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) 4540 flowlabel = ntohl(inp->ip_inp.inp.inp_flow); 4541 #else 4542 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo); 4543 #endif 4544 } 4545 flowlabel &= 0x000fffff; 4546 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr); 4547 if (port) { 4548 len += sizeof(struct udphdr); 4549 } 4550 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); 4551 if (newm == NULL) { 4552 sctp_m_freem(m); 4553 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 4554 return (ENOMEM); 4555 } 4556 SCTP_ALIGN_TO_END(newm, len); 4557 SCTP_BUF_LEN(newm) = len; 4558 SCTP_BUF_NEXT(newm) = m; 4559 m = newm; 4560 #if defined(__FreeBSD__) 4561 if (net != NULL) { 4562 #ifdef INVARIANTS 4563 if (net->flowidset == 0) { 4564 panic("Flow ID not set"); 4565 } 4566 #endif 4567 m->m_pkthdr.flowid = net->flowid; 4568 m->m_flags |= M_FLOWID; 4569 } else { 4570 if (use_mflowid != 0) { 4571 m->m_pkthdr.flowid = mflowid; 4572 m->m_flags |= M_FLOWID; 4573 } 4574 } 4575 #endif 4576 packet_length = sctp_calculate_len(m); 4577 4578 ip6h = mtod(m, struct ip6_hdr *); 4579 /* protect *sin6 from overwrite */ 4580 sin6 = (struct sockaddr_in6 *)to; 4581 tmp = *sin6; 4582 sin6 = &tmp; 4583 4584 #ifdef SCTP_EMBEDDED_V6_SCOPE 4585 /* KAME hack: embed scopeid */ 4586 #if defined(__APPLE__) 4587 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 4588 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) 4589 #else 4590 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0) 4591 #endif 4592 #elif defined(SCTP_KAME) 4593 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) 4594 #else 4595 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) 4596 #endif 4597 { 4598 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 4599 return (EINVAL); 4600 } 4601 #endif /* SCTP_EMBEDDED_V6_SCOPE */ 4602 if 
(net == NULL) { 4603 memset(&ip6route, 0, sizeof(ip6route)); 4604 ro = (sctp_route_t *)&ip6route; 4605 #ifdef HAVE_SIN6_LEN 4606 memcpy(&ro->ro_dst, sin6, sin6->sin6_len); 4607 #else 4608 memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6)); 4609 #endif 4610 } else { 4611 ro = (sctp_route_t *)&net->ro; 4612 } 4613 /* 4614 * We assume here that inp_flow is in host byte order within 4615 * the TCB! 4616 */ 4617 if (tos_value == 0) { 4618 /* 4619 * This means especially, that it is not set at the 4620 * SCTP layer. So use the value from the IP layer. 4621 */ 4622 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) 4623 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) 4624 tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff; 4625 #else 4626 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff; 4627 #endif 4628 #endif 4629 } 4630 tos_value &= 0xfc; 4631 if (ecn_ok) { 4632 tos_value |= sctp_get_ect(stcb); 4633 } 4634 flowinfo = 0x06; 4635 flowinfo <<= 8; 4636 flowinfo |= tos_value; 4637 flowinfo <<= 20; 4638 flowinfo |= flowlabel; 4639 ip6h->ip6_flow = htonl(flowinfo); 4640 if (port) { 4641 ip6h->ip6_nxt = IPPROTO_UDP; 4642 } else { 4643 ip6h->ip6_nxt = IPPROTO_SCTP; 4644 } 4645 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr)); 4646 ip6h->ip6_dst = sin6->sin6_addr; 4647 4648 /* 4649 * Add SRC address selection here: we can only reuse to a 4650 * limited degree the kame src-addr-sel, since we can try 4651 * their selection but it may not be bound. 
4652 */ 4653 bzero(&lsa6_tmp, sizeof(lsa6_tmp)); 4654 lsa6_tmp.sin6_family = AF_INET6; 4655 #ifdef HAVE_SIN6_LEN 4656 lsa6_tmp.sin6_len = sizeof(lsa6_tmp); 4657 #endif 4658 lsa6 = &lsa6_tmp; 4659 if (net && out_of_asoc_ok == 0) { 4660 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) { 4661 sctp_free_ifa(net->ro._s_addr); 4662 net->ro._s_addr = NULL; 4663 net->src_addr_selected = 0; 4664 if (ro->ro_rt) { 4665 RTFREE(ro->ro_rt); 4666 ro->ro_rt = NULL; 4667 } 4668 } 4669 if (net->src_addr_selected == 0) { 4670 #ifdef SCTP_EMBEDDED_V6_SCOPE 4671 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 4672 /* KAME hack: embed scopeid */ 4673 #if defined(__APPLE__) 4674 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 4675 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) 4676 #else 4677 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0) 4678 #endif 4679 #elif defined(SCTP_KAME) 4680 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) 4681 #else 4682 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) 4683 #endif 4684 { 4685 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 4686 return (EINVAL); 4687 } 4688 #endif /* SCTP_EMBEDDED_V6_SCOPE */ 4689 /* Cache the source address */ 4690 net->ro._s_addr = sctp_source_address_selection(inp, 4691 stcb, 4692 ro, 4693 net, 4694 0, 4695 vrf_id); 4696 #ifdef SCTP_EMBEDDED_V6_SCOPE 4697 #ifdef SCTP_KAME 4698 (void)sa6_recoverscope(sin6); 4699 #else 4700 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL); 4701 #endif /* SCTP_KAME */ 4702 #endif /* SCTP_EMBEDDED_V6_SCOPE */ 4703 net->src_addr_selected = 1; 4704 } 4705 if (net->ro._s_addr == NULL) { 4706 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n"); 4707 net->src_addr_selected = 0; 4708 sctp_handle_no_route(stcb, net, so_locked); 4709 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 4710 sctp_m_freem(m); 4711 return 
(EHOSTUNREACH); 4712 } 4713 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr; 4714 } else { 4715 #ifdef SCTP_EMBEDDED_V6_SCOPE 4716 sin6 = (struct sockaddr_in6 *)&ro->ro_dst; 4717 /* KAME hack: embed scopeid */ 4718 #if defined(__APPLE__) 4719 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 4720 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) 4721 #else 4722 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0) 4723 #endif 4724 #elif defined(SCTP_KAME) 4725 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) 4726 #else 4727 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) 4728 #endif 4729 { 4730 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 4731 return (EINVAL); 4732 } 4733 #endif /* SCTP_EMBEDDED_V6_SCOPE */ 4734 if (over_addr == NULL) { 4735 struct sctp_ifa *_lsrc; 4736 4737 _lsrc = sctp_source_address_selection(inp, stcb, ro, 4738 net, 4739 out_of_asoc_ok, 4740 vrf_id); 4741 if (_lsrc == NULL) { 4742 sctp_handle_no_route(stcb, net, so_locked); 4743 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 4744 sctp_m_freem(m); 4745 return (EHOSTUNREACH); 4746 } 4747 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr; 4748 sctp_free_ifa(_lsrc); 4749 } else { 4750 lsa6->sin6_addr = over_addr->sin6.sin6_addr; 4751 SCTP_RTALLOC(ro, vrf_id); 4752 } 4753 #ifdef SCTP_EMBEDDED_V6_SCOPE 4754 #ifdef SCTP_KAME 4755 (void)sa6_recoverscope(sin6); 4756 #else 4757 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL); 4758 #endif /* SCTP_KAME */ 4759 #endif /* SCTP_EMBEDDED_V6_SCOPE */ 4760 } 4761 lsa6->sin6_port = inp->sctp_lport; 4762 4763 if (ro->ro_rt == NULL) { 4764 /* 4765 * src addr selection failed to find a route (or 4766 * valid source addr), so we can't get there from 4767 * here! 
4768 */ 4769 sctp_handle_no_route(stcb, net, so_locked); 4770 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 4771 sctp_m_freem(m); 4772 return (EHOSTUNREACH); 4773 } 4774 #ifndef SCOPEDROUTING 4775 #ifdef SCTP_EMBEDDED_V6_SCOPE 4776 /* 4777 * XXX: sa6 may not have a valid sin6_scope_id in the 4778 * non-SCOPEDROUTING case. 4779 */ 4780 bzero(&lsa6_storage, sizeof(lsa6_storage)); 4781 lsa6_storage.sin6_family = AF_INET6; 4782 #ifdef HAVE_SIN6_LEN 4783 lsa6_storage.sin6_len = sizeof(lsa6_storage); 4784 #endif 4785 #ifdef SCTP_KAME 4786 lsa6_storage.sin6_addr = lsa6->sin6_addr; 4787 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) { 4788 #else 4789 if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr, 4790 NULL)) != 0) { 4791 #endif /* SCTP_KAME */ 4792 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error); 4793 sctp_m_freem(m); 4794 return (error); 4795 } 4796 /* XXX */ 4797 lsa6_storage.sin6_addr = lsa6->sin6_addr; 4798 lsa6_storage.sin6_port = inp->sctp_lport; 4799 lsa6 = &lsa6_storage; 4800 #endif /* SCTP_EMBEDDED_V6_SCOPE */ 4801 #endif /* SCOPEDROUTING */ 4802 ip6h->ip6_src = lsa6->sin6_addr; 4803 4804 if (port) { 4805 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { 4806 sctp_handle_no_route(stcb, net, so_locked); 4807 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 4808 sctp_m_freem(m); 4809 return (EHOSTUNREACH); 4810 } 4811 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr)); 4812 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); 4813 udp->uh_dport = port; 4814 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr)); 4815 udp->uh_sum = 0; 4816 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr)); 4817 } else { 4818 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr)); 4819 } 4820 4821 sctphdr->src_port = src_port; 4822 sctphdr->dest_port = dest_port; 4823 sctphdr->v_tag = v_tag; 4824 
sctphdr->checksum = 0; 4825 4826 /* 4827 * We set the hop limit now since there is a good chance 4828 * that our ro pointer is now filled 4829 */ 4830 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro); 4831 #if !(defined(__Panda__) || defined(__Userspace__)) 4832 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 4833 #endif 4834 4835 #ifdef SCTP_DEBUG 4836 /* Copy to be sure something bad is not happening */ 4837 sin6->sin6_addr = ip6h->ip6_dst; 4838 lsa6->sin6_addr = ip6h->ip6_src; 4839 #endif 4840 4841 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n"); 4842 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: "); 4843 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6); 4844 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: "); 4845 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6); 4846 if (net) { 4847 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 4848 /* preserve the port and scope for link local send */ 4849 prev_scope = sin6->sin6_scope_id; 4850 prev_port = sin6->sin6_port; 4851 } 4852 4853 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 4854 /* failed to prepend data, give up */ 4855 sctp_m_freem(m); 4856 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 4857 return (ENOMEM); 4858 } 4859 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 4860 if (port) { 4861 #if defined(SCTP_WITH_NO_CSUM) 4862 SCTP_STAT_INCR(sctps_sendnocrc); 4863 #else 4864 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr)); 4865 SCTP_STAT_INCR(sctps_sendswcrc); 4866 #endif 4867 #if defined(__Windows__) 4868 udp->uh_sum = 0; 4869 #elif !defined(__Userspace__) 4870 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) { 4871 udp->uh_sum = 0xffff; 4872 } 4873 #endif 4874 } else { 4875 #if defined(SCTP_WITH_NO_CSUM) 4876 SCTP_STAT_INCR(sctps_sendnocrc); 4877 #else 4878 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000 4879 #if __FreeBSD_version < 900000 4880 sctphdr->checksum = 
sctp_calculate_cksum(m, sizeof(struct ip6_hdr)); 4881 SCTP_STAT_INCR(sctps_sendswcrc); 4882 #else 4883 #if __FreeBSD_version > 901000 4884 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; 4885 #else 4886 m->m_pkthdr.csum_flags = CSUM_SCTP; 4887 #endif 4888 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); 4889 SCTP_STAT_INCR(sctps_sendhwcrc); 4890 #endif 4891 #else 4892 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && 4893 (stcb) && (stcb->asoc.scope.loopback_scope))) { 4894 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr)); 4895 SCTP_STAT_INCR(sctps_sendswcrc); 4896 } else { 4897 SCTP_STAT_INCR(sctps_sendnocrc); 4898 } 4899 #endif 4900 #endif 4901 } 4902 /* send it out. table id is taken from stcb */ 4903 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4904 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { 4905 so = SCTP_INP_SO(inp); 4906 SCTP_SOCKET_UNLOCK(so, 0); 4907 } 4908 #endif 4909 #ifdef SCTP_PACKET_LOGGING 4910 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) 4911 sctp_packet_log(o_pak); 4912 #endif 4913 #if !(defined(__Panda__) || defined(__Userspace__)) 4914 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id); 4915 #else 4916 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id); 4917 #endif 4918 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4919 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { 4920 atomic_add_int(&stcb->asoc.refcnt, 1); 4921 SCTP_TCB_UNLOCK(stcb); 4922 SCTP_SOCKET_LOCK(so, 0); 4923 SCTP_TCB_LOCK(stcb); 4924 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4925 } 4926 #endif 4927 if (net) { 4928 /* for link local this must be done */ 4929 sin6->sin6_scope_id = prev_scope; 4930 sin6->sin6_port = prev_port; 4931 } 4932 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret); 4933 SCTP_STAT_INCR(sctps_sendpackets); 4934 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 4935 if (ret) { 4936 
SCTP_STAT_INCR(sctps_senderrors); 4937 } 4938 if (net == NULL) { 4939 /* Now if we had a temp route free it */ 4940 #if defined(__FreeBSD__) && __FreeBSD_version > 901000 4941 RO_RTFREE(ro); 4942 #else 4943 if (ro->ro_rt) { 4944 RTFREE(ro->ro_rt); 4945 ro->ro_rt = NULL; 4946 } 4947 #endif 4948 } else { 4949 /* PMTU check versus smallest asoc MTU goes here */ 4950 if (ro->ro_rt == NULL) { 4951 /* Route was freed */ 4952 if (net->ro._s_addr && 4953 net->src_addr_selected) { 4954 sctp_free_ifa(net->ro._s_addr); 4955 net->ro._s_addr = NULL; 4956 } 4957 net->src_addr_selected = 0; 4958 } 4959 if ((ro->ro_rt != NULL) && 4960 (net->ro._s_addr)) { 4961 uint32_t mtu; 4962 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); 4963 if (mtu && 4964 (stcb->asoc.smallest_mtu > mtu)) { 4965 sctp_mtu_size_reset(inp, &stcb->asoc, mtu); 4966 net->mtu = mtu; 4967 if (net->port) { 4968 net->mtu -= sizeof(struct udphdr); 4969 } 4970 } 4971 } 4972 #if !defined(__Panda__) && !defined(__Userspace__) 4973 else if (ifp) { 4974 #if defined(__Windows__) 4975 #define ND_IFINFO(ifp) (ifp) 4976 #define linkmtu if_mtu 4977 #endif 4978 if (ND_IFINFO(ifp)->linkmtu && 4979 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) { 4980 sctp_mtu_size_reset(inp, 4981 &stcb->asoc, 4982 ND_IFINFO(ifp)->linkmtu); 4983 } 4984 } 4985 #endif 4986 } 4987 return (ret); 4988 } 4989 #endif 4990 #if defined(__Userspace__) 4991 case AF_CONN: 4992 { 4993 char *buffer; 4994 struct sockaddr_conn *sconn; 4995 int len; 4996 4997 sconn = (struct sockaddr_conn *)to; 4998 len = sizeof(struct sctphdr); 4999 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); 5000 if (newm == NULL) { 5001 sctp_m_freem(m); 5002 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 5003 return (ENOMEM); 5004 } 5005 SCTP_ALIGN_TO_END(newm, len); 5006 SCTP_BUF_LEN(newm) = len; 5007 SCTP_BUF_NEXT(newm) = m; 5008 m = newm; 5009 packet_length = sctp_calculate_len(m); 5010 sctphdr = mtod(m, struct 
sctphdr *);
		sctphdr->src_port = src_port;
		sctphdr->dest_port = dest_port;
		sctphdr->v_tag = v_tag;
		sctphdr->checksum = 0;
#if defined(SCTP_WITH_NO_CSUM)
		SCTP_STAT_INCR(sctps_sendnocrc);
#else
		/* AF_CONN has no IP header, so the CRC32c covers the packet from offset 0. */
		sctphdr->checksum = sctp_calculate_cksum(m, 0);
		SCTP_STAT_INCR(sctps_sendswcrc);
#endif
		if (tos_value == 0) {
			tos_value = inp->ip_inp.inp.inp_ip_tos;
		}
		tos_value &= 0xfc;
		if (ecn_ok) {
			tos_value |= sctp_get_ect(stcb);
		}
		/* Don't alloc/free for each packet */
		if ((buffer = malloc(packet_length)) != NULL) {
			m_copydata(m, 0, packet_length, buffer);
			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
			free(buffer);
		} else {
			ret = ENOMEM;
		}
		sctp_m_freem(m);
		return (ret);
	}
#endif
	default:
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
		        ((struct sockaddr *)to)->sa_family);
		sctp_m_freem(m);
		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
}


/*
 * Build and send an INIT chunk for the association described by stcb.
 *
 * The whole chunk is assembled in-place in a single mbuf cluster: first the
 * fixed INIT header (vtag, a_rwnd, stream counts, initial TSN), then the
 * optional parameters in order — adaptation layer indication, ECN, PR-SCTP,
 * NAT-friendly, supported chunk extensions, AUTH (RANDOM / HMAC-ALGO /
 * CHUNK-LIST), cookie preservative, supported address types — and finally
 * the local addresses appended by sctp_add_addresses_to_i_ia().
 *
 * chunk_len tracks the bytes written so far; padding_len tracks padding owed
 * by the most recently written parameter and is flushed (zero-filled) before
 * the next parameter is placed, so every parameter starts 4-byte aligned.
 *
 * The INIT always goes to the primary destination (or the first net if no
 * primary is set), which is marked confirmed.  The INIT retransmission timer
 * is started before the mbuf is allocated, so on allocation failure the
 * function simply returns and the timer drives the retry.
 *
 * so_locked tells the lower output path whether the caller already holds the
 * socket lock (asserted on __APPLE__ builds; unused elsewhere).
 */
void
sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	struct mbuf *m, *m_last;
	struct sctp_nets *net;
	struct sctp_init_chunk *init;
	struct sctp_supported_addr_param *sup_addr;
	struct sctp_adaptation_layer_indication *ali;
	struct sctp_supported_chunk_types_param *pr_supported;
	struct sctp_paramhdr *ph;
	int cnt_inits_to = 0;
	int ret;
	uint16_t num_ext, chunk_len, padding_len, parameter_len;

#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	/* INIT's always go to the primary (and usually ONLY address) */
	net = stcb->asoc.primary_destination;
	if (net == NULL) {
		net = TAILQ_FIRST(&stcb->asoc.nets);
		if (net == NULL) {
			/* TSNH */
			return;
		}
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		(void)sctp_set_primary_addr(stcb, NULL, net);
	} else {
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
#ifdef INET6
	if (net->ro._l_addr.sa.sa_family == AF_INET6) {
		/*
		 * special hook, if we are sending to link local it will not
		 * show up in our private address count.
		 */
		if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
			cnt_inits_to = 1;
	}
#endif
	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
		/* This case should not happen */
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
		return;
	}
	/* start the INIT timer */
	sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);

	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
	if (m == NULL) {
		/* No memory, INIT timer will re-attempt. */
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
		return;
	}
	chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
	padding_len = 0;
	/* Now lets put the chunk header in place */
	init = mtod(m, struct sctp_init_chunk *);
	/* now the chunk header */
	init->ch.chunk_type = SCTP_INITIATION;
	init->ch.chunk_flags = 0;
	/* fill in later from mbuf we build */
	init->ch.chunk_length = 0;
	/* place in my tag */
	init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
	/* set up some of the credits. */
	/* a_rwnd is the receive-socket buffer limit, floored at SCTP_MINIMAL_RWND. */
	init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
	                              SCTP_MINIMAL_RWND));
	init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
	init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
	init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);

	/* Adaptation layer indication parameter */
	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
		ali->ph.param_length = htons(parameter_len);
		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
		chunk_len += parameter_len;
	}

	/* ECN parameter */
	if (stcb->asoc.ecn_supported == 1) {
		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
		ph->param_type = htons(SCTP_ECN_CAPABLE);
		ph->param_length = htons(parameter_len);
		chunk_len += parameter_len;
	}

	/* PR-SCTP supported parameter */
	if (stcb->asoc.prsctp_supported == 1) {
		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
		ph->param_length = htons(parameter_len);
		chunk_len += parameter_len;
	}

	/* Add NAT friendly parameter. */
	if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
		ph->param_length = htons(parameter_len);
		chunk_len += parameter_len;
	}

	/* And now tell the peer which extensions we support */
	/*
	 * The one-byte chunk-type entries are written first; the parameter
	 * header is only filled in afterwards, and only if at least one
	 * extension was listed.
	 */
	num_ext = 0;
	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
	if (stcb->asoc.prsctp_supported == 1) {
		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
	}
	if (stcb->asoc.auth_supported == 1) {
		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
	}
	if (stcb->asoc.asconf_supported == 1) {
		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
	}
	if (stcb->asoc.reconfig_supported == 1) {
		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
	}
	if (stcb->asoc.nrsack_supported == 1) {
		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
	}
	if (stcb->asoc.pktdrop_supported == 1) {
		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
	}
	if (num_ext > 0) {
		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
		pr_supported->ph.param_length = htons(parameter_len);
		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
		chunk_len += parameter_len;
	}
	/* add authentication parameters */
	if (stcb->asoc.auth_supported) {
		/* attach RANDOM parameter, if available */
		if (stcb->asoc.authinfo.random != NULL) {
			struct sctp_auth_random *randp;

			/* flush padding owed by the previous parameter */
			if (padding_len > 0) {
				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
				chunk_len += padding_len;
				padding_len = 0;
			}
			randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
			parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
			/* random key already contains the header */
			memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
			chunk_len += parameter_len;
		}
		/* add HMAC_ALGO parameter */
		if (stcb->asoc.local_hmacs != NULL) {
			struct sctp_auth_hmac_algo *hmacs;

			if (padding_len > 0) {
				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
				chunk_len += padding_len;
				padding_len = 0;
			}
			hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
			parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
			                           stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
			hmacs->ph.param_length = htons(parameter_len);
			sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
			chunk_len += parameter_len;
		}
		/* add CHUNKS parameter */
		if (stcb->asoc.local_auth_chunks != NULL) {
			struct sctp_auth_chunk_list *chunks;

			if (padding_len > 0) {
				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
				chunk_len += padding_len;
				padding_len = 0;
			}
			chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
			parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
			                           sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
			chunks->ph.param_length = htons(parameter_len);
			sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
			chunk_len += parameter_len;
		}
	}

	/* now any cookie time extensions */
	if (stcb->asoc.cookie_preserve_req) {
		struct sctp_cookie_perserve_param *cookie_preserve;

		if (padding_len > 0) {
			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
			chunk_len += padding_len;
			padding_len = 0;
		}
		parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
		cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
		cookie_preserve->ph.param_length = htons(parameter_len);
		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
		/* the request is one-shot: clear it once it has been sent */
		stcb->asoc.cookie_preserve_req = 0;
		chunk_len += parameter_len;
	}

	if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
		uint8_t i;

		if (padding_len > 0) {
			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
			chunk_len += padding_len;
			padding_len = 0;
		}
		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
		if (stcb->asoc.scope.ipv4_addr_legal) {
			parameter_len += (uint16_t)sizeof(uint16_t);
		}
		if (stcb->asoc.scope.ipv6_addr_legal) {
			parameter_len += (uint16_t)sizeof(uint16_t);
		}
		sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
		sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
		sup_addr->ph.param_length = htons(parameter_len);
		i = 0;
		if (stcb->asoc.scope.ipv4_addr_legal) {
			sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
		}
		if (stcb->asoc.scope.ipv6_addr_legal) {
			sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
		}
		/*
		 * Pad to a 4-byte boundary: with one 2-byte address type listed
		 * this owes 2 bytes of padding, with two types it owes none.
		 */
		padding_len = 4 - 2 * i;
		chunk_len += parameter_len;
	}

	SCTP_BUF_LEN(m) = chunk_len;
	/* now the addresses */
	/* To optimize this we could put the scoping stuff
	 * into a structure and remove the individual uint8's from
	 * the assoc structure. Then we could just sifa in the
	 * address within the stcb. But for now this is a quick
	 * hack to get the address stuff teased apart.
	 */
	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
	                                    m, cnt_inits_to,
	                                    &padding_len, &chunk_len);

	/* chunk_len now final: patch it into the chunk header placed earlier */
	init->ch.chunk_length = htons(chunk_len);
	if (padding_len > 0) {
		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
			sctp_m_freem(m);
			return;
		}
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
	ret = sctp_lowlevel_chunk_output(inp, stcb, net,
	                                 (struct sockaddr *)&net->ro._l_addr,
	                                 m, 0, NULL, 0, 0, 0, 0,
	                                 inp->sctp_lport, stcb->rport, htonl(0),
	                                 net->port, NULL,
#if defined(__FreeBSD__)
	                                 0, 0,
#endif
	                                 so_locked);
	SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
}

struct mbuf *
sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
    int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
{
	/*
	 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
	 * being equal to the beginning of the params i.e. (iphlen +
	 * sizeof(struct sctp_init_msg) parse through the parameters to the
	 * end of the mbuf verifying that all parameters are known.
	 *
	 * For unknown parameters build and return a mbuf with
	 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
	 * processing this chunk stop, and set *abort_processing to 1.
	 *
	 * By having param_offset be pre-set to where parameters begin it is
	 * hoped that this routine may be reused in the future by new
	 * features.
5353 */ 5354 struct sctp_paramhdr *phdr, params; 5355 5356 struct mbuf *mat, *op_err; 5357 char tempbuf[SCTP_PARAM_BUFFER_SIZE]; 5358 int at, limit, pad_needed; 5359 uint16_t ptype, plen, padded_size; 5360 int err_at; 5361 5362 *abort_processing = 0; 5363 mat = in_initpkt; 5364 err_at = 0; 5365 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); 5366 at = param_offset; 5367 op_err = NULL; 5368 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n"); 5369 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 5370 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { 5371 ptype = ntohs(phdr->param_type); 5372 plen = ntohs(phdr->param_length); 5373 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) { 5374 /* wacked parameter */ 5375 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen); 5376 goto invalid_size; 5377 } 5378 limit -= SCTP_SIZE32(plen); 5379 /*- 5380 * All parameters for all chunks that we know/understand are 5381 * listed here. We process them other places and make 5382 * appropriate stop actions per the upper bits. However this 5383 * is the generic routine processor's can call to get back 5384 * an operr.. to either incorporate (init-ack) or send. 
5385 */ 5386 padded_size = SCTP_SIZE32(plen); 5387 switch (ptype) { 5388 /* Param's with variable size */ 5389 case SCTP_HEARTBEAT_INFO: 5390 case SCTP_STATE_COOKIE: 5391 case SCTP_UNRECOG_PARAM: 5392 case SCTP_ERROR_CAUSE_IND: 5393 /* ok skip fwd */ 5394 at += padded_size; 5395 break; 5396 /* Param's with variable size within a range */ 5397 case SCTP_CHUNK_LIST: 5398 case SCTP_SUPPORTED_CHUNK_EXT: 5399 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) { 5400 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen); 5401 goto invalid_size; 5402 } 5403 at += padded_size; 5404 break; 5405 case SCTP_SUPPORTED_ADDRTYPE: 5406 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) { 5407 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen); 5408 goto invalid_size; 5409 } 5410 at += padded_size; 5411 break; 5412 case SCTP_RANDOM: 5413 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) { 5414 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen); 5415 goto invalid_size; 5416 } 5417 at += padded_size; 5418 break; 5419 case SCTP_SET_PRIM_ADDR: 5420 case SCTP_DEL_IP_ADDRESS: 5421 case SCTP_ADD_IP_ADDRESS: 5422 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) && 5423 (padded_size != sizeof(struct sctp_asconf_addr_param))) { 5424 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen); 5425 goto invalid_size; 5426 } 5427 at += padded_size; 5428 break; 5429 /* Param's with a fixed size */ 5430 case SCTP_IPV4_ADDRESS: 5431 if (padded_size != sizeof(struct sctp_ipv4addr_param)) { 5432 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen); 5433 goto invalid_size; 5434 } 5435 at += padded_size; 5436 break; 5437 case SCTP_IPV6_ADDRESS: 5438 if (padded_size != sizeof(struct sctp_ipv6addr_param)) { 5439 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen); 5440 goto invalid_size; 5441 } 5442 
at += padded_size; 5443 break; 5444 case SCTP_COOKIE_PRESERVE: 5445 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) { 5446 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen); 5447 goto invalid_size; 5448 } 5449 at += padded_size; 5450 break; 5451 case SCTP_HAS_NAT_SUPPORT: 5452 *nat_friendly = 1; 5453 /* fall through */ 5454 case SCTP_PRSCTP_SUPPORTED: 5455 if (padded_size != sizeof(struct sctp_paramhdr)) { 5456 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen); 5457 goto invalid_size; 5458 } 5459 at += padded_size; 5460 break; 5461 case SCTP_ECN_CAPABLE: 5462 if (padded_size != sizeof(struct sctp_paramhdr)) { 5463 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen); 5464 goto invalid_size; 5465 } 5466 at += padded_size; 5467 break; 5468 case SCTP_ULP_ADAPTATION: 5469 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) { 5470 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen); 5471 goto invalid_size; 5472 } 5473 at += padded_size; 5474 break; 5475 case SCTP_SUCCESS_REPORT: 5476 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) { 5477 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen); 5478 goto invalid_size; 5479 } 5480 at += padded_size; 5481 break; 5482 case SCTP_HOSTNAME_ADDRESS: 5483 { 5484 /* We can NOT handle HOST NAME addresses!! */ 5485 int l_len; 5486 5487 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. 
abort processing\n"); 5488 *abort_processing = 1; 5489 if (op_err == NULL) { 5490 /* Ok need to try to get a mbuf */ 5491 #ifdef INET6 5492 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 5493 #else 5494 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 5495 #endif 5496 l_len += plen; 5497 l_len += sizeof(struct sctp_paramhdr); 5498 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); 5499 if (op_err) { 5500 SCTP_BUF_LEN(op_err) = 0; 5501 /* 5502 * pre-reserve space for ip and sctp 5503 * header and chunk hdr 5504 */ 5505 #ifdef INET6 5506 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 5507 #else 5508 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 5509 #endif 5510 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 5511 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 5512 } 5513 } 5514 if (op_err) { 5515 /* If we have space */ 5516 struct sctp_paramhdr s; 5517 5518 if (err_at % 4) { 5519 uint32_t cpthis = 0; 5520 5521 pad_needed = 4 - (err_at % 4); 5522 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 5523 err_at += pad_needed; 5524 } 5525 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); 5526 s.param_length = htons(sizeof(s) + plen); 5527 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 5528 err_at += sizeof(s); 5529 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen)); 5530 if (phdr == NULL) { 5531 sctp_m_freem(op_err); 5532 /* 5533 * we are out of memory but we still 5534 * need to have a look at what to do 5535 * (the system is in trouble 5536 * though). 5537 */ 5538 return (NULL); 5539 } 5540 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 5541 } 5542 return (op_err); 5543 break; 5544 } 5545 default: 5546 /* 5547 * we do not recognize the parameter figure out what 5548 * we do. 
5549 */ 5550 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype); 5551 if ((ptype & 0x4000) == 0x4000) { 5552 /* Report bit is set?? */ 5553 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n"); 5554 if (op_err == NULL) { 5555 int l_len; 5556 /* Ok need to try to get an mbuf */ 5557 #ifdef INET6 5558 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 5559 #else 5560 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 5561 #endif 5562 l_len += plen; 5563 l_len += sizeof(struct sctp_paramhdr); 5564 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); 5565 if (op_err) { 5566 SCTP_BUF_LEN(op_err) = 0; 5567 #ifdef INET6 5568 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 5569 #else 5570 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 5571 #endif 5572 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 5573 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 5574 } 5575 } 5576 if (op_err) { 5577 /* If we have space */ 5578 struct sctp_paramhdr s; 5579 5580 if (err_at % 4) { 5581 uint32_t cpthis = 0; 5582 5583 pad_needed = 4 - (err_at % 4); 5584 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 5585 err_at += pad_needed; 5586 } 5587 s.param_type = htons(SCTP_UNRECOG_PARAM); 5588 s.param_length = htons(sizeof(s) + plen); 5589 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 5590 err_at += sizeof(s); 5591 if (plen > sizeof(tempbuf)) { 5592 plen = sizeof(tempbuf); 5593 } 5594 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen)); 5595 if (phdr == NULL) { 5596 sctp_m_freem(op_err); 5597 /* 5598 * we are out of memory but 5599 * we still need to have a 5600 * look at what to do (the 5601 * system is in trouble 5602 * though). 
5603 */ 5604 op_err = NULL; 5605 goto more_processing; 5606 } 5607 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 5608 err_at += plen; 5609 } 5610 } 5611 more_processing: 5612 if ((ptype & 0x8000) == 0x0000) { 5613 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n"); 5614 return (op_err); 5615 } else { 5616 /* skip this chunk and continue processing */ 5617 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n"); 5618 at += SCTP_SIZE32(plen); 5619 } 5620 break; 5621 5622 } 5623 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 5624 } 5625 return (op_err); 5626 invalid_size: 5627 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n"); 5628 *abort_processing = 1; 5629 if ((op_err == NULL) && phdr) { 5630 int l_len; 5631 #ifdef INET6 5632 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 5633 #else 5634 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 5635 #endif 5636 l_len += (2 * sizeof(struct sctp_paramhdr)); 5637 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); 5638 if (op_err) { 5639 SCTP_BUF_LEN(op_err) = 0; 5640 #ifdef INET6 5641 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 5642 #else 5643 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 5644 #endif 5645 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 5646 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 5647 } 5648 } 5649 if ((op_err) && phdr) { 5650 struct sctp_paramhdr s; 5651 5652 if (err_at % 4) { 5653 uint32_t cpthis = 0; 5654 5655 pad_needed = 4 - (err_at % 4); 5656 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 5657 err_at += pad_needed; 5658 } 5659 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 5660 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr)); 5661 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 5662 err_at += sizeof(s); 5663 /* Only copy back the p-hdr that caused the issue */ 5664 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr); 5665 } 5666 return (op_err); 
5667 } 5668 5669 static int 5670 sctp_are_there_new_addresses(struct sctp_association *asoc, 5671 struct mbuf *in_initpkt, int offset, struct sockaddr *src) 5672 { 5673 /* 5674 * Given a INIT packet, look through the packet to verify that there 5675 * are NO new addresses. As we go through the parameters add reports 5676 * of any un-understood parameters that require an error. Also we 5677 * must return (1) to drop the packet if we see a un-understood 5678 * parameter that tells us to drop the chunk. 5679 */ 5680 struct sockaddr *sa_touse; 5681 struct sockaddr *sa; 5682 struct sctp_paramhdr *phdr, params; 5683 uint16_t ptype, plen; 5684 uint8_t fnd; 5685 struct sctp_nets *net; 5686 #ifdef INET 5687 struct sockaddr_in sin4, *sa4; 5688 #endif 5689 #ifdef INET6 5690 struct sockaddr_in6 sin6, *sa6; 5691 #endif 5692 5693 #ifdef INET 5694 memset(&sin4, 0, sizeof(sin4)); 5695 sin4.sin_family = AF_INET; 5696 #ifdef HAVE_SIN_LEN 5697 sin4.sin_len = sizeof(sin4); 5698 #endif 5699 #endif 5700 #ifdef INET6 5701 memset(&sin6, 0, sizeof(sin6)); 5702 sin6.sin6_family = AF_INET6; 5703 #ifdef HAVE_SIN6_LEN 5704 sin6.sin6_len = sizeof(sin6); 5705 #endif 5706 #endif 5707 /* First what about the src address of the pkt ? */ 5708 fnd = 0; 5709 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5710 sa = (struct sockaddr *)&net->ro._l_addr; 5711 if (sa->sa_family == src->sa_family) { 5712 #ifdef INET 5713 if (sa->sa_family == AF_INET) { 5714 struct sockaddr_in *src4; 5715 5716 sa4 = (struct sockaddr_in *)sa; 5717 src4 = (struct sockaddr_in *)src; 5718 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) { 5719 fnd = 1; 5720 break; 5721 } 5722 } 5723 #endif 5724 #ifdef INET6 5725 if (sa->sa_family == AF_INET6) { 5726 struct sockaddr_in6 *src6; 5727 5728 sa6 = (struct sockaddr_in6 *)sa; 5729 src6 = (struct sockaddr_in6 *)src; 5730 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) { 5731 fnd = 1; 5732 break; 5733 } 5734 } 5735 #endif 5736 } 5737 } 5738 if (fnd == 0) { 5739 /* New address added! 
no need to look further. */
		return (1);
	}
	/* Ok so far lets munge through the rest of the packet */
	offset += sizeof(struct sctp_init_chunk);
	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
	/*
	 * Walk every parameter of the INIT; for each address parameter,
	 * verify the address already exists on the association's net list.
	 */
	while (phdr) {
		sa_touse = NULL;
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		switch (ptype) {
#ifdef INET
		case SCTP_IPV4_ADDRESS:
		{
			struct sctp_ipv4addr_param *p4, p4_buf;

			/* Pull the full IPv4 address parameter into p4_buf. */
			phdr = sctp_get_next_param(in_initpkt, offset,
			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
			if (plen != sizeof(struct sctp_ipv4addr_param) ||
			    phdr == NULL) {
				/* malformed parameter: treat as "new address" -> drop */
				return (1);
			}
			p4 = (struct sctp_ipv4addr_param *)phdr;
			sin4.sin_addr.s_addr = p4->addr;
			sa_touse = (struct sockaddr *)&sin4;
			break;
		}
#endif
#ifdef INET6
		case SCTP_IPV6_ADDRESS:
		{
			struct sctp_ipv6addr_param *p6, p6_buf;

			/* Pull the full IPv6 address parameter into p6_buf. */
			phdr = sctp_get_next_param(in_initpkt, offset,
			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
			if (plen != sizeof(struct sctp_ipv6addr_param) ||
			    phdr == NULL) {
				/* malformed parameter: treat as "new address" -> drop */
				return (1);
			}
			p6 = (struct sctp_ipv6addr_param *)phdr;
			memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
			    sizeof(p6->addr));
			sa_touse = (struct sockaddr *)&sin6;
			break;
		}
#endif
		default:
			/* not an address parameter; nothing to check */
			sa_touse = NULL;
			break;
		}
		if (sa_touse) {
			/* ok, sa_touse points to one to check */
			fnd = 0;
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				sa = (struct sockaddr *)&net->ro._l_addr;
				if (sa->sa_family != sa_touse->sa_family) {
					continue;
				}
#ifdef INET
				if (sa->sa_family == AF_INET) {
					sa4 = (struct sockaddr_in *)sa;
					if (sa4->sin_addr.s_addr ==
					    sin4.sin_addr.s_addr) {
						fnd = 1;
						break;
					}
				}
#endif
#ifdef INET6
				if (sa->sa_family == AF_INET6) {
					sa6 = (struct sockaddr_in6 *)sa;
					if (SCTP6_ARE_ADDR_EQUAL(
					    sa6, &sin6)) {
						fnd = 1;
						break;
					}
				}
#endif
			}
			if (!fnd) {
				/* New addr added! no need to look further */
				return (1);
			}
		}
		offset += SCTP_SIZE32(plen);
		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
	}
	/* every address in the INIT was already known */
	return (0);
}

/*
 * Given a MBUF chain that was sent into us containing an INIT. Build a
 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done
 * a pullup to include IPv6/4header, SCTP header and initial part of INIT
 * message (i.e. the struct sctp_init_msg).
 */
void
sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *init_pkt, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_init_chunk *init_chk,
#if defined(__FreeBSD__)
    uint8_t use_mflowid, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port, int hold_inp_lock)
{
	struct sctp_association *asoc;
	struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
	struct sctp_init_ack_chunk *initack;
	struct sctp_adaptation_layer_indication *ali;
	struct sctp_supported_chunk_types_param *pr_supported;
	struct sctp_paramhdr *ph;
	union sctp_sockstore *over_addr;
	struct sctp_scoping scp;
#ifdef INET
	struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
	struct sockaddr_in *src4 = (struct sockaddr_in *)src;
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
	struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
	struct sockaddr_in6 *sin6;
#endif
#if defined(__Userspace__)
	struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
	struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
	struct sockaddr_conn *sconn;
#endif
	struct sockaddr *to;
	struct sctp_state_cookie stc;
	struct sctp_nets
*net = NULL; 5871 uint8_t *signature = NULL; 5872 int cnt_inits_to = 0; 5873 uint16_t his_limit, i_want; 5874 int abort_flag; 5875 int nat_friendly = 0; 5876 struct socket *so; 5877 uint16_t num_ext, chunk_len, padding_len, parameter_len; 5878 5879 if (stcb) { 5880 asoc = &stcb->asoc; 5881 } else { 5882 asoc = NULL; 5883 } 5884 if ((asoc != NULL) && 5885 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && 5886 (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) { 5887 /* new addresses, out of here in non-cookie-wait states */ 5888 /* 5889 * Send a ABORT, we don't add the new address error clause 5890 * though we even set the T bit and copy in the 0 tag.. this 5891 * looks no different than if no listener was present. 5892 */ 5893 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5894 "Address added"); 5895 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, 5896 #if defined(__FreeBSD__) 5897 use_mflowid, mflowid, 5898 #endif 5899 vrf_id, port); 5900 return; 5901 } 5902 abort_flag = 0; 5903 op_err = sctp_arethere_unrecognized_parameters(init_pkt, 5904 (offset + sizeof(struct sctp_init_chunk)), 5905 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly); 5906 if (abort_flag) { 5907 do_a_abort: 5908 if (op_err == NULL) { 5909 char msg[SCTP_DIAG_INFO_LEN]; 5910 5911 snprintf(msg, sizeof(msg), "%s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__); 5912 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5913 msg); 5914 } 5915 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 5916 init_chk->init.initiate_tag, op_err, 5917 #if defined(__FreeBSD__) 5918 use_mflowid, mflowid, 5919 #endif 5920 vrf_id, port); 5921 return; 5922 } 5923 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 5924 if (m == NULL) { 5925 /* No memory, INIT timer will re-attempt. 
*/ 5926 if (op_err) 5927 sctp_m_freem(op_err); 5928 return; 5929 } 5930 chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk); 5931 padding_len = 0; 5932 5933 /* 5934 * We might not overwrite the identification[] completely and on 5935 * some platforms time_entered will contain some padding. 5936 * Therefore zero out the cookie to avoid putting 5937 * uninitialized memory on the wire. 5938 */ 5939 memset(&stc, 0, sizeof(struct sctp_state_cookie)); 5940 5941 /* the time I built cookie */ 5942 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered); 5943 5944 /* populate any tie tags */ 5945 if (asoc != NULL) { 5946 /* unlock before tag selections */ 5947 stc.tie_tag_my_vtag = asoc->my_vtag_nonce; 5948 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; 5949 stc.cookie_life = asoc->cookie_life; 5950 net = asoc->primary_destination; 5951 } else { 5952 stc.tie_tag_my_vtag = 0; 5953 stc.tie_tag_peer_vtag = 0; 5954 /* life I will award this cookie */ 5955 stc.cookie_life = inp->sctp_ep.def_cookie_life; 5956 } 5957 5958 /* copy in the ports for later check */ 5959 stc.myport = sh->dest_port; 5960 stc.peerport = sh->src_port; 5961 5962 /* 5963 * If we wanted to honor cookie life extentions, we would add to 5964 * stc.cookie_life. 
For now we should NOT honor any extension 5965 */ 5966 stc.site_scope = stc.local_scope = stc.loopback_scope = 0; 5967 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 5968 stc.ipv6_addr_legal = 1; 5969 if (SCTP_IPV6_V6ONLY(inp)) { 5970 stc.ipv4_addr_legal = 0; 5971 } else { 5972 stc.ipv4_addr_legal = 1; 5973 } 5974 #if defined(__Userspace__) 5975 stc.conn_addr_legal = 0; 5976 #endif 5977 } else { 5978 stc.ipv6_addr_legal = 0; 5979 #if defined(__Userspace__) 5980 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) { 5981 stc.conn_addr_legal = 1; 5982 stc.ipv4_addr_legal = 0; 5983 } else { 5984 stc.conn_addr_legal = 0; 5985 stc.ipv4_addr_legal = 1; 5986 } 5987 #else 5988 stc.ipv4_addr_legal = 1; 5989 #endif 5990 } 5991 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE 5992 stc.ipv4_scope = 1; 5993 #else 5994 stc.ipv4_scope = 0; 5995 #endif 5996 if (net == NULL) { 5997 to = src; 5998 switch (dst->sa_family) { 5999 #ifdef INET 6000 case AF_INET: 6001 { 6002 /* lookup address */ 6003 stc.address[0] = src4->sin_addr.s_addr; 6004 stc.address[1] = 0; 6005 stc.address[2] = 0; 6006 stc.address[3] = 0; 6007 stc.addr_type = SCTP_IPV4_ADDRESS; 6008 /* local from address */ 6009 stc.laddress[0] = dst4->sin_addr.s_addr; 6010 stc.laddress[1] = 0; 6011 stc.laddress[2] = 0; 6012 stc.laddress[3] = 0; 6013 stc.laddr_type = SCTP_IPV4_ADDRESS; 6014 /* scope_id is only for v6 */ 6015 stc.scope_id = 0; 6016 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE 6017 if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) { 6018 stc.ipv4_scope = 1; 6019 } 6020 #else 6021 stc.ipv4_scope = 1; 6022 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ 6023 /* Must use the address in this case */ 6024 if (sctp_is_address_on_local_host(src, vrf_id)) { 6025 stc.loopback_scope = 1; 6026 stc.ipv4_scope = 1; 6027 stc.site_scope = 1; 6028 stc.local_scope = 0; 6029 } 6030 break; 6031 } 6032 #endif 6033 #ifdef INET6 6034 case AF_INET6: 6035 { 6036 stc.addr_type = SCTP_IPV6_ADDRESS; 6037 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr)); 6038 #if 
defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000)) 6039 stc.scope_id = in6_getscope(&src6->sin6_addr); 6040 #else 6041 stc.scope_id = 0; 6042 #endif 6043 if (sctp_is_address_on_local_host(src, vrf_id)) { 6044 stc.loopback_scope = 1; 6045 stc.local_scope = 0; 6046 stc.site_scope = 1; 6047 stc.ipv4_scope = 1; 6048 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) { 6049 /* 6050 * If the new destination is a LINK_LOCAL we 6051 * must have common both site and local 6052 * scope. Don't set local scope though since 6053 * we must depend on the source to be added 6054 * implicitly. We cannot assure just because 6055 * we share one link that all links are 6056 * common. 6057 */ 6058 #if defined(__APPLE__) 6059 /* Mac OS X currently doesn't have in6_getscope() */ 6060 stc.scope_id = src6->sin6_addr.s6_addr16[1]; 6061 #endif 6062 stc.local_scope = 0; 6063 stc.site_scope = 1; 6064 stc.ipv4_scope = 1; 6065 /* 6066 * we start counting for the private address 6067 * stuff at 1. since the link local we 6068 * source from won't show up in our scoped 6069 * count. 6070 */ 6071 cnt_inits_to = 1; 6072 /* pull out the scope_id from incoming pkt */ 6073 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) { 6074 /* 6075 * If the new destination is SITE_LOCAL then 6076 * we must have site scope in common. 
6077 */ 6078 stc.site_scope = 1; 6079 } 6080 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr)); 6081 stc.laddr_type = SCTP_IPV6_ADDRESS; 6082 break; 6083 } 6084 #endif 6085 #if defined(__Userspace__) 6086 case AF_CONN: 6087 { 6088 /* lookup address */ 6089 stc.address[0] = 0; 6090 stc.address[1] = 0; 6091 stc.address[2] = 0; 6092 stc.address[3] = 0; 6093 memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *)); 6094 stc.addr_type = SCTP_CONN_ADDRESS; 6095 /* local from address */ 6096 stc.laddress[0] = 0; 6097 stc.laddress[1] = 0; 6098 stc.laddress[2] = 0; 6099 stc.laddress[3] = 0; 6100 memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *)); 6101 stc.laddr_type = SCTP_CONN_ADDRESS; 6102 /* scope_id is only for v6 */ 6103 stc.scope_id = 0; 6104 break; 6105 } 6106 #endif 6107 default: 6108 /* TSNH */ 6109 goto do_a_abort; 6110 break; 6111 } 6112 } else { 6113 /* set the scope per the existing tcb */ 6114 6115 #ifdef INET6 6116 struct sctp_nets *lnet; 6117 #endif 6118 6119 stc.loopback_scope = asoc->scope.loopback_scope; 6120 stc.ipv4_scope = asoc->scope.ipv4_local_scope; 6121 stc.site_scope = asoc->scope.site_scope; 6122 stc.local_scope = asoc->scope.local_scope; 6123 #ifdef INET6 6124 /* Why do we not consider IPv4 LL addresses? */ 6125 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 6126 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { 6127 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { 6128 /* 6129 * if we have a LL address, start 6130 * counting at 1. 
6131 */ 6132 cnt_inits_to = 1; 6133 } 6134 } 6135 } 6136 #endif 6137 /* use the net pointer */ 6138 to = (struct sockaddr *)&net->ro._l_addr; 6139 switch (to->sa_family) { 6140 #ifdef INET 6141 case AF_INET: 6142 sin = (struct sockaddr_in *)to; 6143 stc.address[0] = sin->sin_addr.s_addr; 6144 stc.address[1] = 0; 6145 stc.address[2] = 0; 6146 stc.address[3] = 0; 6147 stc.addr_type = SCTP_IPV4_ADDRESS; 6148 if (net->src_addr_selected == 0) { 6149 /* 6150 * strange case here, the INIT should have 6151 * did the selection. 6152 */ 6153 net->ro._s_addr = sctp_source_address_selection(inp, 6154 stcb, (sctp_route_t *)&net->ro, 6155 net, 0, vrf_id); 6156 if (net->ro._s_addr == NULL) 6157 return; 6158 6159 net->src_addr_selected = 1; 6160 6161 } 6162 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; 6163 stc.laddress[1] = 0; 6164 stc.laddress[2] = 0; 6165 stc.laddress[3] = 0; 6166 stc.laddr_type = SCTP_IPV4_ADDRESS; 6167 /* scope_id is only for v6 */ 6168 stc.scope_id = 0; 6169 break; 6170 #endif 6171 #ifdef INET6 6172 case AF_INET6: 6173 sin6 = (struct sockaddr_in6 *)to; 6174 memcpy(&stc.address, &sin6->sin6_addr, 6175 sizeof(struct in6_addr)); 6176 stc.addr_type = SCTP_IPV6_ADDRESS; 6177 stc.scope_id = sin6->sin6_scope_id; 6178 if (net->src_addr_selected == 0) { 6179 /* 6180 * strange case here, the INIT should have 6181 * done the selection. 
6182 */ 6183 net->ro._s_addr = sctp_source_address_selection(inp, 6184 stcb, (sctp_route_t *)&net->ro, 6185 net, 0, vrf_id); 6186 if (net->ro._s_addr == NULL) 6187 return; 6188 6189 net->src_addr_selected = 1; 6190 } 6191 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr, 6192 sizeof(struct in6_addr)); 6193 stc.laddr_type = SCTP_IPV6_ADDRESS; 6194 break; 6195 #endif 6196 #if defined(__Userspace__) 6197 case AF_CONN: 6198 sconn = (struct sockaddr_conn *)to; 6199 stc.address[0] = 0; 6200 stc.address[1] = 0; 6201 stc.address[2] = 0; 6202 stc.address[3] = 0; 6203 memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *)); 6204 stc.addr_type = SCTP_CONN_ADDRESS; 6205 stc.laddress[0] = 0; 6206 stc.laddress[1] = 0; 6207 stc.laddress[2] = 0; 6208 stc.laddress[3] = 0; 6209 memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *)); 6210 stc.laddr_type = SCTP_CONN_ADDRESS; 6211 stc.scope_id = 0; 6212 break; 6213 #endif 6214 } 6215 } 6216 /* Now lets put the SCTP header in place */ 6217 initack = mtod(m, struct sctp_init_ack_chunk *); 6218 /* Save it off for quick ref */ 6219 stc.peers_vtag = init_chk->init.initiate_tag; 6220 /* who are we */ 6221 memcpy(stc.identification, SCTP_VERSION_STRING, 6222 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); 6223 memset(stc.reserved, 0, SCTP_RESERVE_SPACE); 6224 /* now the chunk header */ 6225 initack->ch.chunk_type = SCTP_INITIATION_ACK; 6226 initack->ch.chunk_flags = 0; 6227 /* fill in later from mbuf we build */ 6228 initack->ch.chunk_length = 0; 6229 /* place in my tag */ 6230 if ((asoc != NULL) && 6231 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 6232 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) || 6233 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) { 6234 /* re-use the v-tags and init-seq here */ 6235 initack->init.initiate_tag = htonl(asoc->my_vtag); 6236 initack->init.initial_tsn = htonl(asoc->init_seq_number); 6237 } else { 6238 uint32_t vtag, itsn; 6239 if (hold_inp_lock) { 6240 
SCTP_INP_INCR_REF(inp); 6241 SCTP_INP_RUNLOCK(inp); 6242 } 6243 if (asoc) { 6244 atomic_add_int(&asoc->refcnt, 1); 6245 SCTP_TCB_UNLOCK(stcb); 6246 new_tag: 6247 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); 6248 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) { 6249 /* Got a duplicate vtag on some guy behind a nat 6250 * make sure we don't use it. 6251 */ 6252 goto new_tag; 6253 } 6254 initack->init.initiate_tag = htonl(vtag); 6255 /* get a TSN to use too */ 6256 itsn = sctp_select_initial_TSN(&inp->sctp_ep); 6257 initack->init.initial_tsn = htonl(itsn); 6258 SCTP_TCB_LOCK(stcb); 6259 atomic_add_int(&asoc->refcnt, -1); 6260 } else { 6261 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); 6262 initack->init.initiate_tag = htonl(vtag); 6263 /* get a TSN to use too */ 6264 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); 6265 } 6266 if (hold_inp_lock) { 6267 SCTP_INP_RLOCK(inp); 6268 SCTP_INP_DECR_REF(inp); 6269 } 6270 } 6271 /* save away my tag to */ 6272 stc.my_vtag = initack->init.initiate_tag; 6273 6274 /* set up some of the credits. 
*/ 6275 so = inp->sctp_socket; 6276 if (so == NULL) { 6277 /* memory problem */ 6278 sctp_m_freem(m); 6279 return; 6280 } else { 6281 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND)); 6282 } 6283 /* set what I want */ 6284 his_limit = ntohs(init_chk->init.num_inbound_streams); 6285 /* choose what I want */ 6286 if (asoc != NULL) { 6287 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) { 6288 i_want = asoc->streamoutcnt; 6289 } else { 6290 i_want = inp->sctp_ep.pre_open_stream_count; 6291 } 6292 } else { 6293 i_want = inp->sctp_ep.pre_open_stream_count; 6294 } 6295 if (his_limit < i_want) { 6296 /* I Want more :< */ 6297 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams; 6298 } else { 6299 /* I can have what I want :> */ 6300 initack->init.num_outbound_streams = htons(i_want); 6301 } 6302 /* tell him his limit. */ 6303 initack->init.num_inbound_streams = 6304 htons(inp->sctp_ep.max_open_streams_intome); 6305 6306 /* adaptation layer indication parameter */ 6307 if (inp->sctp_ep.adaptation_layer_indicator_provided) { 6308 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication); 6309 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len); 6310 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 6311 ali->ph.param_length = htons(parameter_len); 6312 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator); 6313 chunk_len += parameter_len; 6314 } 6315 6316 /* ECN parameter */ 6317 if (((asoc != NULL) && (asoc->ecn_supported == 1)) || 6318 ((asoc == NULL) && (inp->ecn_supported == 1))) { 6319 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 6320 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 6321 ph->param_type = htons(SCTP_ECN_CAPABLE); 6322 ph->param_length = htons(parameter_len); 6323 chunk_len += parameter_len; 6324 } 6325 6326 /* PR-SCTP supported parameter */ 6327 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) || 6328 ((asoc == 
NULL) && (inp->prsctp_supported == 1))) { 6329 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 6330 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 6331 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED); 6332 ph->param_length = htons(parameter_len); 6333 chunk_len += parameter_len; 6334 } 6335 6336 /* Add NAT friendly parameter */ 6337 if (nat_friendly) { 6338 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 6339 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 6340 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); 6341 ph->param_length = htons(parameter_len); 6342 chunk_len += parameter_len; 6343 } 6344 6345 /* And now tell the peer which extensions we support */ 6346 num_ext = 0; 6347 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len); 6348 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) || 6349 ((asoc == NULL) && (inp->prsctp_supported == 1))) { 6350 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 6351 } 6352 if (((asoc != NULL) && (asoc->auth_supported == 1)) || 6353 ((asoc == NULL) && (inp->auth_supported == 1))) { 6354 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 6355 } 6356 if (((asoc != NULL) && (asoc->asconf_supported == 1)) || 6357 ((asoc == NULL) && (inp->asconf_supported == 1))) { 6358 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 6359 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 6360 } 6361 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) || 6362 ((asoc == NULL) && (inp->reconfig_supported == 1))) { 6363 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 6364 } 6365 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) || 6366 ((asoc == NULL) && (inp->nrsack_supported == 1))) { 6367 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; 6368 } 6369 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) || 6370 ((asoc == NULL) && (inp->pktdrop_supported == 1))) { 6371 pr_supported->chunk_types[num_ext++] = 
SCTP_PACKET_DROPPED; 6372 } 6373 if (num_ext > 0) { 6374 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext; 6375 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 6376 pr_supported->ph.param_length = htons(parameter_len); 6377 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6378 chunk_len += parameter_len; 6379 } 6380 6381 /* add authentication parameters */ 6382 if (((asoc != NULL) && (asoc->auth_supported == 1)) || 6383 ((asoc == NULL) && (inp->auth_supported == 1))) { 6384 struct sctp_auth_random *randp; 6385 struct sctp_auth_hmac_algo *hmacs; 6386 struct sctp_auth_chunk_list *chunks; 6387 6388 if (padding_len > 0) { 6389 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 6390 chunk_len += padding_len; 6391 padding_len = 0; 6392 } 6393 /* generate and add RANDOM parameter */ 6394 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len); 6395 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + 6396 SCTP_AUTH_RANDOM_SIZE_DEFAULT; 6397 randp->ph.param_type = htons(SCTP_RANDOM); 6398 randp->ph.param_length = htons(parameter_len); 6399 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT); 6400 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6401 chunk_len += parameter_len; 6402 6403 if (padding_len > 0) { 6404 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 6405 chunk_len += padding_len; 6406 padding_len = 0; 6407 } 6408 /* add HMAC_ALGO parameter */ 6409 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len); 6410 parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) + 6411 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs, 6412 (uint8_t *)hmacs->hmac_ids); 6413 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 6414 hmacs->ph.param_length = htons(parameter_len); 6415 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6416 chunk_len += parameter_len; 6417 6418 if (padding_len > 0) { 6419 memset(mtod(m, caddr_t) + chunk_len, 0, 
padding_len); 6420 chunk_len += padding_len; 6421 padding_len = 0; 6422 } 6423 /* add CHUNKS parameter */ 6424 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len); 6425 parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) + 6426 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks, 6427 chunks->chunk_types); 6428 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 6429 chunks->ph.param_length = htons(parameter_len); 6430 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6431 chunk_len += parameter_len; 6432 } 6433 SCTP_BUF_LEN(m) = chunk_len; 6434 m_last = m; 6435 /* now the addresses */ 6436 /* To optimize this we could put the scoping stuff 6437 * into a structure and remove the individual uint8's from 6438 * the stc structure. Then we could just sifa in the 6439 * address within the stc.. but for now this is a quick 6440 * hack to get the address stuff teased apart. 6441 */ 6442 scp.ipv4_addr_legal = stc.ipv4_addr_legal; 6443 scp.ipv6_addr_legal = stc.ipv6_addr_legal; 6444 #if defined(__Userspace__) 6445 scp.conn_addr_legal = stc.conn_addr_legal; 6446 #endif 6447 scp.loopback_scope = stc.loopback_scope; 6448 scp.ipv4_local_scope = stc.ipv4_scope; 6449 scp.local_scope = stc.local_scope; 6450 scp.site_scope = stc.site_scope; 6451 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last, 6452 cnt_inits_to, 6453 &padding_len, &chunk_len); 6454 /* padding_len can only be positive, if no addresses have been added */ 6455 if (padding_len > 0) { 6456 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 6457 chunk_len += padding_len; 6458 SCTP_BUF_LEN(m) += padding_len; 6459 padding_len = 0; 6460 } 6461 6462 /* tack on the operational error if present */ 6463 if (op_err) { 6464 parameter_len = 0; 6465 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 6466 parameter_len += SCTP_BUF_LEN(m_tmp); 6467 } 6468 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6469 SCTP_BUF_NEXT(m_last) = op_err; 6470 while 
(SCTP_BUF_NEXT(m_last) != NULL) { 6471 m_last = SCTP_BUF_NEXT(m_last); 6472 } 6473 chunk_len += parameter_len; 6474 } 6475 if (padding_len > 0) { 6476 m_last = sctp_add_pad_tombuf(m_last, padding_len); 6477 if (m_last == NULL) { 6478 /* Houston we have a problem, no space */ 6479 sctp_m_freem(m); 6480 return; 6481 } 6482 chunk_len += padding_len; 6483 padding_len = 0; 6484 } 6485 /* Now we must build a cookie */ 6486 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature); 6487 if (m_cookie == NULL) { 6488 /* memory problem */ 6489 sctp_m_freem(m); 6490 return; 6491 } 6492 /* Now append the cookie to the end and update the space/size */ 6493 SCTP_BUF_NEXT(m_last) = m_cookie; 6494 parameter_len = 0; 6495 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 6496 parameter_len += SCTP_BUF_LEN(m_tmp); 6497 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 6498 m_last = m_tmp; 6499 } 6500 } 6501 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6502 chunk_len += parameter_len; 6503 6504 /* Place in the size, but we don't include 6505 * the last pad (if any) in the INIT-ACK. 6506 */ 6507 initack->ch.chunk_length = htons(chunk_len); 6508 6509 /* Time to sign the cookie, we don't sign over the cookie 6510 * signature though thus we set trailer. 6511 */ 6512 (void)sctp_hmac_m(SCTP_HMAC, 6513 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], 6514 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr), 6515 (uint8_t *)signature, SCTP_SIGNATURE_SIZE); 6516 /* 6517 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return 6518 * here since the timer will drive a retranmission. 
6519 */ 6520 if (padding_len > 0) { 6521 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) { 6522 sctp_m_freem(m); 6523 return; 6524 } 6525 } 6526 if (stc.loopback_scope) { 6527 over_addr = (union sctp_sockstore *)dst; 6528 } else { 6529 over_addr = NULL; 6530 } 6531 6532 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0, 6533 0, 0, 6534 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag, 6535 port, over_addr, 6536 #if defined(__FreeBSD__) 6537 use_mflowid, mflowid, 6538 #endif 6539 SCTP_SO_NOT_LOCKED); 6540 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 6541 } 6542 6543 6544 static void 6545 sctp_prune_prsctp(struct sctp_tcb *stcb, 6546 struct sctp_association *asoc, 6547 struct sctp_sndrcvinfo *srcv, 6548 int dataout) 6549 { 6550 int freed_spc = 0; 6551 struct sctp_tmit_chunk *chk, *nchk; 6552 6553 SCTP_TCB_LOCK_ASSERT(stcb); 6554 if ((asoc->prsctp_supported) && 6555 (asoc->sent_queue_cnt_removeable > 0)) { 6556 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 6557 /* 6558 * Look for chunks marked with the PR_SCTP flag AND 6559 * the buffer space flag. If the one being sent is 6560 * equal or greater priority then purge the old one 6561 * and free some space. 6562 */ 6563 if (PR_SCTP_BUF_ENABLED(chk->flags)) { 6564 /* 6565 * This one is PR-SCTP AND buffer space 6566 * limited type 6567 */ 6568 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { 6569 /* 6570 * Lower numbers equates to higher 6571 * priority so if the one we are 6572 * looking at has a larger or equal 6573 * priority we want to drop the data 6574 * and NOT retransmit it. 
6575 */ 6576 if (chk->data) { 6577 /* 6578 * We release the book_size 6579 * if the mbuf is here 6580 */ 6581 int ret_spc; 6582 uint8_t sent; 6583 6584 if (chk->sent > SCTP_DATAGRAM_UNSENT) 6585 sent = 1; 6586 else 6587 sent = 0; 6588 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, 6589 sent, 6590 SCTP_SO_LOCKED); 6591 freed_spc += ret_spc; 6592 if (freed_spc >= dataout) { 6593 return; 6594 } 6595 } /* if chunk was present */ 6596 } /* if of sufficent priority */ 6597 } /* if chunk has enabled */ 6598 } /* tailqforeach */ 6599 6600 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 6601 /* Here we must move to the sent queue and mark */ 6602 if (PR_SCTP_BUF_ENABLED(chk->flags)) { 6603 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { 6604 if (chk->data) { 6605 /* 6606 * We release the book_size 6607 * if the mbuf is here 6608 */ 6609 int ret_spc; 6610 6611 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, 6612 0, SCTP_SO_LOCKED); 6613 6614 freed_spc += ret_spc; 6615 if (freed_spc >= dataout) { 6616 return; 6617 } 6618 } /* end if chk->data */ 6619 } /* end if right class */ 6620 } /* end if chk pr-sctp */ 6621 } /* tailqforeachsafe (chk) */ 6622 } /* if enabled in asoc */ 6623 } 6624 6625 int 6626 sctp_get_frag_point(struct sctp_tcb *stcb, 6627 struct sctp_association *asoc) 6628 { 6629 int siz, ovh; 6630 6631 /* 6632 * For endpoints that have both v6 and v4 addresses we must reserve 6633 * room for the ipv6 header, for those that are only dealing with V4 6634 * we use a larger frag point. 
/*
 * Derive the PR-SCTP policy flags and the policy timestamp/priority for a
 * pending stream-queue entry from its sinfo flags and timetolive value.
 *
 * If no explicit PR-SCTP policy was requested but a positive lifetime was
 * given, TTL policy is assumed.  The meaning of sp->ts depends on the
 * policy chosen:
 *   - BUF: tv_sec holds a priority (lower = higher priority).
 *   - TTL: ts holds the absolute expiry time (now + timetolive ms).
 *   - RTX: tv_sec holds a retransmission-count limit.
 */
static void
sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
{
	/*
	 * We assume that the user wants PR_SCTP_TTL if the user
	 * provides a positive lifetime but does not specify any
	 * PR_SCTP policy.
	 */
	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
	} else if (sp->timetolive > 0) {
		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
	} else {
		/* no policy and no lifetime: nothing to set up */
		return;
	}
	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
	case CHUNK_FLAGS_PR_SCTP_BUF:
		/*
		 * Time to live is a priority stored in tv_sec when
		 * doing the buffer drop thing.
		 */
		sp->ts.tv_sec = sp->timetolive;
		sp->ts.tv_usec = 0;
		break;
	case CHUNK_FLAGS_PR_SCTP_TTL:
	{
		struct timeval tv;

		/* expiry = current time + timetolive (given in ms) */
		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
		tv.tv_sec = sp->timetolive / 1000;
		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
		/* TODO sctp_constants.h needs alternative time macros when
		 * _KERNEL is undefined.
		 */
#ifndef __FreeBSD__
		/* BSD userland/other platforms: 3-argument timeradd() */
		timeradd(&sp->ts, &tv, &sp->ts);
#else
		/* FreeBSD kernel: in-place timevaladd() */
		timevaladd(&sp->ts, &tv);
#endif
	}
		break;
	case CHUNK_FLAGS_PR_SCTP_RTX:
		/*
		 * Time to live is a the number or retransmissions
		 * stored in tv_sec.
		 */
		sp->ts.tv_sec = sp->timetolive;
		sp->ts.tv_usec = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_USRREQ1,
		        "Unknown PR_SCTP policy %u.\n",
		        PR_SCTP_POLICY(sp->sinfo_flags));
		break;
	}
}
/*
 * Queue a complete message (an mbuf chain) onto the association's stream
 * output queue and hand it to the stream scheduler.
 *
 * On success the mbuf chain 'm' is owned by the new queue entry; on any
 * error path it is freed here.  Returns 0 or an errno (EINVAL for a bad
 * or locked-out stream, ECONNRESET when shutting down, ENOMEM on
 * allocation failure).
 *
 * hold_stcb_lock: non-zero means the caller already holds the TCB send
 * lock, so it is not taken/released here.
 */
static int
sctp_msg_append(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct mbuf *m,
    struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
{
	int error = 0;
	struct mbuf *at;
	struct sctp_stream_queue_pending *sp = NULL;
	struct sctp_stream_out *strm;

	/* Given an mbuf chain, put it
	 * into the association send queue and
	 * place it on the wheel
	 */
	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
		/* Invalid stream number */
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	/* a partially-sent message has the association locked to one stream */
	if ((stcb->asoc.stream_locked) &&
	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
	/* Now can we send this? */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
		error = ECONNRESET;
		goto out_now;
	}
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		error = ENOMEM;
		goto out_now;
	}
	/* copy the send-info the sender supplied */
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
		/* sender pinned a destination; take a reference on it */
		sp->net = net;
		atomic_add_int(&sp->net->ref_count, 1);
	} else {
		sp->net = NULL;
	}
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
	sp->stream = srcv->sinfo_stream;
	/* the whole message is present, nothing more is coming */
	sp->msg_is_complete = 1;
	sp->sender_all_done = 1;
	sp->some_taken = 0;
	sp->data = m;
	sp->tail_mbuf = NULL;
	sctp_set_prsctp_policy(sp);
	/* We could in theory (for sendall) sifa the length
	 * in, but we would still have to hunt through the
	 * chain since we need to setup the tail_mbuf
	 */
	sp->length = 0;
	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
		if (SCTP_BUF_NEXT(at) == NULL)
			sp->tail_mbuf = at;
		sp->length += SCTP_BUF_LEN(at);
	}
	if (srcv->sinfo_keynumber_valid) {
		sp->auth_keyid = srcv->sinfo_keynumber;
	} else {
		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
	}
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
		/* keep the auth key alive while this message is queued */
		sctp_auth_key_acquire(stcb, sp->auth_keyid);
		sp->holds_key_ref = 1;
	}
	if (hold_stcb_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	sctp_snd_sb_alloc(stcb, sp->length);
	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
	/* ownership of the chain transferred to sp; don't free it below */
	m = NULL;
	if (hold_stcb_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
out_now:
	if (m) {
		sctp_m_freem(m);
	}
	return (error);
}
/*
 * Append the data held in 'clonechain' to 'outchain', returning the head
 * of the resulting chain and updating *endofchain to its last mbuf.
 *
 * Three strategies, chosen by the arguments:
 *   - can_take_mbuf: take ownership of clonechain and link it on directly;
 *   - small copies (below the sysctl mbuf threshold) and !copy_by_ref:
 *     flat-copy the bytes into trailing space of *endofchain, allocating
 *     at most one extra cluster;
 *   - otherwise: reference-copy via SCTP_M_COPYM and link the copy on.
 *
 * On allocation failure the whole outchain is freed and NULL is returned,
 * so the caller must not reuse its old outchain pointer after a NULL
 * return.  clonechain is not freed here except when ownership was taken.
 */
static struct mbuf *
sctp_copy_mbufchain(struct mbuf *clonechain,
    struct mbuf *outchain,
    struct mbuf **endofchain,
    int can_take_mbuf,
    int sizeofcpy,
    uint8_t copy_by_ref)
{
	struct mbuf *m;
	struct mbuf *appendchain;
	caddr_t cp;
	int len;

	if (endofchain == NULL) {
		/* error */
	error_out:
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (can_take_mbuf) {
		/* caller gives us the chain; just link it on below */
		appendchain = clonechain;
	} else {
		if (!copy_by_ref &&
#if defined(__Panda__)
		    0
#else
		    /* small enough to flat-copy into normal mbufs? */
		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
#endif
		    ) {
			/* Its not in a cluster */
			if (*endofchain == NULL) {
				/* lets get a mbuf cluster */
				if (outchain == NULL) {
					/* This is the general case */
				new_mbuf:
					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
					if (outchain == NULL) {
						goto error_out;
					}
					SCTP_BUF_LEN(outchain) = 0;
					*endofchain = outchain;
					/* get the prepend space */
					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
				} else {
					/* We really should not get a NULL in endofchain */
					/* find end */
					m = outchain;
					while (m) {
						if (SCTP_BUF_NEXT(m) == NULL) {
							*endofchain = m;
							break;
						}
						m = SCTP_BUF_NEXT(m);
					}
					/* sanity */
					if (*endofchain == NULL) {
						/* huh, TSNH XXX maybe we should panic */
						sctp_m_freem(outchain);
						goto new_mbuf;
					}
				}
				/* get the new end of length */
				len = M_TRAILINGSPACE(*endofchain);
			} else {
				/* how much is left at the end? */
				len = M_TRAILINGSPACE(*endofchain);
			}
			/* Find the end of the data, for appending */
			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));

			/* Now lets copy it out */
			if (len >= sizeofcpy) {
				/* It all fits, copy it in */
				m_copydata(clonechain, 0, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			} else {
				/* fill up the end of the chain */
				if (len > 0) {
					m_copydata(clonechain, 0, len, cp);
					SCTP_BUF_LEN((*endofchain)) += len;
					/* now we need another one */
					sizeofcpy -= len;
				}
				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
				if (m == NULL) {
					/* We failed */
					goto error_out;
				}
				SCTP_BUF_NEXT((*endofchain)) = m;
				*endofchain = m;
				cp = mtod((*endofchain), caddr_t);
				/* copy the remainder into the fresh mbuf */
				m_copydata(clonechain, len, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			}
			return (outchain);
		} else {
			/* copy the old fashion way */
			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
#ifdef SCTP_MBUF_LOGGING
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
				struct mbuf *mat;

				for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
					if (SCTP_BUF_IS_EXTENDED(mat)) {
						sctp_log_mb(mat, SCTP_MBUF_ICOPY);
					}
				}
			}
#endif
		}
	}
	if (appendchain == NULL) {
		/* error */
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (outchain) {
		/* tack on to the end */
		if (*endofchain != NULL) {
			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
		} else {
			/* no cached tail; walk to find the end first */
			m = outchain;
			while (m) {
				if (SCTP_BUF_NEXT(m) == NULL) {
					SCTP_BUF_NEXT(m) = appendchain;
					break;
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		/*
		 * save off the end and update the end-chain
		 * postion
		 */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (outchain);
	} else {
		/* save off the end and update the end-chain postion */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (appendchain);
	}
}
*/ 6957 m = appendchain; 6958 while (m) { 6959 if (SCTP_BUF_NEXT(m) == NULL) { 6960 *endofchain = m; 6961 break; 6962 } 6963 m = SCTP_BUF_NEXT(m); 6964 } 6965 return (outchain); 6966 } else { 6967 /* save off the end and update the end-chain postion */ 6968 m = appendchain; 6969 while (m) { 6970 if (SCTP_BUF_NEXT(m) == NULL) { 6971 *endofchain = m; 6972 break; 6973 } 6974 m = SCTP_BUF_NEXT(m); 6975 } 6976 return (appendchain); 6977 } 6978 } 6979 6980 static int 6981 sctp_med_chunk_output(struct sctp_inpcb *inp, 6982 struct sctp_tcb *stcb, 6983 struct sctp_association *asoc, 6984 int *num_out, 6985 int *reason_code, 6986 int control_only, int from_where, 6987 struct timeval *now, int *now_filled, int frag_point, int so_locked 6988 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 6989 SCTP_UNUSED 6990 #endif 6991 ); 6992 6993 static void 6994 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, 6995 uint32_t val SCTP_UNUSED) 6996 { 6997 struct sctp_copy_all *ca; 6998 struct mbuf *m; 6999 int ret = 0; 7000 int added_control = 0; 7001 int un_sent, do_chunk_output = 1; 7002 struct sctp_association *asoc; 7003 struct sctp_nets *net; 7004 7005 ca = (struct sctp_copy_all *)ptr; 7006 if (ca->m == NULL) { 7007 return; 7008 } 7009 if (ca->inp != inp) { 7010 /* TSNH */ 7011 return; 7012 } 7013 if (ca->sndlen > 0) { 7014 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT); 7015 if (m == NULL) { 7016 /* can't copy so we are done */ 7017 ca->cnt_failed++; 7018 return; 7019 } 7020 #ifdef SCTP_MBUF_LOGGING 7021 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 7022 struct mbuf *mat; 7023 7024 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { 7025 if (SCTP_BUF_IS_EXTENDED(mat)) { 7026 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 7027 } 7028 } 7029 } 7030 #endif 7031 } else { 7032 m = NULL; 7033 } 7034 SCTP_TCB_LOCK_ASSERT(stcb); 7035 if (stcb->asoc.alternate) { 7036 net = stcb->asoc.alternate; 7037 } else { 7038 net = 
stcb->asoc.primary_destination; 7039 } 7040 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) { 7041 /* Abort this assoc with m as the user defined reason */ 7042 if (m != NULL) { 7043 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT); 7044 } else { 7045 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 7046 0, M_NOWAIT, 1, MT_DATA); 7047 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 7048 } 7049 if (m != NULL) { 7050 struct sctp_paramhdr *ph; 7051 7052 ph = mtod(m, struct sctp_paramhdr *); 7053 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 7054 ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen); 7055 } 7056 /* We add one here to keep the assoc from 7057 * dis-appearing on us. 7058 */ 7059 atomic_add_int(&stcb->asoc.refcnt, 1); 7060 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED); 7061 /* sctp_abort_an_association calls sctp_free_asoc() 7062 * free association will NOT free it since we 7063 * incremented the refcnt .. we do this to prevent 7064 * it being freed and things getting tricky since 7065 * we could end up (from free_asoc) calling inpcb_free 7066 * which would get a recursive lock call to the 7067 * iterator lock.. But as a consequence of that the 7068 * stcb will return to us un-locked.. since free_asoc 7069 * returns with either no TCB or the TCB unlocked, we 7070 * must relock.. 
to unlock in the iterator timer :-0 7071 */ 7072 SCTP_TCB_LOCK(stcb); 7073 atomic_add_int(&stcb->asoc.refcnt, -1); 7074 goto no_chunk_output; 7075 } else { 7076 if (m) { 7077 ret = sctp_msg_append(stcb, net, m, 7078 &ca->sndrcv, 1); 7079 } 7080 asoc = &stcb->asoc; 7081 if (ca->sndrcv.sinfo_flags & SCTP_EOF) { 7082 /* shutdown this assoc */ 7083 int cnt; 7084 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED); 7085 7086 if (TAILQ_EMPTY(&asoc->send_queue) && 7087 TAILQ_EMPTY(&asoc->sent_queue) && 7088 (cnt == 0)) { 7089 if (asoc->locked_on_sending) { 7090 goto abort_anyway; 7091 } 7092 /* there is nothing queued to send, so I'm done... */ 7093 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 7094 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 7095 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 7096 /* only send SHUTDOWN the first time through */ 7097 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 7098 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 7099 } 7100 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 7101 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 7102 sctp_stop_timers_for_shutdown(stcb); 7103 sctp_send_shutdown(stcb, net); 7104 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 7105 net); 7106 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 7107 asoc->primary_destination); 7108 added_control = 1; 7109 do_chunk_output = 0; 7110 } 7111 } else { 7112 /* 7113 * we still got (or just got) data to send, so set 7114 * SHUTDOWN_PENDING 7115 */ 7116 /* 7117 * XXX sockets draft says that SCTP_EOF should be 7118 * sent with no data. 
currently, we will allow user 7119 * data to be sent first and move to 7120 * SHUTDOWN-PENDING 7121 */ 7122 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 7123 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 7124 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 7125 if (asoc->locked_on_sending) { 7126 /* Locked to send out the data */ 7127 struct sctp_stream_queue_pending *sp; 7128 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 7129 if (sp) { 7130 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 7131 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 7132 } 7133 } 7134 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 7135 if (TAILQ_EMPTY(&asoc->send_queue) && 7136 TAILQ_EMPTY(&asoc->sent_queue) && 7137 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 7138 abort_anyway: 7139 atomic_add_int(&stcb->asoc.refcnt, 1); 7140 sctp_abort_an_association(stcb->sctp_ep, stcb, 7141 NULL, SCTP_SO_NOT_LOCKED); 7142 atomic_add_int(&stcb->asoc.refcnt, -1); 7143 goto no_chunk_output; 7144 } 7145 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 7146 asoc->primary_destination); 7147 } 7148 } 7149 7150 } 7151 } 7152 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 7153 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 7154 7155 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 7156 (stcb->asoc.total_flight > 0) && 7157 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { 7158 do_chunk_output = 0; 7159 } 7160 if (do_chunk_output) 7161 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED); 7162 else if (added_control) { 7163 int num_out = 0, reason = 0, now_filled = 0; 7164 struct timeval now; 7165 int frag_point; 7166 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 7167 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 7168 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED); 7169 } 7170 no_chunk_output: 7171 if 
(ret) { 7172 ca->cnt_failed++; 7173 } else { 7174 ca->cnt_sent++; 7175 } 7176 } 7177 7178 static void 7179 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED) 7180 { 7181 struct sctp_copy_all *ca; 7182 7183 ca = (struct sctp_copy_all *)ptr; 7184 /* 7185 * Do a notify here? Kacheong suggests that the notify be done at 7186 * the send time.. so you would push up a notification if any send 7187 * failed. Don't know if this is feasable since the only failures we 7188 * have is "memory" related and if you cannot get an mbuf to send 7189 * the data you surely can't get an mbuf to send up to notify the 7190 * user you can't send the data :-> 7191 */ 7192 7193 /* now free everything */ 7194 sctp_m_freem(ca->m); 7195 SCTP_FREE(ca, SCTP_M_COPYAL); 7196 } 7197 7198 7199 #define MC_ALIGN(m, len) do { \ 7200 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1)); \ 7201 } while (0) 7202 7203 7204 7205 static struct mbuf * 7206 sctp_copy_out_all(struct uio *uio, int len) 7207 { 7208 struct mbuf *ret, *at; 7209 int left, willcpy, cancpy, error; 7210 7211 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA); 7212 if (ret == NULL) { 7213 /* TSNH */ 7214 return (NULL); 7215 } 7216 left = len; 7217 SCTP_BUF_LEN(ret) = 0; 7218 /* save space for the data chunk header */ 7219 cancpy = M_TRAILINGSPACE(ret); 7220 willcpy = min(cancpy, left); 7221 at = ret; 7222 while (left > 0) { 7223 /* Align data to the end */ 7224 error = uiomove(mtod(at, caddr_t), willcpy, uio); 7225 if (error) { 7226 err_out_now: 7227 sctp_m_freem(at); 7228 return (NULL); 7229 } 7230 SCTP_BUF_LEN(at) = willcpy; 7231 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; 7232 left -= willcpy; 7233 if (left > 0) { 7234 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA); 7235 if (SCTP_BUF_NEXT(at) == NULL) { 7236 goto err_out_now; 7237 } 7238 at = SCTP_BUF_NEXT(at); 7239 SCTP_BUF_LEN(at) = 0; 7240 cancpy = M_TRAILINGSPACE(at); 7241 willcpy = min(cancpy, left); 7242 } 7243 } 7244 
/*
 * Implement the SCTP_SENDALL flag: capture the user message once (from
 * 'uio' or from the mbuf chain 'm') into a sctp_copy_all context and kick
 * off a PCB iterator that delivers it to every association on 'inp'.
 *
 * Returns 0 on success, ENOMEM on allocation failure, or EFAULT if the
 * iterator could not be started.  Note: 'm' is freed here only on the
 * first (ca == NULL) failure path; on later paths ownership stays with
 * the caller — presumably intentional, but worth confirming against the
 * call sites (NOTE(review)).
 */
static int
sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
    struct sctp_sndrcvinfo *srcv)
{
	int ret;
	struct sctp_copy_all *ca;

	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
	            SCTP_M_COPYAL);
	if (ca == NULL) {
		sctp_m_freem(m);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	memset(ca, 0, sizeof(struct sctp_copy_all));

	ca->inp = inp;
	if (srcv) {
		/* only the non-padded prefix of the sndrcvinfo is kept */
		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
	}
	/*
	 * take off the sendall flag, it would be bad if we failed to do
	 * this :-0
	 */
	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
	/* get length and mbuf chain */
	if (uio) {
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
		ca->sndlen = uio->uio_resid;
#else
		/* newer Darwin uses an accessor for the opaque uio */
		ca->sndlen = uio_resid(uio);
#endif
#else
		ca->sndlen = uio->uio_resid;
#endif
#if defined(__APPLE__)
		/* drop the socket lock while uiomove may sleep/fault */
		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
#endif
		ca->m = sctp_copy_out_all(uio, ca->sndlen);
#if defined(__APPLE__)
		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
#endif
		if (ca->m == NULL) {
			SCTP_FREE(ca, SCTP_M_COPYAL);
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
	} else {
		/* Gather the length of the send */
		struct mbuf *mat;

		ca->sndlen = 0;
		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
			ca->sndlen += SCTP_BUF_LEN(mat);
		}
	}
	/* walk every association; completes callback frees 'ca' */
	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
	                             SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
	                             SCTP_ASOC_ANY_STATE,
	                             (void *)ca, 0,
	                             sctp_sendall_completes, inp, 1);
	if (ret) {
		SCTP_PRINTF("Failed to initiate iterator for sendall\n");
		SCTP_FREE(ca, SCTP_M_COPYAL);
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL,
		                        SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
	return (0);
}
SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
	return (0);
}


/*
 * Purge any COOKIE-ECHO chunks still sitting on the control-send queue
 * (a newer cookie supersedes them), freeing their data and returning
 * the chunk resources to the pool.
 */
void
sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		}
	}
}

/*
 * Drop ASCONF chunks from the asconf send queue whose serial number has
 * already been acknowledged by the peer.  The loop breaks at the first
 * chunk whose serial is still beyond asconf_seq_out_acked — presumably
 * the queue is kept in serial-number order (TODO confirm against the
 * enqueue path in sctp_asconf.c).
 */
void
sctp_toss_old_asconf(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_asconf_chunk *acp;

	asoc = &stcb->asoc;
	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
		/* find SCTP_ASCONF chunk in queue */
		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
			if (chk->data) {
				acp = mtod(chk->data, struct sctp_asconf_chunk *);
				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
					/* Not Acked yet */
					break;
				}
			}
			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		}
	}
}


/*
 * After a bundle of DATA chunks has been handed to the lower layer:
 * move each chunk (data_list[0..bundle_at-1]) from the send_queue onto
 * the sent_queue in TSN order, stamp transmit-time bookkeeping, charge
 * the chunks against the flight size, and shrink the peer's advertised
 * receive window accordingly.
 */
static void
sctp_clean_up_datalist(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk **data_list,
    int bundle_at,
    struct sctp_nets *net)
{
	int i;
	struct sctp_tmit_chunk *tp1;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
		asoc->send_queue_cnt--;
		if (i > 0) {
			/*
			 * Any chunk NOT 0 you zap the time; chunk 0 gets
			 * zapped or set based on if a RTO measurement is
			 * needed.
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record time */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
		if (data_list[i]->whoTo == NULL) {
			/* chunk had no destination yet: bind it to this net */
			data_list[i]->whoTo = net;
			atomic_add_int(&net->ref_count, 1);
		}
		/* on to the sent queue */
		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
			struct sctp_tmit_chunk *tpp;

			/* need to move back */
		back_up_more:
			/* walk backwards until we find a TSN <= ours, then
			 * insert after it, keeping sent_queue TSN-sorted */
			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
			if (tpp == NULL) {
				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
				goto all_done;
			}
			tp1 = tpp;
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
				goto back_up_more;
			}
			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
		} else {
			TAILQ_INSERT_TAIL(&asoc->sent_queue,
			    data_list[i],
			    sctp_next);
		}
	all_done:
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
			SCTP_STAT_INCR(sctps_windowprobed);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		data_list[i]->rec.data.chunk_was_revoked = 0;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
			    data_list[i]->whoTo->flight_size,
			    data_list[i]->book_size,
			    (uintptr_t)data_list[i]->whoTo,
			    data_list[i]->rec.data.TSN_seq);
		}
		sctp_flight_size_increase(data_list[i]);
		sctp_total_flight_increase(stcb, data_list[i]);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
			    asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
		}
		/* charge the chunk (plus configured overhead) to the peer rwnd */
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
		    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
	}
}

/*
 * Remove stray one-shot control chunks (SACKs, heartbeats, shutdowns,
 * etc.) from the control-send queue; a pending stream-reset chunk is
 * kept only if it is the association's active str_reset request.
 */
static void
sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    )
{
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
			/* Stray chunks must be cleaned up */
		clean_up_anyway:
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
				asoc->fwd_tsn_cnt--;
			sctp_free_a_chunk(stcb,
chk, so_locked); 7492 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 7493 /* special handling, we must look into the param */ 7494 if (chk != asoc->str_reset) { 7495 goto clean_up_anyway; 7496 } 7497 } 7498 } 7499 } 7500 7501 7502 static int 7503 sctp_can_we_split_this(struct sctp_tcb *stcb, 7504 uint32_t length, 7505 uint32_t goal_mtu, uint32_t frag_point, int eeor_on) 7506 { 7507 /* Make a decision on if I should split a 7508 * msg into multiple parts. This is only asked of 7509 * incomplete messages. 7510 */ 7511 if (eeor_on) { 7512 /* If we are doing EEOR we need to always send 7513 * it if its the entire thing, since it might 7514 * be all the guy is putting in the hopper. 7515 */ 7516 if (goal_mtu >= length) { 7517 /*- 7518 * If we have data outstanding, 7519 * we get another chance when the sack 7520 * arrives to transmit - wait for more data 7521 */ 7522 if (stcb->asoc.total_flight == 0) { 7523 /* If nothing is in flight, we zero 7524 * the packet counter. 7525 */ 7526 return (length); 7527 } 7528 return (0); 7529 7530 } else { 7531 /* You can fill the rest */ 7532 return (goal_mtu); 7533 } 7534 } 7535 /*- 7536 * For those strange folk that make the send buffer 7537 * smaller than our fragmentation point, we can't 7538 * get a full msg in so we have to allow splitting. 7539 */ 7540 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) { 7541 return (length); 7542 } 7543 7544 if ((length <= goal_mtu) || 7545 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) { 7546 /* Sub-optimial residual don't split in non-eeor mode. */ 7547 return (0); 7548 } 7549 /* If we reach here length is larger 7550 * than the goal_mtu. Do we wish to split 7551 * it for the sake of packet putting together? 
7552 */ 7553 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) { 7554 /* Its ok to split it */ 7555 return (min(goal_mtu, frag_point)); 7556 } 7557 /* Nope, can't split */ 7558 return (0); 7559 7560 } 7561 7562 static uint32_t 7563 sctp_move_to_outqueue(struct sctp_tcb *stcb, 7564 struct sctp_stream_out *strq, 7565 uint32_t goal_mtu, 7566 uint32_t frag_point, 7567 int *locked, 7568 int *giveup, 7569 int eeor_mode, 7570 int *bail, 7571 int so_locked 7572 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 7573 SCTP_UNUSED 7574 #endif 7575 ) 7576 { 7577 /* Move from the stream to the send_queue keeping track of the total */ 7578 struct sctp_association *asoc; 7579 struct sctp_stream_queue_pending *sp; 7580 struct sctp_tmit_chunk *chk; 7581 struct sctp_data_chunk *dchkh; 7582 uint32_t to_move, length; 7583 uint8_t rcv_flags = 0; 7584 uint8_t some_taken; 7585 uint8_t send_lock_up = 0; 7586 7587 SCTP_TCB_LOCK_ASSERT(stcb); 7588 asoc = &stcb->asoc; 7589 one_more_time: 7590 /*sa_ignore FREED_MEMORY*/ 7591 sp = TAILQ_FIRST(&strq->outqueue); 7592 if (sp == NULL) { 7593 *locked = 0; 7594 if (send_lock_up == 0) { 7595 SCTP_TCB_SEND_LOCK(stcb); 7596 send_lock_up = 1; 7597 } 7598 sp = TAILQ_FIRST(&strq->outqueue); 7599 if (sp) { 7600 goto one_more_time; 7601 } 7602 if (strq->last_msg_incomplete) { 7603 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n", 7604 strq->stream_no, 7605 strq->last_msg_incomplete); 7606 strq->last_msg_incomplete = 0; 7607 } 7608 to_move = 0; 7609 if (send_lock_up) { 7610 SCTP_TCB_SEND_UNLOCK(stcb); 7611 send_lock_up = 0; 7612 } 7613 goto out_of; 7614 } 7615 if ((sp->msg_is_complete) && (sp->length == 0)) { 7616 if (sp->sender_all_done) { 7617 /* We are doing differed cleanup. Last 7618 * time through when we took all the data 7619 * the sender_all_done was not set. 
			 */
			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
				    sp->sender_all_done,
				    sp->length,
				    sp->msg_is_complete,
				    sp->put_last_out,
				    send_lock_up);
			}
			/* removing the tail entry needs the send lock held */
			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&strq->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			if (sp->data) {
				sctp_m_freem(sp->data);
				sp->data = NULL;
			}
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* we can't be locked to it */
			*locked = 0;
			stcb->asoc.locked_on_sending = NULL;
			if (send_lock_up) {
				SCTP_TCB_SEND_UNLOCK(stcb);
				send_lock_up = 0;
			}
			/* back to get the next msg */
			goto one_more_time;
		} else {
			/* sender just finished this but
			 * still holds a reference
			 */
			*locked = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	} else {
		/* is there some to get */
		if (sp->length == 0) {
			/* no */
			*locked = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		} else if (sp->discard_rest) {
			/* PR-SCTP style discard: drop the queued remainder */
			if (send_lock_up == 0) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			/* Whack down the size */
			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
			if ((stcb->sctp_socket != NULL) && \
			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
			}
			if (sp->data) {
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
			}
			sp->length = 0;
			sp->some_taken = 1;
			*locked = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	}
	some_taken = sp->some_taken;
	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
		sp->msg_is_complete = 1;
	}
re_look:
	length = sp->length;
	if (sp->msg_is_complete) {
		/* The message is complete */
		to_move = min(length, frag_point);
		if (to_move == length) {
			/* All of it fits in the MTU */
			if (sp->some_taken) {
				rcv_flags |= SCTP_DATA_LAST_FRAG;
				sp->put_last_out = 1;
			} else {
				rcv_flags |= SCTP_DATA_NOT_FRAG;
				sp->put_last_out = 1;
			}
		} else {
			/* Not all of it fits, we fragment */
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
			}
			sp->some_taken = 1;
		}
	} else {
		to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
		if (to_move) {
			/*-
			 * We use a snapshot of length in case it
			 * is expanding during the compare.
			 */
			uint32_t llen;

			llen = length;
			if (to_move >= llen) {
				to_move = llen;
				if (send_lock_up == 0) {
					/*-
					 * We are taking all of an incomplete msg
					 * thus we need a send lock.
					 */
					SCTP_TCB_SEND_LOCK(stcb);
					send_lock_up = 1;
					if (sp->msg_is_complete) {
						/* the sender finished the msg */
						goto re_look;
					}
				}
			}
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
				sp->some_taken = 1;
			}
		} else {
			/* Nothing to take.
			 */
			if (sp->some_taken) {
				*locked = 1;
			}
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	}

	/* If we reach here, we can copy out a chunk */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* No chunk memory */
		*giveup = 1;
		to_move = 0;
		goto out_of;
	}
	/* Setup for unordered if needed by looking
	 * at the user sent info flags.
	 */
	if (sp->sinfo_flags & SCTP_UNORDERED) {
		rcv_flags |= SCTP_DATA_UNORDERED;
	}
	if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
	    ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
	}
	/* clear out the chunk before setting up */
	memset(chk, 0, sizeof(*chk));
	chk->rec.data.rcv_flags = rcv_flags;

	if (to_move >= length) {
		/* we think we can steal the whole thing */
		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		if (to_move < sp->length) {
			/* bail, it changed */
			goto dont_do_it;
		}
		/* steal the whole mbuf chain instead of copying */
		chk->data = sp->data;
		chk->last_mbuf = sp->tail_mbuf;
		/* register the stealing */
		sp->data = sp->tail_mbuf = NULL;
	} else {
		struct mbuf *m;
	dont_do_it:
		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
		chk->last_mbuf = NULL;
		if (chk->data == NULL) {
			sp->some_taken = some_taken;
			sctp_free_a_chunk(stcb, chk, so_locked);
			*bail = 1;
			to_move = 0;
			goto out_of;
		}
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
			}
		}
#endif
		/* Pull off the data */
		m_adj(sp->data, to_move);
		/* Now lets work our way down and compact it */
		m = sp->data;
		while (m && (SCTP_BUF_LEN(m) == 0)) {
			sp->data = SCTP_BUF_NEXT(m);
			SCTP_BUF_NEXT(m) = NULL;
			if (sp->tail_mbuf == m) {
				/*-
				 * Freeing tail? TSNH since
				 * we supposedly were taking less
				 * than the sp->length.
				 */
#ifdef INVARIANTS
				panic("Huh, freing tail? - TSNH");
#else
				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
				sp->tail_mbuf = sp->data = NULL;
				sp->length = 0;
#endif

			}
			sctp_m_free(m);
			m = sp->data;
		}
	}
	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
		chk->copy_by_ref = 1;
	} else {
		chk->copy_by_ref = 0;
	}
	/* get last_mbuf and counts of mb useage
	 * This is ugly but hopefully its only one mbuf.
	 */
	if (chk->last_mbuf == NULL) {
		chk->last_mbuf = chk->data;
		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
		}
	}

	if (to_move > length) {
		/*- This should not happen either
		 * since we always lower to_move to the size
		 * of sp->length if its larger.
		 */
#ifdef INVARIANTS
		panic("Huh, how can to_move be larger?");
#else
		SCTP_PRINTF("Huh, how can to_move be larger?\n");
		sp->length = 0;
#endif
	} else {
		atomic_subtract_int(&sp->length, to_move);
	}
	if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
		/* Not enough room for a chunk header, get some */
		struct mbuf *m;

		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
		if (m == NULL) {
			/*
			 * we're in trouble here. _PREPEND below will free
			 * all the data if there is no leading space, so we
			 * must put the data back and restore.
			 */
			if (send_lock_up == 0) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			if (chk->data == NULL) {
				/* unsteal the data */
				sp->data = chk->data;
				sp->tail_mbuf = chk->last_mbuf;
			} else {
				struct mbuf *m_tmp;
				/* reassemble the data */
				m_tmp = sp->data;
				sp->data = chk->data;
				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
			}
			sp->some_taken = some_taken;
			atomic_add_int(&sp->length, to_move);
			chk->data = NULL;
			*bail = 1;
			sctp_free_a_chunk(stcb, chk, so_locked);
			to_move = 0;
			goto out_of;
		} else {
			SCTP_BUF_LEN(m) = 0;
			SCTP_BUF_NEXT(m) = chk->data;
			chk->data = m;
			M_ALIGN(chk->data, 4);
		}
	}
	SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
	if (chk->data == NULL) {
		/* HELP, TSNH since we assured it would not above? */
#ifdef INVARIANTS
		panic("prepend failes HELP?");
#else
		SCTP_PRINTF("prepend fails HELP?\n");
		sctp_free_a_chunk(stcb, chk, so_locked);
#endif
		*bail = 1;
		to_move = 0;
		goto out_of;
	}
	sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
	chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
	chk->book_size_scale = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;

	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->pad_inplace = 0;
	chk->no_fr_allowed = 0;
	chk->rec.data.stream_seq = strq->next_sequence_send;
	/* only an ordered last-fragment consumes a stream sequence number */
	if ((rcv_flags & SCTP_DATA_LAST_FRAG) &&
	    !(rcv_flags & SCTP_DATA_UNORDERED)) {
		strq->next_sequence_send++;
	}
	chk->rec.data.stream_number = sp->stream;
	chk->rec.data.payloadtype = sp->ppid;
	chk->rec.data.context = sp->context;
	chk->rec.data.doing_fast_retransmit = 0;

	chk->rec.data.timetodrop = sp->ts;
	chk->flags = sp->act_flags;

	if (sp->net) {
		chk->whoTo = sp->net;
		atomic_add_int(&chk->whoTo->ref_count, 1);
	}
	else
		chk->whoTo = NULL;

	if (sp->holds_key_ref) {
		chk->auth_keyid = sp->auth_keyid;
		sctp_auth_key_acquire(stcb, chk->auth_keyid);
		chk->holds_key_ref = 1;
	}
#if defined(__FreeBSD__) || defined(__Panda__)
	/* atomically claim the next TSN */
	chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
#else
	chk->rec.data.TSN_seq = asoc->sending_seq++;
#endif
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
		    (uintptr_t)stcb, sp->length,
		    (uint32_t)((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
		    chk->rec.data.TSN_seq);
	}
	dchkh = mtod(chk->data, struct sctp_data_chunk *);
	/*
	 * Put the rest of the things in place now. Size was done
	 * earlier in previous loop prior to padding.
	 */

#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_out_at = 0;
		asoc->tsn_out_wrapped = 1;
	}
	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
	asoc->tsn_out_at++;
#endif

	/* fill in the on-the-wire DATA chunk header */
	dchkh->ch.chunk_type = SCTP_DATA;
	dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
	dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
	dchkh->dp.stream_id = htons(strq->stream_no);
	dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
	dchkh->dp.protocol_id = chk->rec.data.payloadtype;
	dchkh->ch.chunk_length = htons(chk->send_size);
	/* Now advance the
chk->send_size by the actual pad needed. */
	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
		/* need a pad */
		struct mbuf *lm;
		int pads;

		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
		lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
		if (lm != NULL) {
			chk->last_mbuf = lm;
			chk->pad_inplace = 1;
		}
		chk->send_size += pads;
	}
	if (PR_SCTP_ENABLED(chk->flags)) {
		asoc->pr_sctp_cnt++;
	}
	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
		/* All done pull and kill the message */
		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
		if (sp->put_last_out == 0) {
			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
			    sp->sender_all_done,
			    sp->length,
			    sp->msg_is_complete,
			    sp->put_last_out,
			    send_lock_up);
		}
		/* removing the tail entry needs the send lock held */
		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		TAILQ_REMOVE(&strq->outqueue, sp, next);
		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
		if (sp->net) {
			sctp_free_remote_addr(sp->net);
			sp->net = NULL;
		}
		if (sp->data) {
			sctp_m_freem(sp->data);
			sp->data = NULL;
		}
		sctp_free_a_strmoq(stcb, sp, so_locked);

		/* we can't be locked to it */
		*locked = 0;
		stcb->asoc.locked_on_sending = NULL;
	} else {
		/* more to go, we are locked */
		*locked = 1;
	}
	asoc->chunks_on_out_queue++;
	strq->chunks_on_queues++;
	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
	asoc->send_queue_cnt++;
out_of:
	if (send_lock_up) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	return (to_move);
}


/*
 * Pull roughly one MTU's worth of data for 'net' out of the stream
 * queues (as chosen by the stream scheduler) onto the association's
 * general send_queue via sctp_move_to_outqueue().  Sets *quit_now when
 * a move bailed (e.g. memory shortage).
 */
static void
sctp_fill_outqueue(struct sctp_tcb *stcb,
    struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    )
{
	struct sctp_association *asoc;
	struct sctp_stream_out *strq;
	int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
	int locked, giveup;

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	/* budget = path MTU minus the transport overhead for this family */
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		goal_mtu = net->mtu - sizeof(struct sctphdr);
		break;
#endif
	default:
		/* TSNH */
		goal_mtu = net->mtu;
		break;
	}
	/* Need an allowance for the data chunk header too */
	goal_mtu -= sizeof(struct sctp_data_chunk);

	/* must make even word boundary */
	goal_mtu &= 0xfffffffc;
	if (asoc->locked_on_sending) {
		/* We are stuck on one stream until the message completes. */
		strq = asoc->locked_on_sending;
		locked = 1;
	} else {
		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
		locked = 0;
	}
	while ((goal_mtu > 0) && strq) {
		giveup = 0;
		bail = 0;
		moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
		    &giveup, eeor_mode, &bail, so_locked);
		if (moved_how_much)
			stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);

		if (locked) {
			/* must keep draining this stream until its msg ends */
			asoc->locked_on_sending = strq;
			if ((moved_how_much == 0) || (giveup) || bail)
				/* no more to move for now */
				break;
		} else {
			asoc->locked_on_sending = NULL;
			if ((giveup) || bail) {
				break;
			}
			strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
			if (strq == NULL) {
				break;
			}
		}
		total_moved += moved_how_much;
		goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
		goal_mtu &= 0xfffffffc;
	}
	if (bail)
		*quit_now = 1;

	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);

	if (total_moved == 0) {
		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
		    (net == stcb->asoc.primary_destination)) {
			/* ran dry for primary network net */
			SCTP_STAT_INCR(sctps_primary_randry);
		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
			/* ran dry with CMT on */
			SCTP_STAT_INCR(sctps_cmt_randry);
		}
	}
}

/*
 * Re-mark every queued ECN-ECHO chunk as unsent so it is retransmitted
 * with the next outbound packet.
 */
void
sctp_fix_ecn_echo(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
			chk->sent = SCTP_DATAGRAM_UNSENT;
		}
	}
}

/*
 * Detach all pending stream data and send-queue chunks from 'net'
 * (dropping the per-chunk net references) so they can be re-homed on
 * another destination, e.g. when 'net' becomes unusable.
 */
void
sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_queue_pending *sp;
	unsigned int i;

	if (net == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
		TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
			if (sp->net == net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
		}
	}
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->whoTo == net) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = NULL;
		}
	}
}

int
sctp_med_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *num_out,
    int *reason_code,
    int control_only, int from_where,
    struct timeval *now, int *now_filled, int frag_point, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    )
{
	/**
	 * Ok this is the generic chunk service queue. we must do the
	 * following: - Service the stream queue that is next, moving any
	 * message (note I must get a complete message i.e. FIRST/MIDDLE and
	 * LAST to the out queue in one pass) and assigning TSN's - Check to
	 * see if the cwnd/rwnd allows any output, if so we go ahead and
	 * fomulate and send the low level chunks. Making sure to combine
	 * any control in the control chunk queue also.
8220 */ 8221 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL; 8222 struct mbuf *outchain, *endoutchain; 8223 struct sctp_tmit_chunk *chk, *nchk; 8224 8225 /* temp arrays for unlinking */ 8226 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 8227 int no_fragmentflg, error; 8228 unsigned int max_rwnd_per_dest, max_send_per_dest; 8229 int one_chunk, hbflag, skip_data_for_this_net; 8230 int asconf, cookie, no_out_cnt; 8231 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode; 8232 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out; 8233 int tsns_sent = 0; 8234 uint32_t auth_offset = 0; 8235 struct sctp_auth_chunk *auth = NULL; 8236 uint16_t auth_keyid; 8237 int override_ok = 1; 8238 int skip_fill_up = 0; 8239 int data_auth_reqd = 0; 8240 /* JRS 5/14/07 - Add flag for whether a heartbeat is sent to 8241 the destination. */ 8242 int quit_now = 0; 8243 8244 #if defined(__APPLE__) 8245 if (so_locked) { 8246 sctp_lock_assert(SCTP_INP_SO(inp)); 8247 } else { 8248 sctp_unlock_assert(SCTP_INP_SO(inp)); 8249 } 8250 #endif 8251 *num_out = 0; 8252 auth_keyid = stcb->asoc.authinfo.active_keyid; 8253 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 8254 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) || 8255 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { 8256 eeor_mode = 1; 8257 } else { 8258 eeor_mode = 0; 8259 } 8260 ctl_cnt = no_out_cnt = asconf = cookie = 0; 8261 /* 8262 * First lets prime the pump. For each destination, if there is room 8263 * in the flight size, attempt to pull an MTU's worth out of the 8264 * stream queues into the general send_queue 8265 */ 8266 #ifdef SCTP_AUDITING_ENABLED 8267 sctp_audit_log(0xC2, 2); 8268 #endif 8269 SCTP_TCB_LOCK_ASSERT(stcb); 8270 hbflag = 0; 8271 if ((control_only) || (asoc->stream_reset_outstanding)) 8272 no_data_chunks = 1; 8273 else 8274 no_data_chunks = 0; 8275 8276 /* Nothing to possible to send? 
*/ 8277 if ((TAILQ_EMPTY(&asoc->control_send_queue) || 8278 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) && 8279 TAILQ_EMPTY(&asoc->asconf_send_queue) && 8280 TAILQ_EMPTY(&asoc->send_queue) && 8281 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 8282 nothing_to_send: 8283 *reason_code = 9; 8284 return (0); 8285 } 8286 if (asoc->peers_rwnd == 0) { 8287 /* No room in peers rwnd */ 8288 *reason_code = 1; 8289 if (asoc->total_flight > 0) { 8290 /* we are allowed one chunk in flight */ 8291 no_data_chunks = 1; 8292 } 8293 } 8294 if (stcb->asoc.ecn_echo_cnt_onq) { 8295 /* Record where a sack goes, if any */ 8296 if (no_data_chunks && 8297 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) { 8298 /* Nothing but ECNe to send - we don't do that */ 8299 goto nothing_to_send; 8300 } 8301 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 8302 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 8303 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { 8304 sack_goes_to = chk->whoTo; 8305 break; 8306 } 8307 } 8308 } 8309 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets); 8310 if (stcb->sctp_socket) 8311 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets; 8312 else 8313 max_send_per_dest = 0; 8314 if (no_data_chunks == 0) { 8315 /* How many non-directed chunks are there? */ 8316 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 8317 if (chk->whoTo == NULL) { 8318 /* We already have non-directed 8319 * chunks on the queue, no need 8320 * to do a fill-up. 8321 */ 8322 skip_fill_up = 1; 8323 break; 8324 } 8325 } 8326 8327 } 8328 if ((no_data_chunks == 0) && 8329 (skip_fill_up == 0) && 8330 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) { 8331 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 8332 /* 8333 * This for loop we are in takes in 8334 * each net, if its's got space in cwnd and 8335 * has data sent to it (when CMT is off) then it 8336 * calls sctp_fill_outqueue for the net. 
This gets 8337 * data on the send queue for that network. 8338 * 8339 * In sctp_fill_outqueue TSN's are assigned and 8340 * data is copied out of the stream buffers. Note 8341 * mostly copy by reference (we hope). 8342 */ 8343 net->window_probe = 0; 8344 if ((net != stcb->asoc.alternate) && 8345 ((net->dest_state & SCTP_ADDR_PF) || 8346 (!(net->dest_state & SCTP_ADDR_REACHABLE)) || 8347 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 8348 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 8349 sctp_log_cwnd(stcb, net, 1, 8350 SCTP_CWND_LOG_FILL_OUTQ_CALLED); 8351 } 8352 continue; 8353 } 8354 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) && 8355 (net->flight_size == 0)) { 8356 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net); 8357 } 8358 if (net->flight_size >= net->cwnd) { 8359 /* skip this network, no room - can't fill */ 8360 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 8361 sctp_log_cwnd(stcb, net, 3, 8362 SCTP_CWND_LOG_FILL_OUTQ_CALLED); 8363 } 8364 continue; 8365 } 8366 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 8367 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED); 8368 } 8369 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked); 8370 if (quit_now) { 8371 /* memory alloc failure */ 8372 no_data_chunks = 1; 8373 break; 8374 } 8375 } 8376 } 8377 /* now service each destination and send out what we can for it */ 8378 /* Nothing to send? 
*/ 8379 if (TAILQ_EMPTY(&asoc->control_send_queue) && 8380 TAILQ_EMPTY(&asoc->asconf_send_queue) && 8381 TAILQ_EMPTY(&asoc->send_queue)) { 8382 *reason_code = 8; 8383 return (0); 8384 } 8385 8386 if (asoc->sctp_cmt_on_off > 0) { 8387 /* get the last start point */ 8388 start_at = asoc->last_net_cmt_send_started; 8389 if (start_at == NULL) { 8390 /* null so to beginning */ 8391 start_at = TAILQ_FIRST(&asoc->nets); 8392 } else { 8393 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next); 8394 if (start_at == NULL) { 8395 start_at = TAILQ_FIRST(&asoc->nets); 8396 } 8397 } 8398 asoc->last_net_cmt_send_started = start_at; 8399 } else { 8400 start_at = TAILQ_FIRST(&asoc->nets); 8401 } 8402 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 8403 if (chk->whoTo == NULL) { 8404 if (asoc->alternate) { 8405 chk->whoTo = asoc->alternate; 8406 } else { 8407 chk->whoTo = asoc->primary_destination; 8408 } 8409 atomic_add_int(&chk->whoTo->ref_count, 1); 8410 } 8411 } 8412 old_start_at = NULL; 8413 again_one_more_time: 8414 for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { 8415 /* how much can we send? */ 8416 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */ 8417 if (old_start_at && (old_start_at == net)) { 8418 /* through list ocmpletely. */ 8419 break; 8420 } 8421 tsns_sent = 0xa; 8422 if (TAILQ_EMPTY(&asoc->control_send_queue) && 8423 TAILQ_EMPTY(&asoc->asconf_send_queue) && 8424 (net->flight_size >= net->cwnd)) { 8425 /* Nothing on control or asconf and flight is full, we can skip 8426 * even in the CMT case. 
8427 */ 8428 continue; 8429 } 8430 bundle_at = 0; 8431 endoutchain = outchain = NULL; 8432 no_fragmentflg = 1; 8433 one_chunk = 0; 8434 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 8435 skip_data_for_this_net = 1; 8436 } else { 8437 skip_data_for_this_net = 0; 8438 } 8439 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) || defined(__APPLE__)) 8440 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) { 8441 /* 8442 * if we have a route and an ifp check to see if we 8443 * have room to send to this guy 8444 */ 8445 struct ifnet *ifp; 8446 8447 ifp = net->ro.ro_rt->rt_ifp; 8448 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) { 8449 SCTP_STAT_INCR(sctps_ifnomemqueued); 8450 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 8451 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED); 8452 } 8453 continue; 8454 } 8455 } 8456 #endif 8457 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 8458 #ifdef INET 8459 case AF_INET: 8460 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 8461 break; 8462 #endif 8463 #ifdef INET6 8464 case AF_INET6: 8465 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 8466 break; 8467 #endif 8468 #if defined(__Userspace__) 8469 case AF_CONN: 8470 mtu = net->mtu - sizeof(struct sctphdr); 8471 break; 8472 #endif 8473 default: 8474 /* TSNH */ 8475 mtu = net->mtu; 8476 break; 8477 } 8478 mx_mtu = mtu; 8479 to_out = 0; 8480 if (mtu > asoc->peers_rwnd) { 8481 if (asoc->total_flight > 0) { 8482 /* We have a packet in flight somewhere */ 8483 r_mtu = asoc->peers_rwnd; 8484 } else { 8485 /* We are always allowed to send one MTU out */ 8486 one_chunk = 1; 8487 r_mtu = mtu; 8488 } 8489 } else { 8490 r_mtu = mtu; 8491 } 8492 /************************/ 8493 /* ASCONF transmission */ 8494 /************************/ 8495 /* Now first lets go through the asconf queue */ 8496 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, 
sctp_next, nchk) { 8497 if (chk->rec.chunk_id.id != SCTP_ASCONF) { 8498 continue; 8499 } 8500 if (chk->whoTo == NULL) { 8501 if (asoc->alternate == NULL) { 8502 if (asoc->primary_destination != net) { 8503 break; 8504 } 8505 } else { 8506 if (asoc->alternate != net) { 8507 break; 8508 } 8509 } 8510 } else { 8511 if (chk->whoTo != net) { 8512 break; 8513 } 8514 } 8515 if (chk->data == NULL) { 8516 break; 8517 } 8518 if (chk->sent != SCTP_DATAGRAM_UNSENT && 8519 chk->sent != SCTP_DATAGRAM_RESEND) { 8520 break; 8521 } 8522 /* 8523 * if no AUTH is yet included and this chunk 8524 * requires it, make sure to account for it. We 8525 * don't apply the size until the AUTH chunk is 8526 * actually added below in case there is no room for 8527 * this chunk. NOTE: we overload the use of "omtu" 8528 * here 8529 */ 8530 if ((auth == NULL) && 8531 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 8532 stcb->asoc.peer_auth_chunks)) { 8533 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 8534 } else 8535 omtu = 0; 8536 /* Here we do NOT factor the r_mtu */ 8537 if ((chk->send_size < (int)(mtu - omtu)) || 8538 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 8539 /* 8540 * We probably should glom the mbuf chain 8541 * from the chk->data for control but the 8542 * problem is it becomes yet one more level 8543 * of tracking to do if for some reason 8544 * output fails. Then I have got to 8545 * reconstruct the merged control chain.. el 8546 * yucko.. 
for now we take the easy way and 8547 * do the copy 8548 */ 8549 /* 8550 * Add an AUTH chunk, if chunk requires it 8551 * save the offset into the chain for AUTH 8552 */ 8553 if ((auth == NULL) && 8554 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 8555 stcb->asoc.peer_auth_chunks))) { 8556 outchain = sctp_add_auth_chunk(outchain, 8557 &endoutchain, 8558 &auth, 8559 &auth_offset, 8560 stcb, 8561 chk->rec.chunk_id.id); 8562 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8563 } 8564 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 8565 (int)chk->rec.chunk_id.can_take_data, 8566 chk->send_size, chk->copy_by_ref); 8567 if (outchain == NULL) { 8568 *reason_code = 8; 8569 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 8570 return (ENOMEM); 8571 } 8572 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8573 /* update our MTU size */ 8574 if (mtu > (chk->send_size + omtu)) 8575 mtu -= (chk->send_size + omtu); 8576 else 8577 mtu = 0; 8578 to_out += (chk->send_size + omtu); 8579 /* Do clear IP_DF ? */ 8580 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 8581 no_fragmentflg = 0; 8582 } 8583 if (chk->rec.chunk_id.can_take_data) 8584 chk->data = NULL; 8585 /* 8586 * set hb flag since we can 8587 * use these for RTO 8588 */ 8589 hbflag = 1; 8590 asconf = 1; 8591 /* 8592 * should sysctl this: don't 8593 * bundle data with ASCONF 8594 * since it requires AUTH 8595 */ 8596 no_data_chunks = 1; 8597 chk->sent = SCTP_DATAGRAM_SENT; 8598 if (chk->whoTo == NULL) { 8599 chk->whoTo = net; 8600 atomic_add_int(&net->ref_count, 1); 8601 } 8602 chk->snd_count++; 8603 if (mtu == 0) { 8604 /* 8605 * Ok we are out of room but we can 8606 * output without effecting the 8607 * flight size since this little guy 8608 * is a control only packet. 8609 */ 8610 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 8611 /* 8612 * do NOT clear the asconf 8613 * flag as it is used to do 8614 * appropriate source address 8615 * selection. 
8616 */ 8617 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 8618 (struct sockaddr *)&net->ro._l_addr, 8619 outchain, auth_offset, auth, 8620 stcb->asoc.authinfo.active_keyid, 8621 no_fragmentflg, 0, asconf, 8622 inp->sctp_lport, stcb->rport, 8623 htonl(stcb->asoc.peer_vtag), 8624 net->port, NULL, 8625 #if defined(__FreeBSD__) 8626 0, 0, 8627 #endif 8628 so_locked))) { 8629 if (error == ENOBUFS) { 8630 asoc->ifp_had_enobuf = 1; 8631 SCTP_STAT_INCR(sctps_lowlevelerr); 8632 } 8633 if (from_where == 0) { 8634 SCTP_STAT_INCR(sctps_lowlevelerrusr); 8635 } 8636 if (*now_filled == 0) { 8637 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 8638 *now_filled = 1; 8639 *now = net->last_sent_time; 8640 } else { 8641 net->last_sent_time = *now; 8642 } 8643 hbflag = 0; 8644 /* error, could not output */ 8645 if (error == EHOSTUNREACH) { 8646 /* 8647 * Destination went 8648 * unreachable 8649 * during this send 8650 */ 8651 sctp_move_chunks_from_net(stcb, net); 8652 } 8653 *reason_code = 7; 8654 continue; 8655 } else 8656 asoc->ifp_had_enobuf = 0; 8657 if (*now_filled == 0) { 8658 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 8659 *now_filled = 1; 8660 *now = net->last_sent_time; 8661 } else { 8662 net->last_sent_time = *now; 8663 } 8664 hbflag = 0; 8665 /* 8666 * increase the number we sent, if a 8667 * cookie is sent we don't tell them 8668 * any was sent out. 
8669 */ 8670 outchain = endoutchain = NULL; 8671 auth = NULL; 8672 auth_offset = 0; 8673 if (!no_out_cnt) 8674 *num_out += ctl_cnt; 8675 /* recalc a clean slate and setup */ 8676 switch (net->ro._l_addr.sa.sa_family) { 8677 #ifdef INET 8678 case AF_INET: 8679 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 8680 break; 8681 #endif 8682 #ifdef INET6 8683 case AF_INET6: 8684 mtu = net->mtu - SCTP_MIN_OVERHEAD; 8685 break; 8686 #endif 8687 #if defined(__Userspace__) 8688 case AF_CONN: 8689 mtu = net->mtu - sizeof(struct sctphdr); 8690 break; 8691 #endif 8692 default: 8693 /* TSNH */ 8694 mtu = net->mtu; 8695 break; 8696 } 8697 to_out = 0; 8698 no_fragmentflg = 1; 8699 } 8700 } 8701 } 8702 /************************/ 8703 /* Control transmission */ 8704 /************************/ 8705 /* Now first lets go through the control queue */ 8706 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { 8707 if ((sack_goes_to) && 8708 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) && 8709 (chk->whoTo != sack_goes_to)) { 8710 /* 8711 * if we have a sack in queue, and we are looking at an 8712 * ecn echo that is NOT queued to where the sack is going.. 8713 */ 8714 if (chk->whoTo == net) { 8715 /* Don't transmit it to where its going (current net) */ 8716 continue; 8717 } else if (sack_goes_to == net) { 8718 /* But do transmit it to this address */ 8719 goto skip_net_check; 8720 } 8721 } 8722 if (chk->whoTo == NULL) { 8723 if (asoc->alternate == NULL) { 8724 if (asoc->primary_destination != net) { 8725 continue; 8726 } 8727 } else { 8728 if (asoc->alternate != net) { 8729 continue; 8730 } 8731 } 8732 } else { 8733 if (chk->whoTo != net) { 8734 continue; 8735 } 8736 } 8737 skip_net_check: 8738 if (chk->data == NULL) { 8739 continue; 8740 } 8741 if (chk->sent != SCTP_DATAGRAM_UNSENT) { 8742 /* 8743 * It must be unsent. Cookies and ASCONF's 8744 * hang around but there timers will force 8745 * when marked for resend. 
8746 */ 8747 continue; 8748 } 8749 /* 8750 * if no AUTH is yet included and this chunk 8751 * requires it, make sure to account for it. We 8752 * don't apply the size until the AUTH chunk is 8753 * actually added below in case there is no room for 8754 * this chunk. NOTE: we overload the use of "omtu" 8755 * here 8756 */ 8757 if ((auth == NULL) && 8758 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 8759 stcb->asoc.peer_auth_chunks)) { 8760 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 8761 } else 8762 omtu = 0; 8763 /* Here we do NOT factor the r_mtu */ 8764 if ((chk->send_size <= (int)(mtu - omtu)) || 8765 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 8766 /* 8767 * We probably should glom the mbuf chain 8768 * from the chk->data for control but the 8769 * problem is it becomes yet one more level 8770 * of tracking to do if for some reason 8771 * output fails. Then I have got to 8772 * reconstruct the merged control chain.. el 8773 * yucko.. for now we take the easy way and 8774 * do the copy 8775 */ 8776 /* 8777 * Add an AUTH chunk, if chunk requires it 8778 * save the offset into the chain for AUTH 8779 */ 8780 if ((auth == NULL) && 8781 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 8782 stcb->asoc.peer_auth_chunks))) { 8783 outchain = sctp_add_auth_chunk(outchain, 8784 &endoutchain, 8785 &auth, 8786 &auth_offset, 8787 stcb, 8788 chk->rec.chunk_id.id); 8789 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8790 } 8791 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 8792 (int)chk->rec.chunk_id.can_take_data, 8793 chk->send_size, chk->copy_by_ref); 8794 if (outchain == NULL) { 8795 *reason_code = 8; 8796 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 8797 return (ENOMEM); 8798 } 8799 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8800 /* update our MTU size */ 8801 if (mtu > (chk->send_size + omtu)) 8802 mtu -= (chk->send_size + omtu); 8803 else 8804 mtu = 0; 8805 to_out += (chk->send_size + omtu); 8806 /* 
Do clear IP_DF ? */ 8807 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 8808 no_fragmentflg = 0; 8809 } 8810 if (chk->rec.chunk_id.can_take_data) 8811 chk->data = NULL; 8812 /* Mark things to be removed, if needed */ 8813 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 8814 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */ 8815 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 8816 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 8817 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 8818 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 8819 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 8820 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 8821 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 8822 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 8823 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 8824 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) { 8825 hbflag = 1; 8826 } 8827 /* remove these chunks at the end */ 8828 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 8829 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { 8830 /* turn off the timer */ 8831 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 8832 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 8833 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_1); 8834 } 8835 } 8836 ctl_cnt++; 8837 } else { 8838 /* 8839 * Other chunks, since they have 8840 * timers running (i.e. COOKIE) 8841 * we just "trust" that it 8842 * gets sent or retransmitted. 8843 */ 8844 ctl_cnt++; 8845 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 8846 cookie = 1; 8847 no_out_cnt = 1; 8848 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 8849 /* 8850 * Increment ecne send count here 8851 * this means we may be over-zealous in 8852 * our counting if the send fails, but its 8853 * the best place to do it (we used to do 8854 * it in the queue of the chunk, but that did 8855 * not tell how many times it was sent. 
8856 */ 8857 SCTP_STAT_INCR(sctps_sendecne); 8858 } 8859 chk->sent = SCTP_DATAGRAM_SENT; 8860 if (chk->whoTo == NULL) { 8861 chk->whoTo = net; 8862 atomic_add_int(&net->ref_count, 1); 8863 } 8864 chk->snd_count++; 8865 } 8866 if (mtu == 0) { 8867 /* 8868 * Ok we are out of room but we can 8869 * output without effecting the 8870 * flight size since this little guy 8871 * is a control only packet. 8872 */ 8873 if (asconf) { 8874 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 8875 /* 8876 * do NOT clear the asconf 8877 * flag as it is used to do 8878 * appropriate source address 8879 * selection. 8880 */ 8881 } 8882 if (cookie) { 8883 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 8884 cookie = 0; 8885 } 8886 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 8887 (struct sockaddr *)&net->ro._l_addr, 8888 outchain, 8889 auth_offset, auth, 8890 stcb->asoc.authinfo.active_keyid, 8891 no_fragmentflg, 0, asconf, 8892 inp->sctp_lport, stcb->rport, 8893 htonl(stcb->asoc.peer_vtag), 8894 net->port, NULL, 8895 #if defined(__FreeBSD__) 8896 0, 0, 8897 #endif 8898 so_locked))) { 8899 if (error == ENOBUFS) { 8900 asoc->ifp_had_enobuf = 1; 8901 SCTP_STAT_INCR(sctps_lowlevelerr); 8902 } 8903 if (from_where == 0) { 8904 SCTP_STAT_INCR(sctps_lowlevelerrusr); 8905 } 8906 /* error, could not output */ 8907 if (hbflag) { 8908 if (*now_filled == 0) { 8909 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 8910 *now_filled = 1; 8911 *now = net->last_sent_time; 8912 } else { 8913 net->last_sent_time = *now; 8914 } 8915 hbflag = 0; 8916 } 8917 if (error == EHOSTUNREACH) { 8918 /* 8919 * Destination went 8920 * unreachable 8921 * during this send 8922 */ 8923 sctp_move_chunks_from_net(stcb, net); 8924 } 8925 *reason_code = 7; 8926 continue; 8927 } else 8928 asoc->ifp_had_enobuf = 0; 8929 /* Only HB or ASCONF advances time */ 8930 if (hbflag) { 8931 if (*now_filled == 0) { 8932 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 8933 *now_filled = 1; 8934 *now = 
net->last_sent_time; 8935 } else { 8936 net->last_sent_time = *now; 8937 } 8938 hbflag = 0; 8939 } 8940 /* 8941 * increase the number we sent, if a 8942 * cookie is sent we don't tell them 8943 * any was sent out. 8944 */ 8945 outchain = endoutchain = NULL; 8946 auth = NULL; 8947 auth_offset = 0; 8948 if (!no_out_cnt) 8949 *num_out += ctl_cnt; 8950 /* recalc a clean slate and setup */ 8951 switch (net->ro._l_addr.sa.sa_family) { 8952 #ifdef INET 8953 case AF_INET: 8954 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 8955 break; 8956 #endif 8957 #ifdef INET6 8958 case AF_INET6: 8959 mtu = net->mtu - SCTP_MIN_OVERHEAD; 8960 break; 8961 #endif 8962 #if defined(__Userspace__) 8963 case AF_CONN: 8964 mtu = net->mtu - sizeof(struct sctphdr); 8965 break; 8966 #endif 8967 default: 8968 /* TSNH */ 8969 mtu = net->mtu; 8970 break; 8971 } 8972 to_out = 0; 8973 no_fragmentflg = 1; 8974 } 8975 } 8976 } 8977 /* JRI: if dest is in PF state, do not send data to it */ 8978 if ((asoc->sctp_cmt_on_off > 0) && 8979 (net != stcb->asoc.alternate) && 8980 (net->dest_state & SCTP_ADDR_PF)) { 8981 goto no_data_fill; 8982 } 8983 if (net->flight_size >= net->cwnd) { 8984 goto no_data_fill; 8985 } 8986 if ((asoc->sctp_cmt_on_off > 0) && 8987 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) && 8988 (net->flight_size > max_rwnd_per_dest)) { 8989 goto no_data_fill; 8990 } 8991 /* 8992 * We need a specific accounting for the usage of the 8993 * send buffer. We also need to check the number of messages 8994 * per net. For now, this is better than nothing and it 8995 * disabled by default... 
8996 */ 8997 if ((asoc->sctp_cmt_on_off > 0) && 8998 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) && 8999 (max_send_per_dest > 0) && 9000 (net->flight_size > max_send_per_dest)) { 9001 goto no_data_fill; 9002 } 9003 /*********************/ 9004 /* Data transmission */ 9005 /*********************/ 9006 /* 9007 * if AUTH for DATA is required and no AUTH has been added 9008 * yet, account for this in the mtu now... if no data can be 9009 * bundled, this adjustment won't matter anyways since the 9010 * packet will be going out... 9011 */ 9012 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, 9013 stcb->asoc.peer_auth_chunks); 9014 if (data_auth_reqd && (auth == NULL)) { 9015 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 9016 } 9017 /* now lets add any data within the MTU constraints */ 9018 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 9019 #ifdef INET 9020 case AF_INET: 9021 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr))) 9022 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 9023 else 9024 omtu = 0; 9025 break; 9026 #endif 9027 #ifdef INET6 9028 case AF_INET6: 9029 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr))) 9030 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 9031 else 9032 omtu = 0; 9033 break; 9034 #endif 9035 #if defined(__Userspace__) 9036 case AF_CONN: 9037 if (net->mtu > sizeof(struct sctphdr)) { 9038 omtu = net->mtu - sizeof(struct sctphdr); 9039 } else { 9040 omtu = 0; 9041 } 9042 break; 9043 #endif 9044 default: 9045 /* TSNH */ 9046 omtu = 0; 9047 break; 9048 } 9049 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) && 9050 (skip_data_for_this_net == 0)) || 9051 (cookie)) { 9052 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 9053 if (no_data_chunks) { 9054 /* let only control go out */ 9055 *reason_code = 1; 9056 break; 9057 } 9058 if (net->flight_size >= net->cwnd) { 9059 /* skip this net, no room for data */ 
9060 *reason_code = 2; 9061 break; 9062 } 9063 if ((chk->whoTo != NULL) && 9064 (chk->whoTo != net)) { 9065 /* Don't send the chunk on this net */ 9066 continue; 9067 } 9068 9069 if (asoc->sctp_cmt_on_off == 0) { 9070 if ((asoc->alternate) && 9071 (asoc->alternate != net) && 9072 (chk->whoTo == NULL)) { 9073 continue; 9074 } else if ((net != asoc->primary_destination) && 9075 (asoc->alternate == NULL) && 9076 (chk->whoTo == NULL)) { 9077 continue; 9078 } 9079 } 9080 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) { 9081 /*- 9082 * strange, we have a chunk that is 9083 * to big for its destination and 9084 * yet no fragment ok flag. 9085 * Something went wrong when the 9086 * PMTU changed...we did not mark 9087 * this chunk for some reason?? I 9088 * will fix it here by letting IP 9089 * fragment it for now and printing 9090 * a warning. This really should not 9091 * happen ... 9092 */ 9093 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n", 9094 chk->send_size, mtu); 9095 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 9096 } 9097 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && 9098 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) { 9099 struct sctp_data_chunk *dchkh; 9100 9101 dchkh = mtod(chk->data, struct sctp_data_chunk *); 9102 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY; 9103 } 9104 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) || 9105 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) { 9106 /* ok we will add this one */ 9107 9108 /* 9109 * Add an AUTH chunk, if chunk 9110 * requires it, save the offset into 9111 * the chain for AUTH 9112 */ 9113 if (data_auth_reqd) { 9114 if (auth == NULL) { 9115 outchain = sctp_add_auth_chunk(outchain, 9116 &endoutchain, 9117 &auth, 9118 &auth_offset, 9119 stcb, 9120 SCTP_DATA); 9121 auth_keyid = chk->auth_keyid; 9122 override_ok = 0; 9123 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 
9124 } else if (override_ok) { 9125 /* use this data's keyid */ 9126 auth_keyid = chk->auth_keyid; 9127 override_ok = 0; 9128 } else if (auth_keyid != chk->auth_keyid) { 9129 /* different keyid, so done bundling */ 9130 break; 9131 } 9132 } 9133 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0, 9134 chk->send_size, chk->copy_by_ref); 9135 if (outchain == NULL) { 9136 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n"); 9137 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 9138 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 9139 } 9140 *reason_code = 3; 9141 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 9142 return (ENOMEM); 9143 } 9144 /* upate our MTU size */ 9145 /* Do clear IP_DF ? */ 9146 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 9147 no_fragmentflg = 0; 9148 } 9149 /* unsigned subtraction of mtu */ 9150 if (mtu > chk->send_size) 9151 mtu -= chk->send_size; 9152 else 9153 mtu = 0; 9154 /* unsigned subtraction of r_mtu */ 9155 if (r_mtu > chk->send_size) 9156 r_mtu -= chk->send_size; 9157 else 9158 r_mtu = 0; 9159 9160 to_out += chk->send_size; 9161 if ((to_out > mx_mtu) && no_fragmentflg) { 9162 #ifdef INVARIANTS 9163 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out); 9164 #else 9165 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n", 9166 mx_mtu, to_out); 9167 #endif 9168 } 9169 chk->window_probe = 0; 9170 data_list[bundle_at++] = chk; 9171 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 9172 break; 9173 } 9174 if (chk->sent == SCTP_DATAGRAM_UNSENT) { 9175 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 9176 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks); 9177 } else { 9178 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks); 9179 } 9180 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) && 9181 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) 9182 /* Count number of user msg's that were fragmented 9183 * we do this by counting when we see a LAST fragment 9184 * 
only. 9185 */ 9186 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs); 9187 } 9188 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) { 9189 if ((one_chunk) && (stcb->asoc.total_flight == 0)) { 9190 data_list[0]->window_probe = 1; 9191 net->window_probe = 1; 9192 } 9193 break; 9194 } 9195 } else { 9196 /* 9197 * Must be sent in order of the 9198 * TSN's (on a network) 9199 */ 9200 break; 9201 } 9202 } /* for (chunk gather loop for this net) */ 9203 } /* if asoc.state OPEN */ 9204 no_data_fill: 9205 /* Is there something to send for this destination? */ 9206 if (outchain) { 9207 /* We may need to start a control timer or two */ 9208 if (asconf) { 9209 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, 9210 stcb, net); 9211 /* 9212 * do NOT clear the asconf flag as it is used 9213 * to do appropriate source address selection. 9214 */ 9215 } 9216 if (cookie) { 9217 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 9218 cookie = 0; 9219 } 9220 /* must start a send timer if data is being sent */ 9221 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { 9222 /* 9223 * no timer running on this destination 9224 * restart it. 
9225 */ 9226 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 9227 } 9228 /* Now send it, if there is anything to send :> */ 9229 if ((error = sctp_lowlevel_chunk_output(inp, 9230 stcb, 9231 net, 9232 (struct sockaddr *)&net->ro._l_addr, 9233 outchain, 9234 auth_offset, 9235 auth, 9236 auth_keyid, 9237 no_fragmentflg, 9238 bundle_at, 9239 asconf, 9240 inp->sctp_lport, stcb->rport, 9241 htonl(stcb->asoc.peer_vtag), 9242 net->port, NULL, 9243 #if defined(__FreeBSD__) 9244 0, 0, 9245 #endif 9246 so_locked))) { 9247 /* error, we could not output */ 9248 if (error == ENOBUFS) { 9249 SCTP_STAT_INCR(sctps_lowlevelerr); 9250 asoc->ifp_had_enobuf = 1; 9251 } 9252 if (from_where == 0) { 9253 SCTP_STAT_INCR(sctps_lowlevelerrusr); 9254 } 9255 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 9256 if (hbflag) { 9257 if (*now_filled == 0) { 9258 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 9259 *now_filled = 1; 9260 *now = net->last_sent_time; 9261 } else { 9262 net->last_sent_time = *now; 9263 } 9264 hbflag = 0; 9265 } 9266 if (error == EHOSTUNREACH) { 9267 /* 9268 * Destination went unreachable 9269 * during this send 9270 */ 9271 sctp_move_chunks_from_net(stcb, net); 9272 } 9273 *reason_code = 6; 9274 /*- 9275 * I add this line to be paranoid. As far as 9276 * I can tell the continue, takes us back to 9277 * the top of the for, but just to make sure 9278 * I will reset these again here. 9279 */ 9280 ctl_cnt = bundle_at = 0; 9281 continue; /* This takes us back to the for() for the nets. 
*/ 9282 } else { 9283 asoc->ifp_had_enobuf = 0; 9284 } 9285 endoutchain = NULL; 9286 auth = NULL; 9287 auth_offset = 0; 9288 if (bundle_at || hbflag) { 9289 /* For data/asconf and hb set time */ 9290 if (*now_filled == 0) { 9291 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 9292 *now_filled = 1; 9293 *now = net->last_sent_time; 9294 } else { 9295 net->last_sent_time = *now; 9296 } 9297 } 9298 if (!no_out_cnt) { 9299 *num_out += (ctl_cnt + bundle_at); 9300 } 9301 if (bundle_at) { 9302 /* setup for a RTO measurement */ 9303 tsns_sent = data_list[0]->rec.data.TSN_seq; 9304 /* fill time if not already filled */ 9305 if (*now_filled == 0) { 9306 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); 9307 *now_filled = 1; 9308 *now = asoc->time_last_sent; 9309 } else { 9310 asoc->time_last_sent = *now; 9311 } 9312 if (net->rto_needed) { 9313 data_list[0]->do_rtt = 1; 9314 net->rto_needed = 0; 9315 } 9316 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at); 9317 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net); 9318 } 9319 if (one_chunk) { 9320 break; 9321 } 9322 } 9323 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 9324 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND); 9325 } 9326 } 9327 if (old_start_at == NULL) { 9328 old_start_at = start_at; 9329 start_at = TAILQ_FIRST(&asoc->nets); 9330 if (old_start_at) 9331 goto again_one_more_time; 9332 } 9333 9334 /* 9335 * At the end there should be no NON timed chunks hanging on this 9336 * queue. 
 */
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
        sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
    }
    /* reason code 4: nothing was counted as sent; 5: something was */
    if ((*num_out == 0) && (*reason_code == 0)) {
        *reason_code = 4;
    } else {
        *reason_code = 5;
    }
    sctp_clean_up_ctl(stcb, asoc, so_locked);
    return (0);
}

void
sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
{
    /*-
     * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
     * the control chunk queue.
     */
    struct sctp_chunkhdr *hdr;
    struct sctp_tmit_chunk *chk;
    struct mbuf *mat;

    SCTP_TCB_LOCK_ASSERT(stcb);
    sctp_alloc_a_chunk(stcb, chk);
    if (chk == NULL) {
        /* no memory */
        sctp_m_freem(op_err);
        return;
    }
    chk->copy_by_ref = 0;
    /*
     * Make room for the chunk header in front of the error causes;
     * on allocation failure the macro leaves op_err NULL, in which
     * case the freshly allocated chunk must be released again.
     */
    SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
    if (op_err == NULL) {
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        return;
    }
    /* total length of the (possibly chained) mbufs is the chunk length */
    chk->send_size = 0;
    mat = op_err;
    while (mat != NULL) {
        chk->send_size += SCTP_BUF_LEN(mat);
        mat = SCTP_BUF_NEXT(mat);
    }
    chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
    chk->rec.chunk_id.can_take_data = 1;
    chk->sent = SCTP_DATAGRAM_UNSENT;
    chk->snd_count = 0;
    chk->flags = 0;
    chk->asoc = &stcb->asoc;
    chk->data = op_err;
    /*
     * No destination yet; a net (alternate or primary) is assigned
     * when the control queue is serviced by the output path.
     */
    chk->whoTo = NULL;
    hdr = mtod(op_err, struct sctp_chunkhdr *);
    hdr->chunk_type = SCTP_OPERATION_ERROR;
    hdr->chunk_flags = 0;
    hdr->chunk_length = htons(chk->send_size);
    TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
        chk,
        sctp_next);
    chk->asoc->ctrl_queue_cnt++;
}

int
sctp_send_cookie_echo(struct mbuf *m,
    int offset,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    /*-
     * pull out the cookie and put it at the front of the control chunk
     * queue.
 */
    /*
     * Returns 0 on success, -2 when the cookie copy fails for lack of
     * memory, -3 when no STATE-COOKIE parameter is found in the INIT-ACK,
     * and -5 when no chunk structure can be allocated.
     */
    int at;
    struct mbuf *cookie;
    struct sctp_paramhdr parm, *phdr;
    struct sctp_chunkhdr *hdr;
    struct sctp_tmit_chunk *chk;
    uint16_t ptype, plen;

    /* First find the cookie in the param area */
    cookie = NULL;
    at = offset + sizeof(struct sctp_init_chunk);

    SCTP_TCB_LOCK_ASSERT(stcb);
    do {
        phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
        if (phdr == NULL) {
            return (-3);
        }
        ptype = ntohs(phdr->param_type);
        plen = ntohs(phdr->param_length);
        if (ptype == SCTP_STATE_COOKIE) {
            int pad;

            /* found the cookie */
            /*
             * Parameter TLVs are padded to a 4-byte boundary;
             * round plen up so the pad bytes are copied too.
             */
            if ((pad = (plen % 4))) {
                plen += 4 - pad;
            }
            cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
            if (cookie == NULL) {
                /* No memory */
                return (-2);
            }
#ifdef SCTP_MBUF_LOGGING
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
                struct mbuf *mat;

                for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
                    if (SCTP_BUF_IS_EXTENDED(mat)) {
                        sctp_log_mb(mat, SCTP_MBUF_ICOPY);
                    }
                }
            }
#endif
            break;
        }
        at += SCTP_SIZE32(plen);
    } while (phdr);
    if (cookie == NULL) {
        /* Did not find the cookie */
        return (-3);
    }
    /* ok, we got the cookie lets change it into a cookie echo chunk */

    /*
     * first the change from param to cookie: the parameter header is
     * rewritten in place into a COOKIE-ECHO chunk header (both are
     * 4 bytes of type/flags + length).
     */
    hdr = mtod(cookie, struct sctp_chunkhdr *);
    hdr->chunk_type = SCTP_COOKIE_ECHO;
    hdr->chunk_flags = 0;
    /* get the chunk stuff now and place it in the FRONT of the queue */
    sctp_alloc_a_chunk(stcb, chk);
    if (chk == NULL) {
        /* no memory */
        sctp_m_freem(cookie);
        return (-5);
    }
    chk->copy_by_ref = 0;
    chk->send_size = plen;
    chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
    chk->rec.chunk_id.can_take_data = 0;
    chk->sent = SCTP_DATAGRAM_UNSENT;
    chk->snd_count = 0;
    /* a cookie can be larger than the path MTU; let it be fragmented */
    chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
    chk->asoc = &stcb->asoc;
    chk->data = cookie;
    chk->whoTo = net;
    atomic_add_int(&chk->whoTo->ref_count, 1);
    TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
    chk->asoc->ctrl_queue_cnt++;
    return (0);
}

void
sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
    struct mbuf *m,
    int offset,
    int chk_length,
    struct sctp_nets *net)
{
    /*
     * take a HB request and make it into a HB ack and send it.
     */
    struct mbuf *outchain;
    struct sctp_chunkhdr *chdr;
    struct sctp_tmit_chunk *chk;


    if (net == NULL)
        /* must have a net pointer */
        return;

    /* copy the received HEARTBEAT (header + info parameter) verbatim */
    outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
    if (outchain == NULL) {
        /* gak out of memory */
        return;
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
        struct mbuf *mat;

        for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
            if (SCTP_BUF_IS_EXTENDED(mat)) {
                sctp_log_mb(mat, SCTP_MBUF_ICOPY);
            }
        }
    }
#endif
    /* flip the chunk type in place: request becomes ack */
    chdr = mtod(outchain, struct sctp_chunkhdr *);
    chdr->chunk_type = SCTP_HEARTBEAT_ACK;
    chdr->chunk_flags = 0;
    if (chk_length % 4) {
        /* need pad */
        /* append zero bytes so the chunk ends on a 4-byte boundary */
        uint32_t cpthis = 0;
        int padlen;

        padlen = 4 - (chk_length % 4);
        m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
    }
    sctp_alloc_a_chunk(stcb, chk);
    if (chk == NULL) {
        /* no memory */
        sctp_m_freem(outchain);
        return;
    }
    chk->copy_by_ref = 0;
    chk->send_size = chk_length;
    chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
    chk->rec.chunk_id.can_take_data = 1;
    chk->sent = SCTP_DATAGRAM_UNSENT;
    chk->snd_count = 0;
    chk->flags = 0;
    chk->asoc = &stcb->asoc;
    chk->data = outchain;
    chk->whoTo = net;
    atomic_add_int(&chk->whoTo->ref_count, 1);
    TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9551 chk->asoc->ctrl_queue_cnt++; 9552 } 9553 9554 void 9555 sctp_send_cookie_ack(struct sctp_tcb *stcb) 9556 { 9557 /* formulate and queue a cookie-ack back to sender */ 9558 struct mbuf *cookie_ack; 9559 struct sctp_chunkhdr *hdr; 9560 struct sctp_tmit_chunk *chk; 9561 9562 SCTP_TCB_LOCK_ASSERT(stcb); 9563 9564 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER); 9565 if (cookie_ack == NULL) { 9566 /* no mbuf's */ 9567 return; 9568 } 9569 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD); 9570 sctp_alloc_a_chunk(stcb, chk); 9571 if (chk == NULL) { 9572 /* no memory */ 9573 sctp_m_freem(cookie_ack); 9574 return; 9575 } 9576 chk->copy_by_ref = 0; 9577 chk->send_size = sizeof(struct sctp_chunkhdr); 9578 chk->rec.chunk_id.id = SCTP_COOKIE_ACK; 9579 chk->rec.chunk_id.can_take_data = 1; 9580 chk->sent = SCTP_DATAGRAM_UNSENT; 9581 chk->snd_count = 0; 9582 chk->flags = 0; 9583 chk->asoc = &stcb->asoc; 9584 chk->data = cookie_ack; 9585 if (chk->asoc->last_control_chunk_from != NULL) { 9586 chk->whoTo = chk->asoc->last_control_chunk_from; 9587 atomic_add_int(&chk->whoTo->ref_count, 1); 9588 } else { 9589 chk->whoTo = NULL; 9590 } 9591 hdr = mtod(cookie_ack, struct sctp_chunkhdr *); 9592 hdr->chunk_type = SCTP_COOKIE_ACK; 9593 hdr->chunk_flags = 0; 9594 hdr->chunk_length = htons(chk->send_size); 9595 SCTP_BUF_LEN(cookie_ack) = chk->send_size; 9596 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 9597 chk->asoc->ctrl_queue_cnt++; 9598 return; 9599 } 9600 9601 9602 void 9603 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net) 9604 { 9605 /* formulate and queue a SHUTDOWN-ACK back to the sender */ 9606 struct mbuf *m_shutdown_ack; 9607 struct sctp_shutdown_ack_chunk *ack_cp; 9608 struct sctp_tmit_chunk *chk; 9609 9610 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER); 9611 if (m_shutdown_ack == NULL) { 9612 /* no mbuf's */ 9613 return; 9614 } 9615 
SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD); 9616 sctp_alloc_a_chunk(stcb, chk); 9617 if (chk == NULL) { 9618 /* no memory */ 9619 sctp_m_freem(m_shutdown_ack); 9620 return; 9621 } 9622 chk->copy_by_ref = 0; 9623 chk->send_size = sizeof(struct sctp_chunkhdr); 9624 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK; 9625 chk->rec.chunk_id.can_take_data = 1; 9626 chk->sent = SCTP_DATAGRAM_UNSENT; 9627 chk->snd_count = 0; 9628 chk->flags = 0; 9629 chk->asoc = &stcb->asoc; 9630 chk->data = m_shutdown_ack; 9631 chk->whoTo = net; 9632 if (chk->whoTo) { 9633 atomic_add_int(&chk->whoTo->ref_count, 1); 9634 } 9635 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *); 9636 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK; 9637 ack_cp->ch.chunk_flags = 0; 9638 ack_cp->ch.chunk_length = htons(chk->send_size); 9639 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size; 9640 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 9641 chk->asoc->ctrl_queue_cnt++; 9642 return; 9643 } 9644 9645 void 9646 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net) 9647 { 9648 /* formulate and queue a SHUTDOWN to the sender */ 9649 struct mbuf *m_shutdown; 9650 struct sctp_shutdown_chunk *shutdown_cp; 9651 struct sctp_tmit_chunk *chk; 9652 9653 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER); 9654 if (m_shutdown == NULL) { 9655 /* no mbuf's */ 9656 return; 9657 } 9658 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD); 9659 sctp_alloc_a_chunk(stcb, chk); 9660 if (chk == NULL) { 9661 /* no memory */ 9662 sctp_m_freem(m_shutdown); 9663 return; 9664 } 9665 chk->copy_by_ref = 0; 9666 chk->send_size = sizeof(struct sctp_shutdown_chunk); 9667 chk->rec.chunk_id.id = SCTP_SHUTDOWN; 9668 chk->rec.chunk_id.can_take_data = 1; 9669 chk->sent = SCTP_DATAGRAM_UNSENT; 9670 chk->snd_count = 0; 9671 chk->flags = 0; 9672 chk->asoc = &stcb->asoc; 9673 chk->data = m_shutdown; 9674 chk->whoTo = net; 9675 if (chk->whoTo) { 9676 
atomic_add_int(&chk->whoTo->ref_count, 1); 9677 } 9678 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *); 9679 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN; 9680 shutdown_cp->ch.chunk_flags = 0; 9681 shutdown_cp->ch.chunk_length = htons(chk->send_size); 9682 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn); 9683 SCTP_BUF_LEN(m_shutdown) = chk->send_size; 9684 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 9685 chk->asoc->ctrl_queue_cnt++; 9686 return; 9687 } 9688 9689 void 9690 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked) 9691 { 9692 /* 9693 * formulate and queue an ASCONF to the peer. 9694 * ASCONF parameters should be queued on the assoc queue. 9695 */ 9696 struct sctp_tmit_chunk *chk; 9697 struct mbuf *m_asconf; 9698 int len; 9699 9700 SCTP_TCB_LOCK_ASSERT(stcb); 9701 9702 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) && 9703 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) { 9704 /* can't send a new one if there is one in flight already */ 9705 return; 9706 } 9707 9708 /* compose an ASCONF chunk, maximum length is PMTU */ 9709 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked); 9710 if (m_asconf == NULL) { 9711 return; 9712 } 9713 9714 sctp_alloc_a_chunk(stcb, chk); 9715 if (chk == NULL) { 9716 /* no memory */ 9717 sctp_m_freem(m_asconf); 9718 return; 9719 } 9720 9721 chk->copy_by_ref = 0; 9722 chk->data = m_asconf; 9723 chk->send_size = len; 9724 chk->rec.chunk_id.id = SCTP_ASCONF; 9725 chk->rec.chunk_id.can_take_data = 0; 9726 chk->sent = SCTP_DATAGRAM_UNSENT; 9727 chk->snd_count = 0; 9728 chk->flags = CHUNK_FLAGS_FRAGMENT_OK; 9729 chk->asoc = &stcb->asoc; 9730 chk->whoTo = net; 9731 if (chk->whoTo) { 9732 atomic_add_int(&chk->whoTo->ref_count, 1); 9733 } 9734 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next); 9735 chk->asoc->ctrl_queue_cnt++; 9736 return; 9737 } 9738 9739 void 9740 sctp_send_asconf_ack(struct sctp_tcb *stcb) 9741 { 
9742 /* 9743 * formulate and queue a asconf-ack back to sender. 9744 * the asconf-ack must be stored in the tcb. 9745 */ 9746 struct sctp_tmit_chunk *chk; 9747 struct sctp_asconf_ack *ack, *latest_ack; 9748 struct mbuf *m_ack; 9749 struct sctp_nets *net = NULL; 9750 9751 SCTP_TCB_LOCK_ASSERT(stcb); 9752 /* Get the latest ASCONF-ACK */ 9753 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead); 9754 if (latest_ack == NULL) { 9755 return; 9756 } 9757 if (latest_ack->last_sent_to != NULL && 9758 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) { 9759 /* we're doing a retransmission */ 9760 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0); 9761 if (net == NULL) { 9762 /* no alternate */ 9763 if (stcb->asoc.last_control_chunk_from == NULL) { 9764 if (stcb->asoc.alternate) { 9765 net = stcb->asoc.alternate; 9766 } else { 9767 net = stcb->asoc.primary_destination; 9768 } 9769 } else { 9770 net = stcb->asoc.last_control_chunk_from; 9771 } 9772 } 9773 } else { 9774 /* normal case */ 9775 if (stcb->asoc.last_control_chunk_from == NULL) { 9776 if (stcb->asoc.alternate) { 9777 net = stcb->asoc.alternate; 9778 } else { 9779 net = stcb->asoc.primary_destination; 9780 } 9781 } else { 9782 net = stcb->asoc.last_control_chunk_from; 9783 } 9784 } 9785 latest_ack->last_sent_to = net; 9786 9787 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) { 9788 if (ack->data == NULL) { 9789 continue; 9790 } 9791 9792 /* copy the asconf_ack */ 9793 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT); 9794 if (m_ack == NULL) { 9795 /* couldn't copy it */ 9796 return; 9797 } 9798 #ifdef SCTP_MBUF_LOGGING 9799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 9800 struct mbuf *mat; 9801 9802 for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) { 9803 if (SCTP_BUF_IS_EXTENDED(mat)) { 9804 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 9805 } 9806 } 9807 } 9808 #endif 9809 9810 sctp_alloc_a_chunk(stcb, chk); 9811 if (chk == 
NULL) { 9812 /* no memory */ 9813 if (m_ack) 9814 sctp_m_freem(m_ack); 9815 return; 9816 } 9817 chk->copy_by_ref = 0; 9818 9819 chk->whoTo = net; 9820 if (chk->whoTo) { 9821 atomic_add_int(&chk->whoTo->ref_count, 1); 9822 } 9823 chk->data = m_ack; 9824 chk->send_size = 0; 9825 /* Get size */ 9826 chk->send_size = ack->len; 9827 chk->rec.chunk_id.id = SCTP_ASCONF_ACK; 9828 chk->rec.chunk_id.can_take_data = 1; 9829 chk->sent = SCTP_DATAGRAM_UNSENT; 9830 chk->snd_count = 0; 9831 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */ 9832 chk->asoc = &stcb->asoc; 9833 9834 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 9835 chk->asoc->ctrl_queue_cnt++; 9836 } 9837 return; 9838 } 9839 9840 9841 static int 9842 sctp_chunk_retransmission(struct sctp_inpcb *inp, 9843 struct sctp_tcb *stcb, 9844 struct sctp_association *asoc, 9845 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked 9846 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 9847 SCTP_UNUSED 9848 #endif 9849 ) 9850 { 9851 /*- 9852 * send out one MTU of retransmission. If fast_retransmit is 9853 * happening we ignore the cwnd. Otherwise we obey the cwnd and 9854 * rwnd. For a Cookie or Asconf in the control chunk queue we 9855 * retransmit them by themselves. 9856 * 9857 * For data chunks we will pick out the lowest TSN's in the sent_queue 9858 * marked for resend and bundle them all together (up to a MTU of 9859 * destination). The address to send to should have been 9860 * selected/changed where the retransmission was marked (i.e. in FR 9861 * or t3-timeout routines). 
9862 */ 9863 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 9864 struct sctp_tmit_chunk *chk, *fwd; 9865 struct mbuf *m, *endofchain; 9866 struct sctp_nets *net = NULL; 9867 uint32_t tsns_sent = 0; 9868 int no_fragmentflg, bundle_at, cnt_thru; 9869 unsigned int mtu; 9870 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started; 9871 struct sctp_auth_chunk *auth = NULL; 9872 uint32_t auth_offset = 0; 9873 uint16_t auth_keyid; 9874 int override_ok = 1; 9875 int data_auth_reqd = 0; 9876 uint32_t dmtu = 0; 9877 9878 #if defined(__APPLE__) 9879 if (so_locked) { 9880 sctp_lock_assert(SCTP_INP_SO(inp)); 9881 } else { 9882 sctp_unlock_assert(SCTP_INP_SO(inp)); 9883 } 9884 #endif 9885 SCTP_TCB_LOCK_ASSERT(stcb); 9886 tmr_started = ctl_cnt = bundle_at = error = 0; 9887 no_fragmentflg = 1; 9888 fwd_tsn = 0; 9889 *cnt_out = 0; 9890 fwd = NULL; 9891 endofchain = m = NULL; 9892 auth_keyid = stcb->asoc.authinfo.active_keyid; 9893 #ifdef SCTP_AUDITING_ENABLED 9894 sctp_audit_log(0xC3, 1); 9895 #endif 9896 if ((TAILQ_EMPTY(&asoc->sent_queue)) && 9897 (TAILQ_EMPTY(&asoc->control_send_queue))) { 9898 SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n", 9899 asoc->sent_queue_retran_cnt); 9900 asoc->sent_queue_cnt = 0; 9901 asoc->sent_queue_cnt_removeable = 0; 9902 /* send back 0/0 so we enter normal transmission */ 9903 *cnt_out = 0; 9904 return (0); 9905 } 9906 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 9907 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) || 9908 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) || 9909 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) { 9910 if (chk->sent != SCTP_DATAGRAM_RESEND) { 9911 continue; 9912 } 9913 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 9914 if (chk != asoc->str_reset) { 9915 /* 9916 * not eligible for retran if its 9917 * not ours 9918 */ 9919 continue; 9920 } 9921 } 9922 ctl_cnt++; 9923 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 9924 fwd_tsn = 1; 9925 } 9926 /* 9927 * Add an AUTH 
chunk, if chunk requires it save the 9928 * offset into the chain for AUTH 9929 */ 9930 if ((auth == NULL) && 9931 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 9932 stcb->asoc.peer_auth_chunks))) { 9933 m = sctp_add_auth_chunk(m, &endofchain, 9934 &auth, &auth_offset, 9935 stcb, 9936 chk->rec.chunk_id.id); 9937 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9938 } 9939 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 9940 break; 9941 } 9942 } 9943 one_chunk = 0; 9944 cnt_thru = 0; 9945 /* do we have control chunks to retransmit? */ 9946 if (m != NULL) { 9947 /* Start a timer no matter if we suceed or fail */ 9948 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 9949 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo); 9950 } else if (chk->rec.chunk_id.id == SCTP_ASCONF) 9951 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo); 9952 chk->snd_count++; /* update our count */ 9953 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo, 9954 (struct sockaddr *)&chk->whoTo->ro._l_addr, m, 9955 auth_offset, auth, stcb->asoc.authinfo.active_keyid, 9956 no_fragmentflg, 0, 0, 9957 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), 9958 chk->whoTo->port, NULL, 9959 #if defined(__FreeBSD__) 9960 0, 0, 9961 #endif 9962 so_locked))) { 9963 SCTP_STAT_INCR(sctps_lowlevelerr); 9964 return (error); 9965 } 9966 endofchain = NULL; 9967 auth = NULL; 9968 auth_offset = 0; 9969 /* 9970 * We don't want to mark the net->sent time here since this 9971 * we use this for HB and retrans cannot measure RTT 9972 */ 9973 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */ 9974 *cnt_out += 1; 9975 chk->sent = SCTP_DATAGRAM_SENT; 9976 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 9977 if (fwd_tsn == 0) { 9978 return (0); 9979 } else { 9980 /* Clean up the fwd-tsn list */ 9981 sctp_clean_up_ctl(stcb, asoc, so_locked); 9982 return (0); 9983 } 9984 } 9985 /* 9986 * Ok, it is just data retransmission 
we need to do or that and a 9987 * fwd-tsn with it all. 9988 */ 9989 if (TAILQ_EMPTY(&asoc->sent_queue)) { 9990 return (SCTP_RETRAN_DONE); 9991 } 9992 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) || 9993 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) { 9994 /* not yet open, resend the cookie and that is it */ 9995 return (1); 9996 } 9997 #ifdef SCTP_AUDITING_ENABLED 9998 sctp_auditing(20, inp, stcb, NULL); 9999 #endif 10000 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks); 10001 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 10002 if (chk->sent != SCTP_DATAGRAM_RESEND) { 10003 /* No, not sent to this net or not ready for rtx */ 10004 continue; 10005 } 10006 if (chk->data == NULL) { 10007 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n", 10008 chk->rec.data.TSN_seq, chk->snd_count, chk->sent); 10009 continue; 10010 } 10011 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) && 10012 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) { 10013 /* Gak, we have exceeded max unlucky retran, abort! 
*/ 10014 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n", 10015 chk->snd_count, 10016 SCTP_BASE_SYSCTL(sctp_max_retran_chunk)); 10017 atomic_add_int(&stcb->asoc.refcnt, 1); 10018 sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked); 10019 SCTP_TCB_LOCK(stcb); 10020 atomic_subtract_int(&stcb->asoc.refcnt, 1); 10021 return (SCTP_RETRAN_EXIT); 10022 } 10023 /* pick up the net */ 10024 net = chk->whoTo; 10025 switch (net->ro._l_addr.sa.sa_family) { 10026 #ifdef INET 10027 case AF_INET: 10028 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 10029 break; 10030 #endif 10031 #ifdef INET6 10032 case AF_INET6: 10033 mtu = net->mtu - SCTP_MIN_OVERHEAD; 10034 break; 10035 #endif 10036 #if defined(__Userspace__) 10037 case AF_CONN: 10038 mtu = net->mtu - sizeof(struct sctphdr); 10039 break; 10040 #endif 10041 default: 10042 /* TSNH */ 10043 mtu = net->mtu; 10044 break; 10045 } 10046 10047 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) { 10048 /* No room in peers rwnd */ 10049 uint32_t tsn; 10050 10051 tsn = asoc->last_acked_seq + 1; 10052 if (tsn == chk->rec.data.TSN_seq) { 10053 /* 10054 * we make a special exception for this 10055 * case. The peer has no rwnd but is missing 10056 * the lowest chunk.. which is probably what 10057 * is holding up the rwnd. 10058 */ 10059 goto one_chunk_around; 10060 } 10061 return (1); 10062 } 10063 one_chunk_around: 10064 if (asoc->peers_rwnd < mtu) { 10065 one_chunk = 1; 10066 if ((asoc->peers_rwnd == 0) && 10067 (asoc->total_flight == 0)) { 10068 chk->window_probe = 1; 10069 chk->whoTo->window_probe = 1; 10070 } 10071 } 10072 #ifdef SCTP_AUDITING_ENABLED 10073 sctp_audit_log(0xC3, 2); 10074 #endif 10075 bundle_at = 0; 10076 m = NULL; 10077 net->fast_retran_ip = 0; 10078 if (chk->rec.data.doing_fast_retransmit == 0) { 10079 /* 10080 * if no FR in progress skip destination that have 10081 * flight_size > cwnd. 
10082 */ 10083 if (net->flight_size >= net->cwnd) { 10084 continue; 10085 } 10086 } else { 10087 /* 10088 * Mark the destination net to have FR recovery 10089 * limits put on it. 10090 */ 10091 *fr_done = 1; 10092 net->fast_retran_ip = 1; 10093 } 10094 10095 /* 10096 * if no AUTH is yet included and this chunk requires it, 10097 * make sure to account for it. We don't apply the size 10098 * until the AUTH chunk is actually added below in case 10099 * there is no room for this chunk. 10100 */ 10101 if (data_auth_reqd && (auth == NULL)) { 10102 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 10103 } else 10104 dmtu = 0; 10105 10106 if ((chk->send_size <= (mtu - dmtu)) || 10107 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 10108 /* ok we will add this one */ 10109 if (data_auth_reqd) { 10110 if (auth == NULL) { 10111 m = sctp_add_auth_chunk(m, 10112 &endofchain, 10113 &auth, 10114 &auth_offset, 10115 stcb, 10116 SCTP_DATA); 10117 auth_keyid = chk->auth_keyid; 10118 override_ok = 0; 10119 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 10120 } else if (override_ok) { 10121 auth_keyid = chk->auth_keyid; 10122 override_ok = 0; 10123 } else if (chk->auth_keyid != auth_keyid) { 10124 /* different keyid, so done bundling */ 10125 break; 10126 } 10127 } 10128 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 10129 if (m == NULL) { 10130 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 10131 return (ENOMEM); 10132 } 10133 /* Do clear IP_DF ? */ 10134 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 10135 no_fragmentflg = 0; 10136 } 10137 /* upate our MTU size */ 10138 if (mtu > (chk->send_size + dmtu)) 10139 mtu -= (chk->send_size + dmtu); 10140 else 10141 mtu = 0; 10142 data_list[bundle_at++] = chk; 10143 if (one_chunk && (asoc->total_flight <= 0)) { 10144 SCTP_STAT_INCR(sctps_windowprobed); 10145 } 10146 } 10147 if (one_chunk == 0) { 10148 /* 10149 * now are there anymore forward from chk to pick 10150 * up? 
10151 */ 10152 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) { 10153 if (fwd->sent != SCTP_DATAGRAM_RESEND) { 10154 /* Nope, not for retran */ 10155 continue; 10156 } 10157 if (fwd->whoTo != net) { 10158 /* Nope, not the net in question */ 10159 continue; 10160 } 10161 if (data_auth_reqd && (auth == NULL)) { 10162 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 10163 } else 10164 dmtu = 0; 10165 if (fwd->send_size <= (mtu - dmtu)) { 10166 if (data_auth_reqd) { 10167 if (auth == NULL) { 10168 m = sctp_add_auth_chunk(m, 10169 &endofchain, 10170 &auth, 10171 &auth_offset, 10172 stcb, 10173 SCTP_DATA); 10174 auth_keyid = fwd->auth_keyid; 10175 override_ok = 0; 10176 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 10177 } else if (override_ok) { 10178 auth_keyid = fwd->auth_keyid; 10179 override_ok = 0; 10180 } else if (fwd->auth_keyid != auth_keyid) { 10181 /* different keyid, so done bundling */ 10182 break; 10183 } 10184 } 10185 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref); 10186 if (m == NULL) { 10187 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 10188 return (ENOMEM); 10189 } 10190 /* Do clear IP_DF ? */ 10191 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) { 10192 no_fragmentflg = 0; 10193 } 10194 /* upate our MTU size */ 10195 if (mtu > (fwd->send_size + dmtu)) 10196 mtu -= (fwd->send_size + dmtu); 10197 else 10198 mtu = 0; 10199 data_list[bundle_at++] = fwd; 10200 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 10201 break; 10202 } 10203 } else { 10204 /* can't fit so we are done */ 10205 break; 10206 } 10207 } 10208 } 10209 /* Is there something to send for this destination? */ 10210 if (m) { 10211 /* 10212 * No matter if we fail/or suceed we should start a 10213 * timer. A failure is like a lost IP packet :-) 10214 */ 10215 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 10216 /* 10217 * no timer running on this destination 10218 * restart it. 
10219 */ 10220 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 10221 tmr_started = 1; 10222 } 10223 /* Now lets send it, if there is anything to send :> */ 10224 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 10225 (struct sockaddr *)&net->ro._l_addr, m, 10226 auth_offset, auth, auth_keyid, 10227 no_fragmentflg, 0, 0, 10228 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), 10229 net->port, NULL, 10230 #if defined(__FreeBSD__) 10231 0, 0, 10232 #endif 10233 so_locked))) { 10234 /* error, we could not output */ 10235 SCTP_STAT_INCR(sctps_lowlevelerr); 10236 return (error); 10237 } 10238 endofchain = NULL; 10239 auth = NULL; 10240 auth_offset = 0; 10241 /* For HB's */ 10242 /* 10243 * We don't want to mark the net->sent time here 10244 * since this we use this for HB and retrans cannot 10245 * measure RTT 10246 */ 10247 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */ 10248 10249 /* For auto-close */ 10250 cnt_thru++; 10251 if (*now_filled == 0) { 10252 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); 10253 *now = asoc->time_last_sent; 10254 *now_filled = 1; 10255 } else { 10256 asoc->time_last_sent = *now; 10257 } 10258 *cnt_out += bundle_at; 10259 #ifdef SCTP_AUDITING_ENABLED 10260 sctp_audit_log(0xC4, bundle_at); 10261 #endif 10262 if (bundle_at) { 10263 tsns_sent = data_list[0]->rec.data.TSN_seq; 10264 } 10265 for (i = 0; i < bundle_at; i++) { 10266 SCTP_STAT_INCR(sctps_sendretransdata); 10267 data_list[i]->sent = SCTP_DATAGRAM_SENT; 10268 /* 10269 * When we have a revoked data, and we 10270 * retransmit it, then we clear the revoked 10271 * flag since this flag dictates if we 10272 * subtracted from the fs 10273 */ 10274 if (data_list[i]->rec.data.chunk_was_revoked) { 10275 /* Deflate the cwnd */ 10276 data_list[i]->whoTo->cwnd -= data_list[i]->book_size; 10277 data_list[i]->rec.data.chunk_was_revoked = 0; 10278 } 10279 data_list[i]->snd_count++; 10280 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 10281 /* record the time */ 
10282 data_list[i]->sent_rcv_time = asoc->time_last_sent; 10283 if (data_list[i]->book_size_scale) { 10284 /* 10285 * need to double the book size on 10286 * this one 10287 */ 10288 data_list[i]->book_size_scale = 0; 10289 /* Since we double the booksize, we must 10290 * also double the output queue size, since this 10291 * get shrunk when we free by this amount. 10292 */ 10293 atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size); 10294 data_list[i]->book_size *= 2; 10295 10296 10297 } else { 10298 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 10299 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, 10300 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 10301 } 10302 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, 10303 (uint32_t) (data_list[i]->send_size + 10304 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))); 10305 } 10306 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 10307 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND, 10308 data_list[i]->whoTo->flight_size, 10309 data_list[i]->book_size, 10310 (uintptr_t)data_list[i]->whoTo, 10311 data_list[i]->rec.data.TSN_seq); 10312 } 10313 sctp_flight_size_increase(data_list[i]); 10314 sctp_total_flight_increase(stcb, data_list[i]); 10315 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 10316 /* SWS sender side engages */ 10317 asoc->peers_rwnd = 0; 10318 } 10319 if ((i == 0) && 10320 (data_list[i]->rec.data.doing_fast_retransmit)) { 10321 SCTP_STAT_INCR(sctps_sendfastretrans); 10322 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) && 10323 (tmr_started == 0)) { 10324 /*- 10325 * ok we just fast-retrans'd 10326 * the lowest TSN, i.e the 10327 * first on the list. In 10328 * this case we want to give 10329 * some more time to get a 10330 * SACK back without a 10331 * t3-expiring. 
10332 */ 10333 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, 10334 SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_4); 10335 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 10336 } 10337 } 10338 } 10339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 10340 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND); 10341 } 10342 #ifdef SCTP_AUDITING_ENABLED 10343 sctp_auditing(21, inp, stcb, NULL); 10344 #endif 10345 } else { 10346 /* None will fit */ 10347 return (1); 10348 } 10349 if (asoc->sent_queue_retran_cnt <= 0) { 10350 /* all done we have no more to retran */ 10351 asoc->sent_queue_retran_cnt = 0; 10352 break; 10353 } 10354 if (one_chunk) { 10355 /* No more room in rwnd */ 10356 return (1); 10357 } 10358 /* stop the for loop here. we sent out a packet */ 10359 break; 10360 } 10361 return (0); 10362 } 10363 10364 static void 10365 sctp_timer_validation(struct sctp_inpcb *inp, 10366 struct sctp_tcb *stcb, 10367 struct sctp_association *asoc) 10368 { 10369 struct sctp_nets *net; 10370 10371 /* Validate that a timer is running somewhere */ 10372 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 10373 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 10374 /* Here is a timer */ 10375 return; 10376 } 10377 } 10378 SCTP_TCB_LOCK_ASSERT(stcb); 10379 /* Gak, we did not have a timer somewhere */ 10380 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n"); 10381 if (asoc->alternate) { 10382 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate); 10383 } else { 10384 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination); 10385 } 10386 return; 10387 } 10388 10389 void 10390 sctp_chunk_output (struct sctp_inpcb *inp, 10391 struct sctp_tcb *stcb, 10392 int from_where, 10393 int so_locked 10394 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 10395 SCTP_UNUSED 10396 #endif 10397 ) 10398 { 10399 /*- 10400 * Ok this is the generic chunk service queue. 
we must do the 10401 * following: 10402 * - See if there are retransmits pending, if so we must 10403 * do these first. 10404 * - Service the stream queue that is next, moving any 10405 * message (note I must get a complete message i.e. 10406 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning 10407 * TSN's 10408 * - Check to see if the cwnd/rwnd allows any output, if so we 10409 * go ahead and fomulate and send the low level chunks. Making sure 10410 * to combine any control in the control chunk queue also. 10411 */ 10412 struct sctp_association *asoc; 10413 struct sctp_nets *net; 10414 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0; 10415 unsigned int burst_cnt = 0; 10416 struct timeval now; 10417 int now_filled = 0; 10418 int nagle_on; 10419 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 10420 int un_sent = 0; 10421 int fr_done; 10422 unsigned int tot_frs = 0; 10423 10424 #if defined(__APPLE__) 10425 if (so_locked) { 10426 sctp_lock_assert(SCTP_INP_SO(inp)); 10427 } else { 10428 sctp_unlock_assert(SCTP_INP_SO(inp)); 10429 } 10430 #endif 10431 asoc = &stcb->asoc; 10432 /* The Nagle algorithm is only applied when handling a send call. */ 10433 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) { 10434 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) { 10435 nagle_on = 0; 10436 } else { 10437 nagle_on = 1; 10438 } 10439 } else { 10440 nagle_on = 0; 10441 } 10442 SCTP_TCB_LOCK_ASSERT(stcb); 10443 10444 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 10445 10446 if ((un_sent <= 0) && 10447 (TAILQ_EMPTY(&asoc->control_send_queue)) && 10448 (TAILQ_EMPTY(&asoc->asconf_send_queue)) && 10449 (asoc->sent_queue_retran_cnt == 0)) { 10450 /* Nothing to do unless there is something to be sent left */ 10451 return; 10452 } 10453 /* Do we have something to send, data or control AND 10454 * a sack timer running, if so piggy-back the sack. 
10455 */ 10456 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 10457 sctp_send_sack(stcb, so_locked); 10458 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 10459 } 10460 while (asoc->sent_queue_retran_cnt) { 10461 /*- 10462 * Ok, it is retransmission time only, we send out only ONE 10463 * packet with a single call off to the retran code. 10464 */ 10465 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) { 10466 /*- 10467 * Special hook for handling cookiess discarded 10468 * by peer that carried data. Send cookie-ack only 10469 * and then the next call with get the retran's. 10470 */ 10471 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 10472 from_where, 10473 &now, &now_filled, frag_point, so_locked); 10474 return; 10475 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) { 10476 /* if its not from a HB then do it */ 10477 fr_done = 0; 10478 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked); 10479 if (fr_done) { 10480 tot_frs++; 10481 } 10482 } else { 10483 /* 10484 * its from any other place, we don't allow retran 10485 * output (only control) 10486 */ 10487 ret = 1; 10488 } 10489 if (ret > 0) { 10490 /* Can't send anymore */ 10491 /*- 10492 * now lets push out control by calling med-level 10493 * output once. this assures that we WILL send HB's 10494 * if queued too. 10495 */ 10496 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 10497 from_where, 10498 &now, &now_filled, frag_point, so_locked); 10499 #ifdef SCTP_AUDITING_ENABLED 10500 sctp_auditing(8, inp, stcb, NULL); 10501 #endif 10502 sctp_timer_validation(inp, stcb, asoc); 10503 return; 10504 } 10505 if (ret < 0) { 10506 /*- 10507 * The count was off.. retran is not happening so do 10508 * the normal retransmission. 
10509 */ 10510 #ifdef SCTP_AUDITING_ENABLED 10511 sctp_auditing(9, inp, stcb, NULL); 10512 #endif 10513 if (ret == SCTP_RETRAN_EXIT) { 10514 return; 10515 } 10516 break; 10517 } 10518 if (from_where == SCTP_OUTPUT_FROM_T3) { 10519 /* Only one transmission allowed out of a timeout */ 10520 #ifdef SCTP_AUDITING_ENABLED 10521 sctp_auditing(10, inp, stcb, NULL); 10522 #endif 10523 /* Push out any control */ 10524 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where, 10525 &now, &now_filled, frag_point, so_locked); 10526 return; 10527 } 10528 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) { 10529 /* Hit FR burst limit */ 10530 return; 10531 } 10532 if ((num_out == 0) && (ret == 0)) { 10533 /* No more retrans to send */ 10534 break; 10535 } 10536 } 10537 #ifdef SCTP_AUDITING_ENABLED 10538 sctp_auditing(12, inp, stcb, NULL); 10539 #endif 10540 /* Check for bad destinations, if they exist move chunks around. */ 10541 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 10542 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 10543 /*- 10544 * if possible move things off of this address we 10545 * still may send below due to the dormant state but 10546 * we try to find an alternate address to send to 10547 * and if we have one we move all queued data on the 10548 * out wheel to this alternate address. 
10549 */ 10550 if (net->ref_count > 1) 10551 sctp_move_chunks_from_net(stcb, net); 10552 } else { 10553 /*- 10554 * if ((asoc->sat_network) || (net->addr_is_local)) 10555 * { burst_limit = asoc->max_burst * 10556 * SCTP_SAT_NETWORK_BURST_INCR; } 10557 */ 10558 if (asoc->max_burst > 0) { 10559 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) { 10560 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) { 10561 /* JRS - Use the congestion control given in the congestion control module */ 10562 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst); 10563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 10564 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED); 10565 } 10566 SCTP_STAT_INCR(sctps_maxburstqueued); 10567 } 10568 net->fast_retran_ip = 0; 10569 } else { 10570 if (net->flight_size == 0) { 10571 /* Should be decaying the cwnd here */ 10572 ; 10573 } 10574 } 10575 } 10576 } 10577 10578 } 10579 burst_cnt = 0; 10580 do { 10581 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out, 10582 &reason_code, 0, from_where, 10583 &now, &now_filled, frag_point, so_locked); 10584 if (error) { 10585 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error); 10586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 10587 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP); 10588 } 10589 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 10590 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES); 10591 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES); 10592 } 10593 break; 10594 } 10595 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out); 10596 10597 tot_out += num_out; 10598 burst_cnt++; 10599 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 10600 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES); 10601 if (num_out == 0) { 10602 
sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES); 10603 } 10604 } 10605 if (nagle_on) { 10606 /* 10607 * When the Nagle algorithm is used, look at how much 10608 * is unsent, then if its smaller than an MTU and we 10609 * have data in flight we stop, except if we are 10610 * handling a fragmented user message. 10611 */ 10612 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 10613 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 10614 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) && 10615 (stcb->asoc.total_flight > 0) && 10616 ((stcb->asoc.locked_on_sending == NULL) || 10617 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { 10618 break; 10619 } 10620 } 10621 if (TAILQ_EMPTY(&asoc->control_send_queue) && 10622 TAILQ_EMPTY(&asoc->send_queue) && 10623 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 10624 /* Nothing left to send */ 10625 break; 10626 } 10627 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) { 10628 /* Nothing left to send */ 10629 break; 10630 } 10631 } while (num_out && 10632 ((asoc->max_burst == 0) || 10633 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) || 10634 (burst_cnt < asoc->max_burst))); 10635 10636 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) { 10637 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) { 10638 SCTP_STAT_INCR(sctps_maxburstqueued); 10639 asoc->burst_limit_applied = 1; 10640 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 10641 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED); 10642 } 10643 } else { 10644 asoc->burst_limit_applied = 0; 10645 } 10646 } 10647 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 10648 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES); 10649 } 10650 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n", 10651 tot_out); 10652 10653 /*- 10654 * Now we need to clean up 
 * the control chunk chain if an ECNE is on it. It must be marked as
 * UNSENT again so the next call will continue to send it until such
 * time that we get a CWR, to remove it.
 */
	if (stcb->asoc.ecn_echo_cnt_onq)
		sctp_fix_ecn_echo(asoc);
	return;
}


/*
 * Socket-layer send entry point for the PCB: validates that the endpoint
 * and its socket exist, then hands the mbuf chain off to sctp_sosend().
 * Returns 0 on success or an errno value (EINVAL for a missing
 * endpoint/socket; otherwise whatever sctp_sosend() returns).
 */
int
sctp_output(
	struct sctp_inpcb *inp,
#if defined(__Panda__)
	pakhandle_type m,
#else
	struct mbuf *m,
#endif
	struct sockaddr *addr,
#if defined(__Panda__)
	pakhandle_type control,
#else
	struct mbuf *control,
#endif
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	struct thread *p,
#elif defined(__Windows__)
	PKTHREAD p,
#else
#if defined(__APPLE__)
	struct proc *p SCTP_UNUSED,
#else
	struct proc *p,
#endif
#endif
	int flags)
{
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}

	if (inp->sctp_socket == NULL) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	return (sctp_sosend(inp->sctp_socket,
	    addr,
	    (struct uio *)NULL,
	    m,
	    control,
#if defined(__APPLE__) || defined(__Panda__)
	    flags
#else
	    flags, p
#endif
	    ));
}

/*
 * Queue a FORWARD-TSN chunk on the association's control queue, reusing
 * an already-queued one if present, and fill in the new cumulative TSN
 * plus the stream/sequence pairs of the DATA chunks being skipped.  If
 * the full report will not fit in the smallest MTU, the advance peer
 * ack point is trimmed back to what does fit.  Caller must hold the
 * TCB lock.
 */
void
send_forward_tsn(struct sctp_tcb *stcb,
                 struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_forward_tsn_chunk *fwdtsn;
	uint32_t advance_peer_ack_point;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Reuse an existing FORWARD-TSN on the control queue, if any. */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
			/* mark it to unsent */
			chk->sent = SCTP_DATAGRAM_UNSENT;
			chk->snd_count = 0;
			/* Do we correct its output location? */
			if (chk->whoTo) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = NULL;
			}
			goto sctp_fill_in_rest;
		}
	}
	/* Ok if we reach here we must build one */
	/* allocation macro fills in 'chk'; it is NULL on failure */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	asoc->fwd_tsn_cnt++;
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = asoc;
	chk->whoTo = NULL;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
sctp_fill_in_rest:
	/*-
	 * Here we go through and fill out the part that deals with
	 * stream/seq of the ones we skip.
	 */
	SCTP_BUF_LEN(chk->data) = 0;
	{
		struct sctp_tmit_chunk *at, *tp1, *last;
		struct sctp_strseq *strseq;
		unsigned int cnt_of_space, i, ovh;
		unsigned int space_needed;
		unsigned int cnt_of_skipped = 0;

		/* Count skippable chunks at the head of the sent queue. */
		TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
			if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
			    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
				/* no more to look at */
				break;
			}
			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
				/* We don't report these */
				continue;
			}
			cnt_of_skipped++;
		}
		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
		    (cnt_of_skipped * sizeof(struct sctp_strseq)));

		cnt_of_space = M_TRAILINGSPACE(chk->data);

		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			ovh = SCTP_MIN_OVERHEAD;
		} else {
			ovh = SCTP_MIN_V4_OVERHEAD;
		}
		if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
			/* trim to a mtu size */
			cnt_of_space = asoc->smallest_mtu - ovh;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
			    0xff, 0, cnt_of_skipped,
			    asoc->advanced_peer_ack_point);
		}
		advance_peer_ack_point = asoc->advanced_peer_ack_point;
		if (cnt_of_space < space_needed) {
			/*-
			 * ok we must trim down the chunk by lowering the
			 * advance peer ack point.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    0xff, 0xff, cnt_of_space,
				    space_needed);
			}
			/*
			 * NOTE(review): unsigned subtraction; assumes
			 * cnt_of_space >= sizeof(struct sctp_forward_tsn_chunk)
			 * (otherwise this wraps) -- confirm minimum mbuf
			 * trailing space guarantees this.
			 */
			cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
			cnt_of_skipped /= sizeof(struct sctp_strseq);
			/*-
			 * Go through and find the TSN that will be the one
			 * we report.
			 */
			at = TAILQ_FIRST(&asoc->sent_queue);
			if (at != NULL) {
				for (i = 0; i < cnt_of_skipped; i++) {
					tp1 = TAILQ_NEXT(at, sctp_next);
					if (tp1 == NULL) {
						break;
					}
					at = tp1;
				}
			}
			if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    0xff, cnt_of_skipped, at->rec.data.TSN_seq,
				    asoc->advanced_peer_ack_point);
			}
			last = at;
			/*-
			 * last now points to last one I can report, update
			 * peer ack point
			 */
			if (last)
				advance_peer_ack_point = last->rec.data.TSN_seq;
			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
			    cnt_of_skipped * sizeof(struct sctp_strseq);
		}
		chk->send_size = space_needed;
		/* Setup the chunk */
		fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
		fwdtsn->ch.chunk_length = htons(chk->send_size);
		fwdtsn->ch.chunk_flags = 0;
		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
		SCTP_BUF_LEN(chk->data) = chk->send_size;
		fwdtsn++;
		/*-
		 * Move pointer to after the fwdtsn and transfer to the
		 * strseq pointer.
		 */
		strseq = (struct sctp_strseq *)fwdtsn;
		/*-
		 * Now populate the strseq list. This is done blindly
		 * without pulling out duplicate stream info. This is
		 * inefficent but won't harm the process since the peer will
		 * look at these in sequence and will thus release anything.
		 * It could mean we exceed the PMTU and chop off some that
		 * we could have included.. but this is unlikely (aka 1432/4
		 * would mean 300+ stream seq's would have to be reported in
		 * one FWD-TSN. With a bit of work we can later FIX this to
		 * optimize and pull out duplcates.. but it does add more
		 * overhead. So for now... not!
		 */
		at = TAILQ_FIRST(&asoc->sent_queue);
		for (i = 0; i < cnt_of_skipped; i++) {
			tp1 = TAILQ_NEXT(at, sctp_next);
			if (tp1 == NULL)
				break;
			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
				/* We don't report these */
				i--;
				at = tp1;
				continue;
			}
			if (at->rec.data.TSN_seq == advance_peer_ack_point) {
				at->rec.data.fwd_tsn_cnt = 0;
			}
			/*
			 * NOTE(review): ntohs() where htons() is the
			 * conventional direction for wire output; the two
			 * perform the identical byte swap, so behavior is
			 * unaffected.
			 */
			strseq->stream = ntohs(at->rec.data.stream_number);
			strseq->sequence = ntohs(at->rec.data.stream_seq);
			strseq++;
			at = tp1;
		}
	}
	return;
}

/*
 * Build a SACK or NR-SACK (chosen by the peer's nrsack support) and
 * queue it on the control queue.  Caller must hold the TCB lock;
 * so_locked indicates the socket-lock state for chunk-free paths.
 */
void
sctp_send_sack(struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*-
	 * Queue up a SACK or NR-SACK in the control queue.
	 * We must first check to see if a SACK or NR-SACK is
	 * somehow on the control queue.
	 * If so, we will take and remove the old one.
	 */
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *a_chk;
	struct sctp_sack_chunk *sack;
	struct sctp_nr_sack_chunk *nr_sack;
	struct sctp_gap_ack_block *gap_descriptor;
	struct sack_track *selector;	/* per-byte gap lookup from sack_array[] */
	int mergeable = 0;
	int offset;
	caddr_t limit;			/* one past the last usable byte in the mbuf */
	uint32_t *dup;
	int limit_reached = 0;
	unsigned int i, siz, j;
	unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
	int num_dups = 0;
	int space_req;
	uint32_t highest_tsn;
	uint8_t flags;
	uint8_t type;
	uint8_t tsn_map;

	if (stcb->asoc.nrsack_supported == 1) {
		type = SCTP_NR_SELECTIVE_ACK;
	} else {
		type = SCTP_SELECTIVE_ACK;
	}
	a_chk = NULL;
	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->last_data_chunk_from == NULL) {
		/* Hmm we never received anything */
		return;
	}
	sctp_slide_mapping_arrays(stcb);
	sctp_set_rwnd(stcb, asoc);
	/* Reuse a SACK/NR-SACK already sitting on the control queue. */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == type) {
			/* Hmm, found a sack already on queue, remove it */
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			asoc->ctrl_queue_cnt--;
			a_chk = chk;
			if (a_chk->data) {
				sctp_m_freem(a_chk->data);
				a_chk->data = NULL;
			}
			if (a_chk->whoTo) {
				sctp_free_remote_addr(a_chk->whoTo);
				a_chk->whoTo = NULL;
			}
			break;
		}
	}
	if (a_chk == NULL) {
		sctp_alloc_a_chunk(stcb, a_chk);
		if (a_chk == NULL) {
			/* No memory so we drop the idea, and set a timer */
			if (stcb->asoc.delayed_ack) {
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				stcb->asoc.send_sack = 1;
			}
			return;
		}
		a_chk->copy_by_ref = 0;
		a_chk->rec.chunk_id.id = type;
		a_chk->rec.chunk_id.can_take_data = 1;
	}
	/* Clear our pkt counts */
	asoc->data_pkts_seen = 0;

	a_chk->asoc = asoc;
	a_chk->snd_count = 0;
	a_chk->send_size = 0;	/* fill in later */
	a_chk->sent = SCTP_DATAGRAM_UNSENT;
	a_chk->whoTo = NULL;

	if ((asoc->numduptsns) ||
	    (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
		/*-
		 * Ok, we have some duplicates or the destination for the
		 * sack is unreachable, lets see if we can select an
		 * alternate than asoc->last_data_chunk_from
		 */
		if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
		    (asoc->used_alt_onsack > asoc->numnets)) {
			/* We used an alt last time, don't this time */
			a_chk->whoTo = NULL;
		} else {
			asoc->used_alt_onsack++;
			a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
		}
		if (a_chk->whoTo == NULL) {
			/* Nope, no alternate */
			a_chk->whoTo = asoc->last_data_chunk_from;
			asoc->used_alt_onsack = 0;
		}
	} else {
		/*
		 * No duplicates so we use the last place we received data
		 * from.
		 */
		asoc->used_alt_onsack = 0;
		a_chk->whoTo = asoc->last_data_chunk_from;
	}
	if (a_chk->whoTo) {
		atomic_add_int(&a_chk->whoTo->ref_count, 1);
	}
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
		highest_tsn = asoc->highest_tsn_inside_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	}
	if (highest_tsn == asoc->cumulative_tsn) {
		/* no gaps */
		if (type == SCTP_SELECTIVE_ACK) {
			space_req = sizeof(struct sctp_sack_chunk);
		} else {
			space_req = sizeof(struct sctp_nr_sack_chunk);
		}
	} else {
		/* gaps get a cluster */
		space_req = MCLBYTES;
	}
	/* Ok now lets formulate a MBUF with our sack */
	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
	if ((a_chk->data == NULL) ||
	    (a_chk->whoTo == NULL)) {
		/* rats, no mbuf memory */
		if (a_chk->data) {
			/* was a problem with the destination */
			sctp_m_freem(a_chk->data);
			a_chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, a_chk, so_locked);
		/* sa_ignore NO_NULL_CHK */
		if (stcb->asoc.delayed_ack) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL);
		} else {
			stcb->asoc.send_sack = 1;
		}
		return;
	}
	/* ok, lets go through and fill it in */
	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
	space = M_TRAILINGSPACE(a_chk->data);
	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
	}
	limit = mtod(a_chk->data, caddr_t);
	limit += space;

	flags = 0;

	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		/*-
		 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
		 * received, then set high bit to 1, else 0. Reset
		 * pkts_rcvd.
		 */
		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
		asoc->cmt_dac_pkts_rcvd = 0;
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
	stcb->asoc.cumack_log_atsnt++;
	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
		stcb->asoc.cumack_log_atsnt = 0;
	}
#endif
	/* reset the readers interpretation */
	stcb->freed_by_sorcv_sincelast = 0;

	/*
	 * Point the header/gap-descriptor pointers at the proper offsets
	 * and compute 'siz', the number of mapping-array bytes to walk.
	 */
	if (type == SCTP_SELECTIVE_ACK) {
		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
		nr_sack = NULL;
		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
		if (highest_tsn > asoc->mapping_array_base_tsn) {
			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
		} else {
			siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
		}
	} else {
		sack = NULL;
		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
		} else {
			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
		}
	}

	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
		offset = 1;
	} else {
		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
	}
	if (((type == SCTP_SELECTIVE_ACK) &&
	    SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
	    ((type == SCTP_NR_SELECTIVE_ACK) &&
	    SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
		/* we have a gap .. maybe */
		for (i = 0; i < siz; i++) {
			tsn_map = asoc->mapping_array[i];
			if (type == SCTP_SELECTIVE_ACK) {
				tsn_map |= asoc->nr_mapping_array[i];
			}
			if (i == 0) {
				/*
				 * Clear all bits corresponding to TSNs
				 * smaller or equal to the cumulative TSN.
				 *
				 * NOTE(review): (~0 << n) left-shifts a
				 * negative int, formally UB in standard C
				 * though well-defined on the compilers this
				 * stack targets -- confirm if porting.
				 */
				tsn_map &= (~0 << (1 - offset));
			}
			selector = &sack_array[tsn_map];
			if (mergeable && selector->right_edge) {
				/*
				 * Backup, left and right edges were ok to
				 * merge.
				 */
				num_gap_blocks--;
				gap_descriptor--;
			}
			if (selector->num_entries == 0)
				mergeable = 0;
			else {
				for (j = 0; j < selector->num_entries; j++) {
					if (mergeable && selector->right_edge) {
						/*
						 * do a merge by NOT setting
						 * the left side
						 */
						mergeable = 0;
					} else {
						/*
						 * no merge, set the left
						 * side
						 */
						mergeable = 0;
						gap_descriptor->start = htons((selector->gaps[j].start + offset));
					}
					gap_descriptor->end = htons((selector->gaps[j].end + offset));
					num_gap_blocks++;
					gap_descriptor++;
					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
						/* no more room */
						limit_reached = 1;
						break;
					}
				}
				if (selector->left_edge) {
					mergeable = 1;
				}
			}
			if (limit_reached) {
				/* Reached the limit stop */
				break;
			}
			offset += 8;
		}
	}
	/*
	 * Second pass for NR-SACK only: walk the nr_mapping_array and emit
	 * the NR gap blocks after the regular ones.
	 */
	if ((type == SCTP_NR_SELECTIVE_ACK) &&
	    (limit_reached == 0)) {

		mergeable = 0;

		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
		} else {
			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
		}

		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
			offset = 1;
		} else {
			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
		}
		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
			/* we have a gap .. maybe */
			for (i = 0; i < siz; i++) {
				tsn_map = asoc->nr_mapping_array[i];
				if (i == 0) {
					/*
					 * Clear all bits corresponding to TSNs
					 * smaller or equal to the cumulative TSN.
					 */
					tsn_map &= (~0 << (1 - offset));
				}
				selector = &sack_array[tsn_map];
				if (mergeable && selector->right_edge) {
					/*
					 * Backup, left and right edges were ok to
					 * merge.
					 */
					num_nr_gap_blocks--;
					gap_descriptor--;
				}
				if (selector->num_entries == 0)
					mergeable = 0;
				else {
					for (j = 0; j < selector->num_entries; j++) {
						if (mergeable && selector->right_edge) {
							/*
							 * do a merge by NOT setting
							 * the left side
							 */
							mergeable = 0;
						} else {
							/*
							 * no merge, set the left
							 * side
							 */
							mergeable = 0;
							gap_descriptor->start = htons((selector->gaps[j].start + offset));
						}
						gap_descriptor->end = htons((selector->gaps[j].end + offset));
						num_nr_gap_blocks++;
						gap_descriptor++;
						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
							/* no more room */
							limit_reached = 1;
							break;
						}
					}
					if (selector->left_edge) {
						mergeable = 1;
					}
				}
				if (limit_reached) {
					/* Reached the limit stop */
					break;
				}
				offset += 8;
			}
		}
	}
	/* now we must add any dups we are going to report. */
	if ((limit_reached == 0) && (asoc->numduptsns)) {
		dup = (uint32_t *) gap_descriptor;
		for (i = 0; i < asoc->numduptsns; i++) {
			*dup = htonl(asoc->dup_tsns[i]);
			dup++;
			num_dups++;
			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
				/* no more room */
				break;
			}
		}
		asoc->numduptsns = 0;
	}
	/*
	 * now that the chunk is prepared queue it to the control chunk
	 * queue.
	 */
	if (type == SCTP_SELECTIVE_ACK) {
		a_chk->send_size = sizeof(struct sctp_sack_chunk) +
		    (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
		    num_dups * sizeof(int32_t);
		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
		sack->sack.num_dup_tsns = htons(num_dups);
		sack->ch.chunk_type = type;
		sack->ch.chunk_flags = flags;
		sack->ch.chunk_length = htons(a_chk->send_size);
	} else {
		a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
		    (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
		    num_dups * sizeof(int32_t);
		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
		nr_sack->nr_sack.reserved = 0;
		nr_sack->ch.chunk_type = type;
		nr_sack->ch.chunk_flags = flags;
		nr_sack->ch.chunk_length = htons(a_chk->send_size);
	}
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
	asoc->my_last_reported_rwnd = asoc->my_rwnd;
	asoc->ctrl_queue_cnt++;
	asoc->send_sack = 0;
	SCTP_STAT_INCR(sctps_sendsacks);
	return;
}

/*
 * Build and transmit an ABORT chunk for this association, bundling an
 * AUTH chunk in front when the peer requires ABORT to be authenticated.
 * 'operr' (may be NULL) is chained on as the error cause(s) and is
 * consumed.  Caller must hold the TCB lock.
 */
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_abort, *m, *m_last;
	struct mbuf *m_out, *m_end = NULL;
	struct sctp_abort_chunk *abort;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_nets *net;
	uint32_t vtag;
	uint32_t auth_offset = 0;
	uint16_t cause_len, chunk_len, padding_len;

#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	SCTP_TCB_LOCK_ASSERT(stcb);
	/*-
	 * Add an AUTH chunk, if chunk requires it and save the offset into
	 * the chain for AUTH
	 */
	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
	    stcb->asoc.peer_auth_chunks)) {
		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
		    stcb, SCTP_ABORT_ASSOCIATION);
		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	} else {
		m_out = NULL;
	}
	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_abort == NULL) {
		if (m_out) {
			sctp_m_freem(m_out);
		}
		if (operr) {
			sctp_m_freem(operr);
		}
		return;
	}
	/* link in any error */
	SCTP_BUF_NEXT(m_abort) = operr;
	cause_len = 0;
	m_last = NULL;
	/* Total the cause chain and remember its last mbuf (for padding). */
	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
		cause_len += (uint16_t)SCTP_BUF_LEN(m);
		if (SCTP_BUF_NEXT(m) == NULL) {
			m_last = m;
		}
	}
	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
	if (m_out == NULL) {
		/* NO Auth chunk
prepended, so reserve space in front */
		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
		m_out = m_abort;
	} else {
		/* Put AUTH chunk at the front of the chain */
		SCTP_BUF_NEXT(m_end) = m_abort;
	}
	if (stcb->asoc.alternate) {
		net = stcb->asoc.alternate;
	} else {
		net = stcb->asoc.primary_destination;
	}
	/* Fill in the ABORT chunk header. */
	abort = mtod(m_abort, struct sctp_abort_chunk *);
	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
	if (stcb->asoc.peer_vtag == 0) {
		/* This happens iff the assoc is in COOKIE-WAIT state. */
		vtag = stcb->asoc.my_vtag;
		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
	} else {
		vtag = stcb->asoc.peer_vtag;
		abort->ch.chunk_flags = 0;
	}
	abort->ch.chunk_length = htons(chunk_len);
	/* Add padding, if necessary. */
	if (padding_len > 0) {
		if ((m_last == NULL) ||
		    (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
			sctp_m_freem(m_out);
			return;
		}
	}
	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
	    stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
	    stcb->asoc.primary_destination->port, NULL,
#if defined(__FreeBSD__)
	    0, 0,
#endif
	    so_locked);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}

/*
 * Build and immediately transmit a SHUTDOWN-COMPLETE chunk to 'net'.
 * When reflect_vtag is set, the chunk carries the T-bit
 * (SCTP_HAD_NO_TCB) and our own vtag instead of the peer's.
 */
void
sctp_send_shutdown_complete(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int reflect_vtag)
{
	/* formulate and SEND a SHUTDOWN-COMPLETE */
	struct mbuf *m_shutdown_comp;
	struct sctp_shutdown_complete_chunk *shutdown_complete;
	uint32_t vtag;
	uint8_t flags;

	/*
	 * NOTE(review): allocation is sized for a bare chunk header;
	 * assumes sctp_shutdown_complete_chunk is no larger than
	 * sctp_chunkhdr -- confirm against the header definitions.
	 */
	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_shutdown_comp == NULL) {
		/* no mbuf's */
		return;
	}
	if (reflect_vtag) {
		flags = SCTP_HAD_NO_TCB;
		vtag = stcb->asoc.my_vtag;
	} else {
		flags = 0;
		vtag = stcb->asoc.peer_vtag;
	}
	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
	shutdown_complete->ch.chunk_flags = flags;
	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
	    stcb->sctp_ep->sctp_lport, stcb->rport,
	    htonl(vtag),
	    net->port, NULL,
#if defined(__FreeBSD__)
	    0, 0,
#endif
	    SCTP_SO_NOT_LOCKED);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;
}

/*
 * Build an out-of-the-blue response packet (e.g. for a packet with no
 * matching TCB), swapping the received src/dst addresses and ports.
 * The visible portion below only declares locals and computes the cause
 * padding; presumably the remainder emits the IP/UDP/SCTP headers --
 * the function continues past this view.
 */
#if defined(__FreeBSD__)
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, uint32_t vtag,
    uint8_t type, struct mbuf *cause,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
#else
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, uint32_t vtag,
    uint8_t type, struct mbuf *cause,
    uint32_t vrf_id SCTP_UNUSED, uint16_t port)
#endif
{
#ifdef __Panda__
	pakhandle_type o_pak;
#else
	struct mbuf *o_pak;
#endif
	struct mbuf *mout;
	struct sctphdr *shout;
	struct sctp_chunkhdr *ch;
#if defined(INET) || defined(INET6)
	struct udphdr *udp;
	int ret;
#endif
	int len, cause_len, padding_len;
#ifdef INET
#if defined(__APPLE__) || defined(__Panda__)
	sctp_route_t ro;
#endif
	struct sockaddr_in *src_sin, *dst_sin;
	struct ip *ip;
#endif
#ifdef INET6
	struct sockaddr_in6 *src_sin6, *dst_sin6;
	struct ip6_hdr *ip6;
#endif

	/* Compute the length of the cause and add final padding. */
	cause_len = 0;
	if (cause != NULL) {
		struct mbuf *m_at, *m_last = NULL;

		/* Walk the cause chain, summing its length and remembering
		 * the last mbuf so padding can be appended there. */
		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL)
				m_last = m_at;
			cause_len += SCTP_BUF_LEN(m_at);
		}
		/* Chunks are padded out to a 4-byte boundary. */
		padding_len = cause_len % 4;
		if (padding_len != 0) {
			padding_len = 4 - padding_len;
		}
		if (padding_len != 0) {
			if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
				/* Could not append pad bytes; drop the response. */
				sctp_m_freem(cause);
				return;
			}
		}
	} else {
		padding_len = 0;
	}
	/* Get an mbuf for the header. */
	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		len += sizeof(struct ip);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		len += sizeof(struct ip6_hdr);
		break;
#endif
	default:
		break;
	}
#if defined(INET) || defined(INET6)
	if (port) {
		/* Non-zero port means the peer uses UDP encapsulation. */
		len += sizeof(struct udphdr);
	}
#endif
	/* Leave room in front for the link-layer header the stack will
	 * prepend later. */
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#else
	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
#endif
#else
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#endif
	if (mout == NULL) {
		if (cause) {
			sctp_m_freem(cause);
		}
		return;
	}
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
#else
	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
#endif
#else
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
#endif
	SCTP_BUF_LEN(mout) = len;
	SCTP_BUF_NEXT(mout) = cause;
#if defined(__FreeBSD__)
	/* Propagate the received packet's flow id so the reply stays on
	 * the same transmit queue. */
	if (use_mflowid != 0) {
		mout->m_pkthdr.flowid = mflowid;
		mout->m_flags |= M_FLOWID;
	}
#endif
#ifdef INET
	ip = NULL;
#endif
#ifdef INET6
	ip6 = NULL;
#endif
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		src_sin = (struct sockaddr_in *)src;
		dst_sin = (struct sockaddr_in *)dst;
		ip = mtod(mout, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = (sizeof(struct ip) >> 2);
		ip->ip_tos = 0;
#if defined(__FreeBSD__)
		ip->ip_id = ip_newid();
#elif defined(__APPLE__)
#if RANDOM_IP_ID
		ip->ip_id = ip_randomid();
#else
		ip->ip_id = htons(ip_id++);
#endif
#else
		ip->ip_id = htons(ip_id++);
#endif
		ip->ip_off = 0;
		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
		if (port) {
			ip->ip_p = IPPROTO_UDP;
		} else {
			ip->ip_p = IPPROTO_SCTP;
		}
		/* src/dst are from the *received* packet, so they are
		 * deliberately swapped: we reply toward its source. */
		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
		ip->ip_sum = 0;
		len = sizeof(struct ip);
		shout = (struct sctphdr *)((caddr_t)ip + len);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		src_sin6 = (struct sockaddr_in6 *)src;
		dst_sin6 = (struct sockaddr_in6 *)dst;
		ip6 = mtod(mout, struct ip6_hdr *);
		ip6->ip6_flow = htonl(0x60000000);
#if defined(__FreeBSD__)
		if (V_ip6_auto_flowlabel) {
			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
		}
#endif
#if defined(__Userspace__)
		ip6->ip6_hlim = IPv6_HOP_LIMIT;
#else
		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
#endif
		if (port) {
			ip6->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6->ip6_nxt = IPPROTO_SCTP;
		}
		/* Swapped for the same reason as the IPv4 case above. */
		ip6->ip6_src = dst_sin6->sin6_addr;
		ip6->ip6_dst = src_sin6->sin6_addr;
		len = sizeof(struct ip6_hdr);
		shout = (struct sctphdr *)((caddr_t)ip6 + len);
		break;
#endif
	default:
		len = 0;
		shout = mtod(mout, struct sctphdr *);
		break;
	}
#if defined(INET) || defined(INET6)
	if (port) {
		/* The htons() is redundant for a zero test, but harmless;
		 * a zero tunneling port means encapsulation is disabled. */
		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
			sctp_m_freem(mout);
			return;
		}
		udp = (struct udphdr *)shout;
		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
		udp->uh_dport = port;
		udp->uh_sum = 0;
		udp->uh_ulen = htons(sizeof(struct udphdr) +
		                     sizeof(struct sctphdr) +
		                     sizeof(struct sctp_chunkhdr) +
		                     cause_len + padding_len);
		len += sizeof(struct udphdr);
		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
	} else {
		udp = NULL;
	}
#endif
	/* Build the SCTP common header; ports swapped to reply to sender. */
	shout->src_port = sh->dest_port;
	shout->dest_port = sh->src_port;
	shout->checksum = 0;
	if (vtag) {
		shout->v_tag = htonl(vtag);
	} else {
		/* No vtag supplied: reflect the one from the received packet. */
		shout->v_tag = sh->v_tag;
	}
	len += sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
	ch->chunk_type = type;
	if (vtag) {
		ch->chunk_flags = 0;
	} else {
		/* Reflected tag implies we had no TCB for this packet. */
		ch->chunk_flags = SCTP_HAD_NO_TCB;
	}
	ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
	len += sizeof(struct sctp_chunkhdr);
	len += cause_len + padding_len;

	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		sctp_m_freem(mout);
		return;
	}
	SCTP_ATTACH_CHAIN(o_pak, mout, len);
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#if defined(__APPLE__) || defined(__Panda__)
		/* zap the stack pointer to the route */
		bzero(&ro, sizeof(sctp_route_t));
#if defined(__Panda__)
		ro._l_addr.sa.sa_family = AF_INET;
#endif
#endif
		if (port) {
#if !defined(__Windows__) && !defined(__Userspace__)
#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
			if (V_udp_cksum) {
				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
			} else {
				udp->uh_sum = 0;
			}
#else
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
#endif
#else
			udp->uh_sum = 0;
#endif
		}
		/* ip_len byte order differs per platform/version. */
#if defined(__FreeBSD__)
#if __FreeBSD_version >= 1000000
		ip->ip_len = htons(len);
#else
		ip->ip_len = len;
#endif
#elif defined(__APPLE__) || defined(__Userspace__)
		ip->ip_len = len;
#else
		ip->ip_len = htons(len);
#endif
		if (port) {
			/* Encapsulated: CRC32c must be computed in software
			 * over the SCTP portion (after IP + UDP headers). */
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
			if (V_udp_cksum) {
				SCTP_ENABLE_UDP_CSUM(o_pak);
			}
#else
			SCTP_ENABLE_UDP_CSUM(o_pak);
#endif
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
			/* Offload the CRC32c computation to the NIC. */
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
			SCTP_STAT_INCR(sctps_sendhwcrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#endif
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(o_pak);
		}
#endif
#if defined(__APPLE__) || defined(__Panda__)
		SCTP_IP_OUTPUT(ret, o_pak,
		               &ro, NULL, vrf_id);
		/* Free the route if we got one back */
		if (ro.ro_rt) {
			RTFREE(ro.ro_rt);
			ro.ro_rt = NULL;
		}
#else
		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ip6->ip6_plen = len - sizeof(struct ip6_hdr);
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#if defined(__Windows__)
			udp->uh_sum = 0;
#elif !defined(__Userspace__)
			/* A computed UDP checksum of 0 must be sent as 0xffff
			 * (0 on the wire means "no checksum"). */
			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
				udp->uh_sum = 0xffff;
			}
#endif
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 900000
#if __FreeBSD_version > 901000
			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
#else
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
#endif
			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
			SCTP_STAT_INCR(sctps_sendhwcrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#endif
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(o_pak);
		}
#endif
		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		char *buffer;
		struct sockaddr_conn *sconn;

		/* Userspace "connection" transport: hand a flat copy of the
		 * packet to the application-provided output callback. */
		sconn = (struct sockaddr_conn *)src;
#if defined(SCTP_WITH_NO_CSUM)
		SCTP_STAT_INCR(sctps_sendnocrc);
#else
		shout->checksum = sctp_calculate_cksum(mout,
		                                       0);
		SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(mout);
		}
#endif
		/* Don't alloc/free for each packet */
		if ((buffer = malloc(len)) != NULL) {
			m_copydata(mout, 0, len, buffer);
			SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
			free(buffer);
		}
		sctp_m_freem(mout);
		break;
	}
#endif
	default:
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
		        dst->sa_family);
		sctp_m_freem(mout);
		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return;
	}
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;
}

/*
 * Send a SHUTDOWN-COMPLETE in response to an out-of-the-blue SHUTDOWN-ACK
 * (no TCB exists), by delegating to sctp_send_resp_msg() with vtag 0 so
 * the received packet's tag is reflected.
 */
void
sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
                             struct sctphdr *sh,
#if defined(__FreeBSD__)
                             uint8_t use_mflowid, uint32_t mflowid,
#endif
                             uint32_t vrf_id, uint16_t port)
{
	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
#if defined(__FreeBSD__)
	                   use_mflowid, mflowid,
#endif
	                   vrf_id, port);
}

/*
 * Build a HEARTBEAT chunk for the given destination and queue it on the
 * association's control-send queue. No-op if net is NULL or its address
 * family is unsupported.
 */
void
sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net,int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_heartbeat_chunk *hb;
	struct timeval now;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (net == NULL) {
		return;
	}
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* Reject unsupported address families up front. */
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		break;
#endif
#ifdef INET6
	case AF_INET6:
		break;
#endif
#if defined(__Userspace__)
	case
AF_CONN:
		break;
#endif
	default:
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
		return;
	}

	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_heartbeat_chunk);

	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, so_locked);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	/* Hold a reference on the destination while the chunk is queued. */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* Now we have a mbuf that we can fill in with the details */
	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
	/* fill out chunk header */
	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
	hb->ch.chunk_flags = 0;
	hb->ch.chunk_length = htons(chk->send_size);
	/* Fill out hb parameter */
	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
	/* Timestamp lets the HEARTBEAT-ACK be matched for RTT measurement.
	 * NOTE(review): tv_sec/tv_usec are stored in host byte order here —
	 * presumably fine since only this end interprets them; confirm. */
	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
	/* Did our user request this one, put it in */
	hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
#ifdef HAVE_SA_LEN
	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
#else
	/* No sa_len field on this platform: derive the length from the
	 * address family instead. */
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
		break;
#endif
	default:
		hb->heartbeat.hb_info.addr_len = 0;
		break;
	}
#endif
	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
		/*
		 * we only take from the entropy pool if the address is not
		 * confirmed.
		 */
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
	} else {
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
	}
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(hb->heartbeat.hb_info.address,
		       &net->ro._l_addr.sin.sin_addr,
		       sizeof(net->ro._l_addr.sin.sin_addr));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		memcpy(hb->heartbeat.hb_info.address,
		       &net->ro._l_addr.sin6.sin6_addr,
		       sizeof(net->ro._l_addr.sin6.sin6_addr));
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		memcpy(hb->heartbeat.hb_info.address,
		       &net->ro._l_addr.sconn.sconn_addr,
		       sizeof(net->ro._l_addr.sconn.sconn_addr));
		break;
#endif
	default:
		/* NOTE(review): unreachable in practice (the family was
		 * validated at function entry), but if taken this return
		 * would leak chk and the net ref taken above; the break
		 * below is dead code. */
		return;
		break;
	}
	net->hb_responded = 0;
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	SCTP_STAT_INCR(sctps_sendheartbeat);
	return;
}

/*
 * Queue an ECN-ECHO chunk toward net reporting congestion up to high_tsn.
 * If an ECN-ECHO for this destination is already queued, update it in
 * place instead of adding a second one.
 */
void
sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
                   uint32_t high_tsn)
{
	struct sctp_association *asoc;
	struct sctp_ecne_chunk *ecne;
	struct
 sctp_tmit_chunk *chk;

	if (net == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
			/* found a previous ECN_ECHO update it if needed */
			uint32_t cnt, ctsn;
			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
			ctsn = ntohl(ecne->tsn);
			/* Only advance the reported TSN, never move it back. */
			if (SCTP_TSN_GT(high_tsn, ctsn)) {
				ecne->tsn = htonl(high_tsn);
				SCTP_STAT_INCR(sctps_queue_upd_ecne);
			}
			/* Count another CE-marked packet since the last CWR. */
			cnt = ntohl(ecne->num_pkts_since_cwr);
			cnt++;
			ecne->num_pkts_since_cwr = htonl(cnt);
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	SCTP_STAT_INCR(sctps_queue_upd_ecne);
	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_ecne_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	/* Hold a reference on the destination while queued. */
	atomic_add_int(&chk->whoTo->ref_count, 1);

	stcb->asoc.ecn_echo_cnt_onq++;
	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
	ecne->ch.chunk_type = SCTP_ECN_ECHO;
	ecne->ch.chunk_flags = 0;
	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
	ecne->tsn = htonl(high_tsn);
	ecne->num_pkts_since_cwr = htonl(1);
	/* Head insertion: congestion notification goes out first. */
	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

void
/*
 * Queue a PACKET-DROPPED chunk echoing (a truncated copy of) a received
 * packet of 'len' bytes that we had to drop. 'm' is the dropped packet,
 * 'iphlen' the size of its IP header, and 'bad_crc' marks a CRC failure.
 * Only sent if the peer advertised PKT-DROP support.
 */
void
sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
                         struct mbuf *m, int len, int iphlen, int bad_crc)
{
	struct sctp_association *asoc;
	struct sctp_pktdrop_chunk *drp;
	struct sctp_tmit_chunk *chk;
	uint8_t *datap;
	int was_trunc = 0;
	int fullsz = 0;
	long spc;
	int offset;
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	if (!stcb) {
		return;
	}
	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->pktdrop_supported == 0) {
		/*-
		 * peer must declare support before I send one.
		 */
		return;
	}
	if (stcb->sctp_socket == NULL) {
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	len -= iphlen;
	chk->send_size = len;
	/* Validate that we do not have an ABORT in here. */
	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	                                           sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_PACKET_DROPPED:
		case SCTP_ABORT_ASSOCIATION:
		case SCTP_INITIATION_ACK:
			/**
			 * We don't respond with an PKT-DROP to an ABORT
			 * or PKT-DROP. We also do not respond to an
			 * INIT-ACK, because we can't know if the initiation
			 * tag is correct or not.
			 */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		                                           sizeof(*ch), (uint8_t *) & chunk_buf);
	}

	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
		/* only send 1 mtu worth, trim off the
		 * excess on the end.
		 */
		fullsz = len;
		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
		was_trunc = 1;
	}
	chk->asoc = &stcb->asoc;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* shared failure exit: release the chunk */
jump_out:
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
	if (drp == NULL) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
		goto jump_out;
	}
	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
	                              sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
	chk->book_size_scale = 0;
	if (was_trunc) {
		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
		/* Report the original (untruncated) length to the peer. */
		drp->trunc_len = htons(fullsz);
		/* Len is already adjusted to size minus overhead above
		 * take out the pkt_drop chunk itself from it.
		 */
		chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
		len = chk->send_size;
	} else {
		/* no truncation needed */
		drp->ch.chunk_flags = 0;
		drp->trunc_len = htons(0);
	}
	if (bad_crc) {
		drp->ch.chunk_flags |= SCTP_BADCRC;
	}
	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (net) {
		/* we should hit here */
		chk->whoTo = net;
		atomic_add_int(&chk->whoTo->ref_count, 1);
	} else {
		chk->whoTo = NULL;
	}
	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
	chk->rec.chunk_id.can_take_data = 1;
	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
	drp->ch.chunk_length = htons(chk->send_size);
	/* Advertise our receive-buffer limit as the "bottleneck bandwidth". */
	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
	if (spc < 0) {
		spc = 0;
	}
	drp->bottle_bw = htonl(spc);
	if (asoc->my_rwnd) {
		drp->current_onq = htonl(asoc->size_on_reasm_queue +
		                         asoc->size_on_all_streams +
		                         asoc->my_rwnd_control_len +
		                         stcb->sctp_socket->so_rcv.sb_cc);
	} else {
		/*-
		 * If my rwnd is 0, possibly from mbuf depletion as well as
		 * space used, tell the peer there is NO space aka onq == bw
		 */
		drp->current_onq = htonl(spc);
	}
	drp->reserved = 0;
	datap = drp->data;
	/* Copy the dropped packet (minus IP header, possibly truncated). */
	m_copydata(m, iphlen, len, (caddr_t)datap);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * Queue a CWR (congestion window reduced) chunk toward net, covering TSNs
 * up to high_tsn. If a CWR for this destination is already queued, update
 * it in place instead of adding another.
 */
void
sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
{
	struct sctp_association *asoc;
	struct sctp_cwr_chunk *cwr;
	struct sctp_tmit_chunk *chk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (net == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if
 ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
			/* found a previous CWR queued to same destination update it if needed */
			uint32_t ctsn;
			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
			ctsn = ntohl(cwr->tsn);
			/* Only advance the covered TSN, never move it back. */
			if (SCTP_TSN_GT(high_tsn, ctsn)) {
				cwr->tsn = htonl(high_tsn);
			}
			if (override & SCTP_CWR_REDUCE_OVERRIDE) {
				/* Make sure override is carried */
				cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
			}
			return;
		}
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_ECN_CWR;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_cwr_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	/* Hold a reference on the destination while queued. */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
	cwr->ch.chunk_type = SCTP_ECN_CWR;
	cwr->ch.chunk_flags = override;
	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
	cwr->tsn = htonl(high_tsn);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * Append an Outgoing SSN Reset Request parameter to the STREAM-RESET
 * chunk being built in chk, covering 'number_entries' stream ids from
 * 'list' (all outgoing streams if zero), then fix up the chunk length
 * and the chunk's bookkeeping sizes.
 */
void
sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
                          int number_entries, uint16_t * list,
                          uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
	uint16_t len, old_len, i;
	struct sctp_stream_reset_out_request *req_out;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len =
 SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
	req_out->ph.param_length = htons(len);
	req_out->request_seq = htonl(seq);
	req_out->response_seq = htonl(resp_seq);
	req_out->send_reset_at_tsn = htonl(last_sent);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_out->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_out->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}

/*
 * Append an Incoming SSN Reset Request parameter to the STREAM-RESET
 * chunk being built in chk, asking the peer to reset 'number_entries'
 * of our incoming streams (all if zero), then fix up the chunk length.
 */
static void
sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
                         int number_entries, uint16_t *list,
                         uint32_t seq)
{
	uint16_t len, old_len, i;
	struct sctp_stream_reset_in_request *req_in;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
	/* now how long will this param be?
 */
	len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
	req_in->ph.param_length = htons(len);
	req_in->request_seq = htonl(seq);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_in->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_in->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}

/*
 * Append an SSN/TSN Reset Request parameter (request sequence 'seq')
 * to the STREAM-RESET chunk being built in chk, then fix up the chunk
 * length and bookkeeping sizes.
 */
static void
sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
                          uint32_t seq)
{
	uint16_t len, old_len;
	struct sctp_stream_reset_tsn_request *req_tsn;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
	/* now how long will this param be?
 */
	len = sizeof(struct sctp_stream_reset_tsn_request);
	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
	req_tsn->ph.param_length = htons(len);
	req_tsn->request_seq = htonl(seq);

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	/* This parameter is fixed-size (a multiple of 4), so send_size
	 * and book_size end up equal after rounding. */
	chk->send_size = len + old_len;
	chk->book_size = SCTP_SIZE32(chk->send_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	return;
}

/*
 * Append a Re-configuration Response parameter (response sequence
 * 'resp_seq', result code 'result') to the STREAM-RESET chunk being
 * built in chk, then fix up the chunk length and bookkeeping sizes.
 */
void
sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
                             uint32_t resp_seq, uint32_t result)
{
	uint16_t len, old_len;
	struct sctp_stream_reset_response *resp;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
	/* now how long will this param be?
*/ 12389 len = sizeof(struct sctp_stream_reset_response); 12390 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 12391 resp->ph.param_length = htons(len); 12392 resp->response_seq = htonl(resp_seq); 12393 resp->result = ntohl(result); 12394 12395 /* now fix the chunk length */ 12396 ch->chunk_length = htons(len + old_len); 12397 chk->book_size = len + old_len; 12398 chk->book_size_scale = 0; 12399 chk->send_size = SCTP_SIZE32(chk->book_size); 12400 SCTP_BUF_LEN(chk->data) = chk->send_size; 12401 return; 12402 } 12403 12404 void 12405 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, 12406 uint32_t resp_seq, uint32_t result, 12407 uint32_t send_una, uint32_t recv_next) 12408 { 12409 uint16_t len, old_len; 12410 struct sctp_stream_reset_response_tsn *resp; 12411 struct sctp_chunkhdr *ch; 12412 12413 ch = mtod(chk->data, struct sctp_chunkhdr *); 12414 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 12415 12416 /* get to new offset for the param. */ 12417 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); 12418 /* now how long will this param be? 
 */
	len = sizeof(struct sctp_stream_reset_response_tsn);
	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
	resp->ph.param_length = htons(len);
	resp->response_seq = htonl(resp_seq);
	resp->result = htonl(result);
	resp->senders_next_tsn = htonl(send_una);
	resp->receivers_next_tsn = htonl(recv_next);

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}

/*
 * Append an Add Outgoing Streams Request parameter (request sequence
 * 'seq', requesting 'adding' additional outgoing streams) to the
 * STREAM-RESET chunk being built in chk, then fix up the chunk length.
 */
static void
sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
                       uint32_t seq,
                       uint16_t adding)
{
	uint16_t len, old_len;
	struct sctp_chunkhdr *ch;
	struct sctp_stream_reset_add_strm *addstr;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_add_strm);

	/* Fill it out.
 */
	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
	addstr->ph.param_length = htons(len);
	addstr->request_seq = htonl(seq);
	addstr->number_of_streams = htons(adding);
	addstr->reserved = 0;

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	/* Fixed-size parameter: rounding leaves send_size == book_size. */
	chk->send_size = len + old_len;
	chk->book_size = SCTP_SIZE32(chk->send_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	return;
}

/*
 * Append an Add Incoming Streams Request parameter (request sequence
 * 'seq', asking the peer to add 'adding' incoming streams) to the
 * STREAM-RESET chunk being built in chk, then fix up the chunk length.
 */
static void
sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
                      uint32_t seq,
                      uint16_t adding)
{
	uint16_t len, old_len;
	struct sctp_chunkhdr *ch;
	struct sctp_stream_reset_add_strm *addstr;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_add_strm);
	/* Fill it out.
*/ 12486 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS); 12487 addstr->ph.param_length = htons(len); 12488 addstr->request_seq = htonl(seq); 12489 addstr->number_of_streams = htons(adding); 12490 addstr->reserved = 0; 12491 12492 /* now fix the chunk length */ 12493 ch->chunk_length = htons(len + old_len); 12494 chk->send_size = len + old_len; 12495 chk->book_size = SCTP_SIZE32(chk->send_size); 12496 chk->book_size_scale = 0; 12497 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 12498 return; 12499 } 12500 12501 int 12502 sctp_send_str_reset_req(struct sctp_tcb *stcb, 12503 int number_entries, uint16_t *list, 12504 uint8_t send_out_req, 12505 uint8_t send_in_req, 12506 uint8_t send_tsn_req, 12507 uint8_t add_stream, 12508 uint16_t adding_o, 12509 uint16_t adding_i, uint8_t peer_asked) 12510 { 12511 12512 struct sctp_association *asoc; 12513 struct sctp_tmit_chunk *chk; 12514 struct sctp_chunkhdr *ch; 12515 uint32_t seq; 12516 12517 asoc = &stcb->asoc; 12518 if (asoc->stream_reset_outstanding) { 12519 /*- 12520 * Already one pending, must get ACK back to clear the flag. 
		 */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
		return (EBUSY);
	}
	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
	    (add_stream == 0)) {
		/* nothing to do */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	if (send_tsn_req && (send_out_req || send_in_req)) {
		/* error, can't do that */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	/* start with a bare chunk header; parameters are appended below */
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;

	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (stcb->asoc.alternate) {
		chk->whoTo = stcb->asoc.alternate;
	} else {
		chk->whoTo = stcb->asoc.primary_destination;
	}
	/* chunk holds a reference on its destination net */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	/* each request parameter consumes one sequence number */
	seq = stcb->asoc.str_reset_seq_out;
	if (send_out_req) {
		sctp_add_stream_reset_out(chk, number_entries, list,
		                          seq,
		                          (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
		asoc->stream_reset_out_is_outstanding = 1;
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if ((add_stream & 1) &&
	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
		/* Need to allocate more */
		struct sctp_stream_out *oldstream;
		struct sctp_stream_queue_pending *sp, *nsp;
		int i;
#if defined(SCTP_DETAILED_STR_STATS)
		int j;
#endif

		oldstream = stcb->asoc.strmout;
		/* get some more */
		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
		            ((stcb->asoc.streamoutcnt+adding_o) * sizeof(struct sctp_stream_out)),
		            SCTP_M_STRMO);
		if (stcb->asoc.strmout == NULL) {
			uint8_t x;
			/*-
			 * Allocation failed: keep the old array and drop the
			 * add-outgoing request.  NOTE(review): if this was the
			 * only request, the chunk is still queued below with
			 * no parameters -- confirm that is harmless.
			 */
			stcb->asoc.strmout = oldstream;
			/* Turn off the bit */
			x = add_stream & 0xfe;
			add_stream = x;
			goto skip_stuff;
		}
		/* Ok now we proceed with copying the old out stuff and
		 * initializing the new stuff.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
			stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
			/* now anything on those queues?
 */
			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
			}
			/* Now move assoc pointers too */
			if (stcb->asoc.last_out_stream == &oldstream[i]) {
				stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
			}
			if (stcb->asoc.locked_on_sending == &oldstream[i]) {
				stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
			}
		}
		/* now the new streams */
		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
			stcb->asoc.strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
				stcb->asoc.strmout[i].abandoned_sent[j] = 0;
				stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
			}
#else
			stcb->asoc.strmout[i].abandoned_sent[0] = 0;
			stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
#endif
			stcb->asoc.strmout[i].next_sequence_send = 0x0;
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
			stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
		}
		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
		/* old array is free only after everything moved over */
		SCTP_FREE(oldstream, SCTP_M_STRMO);
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
skip_stuff:
	if ((add_stream & 1) && (adding_o > 0)) {
		asoc->strm_pending_add_size = adding_o;
		asoc->peer_req_out = peer_asked;
		sctp_add_an_out_stream(chk, seq, adding_o);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if ((add_stream & 2) && (adding_i > 0)) {
		sctp_add_an_in_stream(chk, seq, adding_i);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk,
		                         number_entries, list, seq);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	asoc->str_reset = chk;
	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	                  chk,
	                  sctp_next);
	asoc->ctrl_queue_cnt++;
	/* arm the retransmission timer for the reset request */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}

/*-
 * Send an ABORT in response to an out-of-the-blue packet, unless that
 * packet itself contains an ABORT (never ABORT an ABORT).  'cause', if
 * non-NULL, is an mbuf chain of error causes; it is consumed either way.
 */
void
sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
                struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
#if defined(__FreeBSD__)
                uint8_t use_mflowid, uint32_t mflowid,
#endif
                uint32_t vrf_id, uint16_t port)
{
	/* Don't respond to an ABORT with an ABORT. */
	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
		if (cause)
			sctp_m_freem(cause);
		return;
	}
	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
#if defined(__FreeBSD__)
	                   use_mflowid, mflowid,
#endif
	                   vrf_id, port);
	return;
}

/*-
 * Send an OPERATION_ERROR chunk to the given peer address; thin wrapper
 * over sctp_send_resp_msg().  'cause' carries the error cause(s).
 */
void
sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
                   struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
#if defined(__FreeBSD__)
                   uint8_t use_mflowid, uint32_t mflowid,
#endif
                   uint32_t vrf_id, uint16_t port)
{
	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
#if defined(__FreeBSD__)
	                   use_mflowid, mflowid,
#endif
	                   vrf_id, port);
	return;
}

/*-
 * Copy up to max_send_len bytes from the user's uio into a fresh mbuf
 * chain.  On success returns the chain, sets *sndout to the byte count
 * copied and *new_tail to the last mbuf; on failure returns NULL with
 * *error set.  Platform forks: Panda/newer FreeBSD use m_uiotombuf(),
 * everything else hand-rolls the copy loop.
 */
static struct mbuf *
sctp_copy_resume(struct uio *uio,
                 int max_send_len,
#if defined(__FreeBSD__) && __FreeBSD_version > 602000
                 int user_marks_eor,
#endif
                 int *error,
                 uint32_t *sndout,
                 struct mbuf **new_tail)
{
#if defined(__Panda__)
	struct mbuf *m;

	m =
	m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
	            (user_marks_eor ? M_EOR : 0));
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		*error = ENOBUFS;
	} else {
		*sndout = m_length(m, NULL);
		*new_tail = m_last(m);
	}
	return (m);
#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
	struct mbuf *m;

	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
	                (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		*error = ENOBUFS;
	} else {
		*sndout = m_length(m, NULL);
		*new_tail = m_last(m);
	}
	return (m);
#else
	/* generic path: hand-roll the uio -> mbuf chain copy */
	int left, cancpy, willcpy;
	struct mbuf *m, *head;

#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
	left = min(uio->uio_resid, max_send_len);
#else
	left = min(uio_resid(uio), max_send_len);
#endif
#else
	left = min(uio->uio_resid, max_send_len);
#endif
	/* Always get a header just in case */
	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
	if (head == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		*error = ENOBUFS;
		return (NULL);
	}
	cancpy = M_TRAILINGSPACE(head);
	willcpy = min(cancpy, left);
	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
	if (*error) {
		sctp_m_freem(head);
		return (NULL);
	}
	*sndout += willcpy;
	left -= willcpy;
	SCTP_BUF_LEN(head) = willcpy;
	m = head;
	*new_tail = head;
	/* keep appending mbufs until the requested length is copied in */
	while (left > 0) {
		/* move in user data */
		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
		if (SCTP_BUF_NEXT(m) == NULL) {
			sctp_m_freem(head);
			*new_tail = NULL;
			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
			*error = ENOBUFS;
			return (NULL);
		}
		m = SCTP_BUF_NEXT(m);
		cancpy = M_TRAILINGSPACE(m);
		willcpy = min(cancpy, left);
		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (*error) {
			sctp_m_freem(head);
			*new_tail = NULL;
			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
			*error = EFAULT;
			return (NULL);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		*sndout += willcpy;
		*new_tail = m;
		if (left == 0) {
			SCTP_BUF_NEXT(m) = NULL;
		}
	}
	return (head);
#endif
}

/*-
 * Copy sp->length bytes from the user's uio into sp->data, reserving
 * resv_upfront bytes of leading space in the first mbuf (room for the
 * DATA chunk header).  Sets sp->tail_mbuf and, on the generic path,
 * rewrites sp->length to the bytes actually copied.  Returns 0 or an
 * errno (ENOBUFS/EFAULT).
 */
static int
sctp_copy_one(struct sctp_stream_queue_pending *sp,
              struct uio *uio,
              int resv_upfront)
{
	int left;
#if defined(__Panda__)
	left = sp->length;
	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
	                       resv_upfront, 0);
	if (sp->data == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		return (ENOBUFS);
	}

	sp->tail_mbuf = m_last(sp->data);
	return (0);

#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
	left = sp->length;
	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
	                       resv_upfront, 0);
	if (sp->data == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		return (ENOBUFS);
	}

	sp->tail_mbuf = m_last(sp->data);
	return (0);
#else
	int cancpy, willcpy, error;
	struct mbuf *m, *head;
	int cpsz = 0;

	/* First one gets a header */
	left = sp->length;
	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		return (ENOBUFS);
	}
	/*-
	 * Add this one for m in now, that way if the alloc fails we won't
	 * have a bad cnt.
 */
	SCTP_BUF_RESV_UF(m, resv_upfront);
	cancpy = M_TRAILINGSPACE(m);
	willcpy = min(cancpy, left);
	while (left > 0) {
		/* move in user data */
		error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (error) {
			sctp_m_freem(head);
			return (error);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		cpsz += willcpy;
		if (left > 0) {
			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
			if (SCTP_BUF_NEXT(m) == NULL) {
				/*
				 * the head goes back to caller, he can free
				 * the rest
				 */
				sctp_m_freem(head);
				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
				return (ENOBUFS);
			}
			m = SCTP_BUF_NEXT(m);
			cancpy = M_TRAILINGSPACE(m);
			willcpy = min(cancpy, left);
		} else {
			sp->tail_mbuf = m;
			SCTP_BUF_NEXT(m) = NULL;
		}
	}
	sp->data = head;
	sp->length = cpsz;
	return (0);
#endif
}



/*-
 * Allocate and fill a stream-queue-pending entry with up to max_send_len
 * bytes copied from the user's uio, carrying the send parameters from
 * srcv.  Returns the new entry, or NULL with *error set (ECONNRESET when
 * the association is shutting down, ENOMEM, or a copy error).
 */
static struct sctp_stream_queue_pending *
sctp_copy_it_in(struct sctp_tcb *stcb,
                struct sctp_association *asoc,
                struct sctp_sndrcvinfo *srcv,
                struct uio *uio,
                struct sctp_nets *net,
                int max_send_len,
                int user_marks_eor,
                int *error)

{
	/*-
	 * This routine must be very careful in its work. Protocol
	 * processing is up and running so care must be taken to spl...()
	 * when you need to do something that may effect the stcb/asoc. The
	 * sb is locked however. When data is copied the protocol processing
	 * should be enabled since this is a slower operation...
	 */
	struct sctp_stream_queue_pending *sp = NULL;
	int resv_in_first;

	*error = 0;
	/* Now can we send this?
 */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
		*error = ECONNRESET;
		goto out_now;
	}
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		*error = ENOMEM;
		goto out_now;
	}
	/* carry the caller's send parameters into the pending entry */
	sp->act_flags = 0;
	sp->sender_all_done = 0;
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);

	sp->stream = srcv->sinfo_stream;
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
	sp->length = min(uio->uio_resid, max_send_len);
#else
	sp->length = min(uio_resid(uio), max_send_len);
#endif
#else
	sp->length = min(uio->uio_resid, max_send_len);
#endif
	/*-
	 * The message is complete if the whole user buffer fits in this
	 * pass AND the user either doesn't use explicit EOR, set EOF, or
	 * explicitly marked EOR.
	 */
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
	if ((sp->length == (uint32_t)uio->uio_resid) &&
#else
	if ((sp->length == (uint32_t)uio_resid(uio)) &&
#endif
#else
	if ((sp->length == (uint32_t)uio->uio_resid) &&
#endif
	    ((user_marks_eor == 0) ||
	     (srcv->sinfo_flags & SCTP_EOF) ||
	     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
		sp->msg_is_complete = 1;
	} else {
		sp->msg_is_complete = 0;
	}
	sp->sender_all_done = 0;
	sp->some_taken = 0;
	sp->put_last_out = 0;
	/* leave room in the first mbuf for the DATA chunk header */
	resv_in_first = sizeof(struct sctp_data_chunk);
	sp->data = sp->tail_mbuf = NULL;
	if (sp->length == 0) {
		*error = 0;
		goto skip_copy;
	}
	if (srcv->sinfo_keynumber_valid) {
		sp->auth_keyid = srcv->sinfo_keynumber;
	} else {
		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
	}
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
		/* hold a reference on the auth key while data is queued */
		sctp_auth_key_acquire(stcb, sp->auth_keyid);
		sp->holds_key_ref = 1;
	}
	/* drop the socket lock around the (possibly slow) user copy */
#if defined(__APPLE__)
	SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
#endif
	*error = sctp_copy_one(sp, uio, resv_in_first);
#if defined(__APPLE__)
	SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
#endif
skip_copy:
	if (*error) {
		sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
		sp = NULL;
	} else {
		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
			sp->net = net;
			atomic_add_int(&sp->net->ref_count, 1);
		} else {
			sp->net = NULL;
		}
		sctp_set_prsctp_policy(sp);
	}
out_now:
	return (sp);
}


/*-
 * Socket-layer send entry point.  Normalizes the per-platform calling
 * convention, extracts any SCTP_SNDRCV cmsg from 'control', maps a
 * V4-mapped IPv6 address down to plain IPv4, and hands off to
 * sctp_lower_sosend().
 */
int
sctp_sosend(struct socket *so,
            struct sockaddr *addr,
            struct uio *uio,
#ifdef __Panda__
            pakhandle_type top,
            pakhandle_type icontrol,
#else
            struct mbuf *top,
            struct mbuf *control,
#endif
#if defined(__APPLE__) || defined(__Panda__)
            int flags
#else
            int flags,
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
            struct thread *p
#elif defined(__Windows__)
            PKTHREAD p
#else
#if defined(__Userspace__)
            /*
             * proc is a dummy in __Userspace__ and will not be passed
             * to sctp_lower_sosend
             */
#endif
            struct proc *p
#endif
#endif
)
{
#ifdef __Panda__
	struct mbuf *control = NULL;
#endif
#if defined(__APPLE__)
	struct proc *p = current_proc();
#endif
	int error, use_sndinfo = 0;
	struct sctp_sndrcvinfo sndrcvninfo;
	struct sockaddr *addr_to_use;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

#if defined(__APPLE__)
	SCTP_SOCKET_LOCK(so, 1);
#endif
#ifdef __Panda__
	control = SCTP_HEADER_TO_CHAIN(icontrol);
#endif
	if (control) {
		/* process cmsg snd/rcv info (maybe a assoc-id) */
		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
		                   sizeof(sndrcvninfo))) {
			/* got one */
			use_sndinfo = 1;
		}
	}
	addr_to_use = addr;
#if defined(INET) && defined(INET6)
	if ((addr) && (addr->sa_family == AF_INET6)) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)addr;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			/* convert ::ffff:a.b.c.d to a plain IPv4 sockaddr */
			in6_sin6_2_sin(&sin, sin6);
			addr_to_use = (struct sockaddr *)&sin;
		}
	}
#endif
	error = sctp_lower_sosend(so, addr_to_use, uio, top,
#ifdef __Panda__
	                          icontrol,
#else
	                          control,
#endif
	                          flags,
	                          use_sndinfo ? &sndrcvninfo: NULL
#if !(defined(__Panda__) || defined(__Userspace__))
	                          , p
#endif
	);
#if defined(__APPLE__)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return (error);
}


int
sctp_lower_sosend(struct socket *so,
                  struct sockaddr *addr,
                  struct uio *uio,
#ifdef __Panda__
                  pakhandle_type i_pak,
                  pakhandle_type i_control,
#else
                  struct mbuf *i_pak,
                  struct mbuf *control,
#endif
                  int flags,
                  struct sctp_sndrcvinfo *srcv
#if !(defined( __Panda__) || defined(__Userspace__))
                  ,
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
                  struct thread *p
#elif defined(__Windows__)
                  PKTHREAD p
#else
                  struct proc *p
#endif
#endif
)
{
	unsigned int sndlen = 0, max_len;
	int error, len;
	struct mbuf *top = NULL;
#ifdef __Panda__
	struct mbuf *control = NULL;
#endif
	int queue_only = 0, queue_only_for_init = 0;
	int free_cnt_applied = 0;
	int
un_sent; 13145 int now_filled = 0; 13146 unsigned int inqueue_bytes = 0; 13147 struct sctp_block_entry be; 13148 struct sctp_inpcb *inp; 13149 struct sctp_tcb *stcb = NULL; 13150 struct timeval now; 13151 struct sctp_nets *net; 13152 struct sctp_association *asoc; 13153 struct sctp_inpcb *t_inp; 13154 int user_marks_eor; 13155 int create_lock_applied = 0; 13156 int nagle_applies = 0; 13157 int some_on_control = 0; 13158 int got_all_of_the_send = 0; 13159 int hold_tcblock = 0; 13160 int non_blocking = 0; 13161 uint32_t local_add_more, local_soresv = 0; 13162 uint16_t port; 13163 uint16_t sinfo_flags; 13164 sctp_assoc_t sinfo_assoc_id; 13165 13166 error = 0; 13167 net = NULL; 13168 stcb = NULL; 13169 asoc = NULL; 13170 13171 #if defined(__APPLE__) 13172 sctp_lock_assert(so); 13173 #endif 13174 t_inp = inp = (struct sctp_inpcb *)so->so_pcb; 13175 if (inp == NULL) { 13176 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13177 error = EINVAL; 13178 if (i_pak) { 13179 SCTP_RELEASE_PKT(i_pak); 13180 } 13181 return (error); 13182 } 13183 if ((uio == NULL) && (i_pak == NULL)) { 13184 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13185 return (EINVAL); 13186 } 13187 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 13188 atomic_add_int(&inp->total_sends, 1); 13189 if (uio) { 13190 #if defined(__APPLE__) 13191 #if defined(APPLE_LEOPARD) 13192 if (uio->uio_resid < 0) { 13193 #else 13194 if (uio_resid(uio) < 0) { 13195 #endif 13196 #else 13197 if (uio->uio_resid < 0) { 13198 #endif 13199 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13200 return (EINVAL); 13201 } 13202 #if defined(__APPLE__) 13203 #if defined(APPLE_LEOPARD) 13204 sndlen = uio->uio_resid; 13205 #else 13206 sndlen = uio_resid(uio); 13207 #endif 13208 #else 13209 sndlen = uio->uio_resid; 13210 #endif 13211 } else { 13212 top = SCTP_HEADER_TO_CHAIN(i_pak); 13213 #ifdef __Panda__ 13214 /*- 13215 * app len indicates the datalen, 
dgsize for cases 13216 * of SCTP_EOF/ABORT will not have the right len 13217 */ 13218 sndlen = SCTP_APP_DATA_LEN(i_pak); 13219 /*- 13220 * Set the particle len also to zero to match 13221 * up with app len. We only have one particle 13222 * if app len is zero for Panda. This is ensured 13223 * in the socket lib 13224 */ 13225 if (sndlen == 0) { 13226 SCTP_BUF_LEN(top) = 0; 13227 } 13228 /*- 13229 * We delink the chain from header, but keep 13230 * the header around as we will need it in 13231 * EAGAIN case 13232 */ 13233 SCTP_DETACH_HEADER_FROM_CHAIN(i_pak); 13234 #else 13235 sndlen = SCTP_HEADER_LEN(i_pak); 13236 #endif 13237 } 13238 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n", 13239 (void *)addr, 13240 sndlen); 13241 #ifdef __Panda__ 13242 if (i_control) { 13243 control = SCTP_HEADER_TO_CHAIN(i_control); 13244 } 13245 #endif 13246 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 13247 (inp->sctp_socket->so_qlimit)) { 13248 /* The listener can NOT send */ 13249 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 13250 error = ENOTCONN; 13251 goto out_unlocked; 13252 } 13253 /** 13254 * Pre-screen address, if one is given the sin-len 13255 * must be set correctly! 
13256 */ 13257 if (addr) { 13258 union sctp_sockstore *raddr = (union sctp_sockstore *)addr; 13259 switch (raddr->sa.sa_family) { 13260 #ifdef INET 13261 case AF_INET: 13262 #ifdef HAVE_SIN_LEN 13263 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) { 13264 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13265 error = EINVAL; 13266 goto out_unlocked; 13267 } 13268 #endif 13269 port = raddr->sin.sin_port; 13270 break; 13271 #endif 13272 #ifdef INET6 13273 case AF_INET6: 13274 #ifdef HAVE_SIN6_LEN 13275 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) { 13276 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13277 error = EINVAL; 13278 goto out_unlocked; 13279 } 13280 #endif 13281 port = raddr->sin6.sin6_port; 13282 break; 13283 #endif 13284 #if defined(__Userspace__) 13285 case AF_CONN: 13286 #ifdef HAVE_SCONN_LEN 13287 if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) { 13288 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13289 error = EINVAL; 13290 goto out_unlocked; 13291 } 13292 #endif 13293 port = raddr->sconn.sconn_port; 13294 break; 13295 #endif 13296 default: 13297 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT); 13298 error = EAFNOSUPPORT; 13299 goto out_unlocked; 13300 } 13301 } else 13302 port = 0; 13303 13304 if (srcv) { 13305 sinfo_flags = srcv->sinfo_flags; 13306 sinfo_assoc_id = srcv->sinfo_assoc_id; 13307 if (INVALID_SINFO_FLAG(sinfo_flags) || 13308 PR_SCTP_INVALID_POLICY(sinfo_flags)) { 13309 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13310 error = EINVAL; 13311 goto out_unlocked; 13312 } 13313 if (srcv->sinfo_flags) 13314 SCTP_STAT_INCR(sctps_sends_with_flags); 13315 } else { 13316 sinfo_flags = inp->def_send.sinfo_flags; 13317 sinfo_assoc_id = inp->def_send.sinfo_assoc_id; 13318 } 13319 if (sinfo_flags & SCTP_SENDALL) { 13320 /* its a sendall */ 13321 error = sctp_sendall(inp, uio, top, srcv); 13322 top = NULL; 13323 
goto out_unlocked; 13324 } 13325 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) { 13326 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13327 error = EINVAL; 13328 goto out_unlocked; 13329 } 13330 /* now we must find the assoc */ 13331 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 13332 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 13333 SCTP_INP_RLOCK(inp); 13334 stcb = LIST_FIRST(&inp->sctp_asoc_list); 13335 if (stcb) { 13336 SCTP_TCB_LOCK(stcb); 13337 hold_tcblock = 1; 13338 } 13339 SCTP_INP_RUNLOCK(inp); 13340 } else if (sinfo_assoc_id) { 13341 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0); 13342 } else if (addr) { 13343 /*- 13344 * Since we did not use findep we must 13345 * increment it, and if we don't find a tcb 13346 * decrement it. 13347 */ 13348 SCTP_INP_WLOCK(inp); 13349 SCTP_INP_INCR_REF(inp); 13350 SCTP_INP_WUNLOCK(inp); 13351 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 13352 if (stcb == NULL) { 13353 SCTP_INP_WLOCK(inp); 13354 SCTP_INP_DECR_REF(inp); 13355 SCTP_INP_WUNLOCK(inp); 13356 } else { 13357 hold_tcblock = 1; 13358 } 13359 } 13360 if ((stcb == NULL) && (addr)) { 13361 /* Possible implicit send? */ 13362 SCTP_ASOC_CREATE_LOCK(inp); 13363 create_lock_applied = 1; 13364 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 13365 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 13366 /* Should I really unlock ? 
*/ 13367 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13368 error = EINVAL; 13369 goto out_unlocked; 13370 13371 } 13372 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 13373 (addr->sa_family == AF_INET6)) { 13374 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13375 error = EINVAL; 13376 goto out_unlocked; 13377 } 13378 SCTP_INP_WLOCK(inp); 13379 SCTP_INP_INCR_REF(inp); 13380 SCTP_INP_WUNLOCK(inp); 13381 /* With the lock applied look again */ 13382 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 13383 if ((stcb == NULL) && (control != NULL) && (port > 0)) { 13384 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error); 13385 } 13386 if (stcb == NULL) { 13387 SCTP_INP_WLOCK(inp); 13388 SCTP_INP_DECR_REF(inp); 13389 SCTP_INP_WUNLOCK(inp); 13390 } else { 13391 hold_tcblock = 1; 13392 } 13393 if (error) { 13394 goto out_unlocked; 13395 } 13396 if (t_inp != inp) { 13397 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 13398 error = ENOTCONN; 13399 goto out_unlocked; 13400 } 13401 } 13402 if (stcb == NULL) { 13403 if (addr == NULL) { 13404 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); 13405 error = ENOENT; 13406 goto out_unlocked; 13407 } else { 13408 /* We must go ahead and start the INIT process */ 13409 uint32_t vrf_id; 13410 13411 if ((sinfo_flags & SCTP_ABORT) || 13412 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) { 13413 /*- 13414 * User asks to abort a non-existant assoc, 13415 * or EOF a non-existant assoc with no data 13416 */ 13417 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); 13418 error = ENOENT; 13419 goto out_unlocked; 13420 } 13421 /* get an asoc/stcb struct */ 13422 vrf_id = inp->def_vrf_id; 13423 #ifdef INVARIANTS 13424 if (create_lock_applied == 0) { 13425 panic("Error, should hold create lock and I don't?"); 13426 } 13427 #endif 13428 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, 13429 #if 
!(defined( __Panda__) || defined(__Userspace__)) 13430 p 13431 #else 13432 (struct proc *)NULL 13433 #endif 13434 ); 13435 if (stcb == NULL) { 13436 /* Error is setup for us in the call */ 13437 goto out_unlocked; 13438 } 13439 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 13440 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 13441 /* Set the connected flag so we can queue data */ 13442 soisconnecting(so); 13443 } 13444 hold_tcblock = 1; 13445 if (create_lock_applied) { 13446 SCTP_ASOC_CREATE_UNLOCK(inp); 13447 create_lock_applied = 0; 13448 } else { 13449 SCTP_PRINTF("Huh-3? create lock should have been on??\n"); 13450 } 13451 /* Turn on queue only flag to prevent data from being sent */ 13452 queue_only = 1; 13453 asoc = &stcb->asoc; 13454 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); 13455 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 13456 13457 /* initialize authentication params for the assoc */ 13458 sctp_initialize_auth_params(inp, stcb); 13459 13460 if (control) { 13461 if (sctp_process_cmsgs_for_init(stcb, control, &error)) { 13462 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7); 13463 hold_tcblock = 0; 13464 stcb = NULL; 13465 goto out_unlocked; 13466 } 13467 } 13468 /* out with the INIT */ 13469 queue_only_for_init = 1; 13470 /*- 13471 * we may want to dig in after this call and adjust the MTU 13472 * value. It defaulted to 1500 (constant) but the ro 13473 * structure may now have an update and thus we may need to 13474 * change it BEFORE we append the message. 
13475 */ 13476 } 13477 } else 13478 asoc = &stcb->asoc; 13479 if (srcv == NULL) 13480 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send; 13481 if (srcv->sinfo_flags & SCTP_ADDR_OVER) { 13482 if (addr) 13483 net = sctp_findnet(stcb, addr); 13484 else 13485 net = NULL; 13486 if ((net == NULL) || 13487 ((port != 0) && (port != stcb->rport))) { 13488 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13489 error = EINVAL; 13490 goto out_unlocked; 13491 } 13492 } else { 13493 if (stcb->asoc.alternate) { 13494 net = stcb->asoc.alternate; 13495 } else { 13496 net = stcb->asoc.primary_destination; 13497 } 13498 } 13499 atomic_add_int(&stcb->total_sends, 1); 13500 /* Keep the stcb from being freed under our feet */ 13501 atomic_add_int(&asoc->refcnt, 1); 13502 free_cnt_applied = 1; 13503 13504 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) { 13505 if (sndlen > asoc->smallest_mtu) { 13506 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 13507 error = EMSGSIZE; 13508 goto out_unlocked; 13509 } 13510 } 13511 #if defined(__Userspace__) 13512 if (inp->recv_callback) { 13513 non_blocking = 1; 13514 } 13515 #else 13516 if (SCTP_SO_IS_NBIO(so) 13517 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 13518 || (flags & MSG_NBIO) 13519 #endif 13520 ) { 13521 non_blocking = 1; 13522 } 13523 #endif 13524 /* would we block? 
*/ 13525 if (non_blocking) { 13526 if (hold_tcblock == 0) { 13527 SCTP_TCB_LOCK(stcb); 13528 hold_tcblock = 1; 13529 } 13530 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 13531 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) || 13532 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 13533 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK); 13534 if (sndlen > SCTP_SB_LIMIT_SND(so)) 13535 error = EMSGSIZE; 13536 else 13537 error = EWOULDBLOCK; 13538 goto out_unlocked; 13539 } 13540 stcb->asoc.sb_send_resv += sndlen; 13541 SCTP_TCB_UNLOCK(stcb); 13542 hold_tcblock = 0; 13543 } else { 13544 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen); 13545 } 13546 local_soresv = sndlen; 13547 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 13548 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 13549 error = ECONNRESET; 13550 goto out_unlocked; 13551 } 13552 if (create_lock_applied) { 13553 SCTP_ASOC_CREATE_UNLOCK(inp); 13554 create_lock_applied = 0; 13555 } 13556 if (asoc->stream_reset_outstanding) { 13557 /* 13558 * Can't queue any data while stream reset is underway. 
13559 */ 13560 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN); 13561 error = EAGAIN; 13562 goto out_unlocked; 13563 } 13564 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 13565 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 13566 queue_only = 1; 13567 } 13568 /* we are now done with all control */ 13569 if (control) { 13570 sctp_m_freem(control); 13571 control = NULL; 13572 } 13573 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 13574 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 13575 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 13576 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 13577 if (srcv->sinfo_flags & SCTP_ABORT) { 13578 ; 13579 } else { 13580 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 13581 error = ECONNRESET; 13582 goto out_unlocked; 13583 } 13584 } 13585 /* Ok, we will attempt a msgsnd :> */ 13586 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) 13587 if (p) { 13588 #if defined(__FreeBSD__) && __FreeBSD_version >= 603000 13589 p->td_ru.ru_msgsnd++; 13590 #elif defined(__FreeBSD__) && __FreeBSD_version >= 500000 13591 p->td_proc->p_stats->p_ru.ru_msgsnd++; 13592 #else 13593 p->p_stats->p_ru.ru_msgsnd++; 13594 #endif 13595 } 13596 #endif 13597 /* Are we aborting? */ 13598 if (srcv->sinfo_flags & SCTP_ABORT) { 13599 struct mbuf *mm; 13600 int tot_demand, tot_out = 0, max_out; 13601 13602 SCTP_STAT_INCR(sctps_sends_with_abort); 13603 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 13604 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 13605 /* It has to be up before we abort */ 13606 /* how big is the user initiated abort? 
*/ 13607 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13608 error = EINVAL; 13609 goto out; 13610 } 13611 if (hold_tcblock) { 13612 SCTP_TCB_UNLOCK(stcb); 13613 hold_tcblock = 0; 13614 } 13615 if (top) { 13616 struct mbuf *cntm = NULL; 13617 13618 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA); 13619 if (sndlen != 0) { 13620 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) { 13621 tot_out += SCTP_BUF_LEN(cntm); 13622 } 13623 } 13624 } else { 13625 /* Must fit in a MTU */ 13626 tot_out = sndlen; 13627 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 13628 if (tot_demand > SCTP_DEFAULT_ADD_MORE) { 13629 /* To big */ 13630 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 13631 error = EMSGSIZE; 13632 goto out; 13633 } 13634 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA); 13635 } 13636 if (mm == NULL) { 13637 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 13638 error = ENOMEM; 13639 goto out; 13640 } 13641 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); 13642 max_out -= sizeof(struct sctp_abort_msg); 13643 if (tot_out > max_out) { 13644 tot_out = max_out; 13645 } 13646 if (mm) { 13647 struct sctp_paramhdr *ph; 13648 13649 /* now move forward the data pointer */ 13650 ph = mtod(mm, struct sctp_paramhdr *); 13651 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 13652 ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out); 13653 ph++; 13654 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr); 13655 if (top == NULL) { 13656 #if defined(__APPLE__) 13657 SCTP_SOCKET_UNLOCK(so, 0); 13658 #endif 13659 error = uiomove((caddr_t)ph, (int)tot_out, uio); 13660 #if defined(__APPLE__) 13661 SCTP_SOCKET_LOCK(so, 0); 13662 #endif 13663 if (error) { 13664 /*- 13665 * Here if we can't get his data we 13666 * still abort we just don't get to 13667 * send the users note :-0 13668 */ 13669 sctp_m_freem(mm); 13670 mm = NULL; 13671 } 
13672 } else { 13673 if (sndlen != 0) { 13674 SCTP_BUF_NEXT(mm) = top; 13675 } 13676 } 13677 } 13678 if (hold_tcblock == 0) { 13679 SCTP_TCB_LOCK(stcb); 13680 } 13681 atomic_add_int(&stcb->asoc.refcnt, -1); 13682 free_cnt_applied = 0; 13683 /* release this lock, otherwise we hang on ourselves */ 13684 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED); 13685 /* now relock the stcb so everything is sane */ 13686 hold_tcblock = 0; 13687 stcb = NULL; 13688 /* In this case top is already chained to mm 13689 * avoid double free, since we free it below if 13690 * top != NULL and driver would free it after sending 13691 * the packet out 13692 */ 13693 if (sndlen != 0) { 13694 top = NULL; 13695 } 13696 goto out_unlocked; 13697 } 13698 /* Calculate the maximum we can send */ 13699 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 13700 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 13701 if (non_blocking) { 13702 /* we already checked for non-blocking above. */ 13703 max_len = sndlen; 13704 } else { 13705 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 13706 } 13707 } else { 13708 max_len = 0; 13709 } 13710 if (hold_tcblock) { 13711 SCTP_TCB_UNLOCK(stcb); 13712 hold_tcblock = 0; 13713 } 13714 /* Is the stream no. valid? */ 13715 if (srcv->sinfo_stream >= asoc->streamoutcnt) { 13716 /* Invalid stream number */ 13717 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13718 error = EINVAL; 13719 goto out_unlocked; 13720 } 13721 if (asoc->strmout == NULL) { 13722 /* huh? software error */ 13723 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); 13724 error = EFAULT; 13725 goto out_unlocked; 13726 } 13727 13728 /* Unless E_EOR mode is on, we must make a send FIT in one call. 
*/ 13729 if ((user_marks_eor == 0) && 13730 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) { 13731 /* It will NEVER fit */ 13732 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 13733 error = EMSGSIZE; 13734 goto out_unlocked; 13735 } 13736 if ((uio == NULL) && user_marks_eor) { 13737 /*- 13738 * We do not support eeor mode for 13739 * sending with mbuf chains (like sendfile). 13740 */ 13741 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13742 error = EINVAL; 13743 goto out_unlocked; 13744 } 13745 13746 if (user_marks_eor) { 13747 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold)); 13748 } else { 13749 /*- 13750 * For non-eeor the whole message must fit in 13751 * the socket send buffer. 13752 */ 13753 local_add_more = sndlen; 13754 } 13755 len = 0; 13756 if (non_blocking) { 13757 goto skip_preblock; 13758 } 13759 if (((max_len <= local_add_more) && 13760 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) || 13761 (max_len == 0) || 13762 ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 13763 /* No room right now ! 
*/ 13764 SOCKBUF_LOCK(&so->so_snd); 13765 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 13766 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) || 13767 ((stcb->asoc.stream_queue_cnt+stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 13768 SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n", 13769 (unsigned int)SCTP_SB_LIMIT_SND(so), 13770 inqueue_bytes, 13771 local_add_more, 13772 stcb->asoc.stream_queue_cnt, 13773 stcb->asoc.chunks_on_out_queue, 13774 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)); 13775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 13776 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen); 13777 } 13778 be.error = 0; 13779 #if !defined(__Panda__) && !defined(__Windows__) 13780 stcb->block_entry = &be; 13781 #endif 13782 error = sbwait(&so->so_snd); 13783 stcb->block_entry = NULL; 13784 if (error || so->so_error || be.error) { 13785 if (error == 0) { 13786 if (so->so_error) 13787 error = so->so_error; 13788 if (be.error) { 13789 error = be.error; 13790 } 13791 } 13792 SOCKBUF_UNLOCK(&so->so_snd); 13793 goto out_unlocked; 13794 } 13795 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 13796 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 13797 asoc, stcb->asoc.total_output_queue_size); 13798 } 13799 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 13800 goto out_unlocked; 13801 } 13802 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 13803 } 13804 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 13805 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 13806 } else { 13807 max_len = 0; 13808 } 13809 SOCKBUF_UNLOCK(&so->so_snd); 13810 } 13811 13812 skip_preblock: 13813 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 13814 goto out_unlocked; 13815 } 13816 #if defined(__APPLE__) 13817 error 
= sblock(&so->so_snd, SBLOCKWAIT(flags)); 13818 #endif 13819 /* sndlen covers for mbuf case 13820 * uio_resid covers for the non-mbuf case 13821 * NOTE: uio will be null when top/mbuf is passed 13822 */ 13823 if (sndlen == 0) { 13824 if (srcv->sinfo_flags & SCTP_EOF) { 13825 got_all_of_the_send = 1; 13826 goto dataless_eof; 13827 } else { 13828 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13829 error = EINVAL; 13830 goto out; 13831 } 13832 } 13833 if (top == NULL) { 13834 struct sctp_stream_queue_pending *sp; 13835 struct sctp_stream_out *strm; 13836 uint32_t sndout; 13837 13838 SCTP_TCB_SEND_LOCK(stcb); 13839 if ((asoc->stream_locked) && 13840 (asoc->stream_locked_on != srcv->sinfo_stream)) { 13841 SCTP_TCB_SEND_UNLOCK(stcb); 13842 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13843 error = EINVAL; 13844 goto out; 13845 } 13846 SCTP_TCB_SEND_UNLOCK(stcb); 13847 13848 strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 13849 if (strm->last_msg_incomplete == 0) { 13850 do_a_copy_in: 13851 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error); 13852 if ((sp == NULL) || (error)) { 13853 goto out; 13854 } 13855 SCTP_TCB_SEND_LOCK(stcb); 13856 if (sp->msg_is_complete) { 13857 strm->last_msg_incomplete = 0; 13858 asoc->stream_locked = 0; 13859 } else { 13860 /* Just got locked to this guy in 13861 * case of an interrupt. 
13862 */ 13863 strm->last_msg_incomplete = 1; 13864 asoc->stream_locked = 1; 13865 asoc->stream_locked_on = srcv->sinfo_stream; 13866 sp->sender_all_done = 0; 13867 } 13868 sctp_snd_sb_alloc(stcb, sp->length); 13869 atomic_add_int(&asoc->stream_queue_cnt, 1); 13870 if (srcv->sinfo_flags & SCTP_UNORDERED) { 13871 SCTP_STAT_INCR(sctps_sends_with_unord); 13872 } 13873 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 13874 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1); 13875 SCTP_TCB_SEND_UNLOCK(stcb); 13876 } else { 13877 SCTP_TCB_SEND_LOCK(stcb); 13878 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); 13879 SCTP_TCB_SEND_UNLOCK(stcb); 13880 if (sp == NULL) { 13881 /* ???? Huh ??? last msg is gone */ 13882 #ifdef INVARIANTS 13883 panic("Warning: Last msg marked incomplete, yet nothing left?"); 13884 #else 13885 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n"); 13886 strm->last_msg_incomplete = 0; 13887 #endif 13888 goto do_a_copy_in; 13889 13890 } 13891 } 13892 #if defined(__APPLE__) 13893 #if defined(APPLE_LEOPARD) 13894 while (uio->uio_resid > 0) { 13895 #else 13896 while (uio_resid(uio) > 0) { 13897 #endif 13898 #else 13899 while (uio->uio_resid > 0) { 13900 #endif 13901 /* How much room do we have? 
*/ 13902 struct mbuf *new_tail, *mm; 13903 13904 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 13905 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size; 13906 else 13907 max_len = 0; 13908 13909 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) || 13910 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) || 13911 #if defined(__APPLE__) 13912 #if defined(APPLE_LEOPARD) 13913 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) { 13914 #else 13915 (uio_resid(uio) && (uio_resid(uio) <= (int)max_len))) { 13916 #endif 13917 #else 13918 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) { 13919 #endif 13920 sndout = 0; 13921 new_tail = NULL; 13922 if (hold_tcblock) { 13923 SCTP_TCB_UNLOCK(stcb); 13924 hold_tcblock = 0; 13925 } 13926 #if defined(__APPLE__) 13927 SCTP_SOCKET_UNLOCK(so, 0); 13928 #endif 13929 #if defined(__FreeBSD__) && __FreeBSD_version > 602000 13930 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail); 13931 #else 13932 mm = sctp_copy_resume(uio, max_len, &error, &sndout, &new_tail); 13933 #endif 13934 #if defined(__APPLE__) 13935 SCTP_SOCKET_LOCK(so, 0); 13936 #endif 13937 if ((mm == NULL) || error) { 13938 if (mm) { 13939 sctp_m_freem(mm); 13940 } 13941 goto out; 13942 } 13943 /* Update the mbuf and count */ 13944 SCTP_TCB_SEND_LOCK(stcb); 13945 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 13946 /* we need to get out. 13947 * Peer probably aborted. 
13948 */ 13949 sctp_m_freem(mm); 13950 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) { 13951 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 13952 error = ECONNRESET; 13953 } 13954 SCTP_TCB_SEND_UNLOCK(stcb); 13955 goto out; 13956 } 13957 if (sp->tail_mbuf) { 13958 /* tack it to the end */ 13959 SCTP_BUF_NEXT(sp->tail_mbuf) = mm; 13960 sp->tail_mbuf = new_tail; 13961 } else { 13962 /* A stolen mbuf */ 13963 sp->data = mm; 13964 sp->tail_mbuf = new_tail; 13965 } 13966 sctp_snd_sb_alloc(stcb, sndout); 13967 atomic_add_int(&sp->length,sndout); 13968 len += sndout; 13969 13970 /* Did we reach EOR? */ 13971 #if defined(__APPLE__) 13972 #if defined(APPLE_LEOPARD) 13973 if ((uio->uio_resid == 0) && 13974 #else 13975 if ((uio_resid(uio) == 0) && 13976 #endif 13977 #else 13978 if ((uio->uio_resid == 0) && 13979 #endif 13980 ((user_marks_eor == 0) || 13981 (srcv->sinfo_flags & SCTP_EOF) || 13982 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { 13983 sp->msg_is_complete = 1; 13984 } else { 13985 sp->msg_is_complete = 0; 13986 } 13987 SCTP_TCB_SEND_UNLOCK(stcb); 13988 } 13989 #if defined(__APPLE__) 13990 #if defined(APPLE_LEOPARD) 13991 if (uio->uio_resid == 0) { 13992 #else 13993 if (uio_resid(uio) == 0) { 13994 #endif 13995 #else 13996 if (uio->uio_resid == 0) { 13997 #endif 13998 /* got it all? */ 13999 continue; 14000 } 14001 /* PR-SCTP? 
*/ 14002 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) { 14003 /* This is ugly but we must assure locking order */ 14004 if (hold_tcblock == 0) { 14005 SCTP_TCB_LOCK(stcb); 14006 hold_tcblock = 1; 14007 } 14008 sctp_prune_prsctp(stcb, asoc, srcv, sndlen); 14009 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 14010 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 14011 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 14012 else 14013 max_len = 0; 14014 if (max_len > 0) { 14015 continue; 14016 } 14017 SCTP_TCB_UNLOCK(stcb); 14018 hold_tcblock = 0; 14019 } 14020 /* wait for space now */ 14021 if (non_blocking) { 14022 /* Non-blocking io in place out */ 14023 goto skip_out_eof; 14024 } 14025 /* What about the INIT, send it maybe */ 14026 if (queue_only_for_init) { 14027 if (hold_tcblock == 0) { 14028 SCTP_TCB_LOCK(stcb); 14029 hold_tcblock = 1; 14030 } 14031 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 14032 /* a collision took us forward? 
*/ 14033 queue_only = 0; 14034 } else { 14035 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 14036 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); 14037 queue_only = 1; 14038 } 14039 } 14040 if ((net->flight_size > net->cwnd) && 14041 (asoc->sctp_cmt_on_off == 0)) { 14042 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 14043 queue_only = 1; 14044 } else if (asoc->ifp_had_enobuf) { 14045 SCTP_STAT_INCR(sctps_ifnomemqueued); 14046 if (net->flight_size > (2 * net->mtu)) { 14047 queue_only = 1; 14048 } 14049 asoc->ifp_had_enobuf = 0; 14050 } 14051 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 14052 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 14053 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 14054 (stcb->asoc.total_flight > 0) && 14055 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 14056 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { 14057 14058 /*- 14059 * Ok, Nagle is set on and we have data outstanding. 14060 * Don't send anything and let SACKs drive out the 14061 * data unless wen have a "full" segment to send. 
14062 */ 14063 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 14064 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 14065 } 14066 SCTP_STAT_INCR(sctps_naglequeued); 14067 nagle_applies = 1; 14068 } else { 14069 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 14070 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 14071 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 14072 } 14073 SCTP_STAT_INCR(sctps_naglesent); 14074 nagle_applies = 0; 14075 } 14076 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14077 14078 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, 14079 nagle_applies, un_sent); 14080 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, 14081 stcb->asoc.total_flight, 14082 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 14083 } 14084 if (queue_only_for_init) 14085 queue_only_for_init = 0; 14086 if ((queue_only == 0) && (nagle_applies == 0)) { 14087 /*- 14088 * need to start chunk output 14089 * before blocking.. note that if 14090 * a lock is already applied, then 14091 * the input via the net is happening 14092 * and I don't need to start output :-D 14093 */ 14094 if (hold_tcblock == 0) { 14095 if (SCTP_TCB_TRYLOCK(stcb)) { 14096 hold_tcblock = 1; 14097 sctp_chunk_output(inp, 14098 stcb, 14099 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 14100 } 14101 } else { 14102 sctp_chunk_output(inp, 14103 stcb, 14104 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 14105 } 14106 if (hold_tcblock == 1) { 14107 SCTP_TCB_UNLOCK(stcb); 14108 hold_tcblock = 0; 14109 } 14110 } 14111 SOCKBUF_LOCK(&so->so_snd); 14112 /*- 14113 * This is a bit strange, but I think it will 14114 * work. The total_output_queue_size is locked and 14115 * protected by the TCB_LOCK, which we just released. 
14116 * There is a race that can occur between releasing it 14117 * above, and me getting the socket lock, where sacks 14118 * come in but we have not put the SB_WAIT on the 14119 * so_snd buffer to get the wakeup. After the LOCK 14120 * is applied the sack_processing will also need to 14121 * LOCK the so->so_snd to do the actual sowwakeup(). So 14122 * once we have the socket buffer lock if we recheck the 14123 * size we KNOW we will get to sleep safely with the 14124 * wakeup flag in place. 14125 */ 14126 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size + 14127 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) { 14128 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14129 #if defined(__APPLE__) 14130 #if defined(APPLE_LEOPARD) 14131 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 14132 asoc, uio->uio_resid); 14133 #else 14134 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 14135 asoc, uio_resid(uio)); 14136 #endif 14137 #else 14138 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 14139 asoc, uio->uio_resid); 14140 #endif 14141 } 14142 be.error = 0; 14143 #if !defined(__Panda__) && !defined(__Windows__) 14144 stcb->block_entry = &be; 14145 #endif 14146 #if defined(__APPLE__) 14147 sbunlock(&so->so_snd, 1); 14148 #endif 14149 error = sbwait(&so->so_snd); 14150 stcb->block_entry = NULL; 14151 14152 if (error || so->so_error || be.error) { 14153 if (error == 0) { 14154 if (so->so_error) 14155 error = so->so_error; 14156 if (be.error) { 14157 error = be.error; 14158 } 14159 } 14160 SOCKBUF_UNLOCK(&so->so_snd); 14161 goto out_unlocked; 14162 } 14163 14164 #if defined(__APPLE__) 14165 error = sblock(&so->so_snd, SBLOCKWAIT(flags)); 14166 #endif 14167 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14168 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 14169 asoc, stcb->asoc.total_output_queue_size); 14170 } 14171 } 14172 SOCKBUF_UNLOCK(&so->so_snd); 14173 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 14174 goto 
out_unlocked; 14175 } 14176 } 14177 SCTP_TCB_SEND_LOCK(stcb); 14178 if (sp) { 14179 if (sp->msg_is_complete == 0) { 14180 strm->last_msg_incomplete = 1; 14181 asoc->stream_locked = 1; 14182 asoc->stream_locked_on = srcv->sinfo_stream; 14183 } else { 14184 sp->sender_all_done = 1; 14185 strm->last_msg_incomplete = 0; 14186 asoc->stream_locked = 0; 14187 } 14188 } else { 14189 SCTP_PRINTF("Huh no sp TSNH?\n"); 14190 strm->last_msg_incomplete = 0; 14191 asoc->stream_locked = 0; 14192 } 14193 SCTP_TCB_SEND_UNLOCK(stcb); 14194 #if defined(__APPLE__) 14195 #if defined(APPLE_LEOPARD) 14196 if (uio->uio_resid == 0) { 14197 #else 14198 if (uio_resid(uio) == 0) { 14199 #endif 14200 #else 14201 if (uio->uio_resid == 0) { 14202 #endif 14203 got_all_of_the_send = 1; 14204 } 14205 } else { 14206 /* We send in a 0, since we do NOT have any locks */ 14207 error = sctp_msg_append(stcb, net, top, srcv, 0); 14208 top = NULL; 14209 if (srcv->sinfo_flags & SCTP_EOF) { 14210 /* 14211 * This should only happen for Panda for the mbuf 14212 * send case, which does NOT yet support EEOR mode. 14213 * Thus, we can just set this flag to do the proper 14214 * EOF handling. 14215 */ 14216 got_all_of_the_send = 1; 14217 } 14218 } 14219 if (error) { 14220 goto out; 14221 } 14222 dataless_eof: 14223 /* EOF thing ? */ 14224 if ((srcv->sinfo_flags & SCTP_EOF) && 14225 (got_all_of_the_send == 1)) { 14226 int cnt; 14227 SCTP_STAT_INCR(sctps_sends_with_eof); 14228 error = 0; 14229 if (hold_tcblock == 0) { 14230 SCTP_TCB_LOCK(stcb); 14231 hold_tcblock = 1; 14232 } 14233 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED); 14234 if (TAILQ_EMPTY(&asoc->send_queue) && 14235 TAILQ_EMPTY(&asoc->sent_queue) && 14236 (cnt == 0)) { 14237 if (asoc->locked_on_sending) { 14238 goto abort_anyway; 14239 } 14240 /* there is nothing queued to send, so I'm done... 
*/ 14241 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 14242 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 14243 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 14244 struct sctp_nets *netp; 14245 14246 /* only send SHUTDOWN the first time through */ 14247 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 14248 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 14249 } 14250 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 14251 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 14252 sctp_stop_timers_for_shutdown(stcb); 14253 if (stcb->asoc.alternate) { 14254 netp = stcb->asoc.alternate; 14255 } else { 14256 netp = stcb->asoc.primary_destination; 14257 } 14258 sctp_send_shutdown(stcb, netp); 14259 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 14260 netp); 14261 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 14262 asoc->primary_destination); 14263 } 14264 } else { 14265 /*- 14266 * we still got (or just got) data to send, so set 14267 * SHUTDOWN_PENDING 14268 */ 14269 /*- 14270 * XXX sockets draft says that SCTP_EOF should be 14271 * sent with no data. 
currently, we will allow user 14272 * data to be sent first and move to 14273 * SHUTDOWN-PENDING 14274 */ 14275 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 14276 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 14277 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 14278 if (hold_tcblock == 0) { 14279 SCTP_TCB_LOCK(stcb); 14280 hold_tcblock = 1; 14281 } 14282 if (asoc->locked_on_sending) { 14283 /* Locked to send out the data */ 14284 struct sctp_stream_queue_pending *sp; 14285 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 14286 if (sp) { 14287 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 14288 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 14289 } 14290 } 14291 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 14292 if (TAILQ_EMPTY(&asoc->send_queue) && 14293 TAILQ_EMPTY(&asoc->sent_queue) && 14294 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 14295 abort_anyway: 14296 if (free_cnt_applied) { 14297 atomic_add_int(&stcb->asoc.refcnt, -1); 14298 free_cnt_applied = 0; 14299 } 14300 sctp_abort_an_association(stcb->sctp_ep, stcb, 14301 NULL, SCTP_SO_LOCKED); 14302 /* now relock the stcb so everything is sane */ 14303 hold_tcblock = 0; 14304 stcb = NULL; 14305 goto out; 14306 } 14307 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 14308 asoc->primary_destination); 14309 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY); 14310 } 14311 } 14312 } 14313 skip_out_eof: 14314 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 14315 some_on_control = 1; 14316 } 14317 if (queue_only_for_init) { 14318 if (hold_tcblock == 0) { 14319 SCTP_TCB_LOCK(stcb); 14320 hold_tcblock = 1; 14321 } 14322 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 14323 /* a collision took us forward? 
*/ 14324 queue_only = 0; 14325 } else { 14326 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 14327 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 14328 queue_only = 1; 14329 } 14330 } 14331 if ((net->flight_size > net->cwnd) && 14332 (stcb->asoc.sctp_cmt_on_off == 0)) { 14333 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 14334 queue_only = 1; 14335 } else if (asoc->ifp_had_enobuf) { 14336 SCTP_STAT_INCR(sctps_ifnomemqueued); 14337 if (net->flight_size > (2 * net->mtu)) { 14338 queue_only = 1; 14339 } 14340 asoc->ifp_had_enobuf = 0; 14341 } 14342 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 14343 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 14344 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 14345 (stcb->asoc.total_flight > 0) && 14346 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 14347 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { 14348 /*- 14349 * Ok, Nagle is set on and we have data outstanding. 14350 * Don't send anything and let SACKs drive out the 14351 * data unless wen have a "full" segment to send. 
14352 */ 14353 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 14354 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 14355 } 14356 SCTP_STAT_INCR(sctps_naglequeued); 14357 nagle_applies = 1; 14358 } else { 14359 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 14360 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 14361 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 14362 } 14363 SCTP_STAT_INCR(sctps_naglesent); 14364 nagle_applies = 0; 14365 } 14366 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14367 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, 14368 nagle_applies, un_sent); 14369 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, 14370 stcb->asoc.total_flight, 14371 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 14372 } 14373 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) { 14374 /* we can attempt to send too. 
*/ 14375 if (hold_tcblock == 0) { 14376 /* If there is activity recv'ing sacks no need to send */ 14377 if (SCTP_TCB_TRYLOCK(stcb)) { 14378 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 14379 hold_tcblock = 1; 14380 } 14381 } else { 14382 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 14383 } 14384 } else if ((queue_only == 0) && 14385 (stcb->asoc.peers_rwnd == 0) && 14386 (stcb->asoc.total_flight == 0)) { 14387 /* We get to have a probe outstanding */ 14388 if (hold_tcblock == 0) { 14389 hold_tcblock = 1; 14390 SCTP_TCB_LOCK(stcb); 14391 } 14392 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 14393 } else if (some_on_control) { 14394 int num_out, reason, frag_point; 14395 14396 /* Here we do control only */ 14397 if (hold_tcblock == 0) { 14398 hold_tcblock = 1; 14399 SCTP_TCB_LOCK(stcb); 14400 } 14401 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 14402 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 14403 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED); 14404 } 14405 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n", 14406 queue_only, stcb->asoc.peers_rwnd, un_sent, 14407 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, 14408 stcb->asoc.total_output_queue_size, error); 14409 14410 out: 14411 #if defined(__APPLE__) 14412 sbunlock(&so->so_snd, 1); 14413 #endif 14414 out_unlocked: 14415 14416 if (local_soresv && stcb) { 14417 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen); 14418 } 14419 if (create_lock_applied) { 14420 SCTP_ASOC_CREATE_UNLOCK(inp); 14421 } 14422 if ((stcb) && hold_tcblock) { 14423 SCTP_TCB_UNLOCK(stcb); 14424 } 14425 if (stcb && free_cnt_applied) { 14426 atomic_add_int(&stcb->asoc.refcnt, -1); 14427 } 14428 #ifdef INVARIANTS 14429 #if !defined(__APPLE__) 14430 if (stcb) { 14431 if (mtx_owned(&stcb->tcb_mtx)) { 14432 panic("Leaving with tcb mtx owned?"); 14433 } 14434 if 
	    (mtx_owned(&stcb->tcb_send_mtx)) {
			panic("Leaving with tcb send mtx owned?");
		}
	}
#endif
#endif
#ifdef __Panda__
	/*
	 * Handle the EAGAIN/ENOMEM cases to reattach the pak header
	 * to particle when pak is passed in, so that caller
	 * can try again with this pak
	 *
	 * NOTE: For other cases, including success case,
	 * we simply want to return the header back to free
	 * pool
	 */
	if (top) {
		if ((error == EAGAIN) || (error == ENOMEM)) {
			SCTP_ATTACH_CHAIN(i_pak, top, sndlen);
			top = NULL;
		} else {
			(void)SCTP_RELEASE_HEADER(i_pak);
		}
	} else {
		/* This is to handle cases when top has
		 * been reset to NULL but pak might not
		 * be freed
		 */
		if (i_pak) {
			(void)SCTP_RELEASE_HEADER(i_pak);
		}
	}
#endif
#ifdef INVARIANTS
	/* Debug build: verify that no inp-level locks are left held on exit. */
	if (inp) {
		sctp_validate_no_locks(inp);
	} else {
		SCTP_PRINTF("Warning - inp is NULL so cant validate locks\n");
	}
#endif
	/* Free any mbuf chains we still own (not consumed by a send path). */
	if (top) {
		sctp_m_freem(top);
	}
	if (control) {
		sctp_m_freem(control);
	}
	return (error);
}


/*
 * Generate an AUTHentication chunk, if required, and append it to the
 * outgoing chunk chain 'm'.
 *
 * m        - existing chunk chain (may be NULL for an empty chain).
 * m_end    - tail-of-chain pointer, passed through to sctp_copy_mbufchain().
 * auth_ret - on success, set to point at the AUTH chunk header so the
 *            caller can fill in the key id / HMAC digest at send time
 *            (see comment below; they are NOT computed here).
 * offset   - on success, set to the byte offset of the AUTH chunk within
 *            the chain (sum of the mbuf lengths preceding it).
 * stcb     - association; supplies the peer's required-chunk list
 *            (peer_auth_chunks) and negotiated HMAC id (peer_hmac_id).
 * chunk    - the chunk type about to be sent, checked against the peer's
 *            list of chunks that must be authenticated.
 *
 * Returns the (possibly new) head of the chain.  The chain is returned
 * unchanged when arguments are invalid, AUTH is not supported by the
 * association, the chunk type does not require auth, or no mbuf could
 * be allocated.
 */
struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
	struct mbuf *m_auth;
	struct sctp_auth_chunk *auth;
	int chunk_len;
	struct mbuf *cn;

	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
	    (stcb == NULL))
		return (m);

	if (stcb->asoc.auth_supported == 0) {
		return (m);
	}
	/* does the requested chunk require auth? */
	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
		return (m);
	}
	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_auth == NULL) {
		/* no mbuf's */
		return (m);
	}
	/* reserve some space if this will be the first mbuf */
	if (m == NULL)
		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
	/* fill in the AUTH chunk details */
	auth = mtod(m_auth, struct sctp_auth_chunk *);
	bzero(auth, sizeof(*auth));
	auth->ch.chunk_type = SCTP_AUTHENTICATION;
	auth->ch.chunk_flags = 0;
	/* chunk length = header plus the digest for the negotiated HMAC */
	chunk_len = sizeof(*auth) +
		sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	auth->ch.chunk_length = htons(chunk_len);
	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
	/* key id and hmac digest will be computed and filled in upon send */

	/* save the offset where the auth was inserted into the chain */
	*offset = 0;
	for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
		*offset += SCTP_BUF_LEN(cn);
	}

	/* update length and return pointer to the auth chunk */
	SCTP_BUF_LEN(m_auth) = chunk_len;
	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
	/* NOTE(review): always true here -- auth_ret was NULL-checked at entry */
	if (auth_ret != NULL)
		*auth_ret = auth;

	return (m);
}

#if defined(__FreeBSD__) || defined(__APPLE__)
#ifdef INET6
/*
 * Check whether the next hop of route 'ro' is one of the routers that
 * advertised the on-link prefix covering source address 'src6'.
 *
 * Walks the kernel's neighbor-discovery prefix list to find the (attached)
 * prefix matching src6, then compares each of that prefix's advertising
 * routers against the route's gateway using sctp_cmpaddr().
 *
 * Returns 1 if the route's gateway is an advertising router for src6's
 * prefix, 0 otherwise (including: no route, non-INET6 source, or no
 * matching prefix entry).
 */
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
{
	struct nd_prefix *pfx = NULL;
	struct nd_pfxrouter *pfxrtr = NULL;
	struct sockaddr_in6 gw6;

	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
		return (0);

	/* get prefix entry of address */
	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
			continue;
		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
		    &src6->sin6_addr, &pfx->ndpr_mask))
			break;
	}
	/* no prefix entry in the prefix list */
	if (pfx == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
		return (0);
	}

	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

	/* search installed gateway from prefix entry */
	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
		/* build a sockaddr_in6 for the advertising router's address */
		memset(&gw6, 0, sizeof(struct sockaddr_in6));
		gw6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
		gw6.sin6_len = sizeof(struct sockaddr_in6);
#endif
		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
		       sizeof(struct in6_addr));
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
		if (sctp_cmpaddr((struct sockaddr *)&gw6,
		    ro->ro_rt->rt_gateway)) {
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
			return (1);
		}
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
	return (0);
}
#endif

/*
 * IPv4 counterpart of sctp_v6src_match_nexthop(): masks sifa's source
 * address with its interface netmask to obtain the source network
 * (presumably compared against the gateway's network further on --
 * see gwnetaddr).
 */
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
{
#ifdef INET
	struct sockaddr_in *sin, *mask;
	struct ifaddr *ifa;
	struct in_addr srcnetaddr, gwnetaddr;

	if (ro == NULL || ro->ro_rt == NULL ||
	    sifa->address.sa.sa_family != AF_INET) {
		return (0);
	}
	ifa = (struct ifaddr *)sifa->ifa;
	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
	sin = &sifa->address.sin;
	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr); 14615 14616 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway; 14617 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); 14618 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is "); 14619 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway); 14620 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr); 14621 if (srcnetaddr.s_addr == gwnetaddr.s_addr) { 14622 return (1); 14623 } 14624 #endif 14625 return (0); 14626 } 14627 #elif defined(__Userspace__) 14628 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */ 14629 int 14630 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro) 14631 { 14632 return (0); 14633 } 14634 int 14635 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro) 14636 { 14637 return (0); 14638 } 14639 14640 #endif 14641