      1 /*-
      2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
      3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
      4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions are met:
      8  *
      9  * a) Redistributions of source code must retain the above copyright notice,
     10  *    this list of conditions and the following disclaimer.
     11  *
     12  * b) Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in
     14  *    the documentation and/or other materials provided with the distribution.
     15  *
     16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
     17  *    contributors may be used to endorse or promote products derived
     18  *    from this software without specific prior written permission.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     30  * THE POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #ifdef __FreeBSD__
     34 #include <sys/cdefs.h>
     35 __FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 269448 2014-08-02 21:36:40Z tuexen $");
     36 #endif
     37 
     38 #include <netinet/sctp_os.h>
     39 #include <netinet/sctp_var.h>
     40 #include <netinet/sctp_sysctl.h>
     41 #include <netinet/sctp_pcb.h>
     42 #include <netinet/sctp_header.h>
     43 #include <netinet/sctputil.h>
     44 #include <netinet/sctp_output.h>
     45 #include <netinet/sctp_input.h>
     46 #include <netinet/sctp_indata.h>
     47 #include <netinet/sctp_uio.h>
     48 #include <netinet/sctp_timer.h>
     49 
     50 
     51 /*
      52  * NOTES: On the outbound side of things we need to check the SACK timer to
      53  * see if we should generate a SACK into the chunk queue (if we have data to
      54  * send, that is, and will be sending it, for bundling).
     55  *
     56  * The callback in sctp_usrreq.c will get called when the socket is read from.
     57  * This will cause sctp_service_queues() to get called on the top entry in
     58  * the list.
     59  */
     60 
     61 void
     62 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
     63 {
     64 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
     65 }
     66 
     67 /* Calculate what the rwnd would be */
     68 uint32_t
     69 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
     70 {
     71 	uint32_t calc = 0;
     72 
     73 	/*
      74 	 * This is really set wrong with respect to a 1-to-many socket, since
      75 	 * sb_cc is the count that every association has put up. When we
      76 	 * re-write sctp_soreceive we will fix this so that ONLY this
      77 	 * association's data is taken into account.
     78 	 */
     79 	if (stcb->sctp_socket == NULL)
     80 		return (calc);
     81 
     82 	if (stcb->asoc.sb_cc == 0 &&
     83 	    asoc->size_on_reasm_queue == 0 &&
     84 	    asoc->size_on_all_streams == 0) {
     85 		/* Full rwnd granted */
     86 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
     87 		return (calc);
     88 	}
     89 	/* get actual space */
     90 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
     91 
     92 	/*
      93 	 * Take out what has NOT yet been put on the socket queue and
      94 	 * that we still hold for putting up.
     95 	 */
     96 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
     97 	                                         asoc->cnt_on_reasm_queue * MSIZE));
     98 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
     99 	                                         asoc->cnt_on_all_streams * MSIZE));
    100 
    101 	if (calc == 0) {
    102 		/* out of space */
    103 		return (calc);
    104 	}
    105 
     106 	/* account for the overhead of all these rwnds */
    107 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
     108 	/* If the window gets too small due to control overhead, reduce it
     109 	 * to 1 rather than letting it fall to 0; SWS avoidance stays engaged.
    110 	 */
    111 	if (calc < stcb->asoc.my_rwnd_control_len) {
    112 		calc = 1;
    113 	}
    114 	return (calc);
    115 }
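
/*
 * Worked example for sctp_calc_rwnd() (assumed numbers, for illustration
 * only; MSIZE is platform dependent): with a receive buffer limit of
 * 64000 bytes, 2000 bytes in 2 chunks held on the reassembly queue,
 * nothing on the stream queues, and MSIZE == 256:
 *
 *	calc  = sctp_sbspace(...)		e.g. 62000
 *	calc -= 2000 + 2 * 256			-> 59488 (reassembly hold)
 *	calc -= my_rwnd_control_len		control overhead
 *
 * The helper sctp_sbspace_sub() is expected to clamp at 0 on underflow,
 * and the final check above advertises a window of 1 instead of 0 so
 * the peer can still probe us (SWS avoidance).
 */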
    116 
    117 
    118 
    119 /*
    120  * Build out our readq entry based on the incoming packet.
    121  */
    122 struct sctp_queued_to_read *
    123 sctp_build_readq_entry(struct sctp_tcb *stcb,
    124     struct sctp_nets *net,
    125     uint32_t tsn, uint32_t ppid,
    126     uint32_t context, uint16_t stream_no,
    127     uint16_t stream_seq, uint8_t flags,
    128     struct mbuf *dm)
    129 {
    130 	struct sctp_queued_to_read *read_queue_e = NULL;
    131 
    132 	sctp_alloc_a_readq(stcb, read_queue_e);
    133 	if (read_queue_e == NULL) {
    134 		goto failed_build;
    135 	}
    136 	read_queue_e->sinfo_stream = stream_no;
    137 	read_queue_e->sinfo_ssn = stream_seq;
    138 	read_queue_e->sinfo_flags = (flags << 8);
    139 	read_queue_e->sinfo_ppid = ppid;
    140 	read_queue_e->sinfo_context = context;
    141 	read_queue_e->sinfo_timetolive = 0;
    142 	read_queue_e->sinfo_tsn = tsn;
    143 	read_queue_e->sinfo_cumtsn = tsn;
    144 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    145 	read_queue_e->whoFrom = net;
    146 	read_queue_e->length = 0;
    147 	atomic_add_int(&net->ref_count, 1);
    148 	read_queue_e->data = dm;
    149 	read_queue_e->spec_flags = 0;
    150 	read_queue_e->tail_mbuf = NULL;
    151 	read_queue_e->aux_data = NULL;
    152 	read_queue_e->stcb = stcb;
    153 	read_queue_e->port_from = stcb->rport;
    154 	read_queue_e->do_not_ref_stcb = 0;
    155 	read_queue_e->end_added = 0;
    156 	read_queue_e->some_taken = 0;
    157 	read_queue_e->pdapi_aborted = 0;
    158 failed_build:
    159 	return (read_queue_e);
    160 }
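
/*
 * Usage sketch (hypothetical caller, for illustration only): a freshly
 * received DATA chunk would typically be wrapped like
 *
 *	control = sctp_build_readq_entry(stcb, net, tsn, ppid,
 *	                                 stcb->asoc.context, sid, ssn,
 *	                                 flags, dmbuf);
 *	if (control == NULL)
 *		drop it -- nothing was changed, since the net refcount
 *		is only bumped after a successful allocation;
 *
 * On success the entry owns dmbuf and holds a reference on net.
 */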
    161 
    162 
    163 /*
     164  * Build out our readq entry based on a queued chunk.
    165  */
    166 static struct sctp_queued_to_read *
    167 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    168     struct sctp_tmit_chunk *chk)
    169 {
    170 	struct sctp_queued_to_read *read_queue_e = NULL;
    171 
    172 	sctp_alloc_a_readq(stcb, read_queue_e);
    173 	if (read_queue_e == NULL) {
    174 		goto failed_build;
    175 	}
    176 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
    177 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
    178 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
    179 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
    180 	read_queue_e->sinfo_context = stcb->asoc.context;
    181 	read_queue_e->sinfo_timetolive = 0;
    182 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
    183 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
    184 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    185 	read_queue_e->whoFrom = chk->whoTo;
    186 	read_queue_e->aux_data = NULL;
    187 	read_queue_e->length = 0;
    188 	atomic_add_int(&chk->whoTo->ref_count, 1);
    189 	read_queue_e->data = chk->data;
    190 	read_queue_e->tail_mbuf = NULL;
    191 	read_queue_e->stcb = stcb;
    192 	read_queue_e->port_from = stcb->rport;
    193 	read_queue_e->spec_flags = 0;
    194 	read_queue_e->do_not_ref_stcb = 0;
    195 	read_queue_e->end_added = 0;
    196 	read_queue_e->some_taken = 0;
    197 	read_queue_e->pdapi_aborted = 0;
    198 failed_build:
    199 	return (read_queue_e);
    200 }
    201 
    202 
    203 struct mbuf *
    204 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
    205 {
    206 	struct sctp_extrcvinfo *seinfo;
    207 	struct sctp_sndrcvinfo *outinfo;
    208 	struct sctp_rcvinfo *rcvinfo;
    209 	struct sctp_nxtinfo *nxtinfo;
    210 #if defined(__Userspace_os_Windows)
    211 	WSACMSGHDR *cmh;
    212 #else
    213 	struct cmsghdr *cmh;
    214 #endif
    215 	struct mbuf *ret;
    216 	int len;
    217 	int use_extended;
    218 	int provide_nxt;
    219 
    220 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
    221 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
    222 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
    223 		/* user does not want any ancillary data */
    224 		return (NULL);
    225 	}
    226 
    227 	len = 0;
    228 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
    229 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    230 	}
    231 	seinfo = (struct sctp_extrcvinfo *)sinfo;
    232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
    233 	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
    234 		provide_nxt = 1;
     235 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    236 	} else {
    237 		provide_nxt = 0;
    238 	}
    239 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
    240 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
    241 			use_extended = 1;
    242 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
    243 		} else {
    244 			use_extended = 0;
    245 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
    246 		}
    247 	} else {
    248 		use_extended = 0;
    249 	}
    250 
    251 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
    252 	if (ret == NULL) {
    253 		/* No space */
    254 		return (ret);
    255 	}
    256 	SCTP_BUF_LEN(ret) = 0;
    257 
    258 	/* We need a CMSG header followed by the struct */
    259 #if defined(__Userspace_os_Windows)
    260 	cmh = mtod(ret, WSACMSGHDR *);
    261 #else
    262 	cmh = mtod(ret, struct cmsghdr *);
    263 #endif
    264 	/*
    265 	 * Make sure that there is no un-initialized padding between
    266 	 * the cmsg header and cmsg data and after the cmsg data.
    267 	 */
    268 	memset(cmh, 0, len);
    269 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
    270 		cmh->cmsg_level = IPPROTO_SCTP;
    271 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
    272 		cmh->cmsg_type = SCTP_RCVINFO;
    273 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
    274 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
    275 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
    276 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
    277 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
    278 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
    279 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
    280 		rcvinfo->rcv_context = sinfo->sinfo_context;
    281 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
    282 #if defined(__Userspace_os_Windows)
    283 		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
    284 #else
    285 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
    286 #endif
    287 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    288 	}
    289 	if (provide_nxt) {
    290 		cmh->cmsg_level = IPPROTO_SCTP;
    291 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
    292 		cmh->cmsg_type = SCTP_NXTINFO;
    293 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
    294 		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
    295 		nxtinfo->nxt_flags = 0;
    296 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
    297 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
    298 		}
    299 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
    300 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
    301 		}
    302 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
    303 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
    304 		}
    305 		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
    306 		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
    307 		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
    308 #if defined(__Userspace_os_Windows)
    309 		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
    310 #else
    311 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
    312 #endif
    313 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    314 	}
    315 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
    316 		cmh->cmsg_level = IPPROTO_SCTP;
    317 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
    318 		if (use_extended) {
    319 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
    320 			cmh->cmsg_type = SCTP_EXTRCV;
    321 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
    322 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
    323 		} else {
    324 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
    325 			cmh->cmsg_type = SCTP_SNDRCV;
    326 			*outinfo = *sinfo;
    327 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
    328 		}
    329 	}
    330 	return (ret);
    331 }
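
/*
 * Layout sketch of the mbuf returned above (illustration, not normative):
 * with SCTP_RCVINFO and SCTP_NXTINFO both enabled, the buffer carries
 * back-to-back cmsgs, each rounded up by CMSG_SPACE():
 *
 *	[ cmsghdr IPPROTO_SCTP/SCTP_RCVINFO ][ struct sctp_rcvinfo ][pad]
 *	[ cmsghdr IPPROTO_SCTP/SCTP_NXTINFO ][ struct sctp_nxtinfo ][pad]
 *
 * The memset() of the whole region is what guarantees the CMSG_SPACE()
 * padding between and after the structures is zeroed rather than
 * leaking uninitialized memory to the receiver.
 */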
    332 
    333 
    334 static void
    335 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
    336 {
    337 	uint32_t gap, i, cumackp1;
    338 	int fnd = 0;
    339 
    340 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
    341 		return;
    342 	}
    343 	cumackp1 = asoc->cumulative_tsn + 1;
    344 	if (SCTP_TSN_GT(cumackp1, tsn)) {
     345 		/* this tsn is behind the cum ack and thus we don't
     346 		 * need to worry about it being moved from one map to the other.
    347 		 */
    348 		return;
    349 	}
    350 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    351 	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
    352 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
    353 		sctp_print_mapping_array(asoc);
    354 #ifdef INVARIANTS
    355 		panic("Things are really messed up now!!");
    356 #endif
    357 	}
    358 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    359 	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
    360 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
    361 		asoc->highest_tsn_inside_nr_map = tsn;
    362 	}
    363 	if (tsn == asoc->highest_tsn_inside_map) {
    364 		/* We must back down to see what the new highest is */
    365 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
    366 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
    367 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
    368 				asoc->highest_tsn_inside_map = i;
    369 				fnd = 1;
    370 				break;
    371 			}
    372 		}
    373 		if (!fnd) {
    374 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
    375 		}
    376 	}
    377 }
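
/*
 * Worked example for sctp_mark_non_revokable() (assumed numbers, for
 * illustration): with mapping_array_base_tsn == 1000 and tsn == 1005,
 * gap == 5, so bit 5 moves from mapping_array (renegable) to
 * nr_mapping_array (non-renegable). If 1005 was also
 * highest_tsn_inside_map, the backward loop walks 1004, 1003, ...
 * until it finds a TSN still present in the renegable map, falling
 * back to base - 1 when that map is empty.
 */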
    378 
    379 
    380 /*
    381  * We are delivering currently from the reassembly queue. We must continue to
    382  * deliver until we either: 1) run out of space. 2) run out of sequential
    383  * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
    384  */
    385 static void
    386 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
    387 {
    388 	struct sctp_tmit_chunk *chk, *nchk;
    389 	uint16_t nxt_todel;
    390 	uint16_t stream_no;
    391 	int end = 0;
    392 	int cntDel;
    393 	struct sctp_queued_to_read *control, *ctl, *nctl;
    394 
    395 	if (stcb == NULL)
    396 		return;
    397 
    398 	cntDel = stream_no = 0;
    399 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
    400 	     (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
    401 	     (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
     402 	/* socket above is long gone or going away.. */
    403 	abandon:
    404 		asoc->fragmented_delivery_inprogress = 0;
    405 		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
    406 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
    407 			asoc->size_on_reasm_queue -= chk->send_size;
    408 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
    409 			/*
     410 			 * Lose the data pointer, since it's in the socket
    411 			 * buffer
    412 			 */
    413 			if (chk->data) {
    414 				sctp_m_freem(chk->data);
    415 				chk->data = NULL;
    416 			}
    417 			/* Now free the address and data */
    418 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    419 			/*sa_ignore FREED_MEMORY*/
    420 		}
    421 		return;
    422 	}
    423 	SCTP_TCB_LOCK_ASSERT(stcb);
    424 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
    425 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
    426 			/* Can't deliver more :< */
    427 			return;
    428 		}
    429 		stream_no = chk->rec.data.stream_number;
    430 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
    431 		if (nxt_todel != chk->rec.data.stream_seq &&
    432 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
    433 			/*
     434 			 * Not the next sequence to deliver in its stream,
     435 			 * and the chunk is ordered, so we cannot deliver it yet.
    436 			 */
    437 			return;
    438 		}
    439 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
    440 
    441 			control = sctp_build_readq_entry_chk(stcb, chk);
    442 			if (control == NULL) {
    443 				/* out of memory? */
    444 				return;
    445 			}
    446 			/* save it off for our future deliveries */
    447 			stcb->asoc.control_pdapi = control;
    448 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
    449 				end = 1;
    450 			else
    451 				end = 0;
    452 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
    453 			sctp_add_to_readq(stcb->sctp_ep,
    454 			                  stcb, control, &stcb->sctp_socket->so_rcv, end,
    455 			                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
    456 			cntDel++;
    457 		} else {
    458 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
    459 				end = 1;
    460 			else
    461 				end = 0;
    462 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
    463 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
    464 			    stcb->asoc.control_pdapi,
    465 			    chk->data, end, chk->rec.data.TSN_seq,
    466 			    &stcb->sctp_socket->so_rcv)) {
    467 				/*
    468 				 * something is very wrong, either
    469 				 * control_pdapi is NULL, or the tail_mbuf
     470 				 * is corrupt, or there is an EOM already on
    471 				 * the mbuf chain.
    472 				 */
    473 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
    474 					goto abandon;
    475 				} else {
    476 #ifdef INVARIANTS
    477 					if ((stcb->asoc.control_pdapi == NULL)  || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
    478 						panic("This should not happen control_pdapi NULL?");
    479 					}
     480 					/* if we did not panic, it was an EOM */
    481 					panic("Bad chunking ??");
    482 #else
    483 					if ((stcb->asoc.control_pdapi == NULL)  || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
    484 					  SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
    485 					}
    486 					SCTP_PRINTF("Bad chunking ??\n");
    487 					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
    488 
    489 #endif
    490 					goto abandon;
    491 				}
    492 			}
    493 			cntDel++;
    494 		}
     495 		/* pull it off; we delivered it */
    496 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
    497 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
    498 			asoc->fragmented_delivery_inprogress = 0;
    499 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
    500 				asoc->strmin[stream_no].last_sequence_delivered++;
    501 			}
    502 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
    503 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
    504 			}
    505 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
    506 			/*
     507 			 * turn the flag back on since we just delivered
    508 			 * yet another one.
    509 			 */
    510 			asoc->fragmented_delivery_inprogress = 1;
    511 		}
    512 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
    513 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
    514 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
    515 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
    516 
    517 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
    518 		asoc->size_on_reasm_queue -= chk->send_size;
    519 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
    520 		/* free up the chk */
    521 		chk->data = NULL;
    522 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    523 
    524 		if (asoc->fragmented_delivery_inprogress == 0) {
    525 			/*
     526 			 * Now let's see if we can deliver the next one on
    527 			 * the stream
    528 			 */
    529 			struct sctp_stream_in *strm;
    530 
    531 			strm = &asoc->strmin[stream_no];
    532 			nxt_todel = strm->last_sequence_delivered + 1;
    533 			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
    534 				/* Deliver more if we can. */
    535 				if (nxt_todel == ctl->sinfo_ssn) {
    536 					TAILQ_REMOVE(&strm->inqueue, ctl, next);
    537 					asoc->size_on_all_streams -= ctl->length;
    538 					sctp_ucount_decr(asoc->cnt_on_all_streams);
    539 					strm->last_sequence_delivered++;
    540 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
    541 					sctp_add_to_readq(stcb->sctp_ep, stcb,
    542 					                  ctl,
    543 					                  &stcb->sctp_socket->so_rcv, 1,
    544 					                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
    545 				} else {
    546 					break;
    547 				}
    548 				nxt_todel = strm->last_sequence_delivered + 1;
    549 			}
    550 			break;
    551 		}
    552 	}
    553 }
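
/*
 * Delivery sketch for sctp_service_reassembly() (illustration): with
 * tsn_last_delivered == 9 and the reassembly queue holding TSNs
 * 10 (B-bit), 11, and 12 (E-bit), the loop builds a readq entry at
 * TSN 10, appends 11 and 12 to it, clears
 * fragmented_delivery_inprogress at the E-bit chunk, and then drains
 * any now-deliverable ordered messages from the same stream. A gap
 * (say, TSN 13 missing) stops delivery immediately at that point.
 */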
    554 
    555 /*
     556  * Queue the chunk either right into the socket buffer, if it is the next one
     557  * to go, OR put it in the correct place in the delivery queue. If we do
     558  * append to the so_buf, keep doing so until we are out of order. One big
     559  * question still remains: what to do when the socket buffer is FULL?
    560  */
    561 static void
    562 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    563     struct sctp_queued_to_read *control, int *abort_flag)
    564 {
    565 	/*
     566 	 * FIX-ME maybe? What happens when the SSN wraps? If we are getting
     567 	 * all the data in one stream this could happen quite rapidly. One
     568 	 * could use the TSN to keep track of things, but this scheme breaks
     569 	 * down in the other type of stream usage that could occur. Send a
     570 	 * single msg to stream 0, send 4 billion messages to stream 1, now
     571 	 * send a message to stream 0. You have a situation where the TSN
     572 	 * has wrapped but not in the stream. Is this worth worrying about,
     573 	 * or should we just change our queue sort at the bottom to be by
     574 	 * TSN?
     575 	 *
     576 	 * Could it also be legal for a peer to send SSN 1 with TSN 2 and SSN 2
     577 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
     578 	 * assignment this could happen... and I don't see how this would be
     579 	 * a violation. So for now I am undecided and will leave the sort by
     580 	 * SSN alone. Maybe a hybrid approach is the answer.
    581 	 *
    582 	 */
    583 	struct sctp_stream_in *strm;
    584 	struct sctp_queued_to_read *at;
    585 	int queue_needed;
    586 	uint16_t nxt_todel;
    587 	struct mbuf *op_err;
    588 	char msg[SCTP_DIAG_INFO_LEN];
    589 
    590 	queue_needed = 1;
    591 	asoc->size_on_all_streams += control->length;
    592 	sctp_ucount_incr(asoc->cnt_on_all_streams);
    593 	strm = &asoc->strmin[control->sinfo_stream];
    594 	nxt_todel = strm->last_sequence_delivered + 1;
    595 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
    596 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    597 	}
    598 	SCTPDBG(SCTP_DEBUG_INDATA1,
    599 		"queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
    600 		(uint32_t) control->sinfo_stream,
    601 		(uint32_t) strm->last_sequence_delivered,
    602 		(uint32_t) nxt_todel);
    603 	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
    604 		/* The incoming sseq is behind where we last delivered? */
    605 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
    606 			control->sinfo_ssn, strm->last_sequence_delivered);
    607 	protocol_error:
    608 		/*
    609 		 * throw it in the stream so it gets cleaned up in
    610 		 * association destruction
    611 		 */
    612 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
    613 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
    614 		         strm->last_sequence_delivered, control->sinfo_tsn,
    615 			 control->sinfo_stream, control->sinfo_ssn);
    616 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    617 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_1;
    618 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
    619 		*abort_flag = 1;
    620 		return;
    621 
    622 	}
    623 	if (nxt_todel == control->sinfo_ssn) {
    624 		/* can be delivered right away? */
    625 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
    626 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
    627 		}
     628 		/* EY: it won't be queued if it can be delivered directly */
    629 		queue_needed = 0;
    630 		asoc->size_on_all_streams -= control->length;
    631 		sctp_ucount_decr(asoc->cnt_on_all_streams);
    632 		strm->last_sequence_delivered++;
    633 
    634 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
    635 		sctp_add_to_readq(stcb->sctp_ep, stcb,
    636 		                  control,
    637 		                  &stcb->sctp_socket->so_rcv, 1,
    638 		                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
    639 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
     640 			/* deliver any others that are now in sequence */
    641 			nxt_todel = strm->last_sequence_delivered + 1;
    642 			if (nxt_todel == control->sinfo_ssn) {
    643 				TAILQ_REMOVE(&strm->inqueue, control, next);
    644 				asoc->size_on_all_streams -= control->length;
    645 				sctp_ucount_decr(asoc->cnt_on_all_streams);
    646 				strm->last_sequence_delivered++;
    647 				/*
    648 				 * We ignore the return of deliver_data here
    649 				 * since we always can hold the chunk on the
    650 				 * d-queue. And we have a finite number that
    651 				 * can be delivered from the strq.
    652 				 */
    653 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
    654 					sctp_log_strm_del(control, NULL,
    655 							  SCTP_STR_LOG_FROM_IMMED_DEL);
    656 				}
    657 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
    658 				sctp_add_to_readq(stcb->sctp_ep, stcb,
    659 				                  control,
    660 				                  &stcb->sctp_socket->so_rcv, 1,
    661 				                  SCTP_READ_LOCK_NOT_HELD,
    662 				                  SCTP_SO_NOT_LOCKED);
    663 				continue;
    664 			}
    665 			break;
    666 		}
    667 	}
    668 	if (queue_needed) {
    669 		/*
    670 		 * Ok, we did not deliver this guy, find the correct place
    671 		 * to put it on the queue.
    672 		 */
    673 		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
    674 			goto protocol_error;
    675 		}
    676 		if (TAILQ_EMPTY(&strm->inqueue)) {
    677 			/* Empty queue */
    678 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
    679 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
    680 			}
    681 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
    682 		} else {
    683 			TAILQ_FOREACH(at, &strm->inqueue, next) {
    684 				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
    685 					/*
    686 					 * one in queue is bigger than the
    687 					 * new one, insert before this one
    688 					 */
    689 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
    690 						sctp_log_strm_del(control, at,
    691 								  SCTP_STR_LOG_FROM_INSERT_MD);
    692 					}
    693 					TAILQ_INSERT_BEFORE(at, control, next);
    694 					break;
    695 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
    696 					/*
     697 					 * Gak, he sent me a duplicate stream
     698 					 * sequence number
    699 					 */
    700 					/*
    701 					 * foo bar, I guess I will just free
    702 					 * this new guy, should we abort
    703 					 * too? FIX ME MAYBE? Or it COULD be
     704 					 * that the SSNs have wrapped.
    705 					 * Maybe I should compare to TSN
    706 					 * somehow... sigh for now just blow
    707 					 * away the chunk!
    708 					 */
    709 
    710 					if (control->data)
    711 						sctp_m_freem(control->data);
    712 					control->data = NULL;
    713 					asoc->size_on_all_streams -= control->length;
    714 					sctp_ucount_decr(asoc->cnt_on_all_streams);
    715 					if (control->whoFrom) {
    716 						sctp_free_remote_addr(control->whoFrom);
    717 						control->whoFrom = NULL;
    718 					}
    719 					sctp_free_a_readq(stcb, control);
    720 					return;
    721 				} else {
    722 					if (TAILQ_NEXT(at, next) == NULL) {
    723 						/*
    724 						 * We are at the end, insert
    725 						 * it after this one
    726 						 */
    727 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
    728 							sctp_log_strm_del(control, at,
    729 									  SCTP_STR_LOG_FROM_INSERT_TL);
    730 						}
    731 						TAILQ_INSERT_AFTER(&strm->inqueue,
    732 						    at, control, next);
    733 						break;
    734 					}
    735 				}
    736 			}
    737 		}
    738 	}
    739 }
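
/*
 * Note on the SSN comparisons above (illustration): SCTP_SSN_GE/GT do
 * serial-number arithmetic on 16-bit SSNs, so after a wrap SSN 0x0001
 * still compares "greater" than 0xFFFE. Worked case: with
 * last_sequence_delivered == 0xFFFF, an arriving SSN 0x0000 makes
 * nxt_todel == 0x0000 and is delivered immediately, while an arriving
 * SSN 0xFFFF trips the SCTP_SSN_GE() duplicate check and aborts.
 */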
    740 
    741 /*
     742  * Returns two things: the total size of the deliverable parts of the
     743  * first fragmented message on the reassembly queue, and a 1 if
     744  * all of the message is ready or a 0 if the message is still incomplete.
    745  */
    746 static int
    747 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
    748 {
    749 	struct sctp_tmit_chunk *chk;
    750 	uint32_t tsn;
    751 
    752 	*t_size = 0;
    753 	chk = TAILQ_FIRST(&asoc->reasmqueue);
    754 	if (chk == NULL) {
    755 		/* nothing on the queue */
    756 		return (0);
    757 	}
    758 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
    759 		/* Not a first on the queue */
    760 		return (0);
    761 	}
    762 	tsn = chk->rec.data.TSN_seq;
    763 	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
    764 		if (tsn != chk->rec.data.TSN_seq) {
    765 			return (0);
    766 		}
    767 		*t_size += chk->send_size;
    768 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
    769 			return (1);
    770 		}
    771 		tsn++;
    772 	}
    773 	return (0);
    774 }
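
/*
 * Example (illustration): a queue holding TSNs 20 (B-bit), 21, and
 * 22 (E-bit) with sizes 100/100/50 yields *t_size == 250 and returns 1.
 * The same queue missing TSN 21 returns 0 at the sequence break, with
 * *t_size == 100 (only the deliverable leading part is counted).
 */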
    775 
    776 static void
    777 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
    778 {
    779 	struct sctp_tmit_chunk *chk;
    780 	uint16_t nxt_todel;
    781 	uint32_t tsize, pd_point;
    782 
    783  doit_again:
    784 	chk = TAILQ_FIRST(&asoc->reasmqueue);
    785 	if (chk == NULL) {
    786 		/* Huh? */
    787 		asoc->size_on_reasm_queue = 0;
    788 		asoc->cnt_on_reasm_queue = 0;
    789 		return;
    790 	}
    791 	if (asoc->fragmented_delivery_inprogress == 0) {
    792 		nxt_todel =
    793 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
    794 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
    795 		    (nxt_todel == chk->rec.data.stream_seq ||
    796 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
    797 			/*
     798 			 * Yep, the first one is here and it's ok to deliver,
    799 			 * but should we?
    800 			 */
    801 			if (stcb->sctp_socket) {
    802 				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
    803 				               stcb->sctp_ep->partial_delivery_point);
    804 			} else {
    805 				pd_point = stcb->sctp_ep->partial_delivery_point;
    806 			}
    807 			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
    808 				/*
     809 				 * Yes, we set up to start reception by
     810 				 * backing down the TSN just in case we
     811 				 * can't deliver all of it.
    812 				 */
    813 				asoc->fragmented_delivery_inprogress = 1;
    814 				asoc->tsn_last_delivered =
    815 				    chk->rec.data.TSN_seq - 1;
    816 				asoc->str_of_pdapi =
    817 				    chk->rec.data.stream_number;
    818 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
    819 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
    820 				asoc->fragment_flags = chk->rec.data.rcv_flags;
    821 				sctp_service_reassembly(stcb, asoc);
    822 			}
    823 		}
    824 	} else {
     825 		/* Service re-assembly will deliver stream data queued
     826 		 * at the end of fragmented delivery.. but it won't know
     827 		 * to go back and call itself again... we do that here
     828 		 * with the goto doit_again.
    829 		 */
    830 		sctp_service_reassembly(stcb, asoc);
    831 		if (asoc->fragmented_delivery_inprogress == 0) {
     832 			/* finished our fragmented delivery; there could be
     833 			 * more waiting.
    834 			 */
    835 			goto doit_again;
    836 		}
    837 	}
    838 }
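
/*
 * pd_point sketch (assumed numbers, for illustration): if
 * SCTP_SB_LIMIT_RCV() is 64000 and SCTP_PARTIAL_DELIVERY_SHIFT is 2,
 * the shifted limit is 16000 and pd_point = min(16000,
 * partial_delivery_point). Partial delivery thus starts either when
 * the whole message is on the queue or once at least pd_point bytes
 * of its leading fragments are contiguous.
 */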
    839 
    840 /*
     841  * Dump onto the re-assembly queue, in its proper place. After dumping on the
     842  * queue, see if anything can be delivered. If so pull it off (or as much as
     843  * we can). If we run out of space then we must dump what we can and set the
     844  * appropriate flag to say we queued what we could.
    845  */
    846 static void
    847 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    848     struct sctp_tmit_chunk *chk, int *abort_flag)
    849 {
    850 	struct mbuf *op_err;
    851 	char msg[SCTP_DIAG_INFO_LEN];
    852 	uint32_t cum_ackp1, prev_tsn, post_tsn;
    853 	struct sctp_tmit_chunk *at, *prev, *next;
    854 
    855 	prev = next = NULL;
    856 	cum_ackp1 = asoc->tsn_last_delivered + 1;
    857 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
    858 		/* This is the first one on the queue */
    859 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
    860 		/*
    861 		 * we do not check for delivery of anything when only one
    862 		 * fragment is here
    863 		 */
    864 		asoc->size_on_reasm_queue = chk->send_size;
    865 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
    866 		if (chk->rec.data.TSN_seq == cum_ackp1) {
    867 			if (asoc->fragmented_delivery_inprogress == 0 &&
    868 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
    869 			    SCTP_DATA_FIRST_FRAG) {
    870 				/*
    871 				 * An empty queue, no delivery inprogress,
    872 				 * we hit the next one and it does NOT have
    873 				 * a FIRST fragment mark.
    874 				 */
    875 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
    876 				snprintf(msg, sizeof(msg),
    877 				         "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
    878 				         chk->rec.data.TSN_seq,
    879 				         chk->rec.data.stream_number,
    880 				         chk->rec.data.stream_seq);
    881 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    882 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_2;
    883 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
    884 				*abort_flag = 1;
    885 			} else if (asoc->fragmented_delivery_inprogress &&
    886 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
    887 				/*
    888 				 * We are doing a partial delivery and the
    889 				 * NEXT chunk MUST be either the LAST or
    890 				 * MIDDLE fragment NOT a FIRST
    891 				 */
    892 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
    893 				snprintf(msg, sizeof(msg),
    894 				         "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
    895 				         chk->rec.data.TSN_seq,
    896 				         chk->rec.data.stream_number,
    897 				         chk->rec.data.stream_seq);
    898 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    899 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_3;
    900 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
    901 				*abort_flag = 1;
    902 			} else if (asoc->fragmented_delivery_inprogress) {
    903 				/*
    904 				 * Here we are ok with a MIDDLE or LAST
    905 				 * piece
    906 				 */
    907 				if (chk->rec.data.stream_number !=
    908 				    asoc->str_of_pdapi) {
    909 					/* Got to be the right STR No */
    910 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
    911 						chk->rec.data.stream_number,
    912 						asoc->str_of_pdapi);
    913 					snprintf(msg, sizeof(msg),
    914 					         "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
    915 					         asoc->str_of_pdapi,
    916 					         chk->rec.data.TSN_seq,
    917 					         chk->rec.data.stream_number,
    918 					         chk->rec.data.stream_seq);
    919 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    920 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_4;
    921 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
    922 					*abort_flag = 1;
    923 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
    924 					    SCTP_DATA_UNORDERED &&
    925 					    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
    926 					/* Got to be the right STR Seq */
    927 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
    928 						chk->rec.data.stream_seq,
    929 						asoc->ssn_of_pdapi);
    930 					snprintf(msg, sizeof(msg),
    931 					         "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
    932 					         asoc->ssn_of_pdapi,
    933 					         chk->rec.data.TSN_seq,
    934 					         chk->rec.data.stream_number,
    935 					         chk->rec.data.stream_seq);
    936 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    937 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_5;
    938 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
    939 					*abort_flag = 1;
    940 				}
    941 			}
    942 		}
    943 		return;
    944 	}
    945 	/* Find its place */
    946 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
    947 		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
    948 			/*
    949 			 * one in queue is bigger than the new one, insert
    950 			 * before this one
    951 			 */
    952 			/* A check */
    953 			asoc->size_on_reasm_queue += chk->send_size;
    954 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
    955 			next = at;
    956 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
    957 			break;
    958 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
     959 			/* Gak, he sent me a duplicate TSN */
    960 			/*
    961 			 * foo bar, I guess I will just free this new guy,
    962 			 * should we abort too? FIX ME MAYBE? Or it COULD be
     963 			 * that the SSNs have wrapped. Maybe I should
    964 			 * compare to TSN somehow... sigh for now just blow
    965 			 * away the chunk!
    966 			 */
    967 			if (chk->data) {
    968 				sctp_m_freem(chk->data);
    969 				chk->data = NULL;
    970 			}
    971 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    972 			return;
    973 		} else {
    974 			prev = at;
    975 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
    976 				/*
    977 				 * We are at the end, insert it after this
    978 				 * one
    979 				 */
    980 				/* check it first */
    981 				asoc->size_on_reasm_queue += chk->send_size;
    982 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
    983 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
    984 				break;
    985 			}
    986 		}
    987 	}
    988 	/* Now the audits */
    989 	if (prev) {
    990 		prev_tsn = chk->rec.data.TSN_seq - 1;
    991 		if (prev_tsn == prev->rec.data.TSN_seq) {
    992 			/*
    993 			 * Ok the one I am dropping onto the end is the
     994 			 * NEXT. A bit of validation here.
    995 			 */
    996 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
    997 			    SCTP_DATA_FIRST_FRAG ||
    998 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
    999 			    SCTP_DATA_MIDDLE_FRAG) {
   1000 				/*
   1001 				 * Insert chk MUST be a MIDDLE or LAST
   1002 				 * fragment
   1003 				 */
   1004 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
   1005 				    SCTP_DATA_FIRST_FRAG) {
    1006 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
   1007 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
   1008 					snprintf(msg, sizeof(msg),
   1009 					         "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1010 					         chk->rec.data.TSN_seq,
   1011 					         chk->rec.data.stream_number,
   1012 					         chk->rec.data.stream_seq);
   1013 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1014 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_6;
   1015 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1016 					*abort_flag = 1;
   1017 					return;
   1018 				}
   1019 				if (chk->rec.data.stream_number !=
   1020 				    prev->rec.data.stream_number) {
   1021 					/*
   1022 					 * Huh, need the correct STR here,
   1023 					 * they must be the same.
   1024 					 */
   1025 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
   1026 					        chk->rec.data.stream_number,
   1027 					        prev->rec.data.stream_number);
   1028 					snprintf(msg, sizeof(msg),
   1029 					         "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1030 					         prev->rec.data.stream_number,
   1031 					         chk->rec.data.TSN_seq,
   1032 					         chk->rec.data.stream_number,
   1033 					         chk->rec.data.stream_seq);
   1034 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1035 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7;
   1036 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1037 					*abort_flag = 1;
   1038 					return;
   1039 				}
   1040 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
   1041 				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
   1042 					/*
   1043 					 * Huh, need the same ordering here,
   1044 					 * they must be the same.
   1045 					 */
   1046 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
   1047 					snprintf(msg, sizeof(msg),
   1048 					         "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
   1049 					         (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
   1050 					         chk->rec.data.TSN_seq,
   1051 					         (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
   1052 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1053 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7;
   1054 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1055 					*abort_flag = 1;
   1056 					return;
   1057 				}
   1058 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
   1059 				    chk->rec.data.stream_seq !=
   1060 				    prev->rec.data.stream_seq) {
   1061 					/*
   1062 					 * Huh, need the correct STR here,
   1063 					 * they must be the same.
   1064 					 */
   1065 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
   1066 						chk->rec.data.stream_seq,
   1067 						prev->rec.data.stream_seq);
   1068 					snprintf(msg, sizeof(msg),
   1069 					         "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1070 					         prev->rec.data.stream_seq,
   1071 					         chk->rec.data.TSN_seq,
   1072 					         chk->rec.data.stream_number,
   1073 					         chk->rec.data.stream_seq);
   1074 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1075 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_8;
   1076 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1077 					*abort_flag = 1;
   1078 					return;
   1079 				}
   1080 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
   1081 			    SCTP_DATA_LAST_FRAG) {
   1082 				/* Insert chk MUST be a FIRST */
   1083 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
   1084 				    SCTP_DATA_FIRST_FRAG) {
   1085 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
   1086 					snprintf(msg, sizeof(msg),
   1087 					         "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1088 					         chk->rec.data.TSN_seq,
   1089 					         chk->rec.data.stream_number,
   1090 					         chk->rec.data.stream_seq);
   1091 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1092 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_9;
   1093 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1094 					*abort_flag = 1;
   1095 					return;
   1096 				}
   1097 			}
   1098 		}
   1099 	}
   1100 	if (next) {
   1101 		post_tsn = chk->rec.data.TSN_seq + 1;
   1102 		if (post_tsn == next->rec.data.TSN_seq) {
   1103 			/*
   1104 			 * Ok the one I am inserting ahead of is my NEXT
    1105 			 * one. A bit of validation here.
   1106 			 */
   1107 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
   1108 				/* Insert chk MUST be a last fragment */
   1109 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
   1110 				    != SCTP_DATA_LAST_FRAG) {
   1111 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
   1112 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
   1113 					snprintf(msg, sizeof(msg),
   1114 					         "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1115 					         chk->rec.data.TSN_seq,
   1116 					         chk->rec.data.stream_number,
   1117 					         chk->rec.data.stream_seq);
   1118 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1119 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_10;
   1120 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1121 					*abort_flag = 1;
   1122 					return;
   1123 				}
   1124 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
   1125 				    SCTP_DATA_MIDDLE_FRAG ||
   1126 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
   1127 			    SCTP_DATA_LAST_FRAG) {
   1128 				/*
   1129 				 * Insert chk CAN be MIDDLE or FIRST NOT
   1130 				 * LAST
   1131 				 */
   1132 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
   1133 				    SCTP_DATA_LAST_FRAG) {
   1134 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
   1135 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
   1136 					snprintf(msg, sizeof(msg),
   1137 					         "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1138 					         chk->rec.data.TSN_seq,
   1139 					         chk->rec.data.stream_number,
   1140 					         chk->rec.data.stream_seq);
   1141 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1142 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_11;
   1143 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1144 					*abort_flag = 1;
   1145 					return;
   1146 				}
   1147 				if (chk->rec.data.stream_number !=
   1148 				    next->rec.data.stream_number) {
   1149 					/*
   1150 					 * Huh, need the correct STR here,
   1151 					 * they must be the same.
   1152 					 */
   1153 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
   1154 						chk->rec.data.stream_number,
   1155 						next->rec.data.stream_number);
   1156 					snprintf(msg, sizeof(msg),
   1157 					         "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1158 					         next->rec.data.stream_number,
   1159 					         chk->rec.data.TSN_seq,
   1160 					         chk->rec.data.stream_number,
   1161 					         chk->rec.data.stream_seq);
   1162 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1163 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12;
   1164 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1165 					*abort_flag = 1;
   1166 					return;
   1167 				}
   1168 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
   1169 				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
   1170 					/*
   1171 					 * Huh, need the same ordering here,
   1172 					 * they must be the same.
   1173 					 */
   1174 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
   1175 					snprintf(msg, sizeof(msg),
   1176 					         "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
   1177 					         (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
   1178 					         chk->rec.data.TSN_seq,
   1179 					         (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
   1180 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1181 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12;
   1182 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1183 					*abort_flag = 1;
   1184 					return;
   1185 				}
   1186 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
   1187 				    chk->rec.data.stream_seq !=
   1188 				    next->rec.data.stream_seq) {
   1189 					/*
   1190 					 * Huh, need the correct STR here,
   1191 					 * they must be the same.
   1192 					 */
   1193 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
   1194 						chk->rec.data.stream_seq,
   1195 						next->rec.data.stream_seq);
   1196 					snprintf(msg, sizeof(msg),
   1197 					         "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1198 					         next->rec.data.stream_seq,
   1199 					         chk->rec.data.TSN_seq,
   1200 					         chk->rec.data.stream_number,
   1201 					         chk->rec.data.stream_seq);
   1202 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1203 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_13;
   1204 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1205 					*abort_flag = 1;
   1206 					return;
   1207 				}
   1208 			}
   1209 		}
   1210 	}
   1211 	/* Do we need to do some delivery? check */
   1212 	sctp_deliver_reasm_check(stcb, asoc);
   1213 }
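
/*
 * Summary of the adjacency rules enforced above (illustration): for a
 * chunk inserted at TSN t with a neighbor at t-1 (prev) or t+1 (next):
 *
 *	prev is FIRST/MIDDLE -> chk must be MIDDLE or LAST, with the
 *	                        same SID, same U-bit, and same SSN when
 *	                        ordered
 *	prev is LAST         -> chk must be a FIRST
 *	next is FIRST        -> chk must be a LAST
 *	next is MIDDLE/LAST  -> chk must not be a LAST, with the same
 *	                        SID, same U-bit, and same SSN when
 *	                        ordered
 *
 * Any violation builds a PROTOCOL_VIOLATION cause and aborts the
 * association.
 */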
   1214 
   1215 /*
    1216  * This is an unfortunate routine. It checks to make sure an evil guy is not
    1217  * stuffing us full of bad packet fragments. A broken peer could also do this,
    1218  * but that is doubtful. It is too bad I must worry about evil crackers, sigh
    1219  * :< more cycles.
   1220  */
   1221 static int
   1222 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
   1223     uint32_t TSN_seq)
   1224 {
   1225 	struct sctp_tmit_chunk *at;
   1226 	uint32_t tsn_est;
   1227 
   1228 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
   1229 		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
   1230 			/* is it one bigger? */
   1231 			tsn_est = at->rec.data.TSN_seq + 1;
   1232 			if (tsn_est == TSN_seq) {
   1233 				/* yep. It better be a last then */
   1234 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
   1235 				    SCTP_DATA_LAST_FRAG) {
   1236 					/*
   1237 					 * Ok this guy belongs next to a guy
   1238 					 * that is NOT last, it should be a
   1239 					 * middle/last, not a complete
   1240 					 * chunk.
   1241 					 */
   1242 					return (1);
   1243 				} else {
   1244 					/*
    1245 					 * This guy is ok since it's a LAST
    1246 					 * and the new chunk is a fully
    1247 					 * self-contained one.
   1248 					 */
   1249 					return (0);
   1250 				}
   1251 			}
   1252 		} else if (TSN_seq == at->rec.data.TSN_seq) {
   1253 			/* Software error since I have a dup? */
   1254 			return (1);
   1255 		} else {
   1256 			/*
    1257 			 * Ok, 'at' is larger than the new chunk, but does it
    1258 			 * need to be right before it?
   1259 			 */
   1260 			tsn_est = TSN_seq + 1;
   1261 			if (tsn_est == at->rec.data.TSN_seq) {
   1262 				/* Yep, It better be a first */
   1263 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
   1264 				    SCTP_DATA_FIRST_FRAG) {
   1265 					return (1);
   1266 				} else {
   1267 					return (0);
   1268 				}
   1269 			}
   1270 		}
   1271 	}
   1272 	return (0);
   1273 }
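
/*
 * Example (illustration): with a MIDDLE fragment queued at TSN 40,
 * this returns 1 for an arriving TSN 41, since slot 41 must hold the
 * MIDDLE/LAST continuation of 40's message; if 40 were a LAST
 * fragment, TSN 41 would start a new message and 0 is returned.
 */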
   1274 
   1275 static int
   1276 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
   1277     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
   1278     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
   1279     int *break_flag, int last_chunk)
   1280 {
   1281 	/* Process a data chunk */
   1283 	struct sctp_tmit_chunk *chk;
   1284 	uint32_t tsn, gap;
   1285 	struct mbuf *dmbuf;
   1286 	int the_len;
   1287 	int need_reasm_check = 0;
   1288 	uint16_t strmno, strmseq;
   1289 	struct mbuf *op_err;
   1290 	char msg[SCTP_DIAG_INFO_LEN];
   1291 	struct sctp_queued_to_read *control;
   1292 	int ordered;
   1293 	uint32_t protocol_id;
   1294 	uint8_t chunk_flags;
   1295 	struct sctp_stream_reset_list *liste;
   1296 
   1297 	chk = NULL;
   1298 	tsn = ntohl(ch->dp.tsn);
   1299 	chunk_flags = ch->ch.chunk_flags;
   1300 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
   1301 		asoc->send_sack = 1;
   1302 	}
   1303 	protocol_id = ch->dp.protocol_id;
   1304 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
   1305 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   1306 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
   1307 	}
   1308 	if (stcb == NULL) {
   1309 		return (0);
   1310 	}
   1311 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
   1312 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
   1313 		/* It is a duplicate */
   1314 		SCTP_STAT_INCR(sctps_recvdupdata);
   1315 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
   1316 			/* Record a dup for the next outbound sack */
   1317 			asoc->dup_tsns[asoc->numduptsns] = tsn;
   1318 			asoc->numduptsns++;
   1319 		}
   1320 		asoc->send_sack = 1;
   1321 		return (0);
   1322 	}
   1323 	/* Calculate the number of TSN's between the base and this TSN */
   1324 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
   1325 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
   1326 		/* Can't hold the bit in the mapping at max array, toss it */
   1327 		return (0);
   1328 	}
   1329 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
   1330 		SCTP_TCB_LOCK_ASSERT(stcb);
   1331 		if (sctp_expand_mapping_array(asoc, gap)) {
   1332 			/* Can't expand, drop it */
   1333 			return (0);
   1334 		}
   1335 	}
   1336 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
   1337 		*high_tsn = tsn;
   1338 	}
   1339 	/* See if we have received this one already */
   1340 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
   1341 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
   1342 		SCTP_STAT_INCR(sctps_recvdupdata);
   1343 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
   1344 			/* Record a dup for the next outbound sack */
   1345 			asoc->dup_tsns[asoc->numduptsns] = tsn;
   1346 			asoc->numduptsns++;
   1347 		}
   1348 		asoc->send_sack = 1;
   1349 		return (0);
   1350 	}
   1351 	/*
1352 	 * Check the GONE flag; duplicates would have caused a SACK
1353 	 * to be sent up above.
   1354 	 */
   1355 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
   1356 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
   1357 	     (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
   1358 		/*
1359 		 * wait a minute, the socket is gone; there is no longer a
1360 		 * receiver. Send the peer an ABORT!
   1361 		 */
   1362 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
   1363 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1364 		*abort_flag = 1;
   1365 		return (0);
   1366 	}
   1367 	/*
1368 	 * Now before going further we check whether there is room. If NOT,
1369 	 * we MAY still let one through, but only IF this TSN is the one we
1370 	 * are waiting for on a partial delivery API.
   1371 	 */
   1372 
   1373 	/* now do the tests */
   1374 	if (((asoc->cnt_on_all_streams +
   1375 	      asoc->cnt_on_reasm_queue +
   1376 	      asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
   1377 	    (((int)asoc->my_rwnd) <= 0)) {
   1378 		/*
   1379 		 * When we have NO room in the rwnd we check to make sure
   1380 		 * the reader is doing its job...
   1381 		 */
   1382 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1383 			/* some data to read, wake up the reader */
   1384 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   1385 			struct socket *so;
   1386 
   1387 			so = SCTP_INP_SO(stcb->sctp_ep);
   1388 			atomic_add_int(&stcb->asoc.refcnt, 1);
   1389 			SCTP_TCB_UNLOCK(stcb);
   1390 			SCTP_SOCKET_LOCK(so, 1);
   1391 			SCTP_TCB_LOCK(stcb);
   1392 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
   1393 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
   1394 				/* assoc was freed while we were unlocked */
   1395 				SCTP_SOCKET_UNLOCK(so, 1);
   1396 				return (0);
   1397 			}
   1398 #endif
   1399 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
   1400 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   1401 			SCTP_SOCKET_UNLOCK(so, 1);
   1402 #endif
   1403 		}
   1404 		/* now is it in the mapping array of what we have accepted? */
   1405 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
   1406 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1407 			/* Nope, not in the valid range; dump it */
   1408 			sctp_set_rwnd(stcb, asoc);
   1409 			if ((asoc->cnt_on_all_streams +
   1410 			     asoc->cnt_on_reasm_queue +
   1411 			     asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
   1412 				SCTP_STAT_INCR(sctps_datadropchklmt);
   1413 			} else {
   1414 				SCTP_STAT_INCR(sctps_datadroprwnd);
   1415 			}
   1416 			*break_flag = 1;
   1417 			return (0);
   1418 		}
   1419 	}
   1420 	strmno = ntohs(ch->dp.stream_id);
   1421 	if (strmno >= asoc->streamincnt) {
   1422 		struct sctp_paramhdr *phdr;
   1423 		struct mbuf *mb;
   1424 
   1425 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
   1426 					   0, M_NOWAIT, 1, MT_DATA);
   1427 		if (mb != NULL) {
   1428 			/* add some space up front so prepend will work well */
   1429 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
   1430 			phdr = mtod(mb, struct sctp_paramhdr *);
   1431 			/*
1432 			 * Error causes are just params; this one has two
1433 			 * back-to-back phdrs, one with the error type and
1434 			 * size, the other with the stream id and a reserved field
   1435 			 */
   1436 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
   1437 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
   1438 			phdr->param_length =
   1439 			    htons(sizeof(struct sctp_paramhdr) * 2);
   1440 			phdr++;
   1441 			/* We insert the stream in the type field */
   1442 			phdr->param_type = ch->dp.stream_id;
   1443 			/* And set the length to 0 for the rsvd field */
   1444 			phdr->param_length = 0;
   1445 			sctp_queue_op_err(stcb, mb);
   1446 		}
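		/*
		 * A sketch of the error cause built above: two back-to-back
		 * 4-byte sctp_paramhdr's,
		 *
		 *   +---------------------------+------------------+
		 *   | SCTP_CAUSE_INVALID_STREAM | param_length = 8 |
		 *   +---------------------------+------------------+
		 *   | stream_id (as received)   | reserved = 0     |
		 *   +---------------------------+------------------+
		 */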
   1447 		SCTP_STAT_INCR(sctps_badsid);
   1448 		SCTP_TCB_LOCK_ASSERT(stcb);
   1449 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
   1450 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
   1451 			asoc->highest_tsn_inside_nr_map = tsn;
   1452 		}
   1453 		if (tsn == (asoc->cumulative_tsn + 1)) {
   1454 			/* Update cum-ack */
   1455 			asoc->cumulative_tsn = tsn;
   1456 		}
   1457 		return (0);
   1458 	}
   1459 	/*
1460 	 * Before we continue, let's validate that we are not being fooled
1461 	 * by an evil attacker. We can only have 4k chunks, based on the TSN
1462 	 * spread allowed by the mapping array (512 * 8 bits), so there is
1463 	 * no way our stream sequence numbers could have wrapped. We of
1464 	 * course only validate the FIRST fragment, so the bit must be set.
   1465 	 */
   1466 	strmseq = ntohs(ch->dp.stream_sequence);
   1467 #ifdef SCTP_ASOCLOG_OF_TSNS
   1468 	SCTP_TCB_LOCK_ASSERT(stcb);
   1469 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
   1470 		asoc->tsn_in_at = 0;
   1471 		asoc->tsn_in_wrapped = 1;
   1472 	}
   1473 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
   1474 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
   1475 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
   1476 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
   1477 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
   1478 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
   1479 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
   1480 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
   1481 	asoc->tsn_in_at++;
   1482 #endif
   1483 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
   1484 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
   1485 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
   1486 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
   1487 		/* The incoming sseq is behind where we last delivered? */
   1488 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
   1489 			strmseq, asoc->strmin[strmno].last_sequence_delivered);
   1490 
   1491 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1492 		         asoc->strmin[strmno].last_sequence_delivered,
   1493 		         tsn, strmno, strmseq);
   1494 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1495 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_14;
   1496 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1497 		*abort_flag = 1;
   1498 		return (0);
   1499 	}
   1500 	/************************************
1501 	 * From here down we may find ch-> invalid,
1502 	 * so it's a good idea NOT to use it.
   1503 	 *************************************/
   1504 
   1505 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
   1506 	if (last_chunk == 0) {
   1507 		dmbuf = SCTP_M_COPYM(*m,
   1508 				     (offset + sizeof(struct sctp_data_chunk)),
   1509 				     the_len, M_NOWAIT);
   1510 #ifdef SCTP_MBUF_LOGGING
   1511 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
   1512 			struct mbuf *mat;
   1513 
   1514 			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
   1515 				if (SCTP_BUF_IS_EXTENDED(mat)) {
   1516 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
   1517 				}
   1518 			}
   1519 		}
   1520 #endif
   1521 	} else {
   1522 		/* We can steal the last chunk */
   1523 		int l_len;
   1524 		dmbuf = *m;
   1525 		/* lop off the top part */
   1526 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
   1527 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
   1528 			l_len = SCTP_BUF_LEN(dmbuf);
   1529 		} else {
1530 			/* need to count up the size; hopefully
1531 			 * we do not hit this too often :-0
   1532 			 */
   1533 			struct mbuf *lat;
   1534 
   1535 			l_len = 0;
   1536 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
   1537 				l_len += SCTP_BUF_LEN(lat);
   1538 			}
   1539 		}
   1540 		if (l_len > the_len) {
1541 			/* Trim the rounding bytes off the end too */
   1542 			m_adj(dmbuf, -(l_len - the_len));
   1543 		}
   1544 	}
   1545 	if (dmbuf == NULL) {
   1546 		SCTP_STAT_INCR(sctps_nomem);
   1547 		return (0);
   1548 	}
   1549 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
   1550 	    asoc->fragmented_delivery_inprogress == 0 &&
   1551 	    TAILQ_EMPTY(&asoc->resetHead) &&
   1552 	    ((ordered == 0) ||
   1553 	    ((uint16_t)(asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
   1554 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
   1555 		/* Candidate for express delivery */
   1556 		/*
1557 		 * It's not fragmented, no PD-API is up, nothing is in the
1558 		 * delivery queue, it's un-ordered OR ordered and the next to
1559 		 * deliver AND nothing else is stuck on the stream queue,
1560 		 * and there is room for it in the socket buffer. Let's just
1561 		 * stuff it up the buffer....
   1562 		 */
   1563 
   1564 		/* It would be nice to avoid this copy if we could :< */
   1565 		sctp_alloc_a_readq(stcb, control);
   1566 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
   1567 					   protocol_id,
   1568 					   strmno, strmseq,
   1569 					   chunk_flags,
   1570 					   dmbuf);
   1571 		if (control == NULL) {
   1572 			goto failed_express_del;
   1573 		}
   1574 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
   1575 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
   1576 			asoc->highest_tsn_inside_nr_map = tsn;
   1577 		}
   1578 		sctp_add_to_readq(stcb->sctp_ep, stcb,
   1579 		                  control, &stcb->sctp_socket->so_rcv,
   1580 		                  1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
   1581 
   1582 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
   1583 			/* for ordered, bump what we delivered */
   1584 			asoc->strmin[strmno].last_sequence_delivered++;
   1585 		}
   1586 		SCTP_STAT_INCR(sctps_recvexpress);
   1587 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
   1588 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
   1589 					      SCTP_STR_LOG_FROM_EXPRS_DEL);
   1590 		}
   1591 		control = NULL;
   1592 
   1593 		goto finish_express_del;
   1594 	}
   1595 failed_express_del:
   1596 	/* If we reach here this is a new chunk */
   1597 	chk = NULL;
   1598 	control = NULL;
   1599 	/* Express for fragmented delivery? */
   1600 	if ((asoc->fragmented_delivery_inprogress) &&
   1601 	    (stcb->asoc.control_pdapi) &&
   1602 	    (asoc->str_of_pdapi == strmno) &&
   1603 	    (asoc->ssn_of_pdapi == strmseq)
   1604 		) {
   1605 		control = stcb->asoc.control_pdapi;
   1606 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1607 			/* There can't be another FIRST fragment */
   1608 			goto failed_pdapi_express_del;
   1609 		}
   1610 		if (tsn == (control->sinfo_tsn + 1)) {
   1611 			/* Yep, we can add it on */
   1612 			int end = 0;
   1613 
   1614 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
   1615 				end = 1;
   1616 			}
   1617 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
   1618 			                         tsn,
   1619 			                         &stcb->sctp_socket->so_rcv)) {
   1620 				SCTP_PRINTF("Append fails end:%d\n", end);
   1621 				goto failed_pdapi_express_del;
   1622 			}
   1623 
   1624 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
   1625 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
   1626 				asoc->highest_tsn_inside_nr_map = tsn;
   1627 			}
   1628 			SCTP_STAT_INCR(sctps_recvexpressm);
   1629 			asoc->tsn_last_delivered = tsn;
   1630 			asoc->fragment_flags = chunk_flags;
   1631 			asoc->tsn_of_pdapi_last_delivered = tsn;
   1632 			asoc->last_flags_delivered = chunk_flags;
   1633 			asoc->last_strm_seq_delivered = strmseq;
   1634 			asoc->last_strm_no_delivered = strmno;
   1635 			if (end) {
   1636 				/* clean up the flags and such */
   1637 				asoc->fragmented_delivery_inprogress = 0;
   1638 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
   1639 					asoc->strmin[strmno].last_sequence_delivered++;
   1640 				}
   1641 				stcb->asoc.control_pdapi = NULL;
   1642 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
   1643 					/* There could be another message ready */
   1644 					need_reasm_check = 1;
   1645 				}
   1646 			}
   1647 			control = NULL;
   1648 			goto finish_express_del;
   1649 		}
   1650 	}
   1651  failed_pdapi_express_del:
   1652 	control = NULL;
   1653 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
   1654 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
   1655 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
   1656 			asoc->highest_tsn_inside_nr_map = tsn;
   1657 		}
   1658 	} else {
   1659 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
   1660 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
   1661 			asoc->highest_tsn_inside_map = tsn;
   1662 		}
   1663 	}
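	/*
	 * Note: with sctp_do_drain off the TSN was marked in the
	 * non-renegable (nr) map, so we promise never to revoke it;
	 * otherwise it went into the renegable map and may still be
	 * dropped under memory pressure and reneged in a later SACK.
	 */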
   1664 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
   1665 		sctp_alloc_a_chunk(stcb, chk);
   1666 		if (chk == NULL) {
   1667 			/* No memory so we drop the chunk */
   1668 			SCTP_STAT_INCR(sctps_nomem);
   1669 			if (last_chunk == 0) {
   1670 				/* we copied it, free the copy */
   1671 				sctp_m_freem(dmbuf);
   1672 			}
   1673 			return (0);
   1674 		}
   1675 		chk->rec.data.TSN_seq = tsn;
   1676 		chk->no_fr_allowed = 0;
   1677 		chk->rec.data.stream_seq = strmseq;
   1678 		chk->rec.data.stream_number = strmno;
   1679 		chk->rec.data.payloadtype = protocol_id;
   1680 		chk->rec.data.context = stcb->asoc.context;
   1681 		chk->rec.data.doing_fast_retransmit = 0;
   1682 		chk->rec.data.rcv_flags = chunk_flags;
   1683 		chk->asoc = asoc;
   1684 		chk->send_size = the_len;
   1685 		chk->whoTo = net;
   1686 		atomic_add_int(&net->ref_count, 1);
   1687 		chk->data = dmbuf;
   1688 	} else {
   1689 		sctp_alloc_a_readq(stcb, control);
   1690 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
   1691 		    protocol_id,
   1692 		    strmno, strmseq,
   1693 		    chunk_flags,
   1694 		    dmbuf);
   1695 		if (control == NULL) {
   1696 			/* No memory so we drop the chunk */
   1697 			SCTP_STAT_INCR(sctps_nomem);
   1698 			if (last_chunk == 0) {
   1699 				/* we copied it, free the copy */
   1700 				sctp_m_freem(dmbuf);
   1701 			}
   1702 			return (0);
   1703 		}
   1704 		control->length = the_len;
   1705 	}
   1706 
   1707 	/* Mark it as received */
   1708 	/* Now queue it where it belongs */
   1709 	if (control != NULL) {
   1710 		/* First a sanity check */
   1711 		if (asoc->fragmented_delivery_inprogress) {
   1712 			/*
1713 			 * Ok, we have a fragmented delivery in progress;
1714 			 * if this chunk is next to deliver OR belongs, in
1715 			 * our view, to the reassembly queue, the peer is
1716 			 * evil or broken.
   1717 			 */
   1718 			uint32_t estimate_tsn;
   1719 
   1720 			estimate_tsn = asoc->tsn_last_delivered + 1;
   1721 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
   1722 			    (estimate_tsn == control->sinfo_tsn)) {
1723 				/* Evil/Broken peer */
   1724 				sctp_m_freem(control->data);
   1725 				control->data = NULL;
   1726 				if (control->whoFrom) {
   1727 					sctp_free_remote_addr(control->whoFrom);
   1728 					control->whoFrom = NULL;
   1729 				}
   1730 				sctp_free_a_readq(stcb, control);
1731 				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1732 				         tsn, strmno, strmseq);
   1733 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1734 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_15;
   1735 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1736 				*abort_flag = 1;
   1737 				if (last_chunk) {
   1738 					*m = NULL;
   1739 				}
   1740 				return (0);
   1741 			} else {
   1742 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
   1743 					sctp_m_freem(control->data);
   1744 					control->data = NULL;
   1745 					if (control->whoFrom) {
   1746 						sctp_free_remote_addr(control->whoFrom);
   1747 						control->whoFrom = NULL;
   1748 					}
   1749 					sctp_free_a_readq(stcb, control);
   1750 					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1751 					         tsn, strmno, strmseq);
   1752 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1753 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_16;
   1754 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1755 					*abort_flag = 1;
   1756 					if (last_chunk) {
   1757 						*m = NULL;
   1758 					}
   1759 					return (0);
   1760 				}
   1761 			}
   1762 		} else {
   1763 			/* No PDAPI running */
   1764 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
   1765 				/*
1766 				 * Reassembly queue is NOT empty; validate
1767 				 * that this tsn does not need to be in the
1768 				 * reassembly queue. If it does, then our
1769 				 * peer is broken or evil.
   1770 				 */
   1771 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
   1772 					sctp_m_freem(control->data);
   1773 					control->data = NULL;
   1774 					if (control->whoFrom) {
   1775 						sctp_free_remote_addr(control->whoFrom);
   1776 						control->whoFrom = NULL;
   1777 					}
   1778 					sctp_free_a_readq(stcb, control);
   1779 					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
   1780 					         tsn, strmno, strmseq);
   1781 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   1782 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_17;
   1783 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   1784 					*abort_flag = 1;
   1785 					if (last_chunk) {
   1786 						*m = NULL;
   1787 					}
   1788 					return (0);
   1789 				}
   1790 			}
   1791 		}
   1792 		/* ok, if we reach here we have passed the sanity checks */
   1793 		if (chunk_flags & SCTP_DATA_UNORDERED) {
   1794 			/* queue directly into socket buffer */
   1795 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
   1796 			sctp_add_to_readq(stcb->sctp_ep, stcb,
   1797 			                  control,
   1798 			                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
   1799 		} else {
   1800 			/*
1801 			 * Special check for when streams are resetting. We
1802 			 * could be smarter about this and check the
1803 			 * actual stream to see if it is not being reset;
1804 			 * that way we would not create head-of-line blocking
1805 			 * between streams being reset and those not being reset.
1806 			 *
1807 			 * We take complete messages that have a stream reset
1808 			 * intervening (aka the TSN is after where our
1809 			 * cum-ack needs to be) off and put them on the
1810 			 * pending_reply_queue. The reassembly ones we do
1811 			 * not have to worry about since they are all sorted
1812 			 * and processed by TSN order. It is only the
1813 			 * singletons I must worry about.
   1814 			 */
   1815 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
   1816 			    SCTP_TSN_GT(tsn, liste->tsn)) {
   1817 				/*
1818 				 * yep, it's past where we need to reset... go
   1819 				 * ahead and queue it.
   1820 				 */
   1821 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
   1822 					/* first one on */
   1823 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
   1824 				} else {
   1825 					struct sctp_queued_to_read *ctlOn, *nctlOn;
   1826 					unsigned char inserted = 0;
   1827 
   1828 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
   1829 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
   1830 							continue;
   1831 						} else {
   1832 							/* found it */
   1833 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
   1834 							inserted = 1;
   1835 							break;
   1836 						}
   1837 					}
   1838 					if (inserted == 0) {
   1839 						/*
1840 						 * not inserted before any
1841 						 * existing entry, so it
1842 						 * must go at the end.
   1843 						 */
   1844 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
   1845 					}
   1846 				}
   1847 			} else {
   1848 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
   1849 				if (*abort_flag) {
   1850 					if (last_chunk) {
   1851 						*m = NULL;
   1852 					}
   1853 					return (0);
   1854 				}
   1855 			}
   1856 		}
   1857 	} else {
   1858 		/* Into the re-assembly queue */
   1859 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
   1860 		if (*abort_flag) {
   1861 			/*
   1862 			 * the assoc is now gone and chk was put onto the
   1863 			 * reasm queue, which has all been freed.
   1864 			 */
   1865 			if (last_chunk) {
   1866 				*m = NULL;
   1867 			}
   1868 			return (0);
   1869 		}
   1870 	}
   1871 finish_express_del:
   1872 	if (tsn == (asoc->cumulative_tsn + 1)) {
   1873 		/* Update cum-ack */
   1874 		asoc->cumulative_tsn = tsn;
   1875 	}
   1876 	if (last_chunk) {
   1877 		*m = NULL;
   1878 	}
   1879 	if (ordered) {
   1880 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
   1881 	} else {
   1882 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
   1883 	}
   1884 	SCTP_STAT_INCR(sctps_recvdata);
   1885 	/* Set it present please */
   1886 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
   1887 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
   1888 	}
   1889 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   1890 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
   1891 			     asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
   1892 	}
   1893 	/* check the special flag for stream resets */
   1894 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
   1895 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
   1896 		/*
1897 		 * we have finished working through the backlogged TSNs; now
1898 		 * it is time to reset streams. 1: call reset function. 2: free
1899 		 * pending_reply space. 3: distribute any chunks in the
1900 		 * pending_reply_queue.
   1901 		 */
   1902 		struct sctp_queued_to_read *ctl, *nctl;
   1903 
   1904 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
   1905 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
   1906 		SCTP_FREE(liste, SCTP_M_STRESET);
   1907 		/*sa_ignore FREED_MEMORY*/
   1908 		liste = TAILQ_FIRST(&asoc->resetHead);
   1909 		if (TAILQ_EMPTY(&asoc->resetHead)) {
   1910 			/* All can be removed */
   1911 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
   1912 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
   1913 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
   1914 				if (*abort_flag) {
   1915 					return (0);
   1916 				}
   1917 			}
   1918 		} else {
   1919 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
   1920 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
   1921 					break;
   1922 				}
   1923 				/*
   1924 				 * if ctl->sinfo_tsn is <= liste->tsn we can
1925 				 * process it, which is the negation of
   1926 				 * ctl->sinfo_tsn > liste->tsn
   1927 				 */
   1928 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
   1929 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
   1930 				if (*abort_flag) {
   1931 					return (0);
   1932 				}
   1933 			}
   1934 		}
   1935 		/*
   1936 		 * Now service re-assembly to pick up anything that has been
1937 		 * held on the reassembly queue.
   1938 		 */
   1939 		sctp_deliver_reasm_check(stcb, asoc);
   1940 		need_reasm_check = 0;
   1941 	}
   1942 
   1943 	if (need_reasm_check) {
1944 		/* Another one waits? */
   1945 		sctp_deliver_reasm_check(stcb, asoc);
   1946 	}
   1947 	return (1);
   1948 }
   1949 
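/*
 * sctp_map_lookup_tab[val] is the number of consecutive one-bits in val
 * counting up from the least significant bit, i.e. the position of the
 * lowest clear bit: e.g. 0x07 -> 3, 0x0b -> 2, 0xff -> 8. The slide code
 * below uses it to count how many TSNs are contiguously present in a
 * partially filled map byte.
 */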
   1950 int8_t sctp_map_lookup_tab[256] = {
   1951   0, 1, 0, 2, 0, 1, 0, 3,
   1952   0, 1, 0, 2, 0, 1, 0, 4,
   1953   0, 1, 0, 2, 0, 1, 0, 3,
   1954   0, 1, 0, 2, 0, 1, 0, 5,
   1955   0, 1, 0, 2, 0, 1, 0, 3,
   1956   0, 1, 0, 2, 0, 1, 0, 4,
   1957   0, 1, 0, 2, 0, 1, 0, 3,
   1958   0, 1, 0, 2, 0, 1, 0, 6,
   1959   0, 1, 0, 2, 0, 1, 0, 3,
   1960   0, 1, 0, 2, 0, 1, 0, 4,
   1961   0, 1, 0, 2, 0, 1, 0, 3,
   1962   0, 1, 0, 2, 0, 1, 0, 5,
   1963   0, 1, 0, 2, 0, 1, 0, 3,
   1964   0, 1, 0, 2, 0, 1, 0, 4,
   1965   0, 1, 0, 2, 0, 1, 0, 3,
   1966   0, 1, 0, 2, 0, 1, 0, 7,
   1967   0, 1, 0, 2, 0, 1, 0, 3,
   1968   0, 1, 0, 2, 0, 1, 0, 4,
   1969   0, 1, 0, 2, 0, 1, 0, 3,
   1970   0, 1, 0, 2, 0, 1, 0, 5,
   1971   0, 1, 0, 2, 0, 1, 0, 3,
   1972   0, 1, 0, 2, 0, 1, 0, 4,
   1973   0, 1, 0, 2, 0, 1, 0, 3,
   1974   0, 1, 0, 2, 0, 1, 0, 6,
   1975   0, 1, 0, 2, 0, 1, 0, 3,
   1976   0, 1, 0, 2, 0, 1, 0, 4,
   1977   0, 1, 0, 2, 0, 1, 0, 3,
   1978   0, 1, 0, 2, 0, 1, 0, 5,
   1979   0, 1, 0, 2, 0, 1, 0, 3,
   1980   0, 1, 0, 2, 0, 1, 0, 4,
   1981   0, 1, 0, 2, 0, 1, 0, 3,
   1982   0, 1, 0, 2, 0, 1, 0, 8
   1983 };
   1984 
   1985 
   1986 void
   1987 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
   1988 {
   1989 	/*
   1990 	 * Now we also need to check the mapping array in a couple of ways.
   1991 	 * 1) Did we move the cum-ack point?
   1992 	 *
   1993 	 * When you first glance at this you might think
1994 	 * that all entries that make up the position
1995 	 * of the cum-ack would be in the nr-mapping array
1996 	 * only, i.e. things up to the cum-ack are always
1997 	 * deliverable. That's true with one exception: when
1998 	 * it's a fragmented message we may not deliver the data
   1999 	 * until some threshold (or all of it) is in place. So
   2000 	 * we must OR the nr_mapping_array and mapping_array to
   2001 	 * get a true picture of the cum-ack.
   2002 	 */
   2003 	struct sctp_association *asoc;
   2004 	int at;
   2005 	uint8_t val;
   2006 	int slide_from, slide_end, lgap, distance;
   2007 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
   2008 
   2009 	asoc = &stcb->asoc;
   2010 
   2011 	old_cumack = asoc->cumulative_tsn;
   2012 	old_base = asoc->mapping_array_base_tsn;
   2013 	old_highest = asoc->highest_tsn_inside_map;
   2014 	/*
   2015 	 * We could probably improve this a small bit by calculating the
   2016 	 * offset of the current cum-ack as the starting point.
   2017 	 */
   2018 	at = 0;
   2019 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
   2020 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
   2021 		if (val == 0xff) {
   2022 			at += 8;
   2023 		} else {
   2024 			/* there is a 0 bit */
   2025 			at += sctp_map_lookup_tab[val];
   2026 			break;
   2027 		}
   2028 	}
   2029 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);
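	/*
	 * 'at' now counts the TSNs contiguously present starting at the
	 * base. E.g. with base_tsn = 100 and OR'd map bytes 0xff 0x03 ...,
	 * at = 8 + 2 = 10 and the cum-ack becomes 109 (TSNs 100..109 are
	 * all present).
	 */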
   2030 
   2031 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
   2032             SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
   2033 #ifdef INVARIANTS
   2034 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
   2035 		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
   2036 #else
   2037 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
   2038 			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
   2039 		sctp_print_mapping_array(asoc);
   2040 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   2041 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
   2042 		}
   2043 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
   2044 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
   2045 #endif
   2046 	}
   2047 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
   2048 		highest_tsn = asoc->highest_tsn_inside_nr_map;
   2049 	} else {
   2050 		highest_tsn = asoc->highest_tsn_inside_map;
   2051 	}
   2052 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
   2053 		/* The complete array was completed by a single FR */
   2054 		/* highest becomes the cum-ack */
   2055 		int clr;
   2056 #ifdef INVARIANTS
   2057 		unsigned int i;
   2058 #endif
   2059 
   2060 		/* clear the array */
   2061 		clr = ((at+7) >> 3);
   2062 		if (clr > asoc->mapping_array_size) {
   2063 			clr = asoc->mapping_array_size;
   2064 		}
   2065 		memset(asoc->mapping_array, 0, clr);
   2066 		memset(asoc->nr_mapping_array, 0, clr);
   2067 #ifdef INVARIANTS
   2068 		for (i = 0; i < asoc->mapping_array_size; i++) {
   2069 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2070 				SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
   2071 				sctp_print_mapping_array(asoc);
   2072 			}
   2073 		}
   2074 #endif
   2075 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
   2076 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
   2077 	} else if (at >= 8) {
   2078 		/* we can slide the mapping array down */
   2079 		/* slide_from holds where we hit the first NON 0xff byte */
   2080 
   2081 		/*
   2082 		 * now calculate the ceiling of the move using our highest
   2083 		 * TSN value
   2084 		 */
   2085 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
   2086 		slide_end = (lgap >> 3);
   2087 		if (slide_end < slide_from) {
   2088 			sctp_print_mapping_array(asoc);
   2089 #ifdef INVARIANTS
   2090 			panic("impossible slide");
   2091 #else
   2092 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
   2093 			            lgap, slide_end, slide_from, at);
   2094 			return;
   2095 #endif
   2096 		}
   2097 		if (slide_end > asoc->mapping_array_size) {
   2098 #ifdef INVARIANTS
   2099 			panic("would overrun buffer");
   2100 #else
   2101 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
   2102 			            asoc->mapping_array_size, slide_end);
   2103 			slide_end = asoc->mapping_array_size;
   2104 #endif
   2105 		}
   2106 		distance = (slide_end - slide_from) + 1;
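		/*
		 * 'distance' is the number of map bytes still needed to
		 * cover the highest TSN once the fully-acked prefix of
		 * slide_from bytes is dropped; the loop below copies those
		 * bytes to the front and the base TSN then advances by
		 * slide_from * 8.
		 */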
   2107 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   2108 			sctp_log_map(old_base, old_cumack, old_highest,
   2109 				     SCTP_MAP_PREPARE_SLIDE);
   2110 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
   2111 				     (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
   2112 		}
   2113 		if (distance + slide_from > asoc->mapping_array_size ||
   2114 		    distance < 0) {
   2115 			/*
   2116 			 * Here we do NOT slide forward the array so that
   2117 			 * hopefully when more data comes in to fill it up
   2118 			 * we will be able to slide it forward. Really I
   2119 			 * don't think this should happen :-0
   2120 			 */
   2121 
   2122 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   2123 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
   2124 					     (uint32_t) asoc->mapping_array_size,
   2125 					     SCTP_MAP_SLIDE_NONE);
   2126 			}
   2127 		} else {
   2128 			int ii;
   2129 
   2130 			for (ii = 0; ii < distance; ii++) {
   2131 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
   2132 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
   2133 
   2134 			}
   2135 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
   2136 				asoc->mapping_array[ii] = 0;
   2137 				asoc->nr_mapping_array[ii] = 0;
   2138 			}
   2139 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
   2140 				asoc->highest_tsn_inside_map += (slide_from << 3);
   2141 			}
   2142 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
   2143 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
   2144 			}
   2145 			asoc->mapping_array_base_tsn += (slide_from << 3);
   2146 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   2147 				sctp_log_map(asoc->mapping_array_base_tsn,
   2148 					     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
   2149 					     SCTP_MAP_SLIDE_RESULT);
   2150 			}
   2151 		}
   2152 	}
   2153 }
   2154 
   2155 void
   2156 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
   2157 {
   2158 	struct sctp_association *asoc;
   2159 	uint32_t highest_tsn;
   2160 
   2161 	asoc = &stcb->asoc;
   2162 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
   2163 		highest_tsn = asoc->highest_tsn_inside_nr_map;
   2164 	} else {
   2165 		highest_tsn = asoc->highest_tsn_inside_map;
   2166 	}
   2167 
   2168 	/*
   2169 	 * Now we need to see if we need to queue a sack or just start the
   2170 	 * timer (if allowed).
   2171 	 */
   2172 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
   2173 		/*
2174 		 * Ok, special case for the SHUTDOWN-SENT state: here we
2175 		 * make sure the SACK timer is off and instead send a
2176 		 * SHUTDOWN and a SACK.
   2177 		 */
   2178 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
   2179 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
   2180 			                stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA+SCTP_LOC_18);
   2181 		}
   2182 		sctp_send_shutdown(stcb,
   2183 				   ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
   2184 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
   2185 	} else {
   2186 		int is_a_gap;
   2187 
2188 		/* is there a gap now? */
   2189 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
   2190 
   2191 		/*
   2192 		 * CMT DAC algorithm: increase number of packets
   2193 		 * received since last ack
   2194 		 */
   2195 		stcb->asoc.cmt_dac_pkts_rcvd++;
   2196 
   2197 		if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
   2198 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
   2199 		                                         * longer is one */
   2200 		    (stcb->asoc.numduptsns) ||          /* we have dup's */
   2201 		    (is_a_gap) ||                       /* is still a gap */
   2202 		    (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
   2203 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
   2204 			) {
   2205 
   2206 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
   2207 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
   2208 			    (stcb->asoc.send_sack == 0) &&
   2209 			    (stcb->asoc.numduptsns == 0) &&
   2210 			    (stcb->asoc.delayed_ack) &&
   2211 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
   2212 
   2213 				/*
2214 				 * CMT DAC algorithm: with CMT,
2215 				 * delay acks even in the face of
2216 				 * reordering. Therefore, acks
2217 				 * that do not have to be sent
2218 				 * for the above reasons
2219 				 * will be delayed. That is,
2220 				 * acks that would have been sent
2221 				 * due to gap reports will be
2222 				 * delayed with DAC. Start the
2223 				 * delayed ack timer.
   2224 				 */
   2225 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
   2226 				                 stcb->sctp_ep, stcb, NULL);
   2227 			} else {
   2228 				/*
2229 				 * Ok, we must build a SACK since the
   2230 				 * timer is pending, we got our
   2231 				 * first packet OR there are gaps or
   2232 				 * duplicates.
   2233 				 */
   2234 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
   2235 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
   2236 			}
   2237 		} else {
   2238 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
   2239 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
   2240 				                 stcb->sctp_ep, stcb, NULL);
   2241 			}
   2242 		}
   2243 	}
   2244 }
   2245 
   2246 void
   2247 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
   2248 {
   2249 	struct sctp_tmit_chunk *chk;
   2250 	uint32_t tsize, pd_point;
   2251 	uint16_t nxt_todel;
   2252 
   2253 	if (asoc->fragmented_delivery_inprogress) {
   2254 		sctp_service_reassembly(stcb, asoc);
   2255 	}
2256 	/* Can we proceed further, i.e. is the PD-API complete? */
   2257 	if (asoc->fragmented_delivery_inprogress) {
   2258 		/* no */
   2259 		return;
   2260 	}
   2261 	/*
   2262 	 * Now is there some other chunk I can deliver from the reassembly
2263 	 * queue?
   2264 	 */
   2265  doit_again:
   2266 	chk = TAILQ_FIRST(&asoc->reasmqueue);
   2267 	if (chk == NULL) {
   2268 		asoc->size_on_reasm_queue = 0;
   2269 		asoc->cnt_on_reasm_queue = 0;
   2270 		return;
   2271 	}
   2272 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
   2273 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
   2274 	    ((nxt_todel == chk->rec.data.stream_seq) ||
   2275 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
   2276 		/*
2277 		 * Yep, the first one is here. We set up to start reception
2278 		 * by backing down the TSN just in case we can't deliver.
   2279 		 */
   2280 
   2281 		/*
2282 		 * Before we start, though, either all of the message should
2283 		 * be here, or at least enough of it to reach the partial
2284 		 * delivery point, so that something can be delivered.
   2285 		 */
   2286 		if (stcb->sctp_socket) {
   2287 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
   2288 				       stcb->sctp_ep->partial_delivery_point);
   2289 		} else {
   2290 			pd_point = stcb->sctp_ep->partial_delivery_point;
   2291 		}
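		/*
		 * pd_point is the partial delivery threshold: the smaller of
		 * a fixed fraction of the receive socket buffer limit and the
		 * endpoint's configured partial_delivery_point. Only when the
		 * whole message, or at least this many bytes of it, is queued
		 * do we start a PD-API delivery.
		 */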
   2292 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
   2293 			asoc->fragmented_delivery_inprogress = 1;
   2294 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
   2295 			asoc->str_of_pdapi = chk->rec.data.stream_number;
   2296 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
   2297 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
   2298 			asoc->fragment_flags = chk->rec.data.rcv_flags;
   2299 			sctp_service_reassembly(stcb, asoc);
   2300 			if (asoc->fragmented_delivery_inprogress == 0) {
   2301 				goto doit_again;
   2302 			}
   2303 		}
   2304 	}
   2305 }
   2306 
   2307 int
   2308 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
   2309                   struct sockaddr *src, struct sockaddr *dst,
   2310                   struct sctphdr *sh, struct sctp_inpcb *inp,
   2311                   struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t *high_tsn,
   2312 #if defined(__FreeBSD__)
   2313                   uint8_t use_mflowid, uint32_t mflowid,
   2314 #endif
   2315 		  uint32_t vrf_id, uint16_t port)
   2316 {
   2317 	struct sctp_data_chunk *ch, chunk_buf;
   2318 	struct sctp_association *asoc;
   2319 	int num_chunks = 0;	/* number of control chunks processed */
   2320 	int stop_proc = 0;
   2321 	int chk_length, break_flag, last_chunk;
   2322 	int abort_flag = 0, was_a_gap;
   2323 	struct mbuf *m;
   2324 	uint32_t highest_tsn;
   2325 
   2326 	/* set the rwnd */
   2327 	sctp_set_rwnd(stcb, &stcb->asoc);
   2328 
   2329 	m = *mm;
   2330 	SCTP_TCB_LOCK_ASSERT(stcb);
   2331 	asoc = &stcb->asoc;
   2332 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
   2333 		highest_tsn = asoc->highest_tsn_inside_nr_map;
   2334 	} else {
   2335 		highest_tsn = asoc->highest_tsn_inside_map;
   2336 	}
   2337 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
   2338 	/*
   2339 	 * setup where we got the last DATA packet from for any SACK that
   2340 	 * may need to go out. Don't bump the net. This is done ONLY when a
   2341 	 * chunk is assigned.
   2342 	 */
   2343 	asoc->last_data_chunk_from = net;
   2344 
   2345 #ifndef __Panda__
   2346 	/*-
   2347 	 * Now before we proceed we must figure out if this is a wasted
   2348 	 * cluster... i.e. it is a small packet sent in and yet the driver
   2349 	 * underneath allocated a full cluster for it. If so we must copy it
   2350 	 * to a smaller mbuf and free up the cluster mbuf. This will help
   2351 	 * with cluster starvation. Note for __Panda__ we don't do this
   2352 	 * since it has clusters all the way down to 64 bytes.
   2353 	 */
   2354 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
   2355 		/* we only handle mbufs that are singletons.. not chains */
   2356 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
   2357 		if (m) {
2358 			/* ok, let's see if we can copy the data up */
   2359 			caddr_t *from, *to;
   2360 			/* get the pointers and copy */
   2361 			to = mtod(m, caddr_t *);
   2362 			from = mtod((*mm), caddr_t *);
   2363 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
   2364 			/* copy the length and free up the old */
   2365 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
   2366 			sctp_m_freem(*mm);
2367 			/* success, copy it back */
   2368 			*mm = m;
   2369 		} else {
   2370 			/* We are in trouble in the mbuf world .. yikes */
   2371 			m = *mm;
   2372 		}
   2373 	}
   2374 #endif
   2375 	/* get pointer to the first chunk header */
   2376 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
   2377 						     sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
   2378 	if (ch == NULL) {
   2379 		return (1);
   2380 	}
   2381 	/*
   2382 	 * process all DATA chunks...
   2383 	 */
   2384 	*high_tsn = asoc->cumulative_tsn;
   2385 	break_flag = 0;
   2386 	asoc->data_pkts_seen++;
   2387 	while (stop_proc == 0) {
   2388 		/* validate chunk length */
   2389 		chk_length = ntohs(ch->ch.chunk_length);
   2390 		if (length - *offset < chk_length) {
2391 			/* all done, mutilated chunk */
   2392 			stop_proc = 1;
   2393 			continue;
   2394 		}
   2395 		if (ch->ch.chunk_type == SCTP_DATA) {
   2396 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
   2397 				/*
2398 				 * Need to send an abort since we had an
2399 				 * invalid data chunk.
   2400 				 */
   2401 				struct mbuf *op_err;
   2402 				char msg[SCTP_DIAG_INFO_LEN];
   2403 
   2404 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
   2405 				         chk_length);
   2406 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   2407 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
   2408 				sctp_abort_association(inp, stcb, m, iphlen,
   2409 				                       src, dst, sh, op_err,
   2410 #if defined(__FreeBSD__)
   2411 				                       use_mflowid, mflowid,
   2412 #endif
   2413 				                       vrf_id, port);
   2414 				return (2);
   2415 			}
   2416 			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
   2417 				/*
   2418 				 * Need to send an abort since we had an
   2419 				 * empty data chunk.
   2420 				 */
   2421 				struct mbuf *op_err;
   2422 
   2423 				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
   2424 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
   2425 				sctp_abort_association(inp, stcb, m, iphlen,
   2426 				                       src, dst, sh, op_err,
   2427 #if defined(__FreeBSD__)
   2428 				                       use_mflowid, mflowid,
   2429 #endif
   2430 				                       vrf_id, port);
   2431 				return (2);
   2432 			}
   2433 #ifdef SCTP_AUDITING_ENABLED
   2434 			sctp_audit_log(0xB1, 0);
   2435 #endif
   2436 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
   2437 				last_chunk = 1;
   2438 			} else {
   2439 				last_chunk = 0;
   2440 			}
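			/*
			 * SCTP_SIZE32() pads the chunk length up to a 4-byte
			 * boundary; if the padded chunk exactly fills the
			 * rest of the packet this is the last chunk, and
			 * sctp_process_a_data_chunk() may then steal the
			 * mbuf chain instead of copying the payload.
			 */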
   2441 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
   2442 						      chk_length, net, high_tsn, &abort_flag, &break_flag,
   2443 						      last_chunk)) {
   2444 				num_chunks++;
   2445 			}
   2446 			if (abort_flag)
   2447 				return (2);
   2448 
   2449 			if (break_flag) {
   2450 				/*
2451 				 * Set because we are out of rwnd space and
2452 				 * have no drop-report space left.
   2453 				 */
   2454 				stop_proc = 1;
   2455 				continue;
   2456 			}
   2457 		} else {
   2458 			/* not a data chunk in the data region */
   2459 			switch (ch->ch.chunk_type) {
   2460 			case SCTP_INITIATION:
   2461 			case SCTP_INITIATION_ACK:
   2462 			case SCTP_SELECTIVE_ACK:
   2463 			case SCTP_NR_SELECTIVE_ACK:
   2464 			case SCTP_HEARTBEAT_REQUEST:
   2465 			case SCTP_HEARTBEAT_ACK:
   2466 			case SCTP_ABORT_ASSOCIATION:
   2467 			case SCTP_SHUTDOWN:
   2468 			case SCTP_SHUTDOWN_ACK:
   2469 			case SCTP_OPERATION_ERROR:
   2470 			case SCTP_COOKIE_ECHO:
   2471 			case SCTP_COOKIE_ACK:
   2472 			case SCTP_ECN_ECHO:
   2473 			case SCTP_ECN_CWR:
   2474 			case SCTP_SHUTDOWN_COMPLETE:
   2475 			case SCTP_AUTHENTICATION:
   2476 			case SCTP_ASCONF_ACK:
   2477 			case SCTP_PACKET_DROPPED:
   2478 			case SCTP_STREAM_RESET:
   2479 			case SCTP_FORWARD_CUM_TSN:
   2480 			case SCTP_ASCONF:
   2481 				/*
   2482 				 * Now, what do we do with KNOWN chunks that
   2483 				 * are NOT in the right place?
   2484 				 *
   2485 				 * For now, I do nothing but ignore them. We
   2486 				 * may later want to add sysctl stuff to
   2487 				 * switch out and do either an ABORT() or
   2488 				 * possibly process them.
   2489 				 */
   2490 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
   2491 					struct mbuf *op_err;
   2492 
   2493 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
   2494 					sctp_abort_association(inp, stcb,
   2495 					                       m, iphlen,
   2496 					                       src, dst,
   2497 					                       sh, op_err,
   2498 #if defined(__FreeBSD__)
   2499 					                       use_mflowid, mflowid,
   2500 #endif
   2501 					                       vrf_id, port);
   2502 					return (2);
   2503 				}
   2504 				break;
   2505 			default:
   2506 				/* unknown chunk type, use bit rules */
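				/*
				 * Per the chunk-type encoding (RFC 4960,
				 * section 3.2) the two high bits say how to
				 * handle an unrecognized type: 0x40 set ->
				 * report it in an ERROR; 0x80 set -> skip it
				 * and keep processing; 0x80 clear -> stop
				 * processing this packet.
				 */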
   2507 				if (ch->ch.chunk_type & 0x40) {
   2508 					/* Add a error report to the queue */
2509 					/* Add an error report to the queue */
   2510 					struct sctp_paramhdr *phd;
   2511 
   2512 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
   2513 					if (merr) {
   2514 						phd = mtod(merr, struct sctp_paramhdr *);
   2515 						/*
   2516 						 * We cheat and use param
   2517 						 * type since we did not
2518 						 * bother to define an error
   2519 						 * cause struct. They are
   2520 						 * the same basic format
   2521 						 * with different names.
   2522 						 */
   2523 						phd->param_type =
   2524 							htons(SCTP_CAUSE_UNRECOG_CHUNK);
   2525 						phd->param_length =
   2526 							htons(chk_length + sizeof(*phd));
   2527 						SCTP_BUF_LEN(merr) = sizeof(*phd);
   2528 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
   2529 						if (SCTP_BUF_NEXT(merr)) {
   2530 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
   2531 								sctp_m_freem(merr);
   2532 							} else {
   2533 								sctp_queue_op_err(stcb, merr);
   2534 							}
   2535 						} else {
   2536 							sctp_m_freem(merr);
   2537 						}
   2538 					}
   2539 				}
   2540 				if ((ch->ch.chunk_type & 0x80) == 0) {
   2541 					/* discard the rest of this packet */
   2542 					stop_proc = 1;
   2543 				}	/* else skip this bad chunk and
   2544 					 * continue... */
   2545 				break;
   2546 			}	/* switch of chunk type */
   2547 		}
   2548 		*offset += SCTP_SIZE32(chk_length);
   2549 		if ((*offset >= length) || stop_proc) {
   2550 			/* no more data left in the mbuf chain */
   2551 			stop_proc = 1;
   2552 			continue;
   2553 		}
   2554 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
   2555 							     sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
   2556 		if (ch == NULL) {
   2557 			*offset = length;
   2558 			stop_proc = 1;
   2559 			continue;
   2560 		}
   2561 	}
   2562 	if (break_flag) {
   2563 		/*
   2564 		 * we need to report rwnd overrun drops.
   2565 		 */
   2566 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
   2567 	}
   2568 	if (num_chunks) {
   2569 		/*
2570 		 * Did we get data? If so, update the time for auto-close and
2571 		 * give the peer credit for being alive.
   2572 		 */
   2573 		SCTP_STAT_INCR(sctps_recvpktwithdata);
   2574 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
   2575 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
   2576 				       stcb->asoc.overall_error_count,
   2577 				       0,
   2578 				       SCTP_FROM_SCTP_INDATA,
   2579 				       __LINE__);
   2580 		}
   2581 		stcb->asoc.overall_error_count = 0;
   2582 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
   2583 	}
   2584 	/* now service all of the reassm queue if needed */
   2585 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
   2586 		sctp_service_queues(stcb, asoc);
   2587 
   2588 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
   2589 		/* Assure that we ack right away */
   2590 		stcb->asoc.send_sack = 1;
   2591 	}
   2592 	/* Start a sack timer or QUEUE a SACK for sending */
   2593 	sctp_sack_check(stcb, was_a_gap);
   2594 	return (0);
   2595 }
   2596 
   2597 static int
   2598 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
   2599 			   uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
   2600 			   int *num_frs,
   2601 			   uint32_t *biggest_newly_acked_tsn,
   2602 			   uint32_t  *this_sack_lowest_newack,
   2603 			   int *rto_ok)
   2604 {
   2605 	struct sctp_tmit_chunk *tp1;
   2606 	unsigned int theTSN;
   2607 	int j, wake_him = 0, circled = 0;
   2608 
   2609 	/* Recover the tp1 we last saw */
   2610 	tp1 = *p_tp1;
   2611 	if (tp1 == NULL) {
   2612 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
   2613 	}
   2614 	for (j = frag_strt; j <= frag_end; j++) {
   2615 		theTSN = j + last_tsn;
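		/*
		 * Gap ack block bounds are offsets relative to the SACK's
		 * cumulative TSN ack (last_tsn here), so the TSN being acked
		 * is cum-ack + offset. E.g. cum-ack 1000 with a block [2,4]
		 * walks TSNs 1002, 1003 and 1004.
		 */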
   2616 		while (tp1) {
   2617 			if (tp1->rec.data.doing_fast_retransmit)
   2618 				(*num_frs) += 1;
   2619 
   2620 			/*-
   2621 			 * CMT: CUCv2 algorithm. For each TSN being
   2622 			 * processed from the sent queue, track the
   2623 			 * next expected pseudo-cumack, or
   2624 			 * rtx_pseudo_cumack, if required. Separate
   2625 			 * cumack trackers for first transmissions,
   2626 			 * and retransmissions.
   2627 			 */
   2628 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
   2629 			    (tp1->snd_count == 1)) {
   2630 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
   2631 				tp1->whoTo->find_pseudo_cumack = 0;
   2632 			}
   2633 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
   2634 			    (tp1->snd_count > 1)) {
   2635 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
   2636 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
   2637 			}
   2638 			if (tp1->rec.data.TSN_seq == theTSN) {
   2639 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
   2640 					/*-
   2641 					 * must be held until
   2642 					 * cum-ack passes
   2643 					 */
   2644 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   2645 						/*-
   2646 						 * If it is less than RESEND, it is
2647 						 * now no longer in flight.
   2648 						 * Higher values may already be set
   2649 						 * via previous Gap Ack Blocks...
   2650 						 * i.e. ACKED or RESEND.
   2651 						 */
   2652 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
   2653 						                *biggest_newly_acked_tsn)) {
   2654 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
   2655 						}
   2656 						/*-
   2657 						 * CMT: SFR algo (and HTNA) - set
   2658 						 * saw_newack to 1 for dest being
   2659 						 * newly acked. update
   2660 						 * this_sack_highest_newack if
   2661 						 * appropriate.
   2662 						 */
   2663 						if (tp1->rec.data.chunk_was_revoked == 0)
   2664 							tp1->whoTo->saw_newack = 1;
   2665 
   2666 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
   2667 						                tp1->whoTo->this_sack_highest_newack)) {
   2668 							tp1->whoTo->this_sack_highest_newack =
   2669 								tp1->rec.data.TSN_seq;
   2670 						}
   2671 						/*-
   2672 						 * CMT DAC algo: also update
   2673 						 * this_sack_lowest_newack
   2674 						 */
   2675 						if (*this_sack_lowest_newack == 0) {
   2676 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   2677 								sctp_log_sack(*this_sack_lowest_newack,
   2678 									      last_tsn,
   2679 									      tp1->rec.data.TSN_seq,
   2680 									      0,
   2681 									      0,
   2682 									      SCTP_LOG_TSN_ACKED);
   2683 							}
   2684 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
   2685 						}
   2686 						/*-
   2687 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
   2688 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
   2689 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
   2690 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
   2691 						 * Separate pseudo_cumack trackers for first transmissions and
   2692 						 * retransmissions.
   2693 						 */
   2694 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
   2695 							if (tp1->rec.data.chunk_was_revoked == 0) {
   2696 								tp1->whoTo->new_pseudo_cumack = 1;
   2697 							}
   2698 							tp1->whoTo->find_pseudo_cumack = 1;
   2699 						}
   2700 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   2701 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
   2702 						}
   2703 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
   2704 							if (tp1->rec.data.chunk_was_revoked == 0) {
   2705 								tp1->whoTo->new_pseudo_cumack = 1;
   2706 							}
   2707 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
   2708 						}
   2709 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   2710 							sctp_log_sack(*biggest_newly_acked_tsn,
   2711 								      last_tsn,
   2712 								      tp1->rec.data.TSN_seq,
   2713 								      frag_strt,
   2714 								      frag_end,
   2715 								      SCTP_LOG_TSN_ACKED);
   2716 						}
   2717 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   2718 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
   2719 								       tp1->whoTo->flight_size,
   2720 								       tp1->book_size,
   2721 								       (uintptr_t)tp1->whoTo,
   2722 								       tp1->rec.data.TSN_seq);
   2723 						}
   2724 						sctp_flight_size_decrease(tp1);
   2725 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
   2726 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
   2727 														     tp1);
   2728 						}
   2729 						sctp_total_flight_decrease(stcb, tp1);
   2730 
   2731 						tp1->whoTo->net_ack += tp1->send_size;
   2732 						if (tp1->snd_count < 2) {
   2733 							/*-
2734 							 * True non-retransmitted chunk
   2735 							 */
   2736 							tp1->whoTo->net_ack2 += tp1->send_size;
   2737 
   2738 							/*-
2739 							 * update RTO too?
   2740 							 */
   2741 							if (tp1->do_rtt) {
   2742 								if (*rto_ok) {
   2743 									tp1->whoTo->RTO =
   2744 										sctp_calculate_rto(stcb,
   2745 												   &stcb->asoc,
   2746 												   tp1->whoTo,
   2747 												   &tp1->sent_rcv_time,
   2748 												   sctp_align_safe_nocopy,
   2749 												   SCTP_RTT_FROM_DATA);
   2750 									*rto_ok = 0;
   2751 								}
   2752 								if (tp1->whoTo->rto_needed == 0) {
   2753 									tp1->whoTo->rto_needed = 1;
   2754 								}
   2755 								tp1->do_rtt = 0;
   2756 							}
   2757 						}
   2758 
   2759 					}
   2760 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
   2761 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
   2762 						                stcb->asoc.this_sack_highest_gap)) {
   2763 							stcb->asoc.this_sack_highest_gap =
   2764 								tp1->rec.data.TSN_seq;
   2765 						}
   2766 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   2767 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
   2768 #ifdef SCTP_AUDITING_ENABLED
   2769 							sctp_audit_log(0xB2,
   2770 								       (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
   2771 #endif
   2772 						}
   2773 					}
   2774 					/*-
   2775 					 * All chunks NOT UNSENT fall through here and are marked
   2776 					 * (leave PR-SCTP ones that are to skip alone though)
   2777 					 */
   2778 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
   2779 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
   2780 						tp1->sent = SCTP_DATAGRAM_MARKED;
   2781 					}
   2782 					if (tp1->rec.data.chunk_was_revoked) {
   2783 						/* deflate the cwnd */
   2784 						tp1->whoTo->cwnd -= tp1->book_size;
   2785 						tp1->rec.data.chunk_was_revoked = 0;
   2786 					}
   2787 					/* NR Sack code here */
   2788 					if (nr_sacking &&
   2789 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
   2790 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
   2791 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
   2792 #ifdef INVARIANTS
   2793 						} else {
   2794 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
   2795 #endif
   2796 						}
   2797 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
   2798 						if (tp1->data) {
   2799 							/* sa_ignore NO_NULL_CHK */
   2800 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
   2801 							sctp_m_freem(tp1->data);
   2802 							tp1->data = NULL;
   2803 						}
   2804 						wake_him++;
   2805 					}
   2806 				}
   2807 				break;
   2808 			}	/* if (tp1->TSN_seq == theTSN) */
   2809 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
   2810 				break;
   2811 			}
   2812 			tp1 = TAILQ_NEXT(tp1, sctp_next);
   2813 			if ((tp1 == NULL) && (circled == 0)) {
   2814 				circled++;
   2815 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
   2816 			}
   2817 		}	/* end while (tp1) */
   2818 		if (tp1 == NULL) {
   2819 			circled = 0;
   2820 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
   2821 		}
   2822 		/* In case the fragments were not in order we must reset */
   2823 	} /* end for (j = fragStart */
   2824 	*p_tp1 = tp1;
   2825 	return (wake_him);	/* Return value only used for nr-sack */
   2826 }
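
/*
 * Aside (illustrative sketch, not part of the original file): the
 * SCTP_TSN_GT()/SCTP_TSN_GE() comparisons used throughout this file must
 * work across 32-bit TSN wraparound, i.e. they are serial-number
 * comparisons in the spirit of RFC 1982, not plain integer compares.
 * The helper below is hypothetical; the real macros live in sctputil.h
 * and may differ in detail.
 */
#if 0	/* sketch only, never compiled */
/* Returns non-zero when TSN a is "newer" than TSN b, modulo 2^32. */
static int
sketch_tsn_gt(uint32_t a, uint32_t b)
{
	/* The wrapped difference (a - b) is "positive" iff a is at most
	 * 2^31 - 1 ahead of b on the 32-bit circle. */
	return (((int32_t)(a - b)) > 0);
}
/* Example: just past wraparound, 0x00000001 is newer than 0xfffffffe. */
#endif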
   2827 
   2828 
   2829 static int
   2830 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
   2831 		uint32_t last_tsn, uint32_t *biggest_tsn_acked,
   2832 		uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
   2833 		int num_seg, int num_nr_seg, int *rto_ok)
   2834 {
   2835 	struct sctp_gap_ack_block *frag, block;
   2836 	struct sctp_tmit_chunk *tp1;
   2837 	int i;
   2838 	int num_frs = 0;
   2839 	int chunk_freed;
   2840 	int non_revocable;
   2841 	uint16_t frag_strt, frag_end, prev_frag_end;
   2842 
   2843 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
   2844 	prev_frag_end = 0;
   2845 	chunk_freed = 0;
   2846 
   2847 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
   2848 		if (i == num_seg) {
   2849 			prev_frag_end = 0;
   2850 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
   2851 		}
   2852 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
   2853 		                                                  sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
   2854 		*offset += sizeof(block);
   2855 		if (frag == NULL) {
   2856 			return (chunk_freed);
   2857 		}
   2858 		frag_strt = ntohs(frag->start);
   2859 		frag_end = ntohs(frag->end);
   2860 
   2861 		if (frag_strt > frag_end) {
   2862 			/* This gap report is malformed, skip it. */
   2863 			continue;
   2864 		}
   2865 		if (frag_strt <= prev_frag_end) {
   2866 			/* This gap report is not in order, so restart. */
    2867 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
   2868 		}
   2869 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
   2870 			*biggest_tsn_acked = last_tsn + frag_end;
   2871 		}
   2872 		if (i < num_seg) {
   2873 			non_revocable = 0;
   2874 		} else {
   2875 			non_revocable = 1;
   2876 		}
   2877 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
   2878 		                               non_revocable, &num_frs, biggest_newly_acked_tsn,
   2879 		                               this_sack_lowest_newack, rto_ok)) {
   2880 			chunk_freed = 1;
   2881 		}
   2882 		prev_frag_end = frag_end;
   2883 	}
   2884 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
   2885 		if (num_frs)
   2886 			sctp_log_fr(*biggest_tsn_acked,
   2887 			            *biggest_newly_acked_tsn,
   2888 			            last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
   2889 	}
   2890 	return (chunk_freed);
   2891 }
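
/*
 * Aside (illustrative sketch, not part of the original file): each gap ack
 * block parsed above carries 16-bit start/end offsets relative to the
 * SACK's cumulative TSN ack, which is why sctp_process_segment_range() is
 * driven with (last_tsn, frag_strt, frag_end). The hypothetical helper
 * below shows the mapping from a block to an absolute TSN range.
 */
#if 0	/* sketch only, never compiled */
struct sketch_tsn_range {
	uint32_t first_tsn;	/* lowest TSN reported by the block */
	uint32_t last_tsn;	/* highest TSN reported by the block */
};

static struct sketch_tsn_range
sketch_gap_block_to_tsns(uint32_t cum_ack, uint16_t start, uint16_t end)
{
	struct sketch_tsn_range r;

	/* Example: cum_ack = 100, start = 2, end = 3 reports TSNs
	 * 102 and 103 as received. */
	r.first_tsn = cum_ack + start;
	r.last_tsn = cum_ack + end;
	return (r);
}
#endif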
   2892 
   2893 static void
   2894 sctp_check_for_revoked(struct sctp_tcb *stcb,
   2895 		       struct sctp_association *asoc, uint32_t cumack,
   2896 		       uint32_t biggest_tsn_acked)
   2897 {
   2898 	struct sctp_tmit_chunk *tp1;
   2899 
   2900 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   2901 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
   2902 			/*
    2903 			 * ok this guy is either ACKED or MARKED. If it is
   2904 			 * ACKED it has been previously acked but not this
   2905 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
   2906 			 * again.
   2907 			 */
   2908 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
   2909 				break;
   2910 			}
   2911 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
   2912 				/* it has been revoked */
   2913 				tp1->sent = SCTP_DATAGRAM_SENT;
   2914 				tp1->rec.data.chunk_was_revoked = 1;
   2915 				/* We must add this stuff back in to
   2916 				 * assure timers and such get started.
   2917 				 */
   2918 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   2919 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
   2920 						       tp1->whoTo->flight_size,
   2921 						       tp1->book_size,
   2922 						       (uintptr_t)tp1->whoTo,
   2923 						       tp1->rec.data.TSN_seq);
   2924 				}
   2925 				sctp_flight_size_increase(tp1);
   2926 				sctp_total_flight_increase(stcb, tp1);
   2927 				/* We inflate the cwnd to compensate for our
   2928 				 * artificial inflation of the flight_size.
   2929 				 */
   2930 				tp1->whoTo->cwnd += tp1->book_size;
   2931 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   2932 					sctp_log_sack(asoc->last_acked_seq,
   2933 						      cumack,
   2934 						      tp1->rec.data.TSN_seq,
   2935 						      0,
   2936 						      0,
   2937 						      SCTP_LOG_TSN_REVOKED);
   2938 				}
   2939 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
   2940 				/* it has been re-acked in this SACK */
   2941 				tp1->sent = SCTP_DATAGRAM_ACKED;
   2942 			}
   2943 		}
   2944 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
   2945 			break;
   2946 	}
   2947 }
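
/*
 * Aside (illustrative sketch, not part of the original file): the
 * revocation pass above reduces to a small state transition on tp1->sent
 * for every chunk between the cumulative ack and the biggest TSN acked.
 * The hypothetical helper below restates just that transition; the real
 * code additionally re-inflates the flight size and cwnd on revocation.
 */
#if 0	/* sketch only, never compiled */
static int
sketch_revocation_transition(int sent_state)
{
	switch (sent_state) {
	case SCTP_DATAGRAM_ACKED:
		/* Gap-acked by an earlier SACK but not covered by this
		 * one: the ack has been revoked. */
		return (SCTP_DATAGRAM_SENT);
	case SCTP_DATAGRAM_MARKED:
		/* Gap-acked again by this SACK. */
		return (SCTP_DATAGRAM_ACKED);
	default:
		return (sent_state);
	}
}
#endif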
   2948 
   2949 
   2950 static void
   2951 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
   2952 			   uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
   2953 {
   2954 	struct sctp_tmit_chunk *tp1;
   2955 	int strike_flag = 0;
   2956 	struct timeval now;
   2957 	int tot_retrans = 0;
   2958 	uint32_t sending_seq;
   2959 	struct sctp_nets *net;
   2960 	int num_dests_sacked = 0;
   2961 
   2962 	/*
   2963 	 * select the sending_seq, this is either the next thing ready to be
   2964 	 * sent but not transmitted, OR, the next seq we assign.
   2965 	 */
   2966 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
   2967 	if (tp1 == NULL) {
   2968 		sending_seq = asoc->sending_seq;
   2969 	} else {
   2970 		sending_seq = tp1->rec.data.TSN_seq;
   2971 	}
   2972 
   2973 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
   2974 	if ((asoc->sctp_cmt_on_off > 0) &&
   2975 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
   2976 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   2977 			if (net->saw_newack)
   2978 				num_dests_sacked++;
   2979 		}
   2980 	}
   2981 	if (stcb->asoc.prsctp_supported) {
   2982 		(void)SCTP_GETTIME_TIMEVAL(&now);
   2983 	}
   2984 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   2985 		strike_flag = 0;
   2986 		if (tp1->no_fr_allowed) {
   2987 			/* this one had a timeout or something */
   2988 			continue;
   2989 		}
   2990 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
   2991 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
   2992 				sctp_log_fr(biggest_tsn_newly_acked,
   2993 					    tp1->rec.data.TSN_seq,
   2994 					    tp1->sent,
   2995 					    SCTP_FR_LOG_CHECK_STRIKE);
   2996 		}
   2997 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
   2998 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
   2999 			/* done */
   3000 			break;
   3001 		}
   3002 		if (stcb->asoc.prsctp_supported) {
   3003 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
   3004 				/* Is it expired? */
   3005 #ifndef __FreeBSD__
   3006 				if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
   3007 #else
   3008 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
   3009 #endif
   3010 					/* Yes so drop it */
   3011 					if (tp1->data != NULL) {
   3012 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
   3013 										 SCTP_SO_NOT_LOCKED);
   3014 					}
   3015 					continue;
   3016 				}
   3017 			}
   3018 
   3019 		}
   3020 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
    3021 			/* we are beyond the tsn in the sack */
   3022 			break;
   3023 		}
   3024 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
   3025 			/* either a RESEND, ACKED, or MARKED */
   3026 			/* skip */
   3027 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
    3028 				/* Continue striking FWD-TSN chunks */
   3029 				tp1->rec.data.fwd_tsn_cnt++;
   3030 			}
   3031 			continue;
   3032 		}
   3033 		/*
    3034 		 * CMT: SFR algo (covers part of DAC and HTNA as well)
   3035 		 */
   3036 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
   3037 			/*
    3038 			 * No new acks were received for data sent to this
   3039 			 * dest. Therefore, according to the SFR algo for
   3040 			 * CMT, no data sent to this dest can be marked for
   3041 			 * FR using this SACK.
   3042 			 */
   3043 			continue;
   3044 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
   3045 		                                     tp1->whoTo->this_sack_highest_newack)) {
   3046 			/*
    3047 			 * CMT: New acks were received for data sent to
   3048 			 * this dest. But no new acks were seen for data
   3049 			 * sent after tp1. Therefore, according to the SFR
   3050 			 * algo for CMT, tp1 cannot be marked for FR using
   3051 			 * this SACK. This step covers part of the DAC algo
   3052 			 * and the HTNA algo as well.
   3053 			 */
   3054 			continue;
   3055 		}
   3056 		/*
    3057 		 * Here we check to see if we have already done a FR
   3058 		 * and if so we see if the biggest TSN we saw in the sack is
   3059 		 * smaller than the recovery point. If so we don't strike
   3060 		 * the tsn... otherwise we CAN strike the TSN.
   3061 		 */
   3062 		/*
   3063 		 * @@@ JRI: Check for CMT
   3064 		 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
   3065 		 */
   3066 		if (accum_moved && asoc->fast_retran_loss_recovery) {
   3067 			/*
   3068 			 * Strike the TSN if in fast-recovery and cum-ack
   3069 			 * moved.
   3070 			 */
   3071 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
   3072 				sctp_log_fr(biggest_tsn_newly_acked,
   3073 					    tp1->rec.data.TSN_seq,
   3074 					    tp1->sent,
   3075 					    SCTP_FR_LOG_STRIKE_CHUNK);
   3076 			}
   3077 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   3078 				tp1->sent++;
   3079 			}
   3080 			if ((asoc->sctp_cmt_on_off > 0) &&
   3081 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
   3082 				/*
    3083 				 * CMT DAC algorithm: If the SACK flag is set
    3084 				 * to 0, then the lowest_newack test will not
    3085 				 * pass because it would have been set to the
    3086 				 * cumack earlier. If the chunk is not already
    3087 				 * marked for retransmission, the SACK is not a
    3088 				 * mixed SACK, and tp1 is not between two
    3089 				 * sacked TSNs, then strike it one more time.
    3090 				 * NOTE that we strike one additional time since the SACK DAC flag indicates that
    3091 				 * two packets have been received after this missing TSN.
   3092 				 */
   3093 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
   3094 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
   3095 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
   3096 						sctp_log_fr(16 + num_dests_sacked,
   3097 							    tp1->rec.data.TSN_seq,
   3098 							    tp1->sent,
   3099 							    SCTP_FR_LOG_STRIKE_CHUNK);
   3100 					}
   3101 					tp1->sent++;
   3102 				}
   3103 			}
   3104 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
   3105 		           (asoc->sctp_cmt_on_off == 0)) {
   3106 			/*
   3107 			 * For those that have done a FR we must take
    3108 			 * special consideration if we strike. I.e., the
   3109 			 * biggest_newly_acked must be higher than the
   3110 			 * sending_seq at the time we did the FR.
   3111 			 */
   3112 			if (
   3113 #ifdef SCTP_FR_TO_ALTERNATE
   3114 				/*
   3115 				 * If FR's go to new networks, then we must only do
   3116 				 * this for singly homed asoc's. However if the FR's
   3117 				 * go to the same network (Armando's work) then its
   3118 				 * ok to FR multiple times.
   3119 				 */
   3120 				(asoc->numnets < 2)
   3121 #else
   3122 				(1)
   3123 #endif
   3124 				) {
   3125 
   3126 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
   3127 				                tp1->rec.data.fast_retran_tsn)) {
   3128 					/*
   3129 					 * Strike the TSN, since this ack is
   3130 					 * beyond where things were when we
   3131 					 * did a FR.
   3132 					 */
   3133 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
   3134 						sctp_log_fr(biggest_tsn_newly_acked,
   3135 							    tp1->rec.data.TSN_seq,
   3136 							    tp1->sent,
   3137 							    SCTP_FR_LOG_STRIKE_CHUNK);
   3138 					}
   3139 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   3140 						tp1->sent++;
   3141 					}
   3142 					strike_flag = 1;
   3143 					if ((asoc->sctp_cmt_on_off > 0) &&
   3144 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
   3145 						/*
    3146 						 * CMT DAC algorithm: If the
    3147 						 * SACK flag is set to 0, the
    3148 						 * lowest_newack test will
    3149 						 * not pass because it would
    3150 						 * have been set to the
    3151 						 * cumack earlier. If the
    3152 						 * chunk is not already to be
    3153 						 * rtx'd, the SACK is not a
    3154 						 * mixed SACK, and tp1 is not
    3155 						 * between two sacked TSNs,
    3156 						 * then strike it one more time.
    3157 						 * NOTE that we strike one additional time since the SACK DAC flag indicates that
    3158 						 * two packets have been received after this missing TSN.
   3159 						 */
   3160 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
   3161 						    (num_dests_sacked == 1) &&
   3162 						    SCTP_TSN_GT(this_sack_lowest_newack,
   3163 						                tp1->rec.data.TSN_seq)) {
   3164 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
   3165 								sctp_log_fr(32 + num_dests_sacked,
   3166 									    tp1->rec.data.TSN_seq,
   3167 									    tp1->sent,
   3168 									    SCTP_FR_LOG_STRIKE_CHUNK);
   3169 							}
   3170 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   3171 								tp1->sent++;
   3172 							}
   3173 						}
   3174 					}
   3175 				}
   3176 			}
   3177 			/*
   3178 			 * JRI: TODO: remove code for HTNA algo. CMT's
   3179 			 * SFR algo covers HTNA.
   3180 			 */
   3181 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
   3182 		                       biggest_tsn_newly_acked)) {
   3183 			/*
    3184 			 * We don't strike these: this is the HTNA
    3185 			 * algorithm, i.e. we don't strike if our TSN is
    3186 			 * larger than the Highest TSN Newly Acked.
   3187 			 */
   3188 			;
   3189 		} else {
   3190 			/* Strike the TSN */
   3191 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
   3192 				sctp_log_fr(biggest_tsn_newly_acked,
   3193 					    tp1->rec.data.TSN_seq,
   3194 					    tp1->sent,
   3195 					    SCTP_FR_LOG_STRIKE_CHUNK);
   3196 			}
   3197 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   3198 				tp1->sent++;
   3199 			}
   3200 			if ((asoc->sctp_cmt_on_off > 0) &&
   3201 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
   3202 				/*
    3203 				 * CMT DAC algorithm: If the SACK flag is set
    3204 				 * to 0, then the lowest_newack test will not
    3205 				 * pass because it would have been set to the
    3206 				 * cumack earlier. If the chunk is not already
    3207 				 * marked for retransmission, the SACK is not a
    3208 				 * mixed SACK, and tp1 is not between two
    3209 				 * sacked TSNs, then strike it one more time.
    3210 				 * NOTE that we strike one additional time since the SACK DAC flag indicates that
    3211 				 * two packets have been received after this missing TSN.
   3212 				 */
   3213 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
   3214 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
   3215 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
   3216 						sctp_log_fr(48 + num_dests_sacked,
   3217 							    tp1->rec.data.TSN_seq,
   3218 							    tp1->sent,
   3219 							    SCTP_FR_LOG_STRIKE_CHUNK);
   3220 					}
   3221 					tp1->sent++;
   3222 				}
   3223 			}
   3224 		}
   3225 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   3226 			struct sctp_nets *alt;
   3227 
   3228 			/* fix counts and things */
   3229 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   3230 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
   3231 					       (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
   3232 					       tp1->book_size,
   3233 					       (uintptr_t)tp1->whoTo,
   3234 					       tp1->rec.data.TSN_seq);
   3235 			}
   3236 			if (tp1->whoTo) {
   3237 				tp1->whoTo->net_ack++;
   3238 				sctp_flight_size_decrease(tp1);
   3239 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
   3240 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
   3241 												     tp1);
   3242 				}
   3243 			}
   3244 
   3245 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
   3246 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
   3247 					      asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
   3248 			}
   3249 			/* add back to the rwnd */
   3250 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
   3251 
   3252 			/* remove from the total flight */
   3253 			sctp_total_flight_decrease(stcb, tp1);
   3254 
   3255 			if ((stcb->asoc.prsctp_supported) &&
   3256 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
   3257 				/* Has it been retransmitted tv_sec times? - we store the retran count there. */
   3258 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
   3259 					/* Yes, so drop it */
   3260 					if (tp1->data != NULL) {
   3261 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
   3262 										 SCTP_SO_NOT_LOCKED);
   3263 					}
   3264 					/* Make sure to flag we had a FR */
   3265 					tp1->whoTo->net_ack++;
   3266 					continue;
   3267 				}
   3268 			}
   3269 			/* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
   3270 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
   3271 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
   3272 					    0, SCTP_FR_MARKED);
   3273 			}
   3274 			if (strike_flag) {
   3275 				/* This is a subsequent FR */
   3276 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
   3277 			}
   3278 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
   3279 			if (asoc->sctp_cmt_on_off > 0) {
   3280 				/*
   3281 				 * CMT: Using RTX_SSTHRESH policy for CMT.
   3282 				 * If CMT is being used, then pick dest with
   3283 				 * largest ssthresh for any retransmission.
   3284 				 */
   3285 				tp1->no_fr_allowed = 1;
   3286 				alt = tp1->whoTo;
   3287 				/*sa_ignore NO_NULL_CHK*/
   3288 				if (asoc->sctp_cmt_pf > 0) {
   3289 					/* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
   3290 					alt = sctp_find_alternate_net(stcb, alt, 2);
   3291 				} else {
   3292 					/* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
   3293                                         /*sa_ignore NO_NULL_CHK*/
   3294 					alt = sctp_find_alternate_net(stcb, alt, 1);
   3295 				}
   3296 				if (alt == NULL) {
   3297 					alt = tp1->whoTo;
   3298 				}
   3299 				/*
   3300 				 * CUCv2: If a different dest is picked for
   3301 				 * the retransmission, then new
   3302 				 * (rtx-)pseudo_cumack needs to be tracked
   3303 				 * for orig dest. Let CUCv2 track new (rtx-)
   3304 				 * pseudo-cumack always.
   3305 				 */
   3306 				if (tp1->whoTo) {
   3307 					tp1->whoTo->find_pseudo_cumack = 1;
   3308 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
   3309 				}
   3310 
   3311 			} else {/* CMT is OFF */
   3312 
   3313 #ifdef SCTP_FR_TO_ALTERNATE
   3314 				/* Can we find an alternate? */
   3315 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
   3316 #else
   3317 				/*
   3318 				 * default behavior is to NOT retransmit
   3319 				 * FR's to an alternate. Armando Caro's
   3320 				 * paper details why.
   3321 				 */
   3322 				alt = tp1->whoTo;
   3323 #endif
   3324 			}
   3325 
   3326 			tp1->rec.data.doing_fast_retransmit = 1;
   3327 			tot_retrans++;
   3328 			/* mark the sending seq for possible subsequent FR's */
   3329 			/*
   3330 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
   3331 			 * (uint32_t)tpi->rec.data.TSN_seq);
   3332 			 */
   3333 			if (TAILQ_EMPTY(&asoc->send_queue)) {
   3334 				/*
    3335 				 * If the send queue is empty then it's
    3336 				 * the next sequence number that will be
    3337 				 * assigned, so we subtract one from this to
    3338 				 * get the one we last sent.
   3339 				 */
   3340 				tp1->rec.data.fast_retran_tsn = sending_seq;
   3341 			} else {
   3342 				/*
    3343 				 * If there are chunks on the send queue
    3344 				 * (unsent data that has made it from the
    3345 				 * stream queues but not out the door), we
    3346 				 * take the first one (which will have the
    3347 				 * lowest TSN) and subtract one to get the
    3348 				 * one we last sent.
   3349 				 */
   3350 				struct sctp_tmit_chunk *ttt;
   3351 
   3352 				ttt = TAILQ_FIRST(&asoc->send_queue);
   3353 				tp1->rec.data.fast_retran_tsn =
   3354 					ttt->rec.data.TSN_seq;
   3355 			}
   3356 
   3357 			if (tp1->do_rtt) {
   3358 				/*
   3359 				 * this guy had a RTO calculation pending on
   3360 				 * it, cancel it
   3361 				 */
   3362 				if ((tp1->whoTo != NULL) &&
   3363 				    (tp1->whoTo->rto_needed == 0)) {
   3364 					tp1->whoTo->rto_needed = 1;
   3365 				}
   3366 				tp1->do_rtt = 0;
   3367 			}
   3368 			if (alt != tp1->whoTo) {
   3369 				/* yes, there is an alternate. */
   3370 				sctp_free_remote_addr(tp1->whoTo);
   3371 				/*sa_ignore FREED_MEMORY*/
   3372 				tp1->whoTo = alt;
   3373 				atomic_add_int(&alt->ref_count, 1);
   3374 			}
   3375 		}
   3376 	}
   3377 }
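
/*
 * Aside (illustrative sketch, not part of the original file): below the
 * SCTP_DATAGRAM_RESEND threshold, tp1->sent doubles as a per-chunk strike
 * counter, so the marking done above amounts to the classic "enough
 * strikes and you are retransmitted" rule. The helper below is a
 * hypothetical condensation of that rule.
 */
#if 0	/* sketch only, never compiled */
/* Returns non-zero when the chunk has just become eligible for FR. */
static int
sketch_strike(int *sent_state)
{
	if (*sent_state < SCTP_DATAGRAM_RESEND) {
		(*sent_state)++;	/* one more miss reported by a SACK */
	}
	return (*sent_state == SCTP_DATAGRAM_RESEND);
}
#endif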
   3378 
   3379 struct sctp_tmit_chunk *
   3380 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
   3381     struct sctp_association *asoc)
   3382 {
   3383 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
   3384 	struct timeval now;
   3385 	int now_filled = 0;
   3386 
   3387 	if (asoc->prsctp_supported == 0) {
   3388 		return (NULL);
   3389 	}
   3390 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
   3391 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
   3392 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
   3393 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
   3394 			/* no chance to advance, out of here */
   3395 			break;
   3396 		}
   3397 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
   3398 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
   3399 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
   3400 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
   3401 					       asoc->advanced_peer_ack_point,
   3402 					       tp1->rec.data.TSN_seq, 0, 0);
   3403 			}
   3404 		}
   3405 		if (!PR_SCTP_ENABLED(tp1->flags)) {
   3406 			/*
   3407 			 * We can't fwd-tsn past any that are reliable aka
   3408 			 * retransmitted until the asoc fails.
   3409 			 */
   3410 			break;
   3411 		}
   3412 		if (!now_filled) {
   3413 			(void)SCTP_GETTIME_TIMEVAL(&now);
   3414 			now_filled = 1;
   3415 		}
   3416 		/*
    3417 		 * Now we have a chunk which is marked for another
    3418 		 * retransmission to a PR-stream but may have already run out
    3419 		 * of chances, OR has been marked to skip now. Can we skip
    3420 		 * it if it's a resend?
   3421 		 */
   3422 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
   3423 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
   3424 			/*
   3425 			 * Now is this one marked for resend and its time is
   3426 			 * now up?
   3427 			 */
   3428 #ifndef __FreeBSD__
   3429 			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
   3430 #else
   3431 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
   3432 #endif
   3433 				/* Yes so drop it */
   3434 				if (tp1->data) {
   3435 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
   3436 					    1, SCTP_SO_NOT_LOCKED);
   3437 				}
   3438 			} else {
   3439 				/*
    3440 				 * No, we are done when we hit one marked for
    3441 				 * resend whose time has not expired.
   3442 				 */
   3443 				break;
   3444 			}
   3445 		}
   3446 		/*
    3447 		 * OK, now if this chunk is marked to be dropped, we can clean
    3448 		 * up the chunk, advance our peer ack point, and check
    3449 		 * the next chunk.
   3450 		 */
   3451 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
   3452 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
    3453 			/* the advanced PeerAckPoint goes forward */
   3454 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
   3455 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
   3456 				a_adv = tp1;
   3457 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
   3458 				/* No update but we do save the chk */
   3459 				a_adv = tp1;
   3460 			}
   3461 		} else {
   3462 			/*
   3463 			 * If it is still in RESEND we can advance no
   3464 			 * further
   3465 			 */
   3466 			break;
   3467 		}
   3468 	}
   3469 	return (a_adv);
   3470 }
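
/*
 * Aside (illustrative sketch, not part of the original file): the
 * advancement rule implemented above is "walk the sent queue in TSN order
 * and move the Advanced.Peer.Ack.Point across abandoned (skipped) or
 * NR-acked chunks; stop at the first chunk that must still be delivered".
 * The helper below is a hypothetical condensation that omits the PR-SCTP
 * TTL/resend handling done by the real function.
 */
#if 0	/* sketch only, never compiled */
static uint32_t
sketch_advance_point(uint32_t adv_point, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if ((chk->sent != SCTP_FORWARD_TSN_SKIP) &&
		    (chk->sent != SCTP_DATAGRAM_NR_ACKED)) {
			break;	/* still reliable, cannot skip past it */
		}
		if (SCTP_TSN_GT(chk->rec.data.TSN_seq, adv_point)) {
			adv_point = chk->rec.data.TSN_seq;
		}
	}
	return (adv_point);
}
#endif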
   3471 
   3472 static int
   3473 sctp_fs_audit(struct sctp_association *asoc)
   3474 {
   3475 	struct sctp_tmit_chunk *chk;
   3476 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
   3477 	int entry_flight, entry_cnt, ret;
   3478 
   3479 	entry_flight = asoc->total_flight;
   3480 	entry_cnt = asoc->total_flight_count;
   3481 	ret = 0;
   3482 
   3483 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
   3484 		return (0);
   3485 
   3486 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
   3487 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
   3488 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
   3489 			            chk->rec.data.TSN_seq,
   3490 			            chk->send_size,
   3491 			            chk->snd_count);
   3492 			inflight++;
   3493 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
   3494 			resend++;
   3495 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
   3496 			inbetween++;
   3497 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
   3498 			above++;
   3499 		} else {
   3500 			acked++;
   3501 		}
   3502 	}
   3503 
   3504 	if ((inflight > 0) || (inbetween > 0)) {
   3505 #ifdef INVARIANTS
   3506 		panic("Flight size-express incorrect? \n");
   3507 #else
   3508 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
   3509 		            entry_flight, entry_cnt);
   3510 
   3511 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
   3512 			    inflight, inbetween, resend, above, acked);
   3513 		ret = 1;
   3514 #endif
   3515 	}
   3516 	return (ret);
   3517 }
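
/*
 * Aside (illustrative sketch, not part of the original file): the audit
 * above enforces the invariant that asoc->total_flight matches what the
 * sent queue actually says is in flight. Assuming total_flight is the sum
 * of book_size over chunks with sent < SCTP_DATAGRAM_RESEND (as the
 * sctp_*_flight_* helpers maintain), it could be recomputed as below.
 */
#if 0	/* sketch only, never compiled */
static uint32_t
sketch_recompute_flight(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint32_t flight = 0;

	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent < SCTP_DATAGRAM_RESEND) {
			flight += chk->book_size;
		}
	}
	return (flight);	/* expected to equal asoc->total_flight */
}
#endif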
   3518 
   3519 
   3520 static void
   3521 sctp_window_probe_recovery(struct sctp_tcb *stcb,
   3522 	                   struct sctp_association *asoc,
   3523 			   struct sctp_tmit_chunk *tp1)
   3524 {
   3525 	tp1->window_probe = 0;
   3526 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
    3527 		/* TSNs skipped; we do NOT move back. */
   3528 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
   3529 			       tp1->whoTo->flight_size,
   3530 			       tp1->book_size,
   3531 			       (uintptr_t)tp1->whoTo,
   3532 			       tp1->rec.data.TSN_seq);
   3533 		return;
   3534 	}
   3535 	/* First setup this by shrinking flight */
   3536 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
   3537 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
   3538 									     tp1);
   3539 	}
   3540 	sctp_flight_size_decrease(tp1);
   3541 	sctp_total_flight_decrease(stcb, tp1);
   3542 	/* Now mark for resend */
   3543 	tp1->sent = SCTP_DATAGRAM_RESEND;
   3544 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
   3545 
   3546 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   3547 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
   3548 			       tp1->whoTo->flight_size,
   3549 			       tp1->book_size,
   3550 			       (uintptr_t)tp1->whoTo,
   3551 			       tp1->rec.data.TSN_seq);
   3552 	}
   3553 }
   3554 
   3555 void
   3556 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
   3557                          uint32_t rwnd, int *abort_now, int ecne_seen)
   3558 {
   3559 	struct sctp_nets *net;
   3560 	struct sctp_association *asoc;
   3561 	struct sctp_tmit_chunk *tp1, *tp2;
   3562 	uint32_t old_rwnd;
   3563 	int win_probe_recovery = 0;
   3564 	int win_probe_recovered = 0;
   3565 	int j, done_once = 0;
   3566 	int rto_ok = 1;
   3567 
   3568 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
   3569 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
   3570 		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
   3571 	}
   3572 	SCTP_TCB_LOCK_ASSERT(stcb);
   3573 #ifdef SCTP_ASOCLOG_OF_TSNS
   3574 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
   3575 	stcb->asoc.cumack_log_at++;
   3576 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
   3577 		stcb->asoc.cumack_log_at = 0;
   3578 	}
   3579 #endif
   3580 	asoc = &stcb->asoc;
   3581 	old_rwnd = asoc->peers_rwnd;
   3582 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
   3583 		/* old ack */
   3584 		return;
   3585 	} else if (asoc->last_acked_seq == cumack) {
   3586 		/* Window update sack */
   3587 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
   3588 						    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
   3589 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   3590 			/* SWS sender side engages */
   3591 			asoc->peers_rwnd = 0;
   3592 		}
   3593 		if (asoc->peers_rwnd > old_rwnd) {
   3594 			goto again;
   3595 		}
   3596 		return;
   3597 	}
   3598 
   3599 	/* First setup for CC stuff */
   3600 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   3601 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
   3602 			/* Drag along the window_tsn for cwr's */
   3603 			net->cwr_window_tsn = cumack;
   3604 		}
   3605 		net->prev_cwnd = net->cwnd;
   3606 		net->net_ack = 0;
   3607 		net->net_ack2 = 0;
   3608 
   3609 		/*
   3610 		 * CMT: Reset CUC and Fast recovery algo variables before
   3611 		 * SACK processing
   3612 		 */
   3613 		net->new_pseudo_cumack = 0;
   3614 		net->will_exit_fast_recovery = 0;
   3615 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
   3616 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
   3617 		}
   3618 	}
   3619 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
   3620 		uint32_t send_s;
   3621 
   3622 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
   3623 			tp1 = TAILQ_LAST(&asoc->sent_queue,
   3624 					 sctpchunk_listhead);
   3625 			send_s = tp1->rec.data.TSN_seq + 1;
   3626 		} else {
   3627 			send_s = asoc->sending_seq;
   3628 		}
   3629 		if (SCTP_TSN_GE(cumack, send_s)) {
   3630 #ifndef INVARIANTS
   3631 			struct mbuf *op_err;
   3632 			char msg[SCTP_DIAG_INFO_LEN];
   3633 
   3634 #endif
   3635 #ifdef INVARIANTS
   3636 			panic("Impossible sack 1");
   3637 #else
   3638 
   3639 			*abort_now = 1;
   3640 			/* XXX */
    3641 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
   3642 			         cumack, send_s);
   3643 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   3644 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
   3645 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   3646 			return;
   3647 #endif
   3648 		}
   3649 	}
   3650 	asoc->this_sack_highest_gap = cumack;
   3651 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
   3652 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
   3653 			       stcb->asoc.overall_error_count,
   3654 			       0,
   3655 			       SCTP_FROM_SCTP_INDATA,
   3656 			       __LINE__);
   3657 	}
   3658 	stcb->asoc.overall_error_count = 0;
   3659 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
   3660 		/* process the new consecutive TSN first */
   3661 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
   3662 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
   3663 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
   3664 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
   3665 				}
   3666 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
   3667 					/*
   3668 					 * If it is less than ACKED, it is
   3669 					 * now no-longer in flight. Higher
   3670 					 * values may occur during marking
   3671 					 */
   3672 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   3673 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   3674 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
   3675 								       tp1->whoTo->flight_size,
   3676 								       tp1->book_size,
   3677 								       (uintptr_t)tp1->whoTo,
   3678 								       tp1->rec.data.TSN_seq);
   3679 						}
   3680 						sctp_flight_size_decrease(tp1);
   3681 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
   3682 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
   3683 														     tp1);
   3684 						}
   3685 						/* sa_ignore NO_NULL_CHK */
   3686 						sctp_total_flight_decrease(stcb, tp1);
   3687 					}
   3688 					tp1->whoTo->net_ack += tp1->send_size;
   3689 					if (tp1->snd_count < 2) {
   3690 						/*
    3691 						 * True non-retransmitted
   3692 						 * chunk
   3693 						 */
   3694 						tp1->whoTo->net_ack2 +=
   3695 							tp1->send_size;
   3696 
   3697 						/* update RTO too? */
   3698 						if (tp1->do_rtt) {
   3699 							if (rto_ok) {
   3700 								tp1->whoTo->RTO =
   3701 									/*
   3702 									 * sa_ignore
   3703 									 * NO_NULL_CHK
   3704 									 */
   3705 									sctp_calculate_rto(stcb,
   3706 											   asoc, tp1->whoTo,
   3707 											   &tp1->sent_rcv_time,
   3708 											   sctp_align_safe_nocopy,
   3709 											   SCTP_RTT_FROM_DATA);
   3710 								rto_ok = 0;
   3711 							}
   3712 							if (tp1->whoTo->rto_needed == 0) {
   3713 								tp1->whoTo->rto_needed = 1;
   3714 							}
   3715 							tp1->do_rtt = 0;
   3716 						}
   3717 					}
   3718 					/*
   3719 					 * CMT: CUCv2 algorithm. From the
   3720 					 * cumack'd TSNs, for each TSN being
   3721 					 * acked for the first time, set the
   3722 					 * following variables for the
   3723 					 * corresp destination.
   3724 					 * new_pseudo_cumack will trigger a
   3725 					 * cwnd update.
   3726 					 * find_(rtx_)pseudo_cumack will
   3727 					 * trigger search for the next
   3728 					 * expected (rtx-)pseudo-cumack.
   3729 					 */
   3730 					tp1->whoTo->new_pseudo_cumack = 1;
   3731 					tp1->whoTo->find_pseudo_cumack = 1;
   3732 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
   3733 
   3734 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   3735 						/* sa_ignore NO_NULL_CHK */
   3736 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
   3737 					}
   3738 				}
   3739 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   3740 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
   3741 				}
   3742 				if (tp1->rec.data.chunk_was_revoked) {
   3743 					/* deflate the cwnd */
   3744 					tp1->whoTo->cwnd -= tp1->book_size;
   3745 					tp1->rec.data.chunk_was_revoked = 0;
   3746 				}
   3747 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
   3748 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
   3749 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
   3750 #ifdef INVARIANTS
   3751 					} else {
   3752 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
   3753 #endif
   3754 					}
   3755 				}
   3756 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
   3757 				if (tp1->data) {
   3758 					/* sa_ignore NO_NULL_CHK */
   3759 					sctp_free_bufspace(stcb, asoc, tp1, 1);
   3760 					sctp_m_freem(tp1->data);
   3761 					tp1->data = NULL;
   3762 				}
   3763 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   3764 					sctp_log_sack(asoc->last_acked_seq,
   3765 						      cumack,
   3766 						      tp1->rec.data.TSN_seq,
   3767 						      0,
   3768 						      0,
   3769 						      SCTP_LOG_FREE_SENT);
   3770 				}
   3771 				asoc->sent_queue_cnt--;
   3772 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
   3773 			} else {
   3774 				break;
   3775 			}
   3776 		}
   3777 
   3778 	}
   3779 #if defined(__Userspace__)
   3780 	if (stcb->sctp_ep->recv_callback) {
   3781 		if (stcb->sctp_socket) {
   3782 			uint32_t inqueue_bytes, sb_free_now;
   3783 			struct sctp_inpcb *inp;
   3784 
   3785 			inp = stcb->sctp_ep;
   3786 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
   3787 			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
   3788 
   3789 			/* check if the amount free in the send socket buffer crossed the threshold */
   3790 			if (inp->send_callback &&
   3791 			    (((inp->send_sb_threshold > 0) &&
   3792 			      (sb_free_now >= inp->send_sb_threshold) &&
   3793 			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
   3794 			     (inp->send_sb_threshold == 0))) {
   3795 				atomic_add_int(&stcb->asoc.refcnt, 1);
   3796 				SCTP_TCB_UNLOCK(stcb);
   3797 				inp->send_callback(stcb->sctp_socket, sb_free_now);
   3798 				SCTP_TCB_LOCK(stcb);
   3799 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
   3800 			}
   3801 		}
   3802 	} else if (stcb->sctp_socket) {
   3803 #else
   3804 	/* sa_ignore NO_NULL_CHK */
   3805 	if (stcb->sctp_socket) {
   3806 #endif
   3807 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   3808 		struct socket *so;
   3809 
   3810 #endif
   3811 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
   3812 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
   3813 			/* sa_ignore NO_NULL_CHK */
   3814 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
   3815 		}
   3816 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   3817 		so = SCTP_INP_SO(stcb->sctp_ep);
   3818 		atomic_add_int(&stcb->asoc.refcnt, 1);
   3819 		SCTP_TCB_UNLOCK(stcb);
   3820 		SCTP_SOCKET_LOCK(so, 1);
   3821 		SCTP_TCB_LOCK(stcb);
   3822 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
   3823 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
   3824 			/* assoc was freed while we were unlocked */
   3825 			SCTP_SOCKET_UNLOCK(so, 1);
   3826 			return;
   3827 		}
   3828 #endif
   3829 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
   3830 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   3831 		SCTP_SOCKET_UNLOCK(so, 1);
   3832 #endif
   3833 	} else {
   3834 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
   3835 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
   3836 		}
   3837 	}
   3838 
   3839 	/* JRS - Use the congestion control given in the CC module */
   3840 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
   3841 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   3842 			if (net->net_ack2 > 0) {
   3843 				/*
   3844 				 * Karn's rule applies to clearing error count, this
   3845 				 * is optional.
   3846 				 */
   3847 				net->error_count = 0;
   3848 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
   3849 					/* addr came good */
   3850 					net->dest_state |= SCTP_ADDR_REACHABLE;
   3851 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
   3852 					                0, (void *)net, SCTP_SO_NOT_LOCKED);
   3853 				}
   3854 				if (net == stcb->asoc.primary_destination) {
   3855 					if (stcb->asoc.alternate) {
   3856 						/* release the alternate, primary is good */
   3857 						sctp_free_remote_addr(stcb->asoc.alternate);
   3858 						stcb->asoc.alternate = NULL;
   3859 					}
   3860 				}
   3861 				if (net->dest_state & SCTP_ADDR_PF) {
   3862 					net->dest_state &= ~SCTP_ADDR_PF;
   3863 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
   3864 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
   3865 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
   3866 					/* Done with this net */
   3867 					net->net_ack = 0;
   3868 				}
   3869 				/* restore any doubled timers */
   3870 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
   3871 				if (net->RTO < stcb->asoc.minrto) {
   3872 					net->RTO = stcb->asoc.minrto;
   3873 				}
   3874 				if (net->RTO > stcb->asoc.maxrto) {
   3875 					net->RTO = stcb->asoc.maxrto;
   3876 				}
   3877 			}
   3878 		}
   3879 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
   3880 	}
   3881 	asoc->last_acked_seq = cumack;
   3882 
   3883 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
   3884 		/* nothing left in-flight */
   3885 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   3886 			net->flight_size = 0;
   3887 			net->partial_bytes_acked = 0;
   3888 		}
   3889 		asoc->total_flight = 0;
   3890 		asoc->total_flight_count = 0;
   3891 	}
   3892 
   3893 	/* RWND update */
   3894 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
   3895 					    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
   3896 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   3897 		/* SWS sender side engages */
   3898 		asoc->peers_rwnd = 0;
   3899 	}
   3900 	if (asoc->peers_rwnd > old_rwnd) {
   3901 		win_probe_recovery = 1;
   3902 	}
    3903 	/* Now assure a timer is running wherever data is queued */
   3904 again:
   3905 	j = 0;
   3906 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   3907 		int to_ticks;
   3908 		if (win_probe_recovery && (net->window_probe)) {
   3909 			win_probe_recovered = 1;
   3910 			/*
   3911 			 * Find first chunk that was used with window probe
   3912 			 * and clear the sent
   3913 			 */
   3914 			/* sa_ignore FREED_MEMORY */
   3915 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   3916 				if (tp1->window_probe) {
   3917 					/* move back to data send queue */
   3918 					sctp_window_probe_recovery(stcb, asoc, tp1);
   3919 					break;
   3920 				}
   3921 			}
   3922 		}
   3923 		if (net->RTO == 0) {
   3924 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
   3925 		} else {
   3926 			to_ticks = MSEC_TO_TICKS(net->RTO);
   3927 		}
   3928 		if (net->flight_size) {
   3929 			j++;
   3930 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
   3931 						  sctp_timeout_handler, &net->rxt_timer);
   3932 			if (net->window_probe) {
   3933 				net->window_probe = 0;
   3934 			}
   3935 		} else {
   3936 			if (net->window_probe) {
   3937 				/* In window probes we must assure a timer is still running there */
   3938 				net->window_probe = 0;
   3939 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   3940 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
   3941 					                    sctp_timeout_handler, &net->rxt_timer);
   3942 				}
   3943 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   3944 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   3945 				                stcb, net,
   3946 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
   3947 			}
   3948 		}
   3949 	}
   3950 	if ((j == 0) &&
   3951 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
   3952 	    (asoc->sent_queue_retran_cnt == 0) &&
   3953 	    (win_probe_recovered == 0) &&
   3954 	    (done_once == 0)) {
   3955 		/* huh, this should not happen unless all packets
   3956 		 * are PR-SCTP and marked to skip of course.
   3957 		 */
   3958 		if (sctp_fs_audit(asoc)) {
   3959 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   3960 				net->flight_size = 0;
   3961 			}
   3962 			asoc->total_flight = 0;
   3963 			asoc->total_flight_count = 0;
   3964 			asoc->sent_queue_retran_cnt = 0;
   3965 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   3966 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   3967 					sctp_flight_size_increase(tp1);
   3968 					sctp_total_flight_increase(stcb, tp1);
   3969 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   3970 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
   3971 				}
   3972 			}
   3973 		}
   3974 		done_once = 1;
   3975 		goto again;
   3976 	}
   3977 	/**********************************/
   3978 	/* Now what about shutdown issues */
   3979 	/**********************************/
   3980 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
   3981 		/* nothing left on sendqueue.. consider done */
   3982 		/* clean up */
   3983 		if ((asoc->stream_queue_cnt == 1) &&
   3984 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
   3985 		     (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
   3986 		    (asoc->locked_on_sending)
   3987 			) {
   3988 			struct sctp_stream_queue_pending *sp;
   3989 			/* I may be in a state where we got
   3990 			 * all across.. but cannot write more due
   3991 			 * to a shutdown... we abort since the
   3992 			 * user did not indicate EOR in this case. The
   3993 			 * sp will be cleaned during free of the asoc.
   3994 			 */
   3995 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
   3996 					sctp_streamhead);
   3997 			if ((sp) && (sp->length == 0)) {
   3998 				/* Let cleanup code purge it */
   3999 				if (sp->msg_is_complete) {
   4000 					asoc->stream_queue_cnt--;
   4001 				} else {
   4002 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
   4003 					asoc->locked_on_sending = NULL;
   4004 					asoc->stream_queue_cnt--;
   4005 				}
   4006 			}
   4007 		}
   4008 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
   4009 		    (asoc->stream_queue_cnt == 0)) {
   4010 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
   4011 				/* Need to abort here */
   4012 				struct mbuf *op_err;
   4013 
   4014 			abort_out_now:
   4015 				*abort_now = 1;
   4016 				/* XXX */
   4017 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
   4018 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
   4019 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   4020 			} else {
   4021 				struct sctp_nets *netp;
   4022 
   4023 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
   4024 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
   4025 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   4026 				}
   4027 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
   4028 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
   4029 				sctp_stop_timers_for_shutdown(stcb);
   4030 				if (asoc->alternate) {
   4031 					netp = asoc->alternate;
   4032 				} else {
   4033 					netp = asoc->primary_destination;
   4034 				}
   4035 				sctp_send_shutdown(stcb, netp);
   4036 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
   4037 						 stcb->sctp_ep, stcb, netp);
   4038 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
   4039 						 stcb->sctp_ep, stcb, netp);
   4040 			}
   4041 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
   4042 			   (asoc->stream_queue_cnt == 0)) {
   4043 			struct sctp_nets *netp;
   4044 
   4045 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
   4046 				goto abort_out_now;
   4047 			}
   4048 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   4049 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
   4050 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
   4051 			sctp_stop_timers_for_shutdown(stcb);
   4052 			if (asoc->alternate) {
   4053 				netp = asoc->alternate;
   4054 			} else {
   4055 				netp = asoc->primary_destination;
   4056 			}
   4057 			sctp_send_shutdown_ack(stcb, netp);
   4058 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
   4059 					 stcb->sctp_ep, stcb, netp);
   4060 		}
   4061 	}
   4062 	/*********************************************/
   4063 	/* Here we perform PR-SCTP procedures        */
   4064 	/* (section 4.2)                             */
   4065 	/*********************************************/
   4066 	/* C1. update advancedPeerAckPoint */
   4067 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
   4068 		asoc->advanced_peer_ack_point = cumack;
   4069 	}
   4070 	/* PR-Sctp issues need to be addressed too */
   4071 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
   4072 		struct sctp_tmit_chunk *lchk;
   4073 		uint32_t old_adv_peer_ack_point;
   4074 
   4075 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
   4076 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
   4077 		/* C3. See if we need to send a Fwd-TSN */
   4078 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
   4079 			/*
   4080 			 * ISSUE with ECN, see FWD-TSN processing.
   4081 			 */
   4082 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
   4083 				send_forward_tsn(stcb, asoc);
   4084 			} else if (lchk) {
   4085 				/* try to FR fwd-tsn's that get lost too */
   4086 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
   4087 					send_forward_tsn(stcb, asoc);
   4088 				}
   4089 			}
   4090 		}
   4091 		if (lchk) {
   4092 			/* Assure a timer is up */
   4093 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
   4094 					 stcb->sctp_ep, stcb, lchk->whoTo);
   4095 		}
   4096 	}
   4097 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
   4098 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
   4099 			       rwnd,
   4100 			       stcb->asoc.peers_rwnd,
   4101 			       stcb->asoc.total_flight,
   4102 			       stcb->asoc.total_output_queue_size);
   4103 	}
   4104 }
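
/*
 * Aside (illustrative sketch, not part of the original file): both SACK
 * handlers derive the usable peer rwnd the same way: the advertised window
 * minus everything still in flight (plus a per-chunk overhead allowance),
 * floored at zero, with a sender-side silly-window clamp. The helper below
 * is a hypothetical restatement of that computation.
 */
#if 0	/* sketch only, never compiled */
static uint32_t
sketch_peers_rwnd(uint32_t advertised_rwnd, uint32_t total_flight,
                  uint32_t flight_count, uint32_t chunk_oh,
                  uint32_t sws_sender)
{
	uint32_t in_flight, rwnd;

	in_flight = total_flight + (flight_count * chunk_oh);
	/* sctp_sbspace_sub() semantics: never go below zero. */
	rwnd = (advertised_rwnd > in_flight) ? (advertised_rwnd - in_flight) : 0;
	if (rwnd < sws_sender) {
		/* SWS avoidance: treat a tiny window as closed. */
		rwnd = 0;
	}
	return (rwnd);
}
#endif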
   4105 
   4106 void
   4107 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
   4108                  struct sctp_tcb *stcb,
   4109                  uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
   4110                  int *abort_now, uint8_t flags,
   4111                  uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
   4112 {
   4113 	struct sctp_association *asoc;
   4114 	struct sctp_tmit_chunk *tp1, *tp2;
   4115 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
   4116 	uint16_t wake_him = 0;
   4117 	uint32_t send_s = 0;
   4118 	long j;
   4119 	int accum_moved = 0;
   4120 	int will_exit_fast_recovery = 0;
   4121 	uint32_t a_rwnd, old_rwnd;
   4122 	int win_probe_recovery = 0;
   4123 	int win_probe_recovered = 0;
   4124 	struct sctp_nets *net = NULL;
   4125 	int done_once;
   4126 	int rto_ok = 1;
   4127 	uint8_t reneged_all = 0;
   4128 	uint8_t cmt_dac_flag;
   4129 	/*
   4130 	 * we take any chance we can to service our queues since we cannot
   4131 	 * get awoken when the socket is read from :<
   4132 	 */
   4133 	/*
   4134 	 * Now perform the actual SACK handling: 1) Verify that it is not an
   4135 	 * old sack, if so discard. 2) If there is nothing left in the send
   4136 	 * queue (cum-ack is equal to last acked) then you have a duplicate
   4137 	 * too, update any rwnd change and verify no timers are running.
    4138 	 * then return. 3) Process any new consecutive data i.e. cum-ack
   4139 	 * moved process these first and note that it moved. 4) Process any
   4140 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
   4141 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
   4142 	 * sync up flightsizes and things, stop all timers and also check
   4143 	 * for shutdown_pending state. If so then go ahead and send off the
   4144 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
   4145 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
   4146 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
   4147 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
   4148 	 * if in shutdown_recv state.
   4149 	 */
   4150 	SCTP_TCB_LOCK_ASSERT(stcb);
   4151 	/* CMT DAC algo */
   4152 	this_sack_lowest_newack = 0;
   4153 	SCTP_STAT_INCR(sctps_slowpath_sack);
   4154 	last_tsn = cum_ack;
   4155 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
   4156 #ifdef SCTP_ASOCLOG_OF_TSNS
   4157 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
   4158 	stcb->asoc.cumack_log_at++;
   4159 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
   4160 		stcb->asoc.cumack_log_at = 0;
   4161 	}
   4162 #endif
   4163 	a_rwnd = rwnd;
   4164 
   4165 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
   4166 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
   4167 		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
   4168 	}
   4169 
   4170 	old_rwnd = stcb->asoc.peers_rwnd;
   4171 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
   4172 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
   4173 		               stcb->asoc.overall_error_count,
   4174 		               0,
   4175 		               SCTP_FROM_SCTP_INDATA,
   4176 		               __LINE__);
   4177 	}
   4178 	stcb->asoc.overall_error_count = 0;
   4179 	asoc = &stcb->asoc;
   4180 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   4181 		sctp_log_sack(asoc->last_acked_seq,
   4182 		              cum_ack,
   4183 		              0,
   4184 		              num_seg,
   4185 		              num_dup,
   4186 		              SCTP_LOG_NEW_SACK);
   4187 	}
   4188 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
   4189 		uint16_t i;
   4190 		uint32_t *dupdata, dblock;
   4191 
   4192 		for (i = 0; i < num_dup; i++) {
   4193 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
   4194 			                                    sizeof(uint32_t), (uint8_t *)&dblock);
   4195 			if (dupdata == NULL) {
   4196 				break;
   4197 			}
   4198 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
   4199 		}
   4200 	}
   4201 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
   4202 		/* reality check */
   4203 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
   4204 			tp1 = TAILQ_LAST(&asoc->sent_queue,
   4205 			                 sctpchunk_listhead);
   4206 			send_s = tp1->rec.data.TSN_seq + 1;
   4207 		} else {
   4208 			tp1 = NULL;
   4209 			send_s = asoc->sending_seq;
   4210 		}
   4211 		if (SCTP_TSN_GE(cum_ack, send_s)) {
   4212 			struct mbuf *op_err;
   4213 			char msg[SCTP_DIAG_INFO_LEN];
   4214 
   4215 			/*
   4216 			 * no way, we have not even sent this TSN out yet.
   4217 			 * Peer is hopelessly messed up with us.
   4218 			 */
   4219 			SCTP_PRINTF("NEW cum_ack:%x is at or beyond send_s:%x\n",
   4220 			            cum_ack, send_s);
   4221 			if (tp1) {
   4222 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
   4223 				            tp1->rec.data.TSN_seq, (void *)tp1);
   4224 			}
   4225 		hopeless_peer:
   4226 			*abort_now = 1;
   4227 			/* XXX */
   4228 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
   4229 			         cum_ack, send_s);
   4230 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   4231 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
   4232 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   4233 			return;
   4234 		}
   4235 	}
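	/*
	 * For example: if our next TSN to assign (sending_seq) is 100 and
	 * the sent queue is empty, send_s is 100; a SACK carrying
	 * cum_ack = 100, or anything newer in serial-number order, claims
	 * data we never sent, so the association is aborted above.
	 */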
   4236 	/**********************/
   4237 	/* 1) check the range */
   4238 	/**********************/
   4239 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
   4240 		/* acking something behind */
   4241 		return;
   4242 	}
   4243 
   4244 	/* update the Rwnd of the peer */
   4245 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
   4246 	    TAILQ_EMPTY(&asoc->send_queue) &&
   4247 	    (asoc->stream_queue_cnt == 0)) {
   4248 		/* nothing left on send/sent and strmq */
   4249 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
   4250 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
   4251 			                  asoc->peers_rwnd, 0, 0, a_rwnd);
   4252 		}
   4253 		asoc->peers_rwnd = a_rwnd;
   4254 		if (asoc->sent_queue_retran_cnt) {
   4255 			asoc->sent_queue_retran_cnt = 0;
   4256 		}
   4257 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   4258 			/* SWS sender side engages */
   4259 			asoc->peers_rwnd = 0;
   4260 		}
   4261 		/* stop any timers */
   4262 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4263 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   4264 			                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
   4265 			net->partial_bytes_acked = 0;
   4266 			net->flight_size = 0;
   4267 		}
   4268 		asoc->total_flight = 0;
   4269 		asoc->total_flight_count = 0;
   4270 		return;
   4271 	}
   4272 	/*
   4273 	 * We init net_ack and net_ack2 to 0. These are used to track two
   4274 	 * things: net_ack tracks the total byte count newly acked, while
   4275 	 * net_ack2 tracks the total bytes acked that are unambiguous,
   4276 	 * i.e. were never retransmitted. We track these on a per
   4277 	 * destination address basis.
   4278 	 */
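	/*
	 * For example: if this SACK newly acks three 1000-byte chunks on
	 * one destination and one of them had been retransmitted, that
	 * net's net_ack becomes 3000 while net_ack2 becomes 2000; only
	 * never-retransmitted bytes are safe for RTT measurement below
	 * (Karn's rule).
	 */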
   4279 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4280 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
   4281 			/* Drag along the window_tsn for cwr's */
   4282 			net->cwr_window_tsn = cum_ack;
   4283 		}
   4284 		net->prev_cwnd = net->cwnd;
   4285 		net->net_ack = 0;
   4286 		net->net_ack2 = 0;
   4287 
   4288 		/*
   4289 		 * CMT: Reset CUC and Fast recovery algo variables before
   4290 		 * SACK processing
   4291 		 */
   4292 		net->new_pseudo_cumack = 0;
   4293 		net->will_exit_fast_recovery = 0;
   4294 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
   4295 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
   4296 		}
   4297 	}
   4298 	/* process the new consecutive TSN first */
   4299 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   4300 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
   4301 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
   4302 				accum_moved = 1;
   4303 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
   4304 					/*
   4305 					 * If it is less than ACKED, it is
   4306 					 * now no longer in flight. Higher
   4307 					 * values may occur during marking
   4308 					 */
   4309 					if ((tp1->whoTo->dest_state &
   4310 					     SCTP_ADDR_UNCONFIRMED) &&
   4311 					    (tp1->snd_count < 2)) {
   4312 						/*
   4313 						 * If there was no retrans-
   4314 						 * mission, and the address
   4315 						 * is unconfirmed, and we
   4316 						 * sent there and are now
   4317 						 * being sacked, it is
   4318 						 * confirmed; mark it so.
   4319 						 */
   4320 						tp1->whoTo->dest_state &=
   4321 							~SCTP_ADDR_UNCONFIRMED;
   4322 					}
   4323 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   4324 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   4325 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
   4326 							               tp1->whoTo->flight_size,
   4327 							               tp1->book_size,
   4328 							               (uintptr_t)tp1->whoTo,
   4329 							               tp1->rec.data.TSN_seq);
   4330 						}
   4331 						sctp_flight_size_decrease(tp1);
   4332 						sctp_total_flight_decrease(stcb, tp1);
   4333 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
   4334 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
   4335 														     tp1);
   4336 						}
   4337 					}
   4338 					tp1->whoTo->net_ack += tp1->send_size;
   4339 
   4340 					/* CMT SFR and DAC algos */
   4341 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
   4342 					tp1->whoTo->saw_newack = 1;
   4343 
   4344 					if (tp1->snd_count < 2) {
   4345 						/*
   4346 						 * True non-retransmitted
   4347 						 * chunk
   4348 						 */
   4349 						tp1->whoTo->net_ack2 +=
   4350 							tp1->send_size;
   4351 
   4352 						/* update RTO too? */
   4353 						if (tp1->do_rtt) {
   4354 							if (rto_ok) {
   4355 								tp1->whoTo->RTO =
   4356 									sctp_calculate_rto(stcb,
   4357 											   asoc, tp1->whoTo,
   4358 											   &tp1->sent_rcv_time,
   4359 											   sctp_align_safe_nocopy,
   4360 											   SCTP_RTT_FROM_DATA);
   4361 								rto_ok = 0;
   4362 							}
   4363 							if (tp1->whoTo->rto_needed == 0) {
   4364 								tp1->whoTo->rto_needed = 1;
   4365 							}
   4366 							tp1->do_rtt = 0;
   4367 						}
   4368 					}
   4369 					/*
   4370 					 * CMT: CUCv2 algorithm. From the
   4371 					 * cumack'd TSNs, for each TSN being
   4372 					 * acked for the first time, set the
   4373 					 * following variables for the
   4374 					 * corresp destination.
   4375 					 * new_pseudo_cumack will trigger a
   4376 					 * cwnd update.
   4377 					 * find_(rtx_)pseudo_cumack will
   4378 					 * trigger search for the next
   4379 					 * expected (rtx-)pseudo-cumack.
   4380 					 */
   4381 					tp1->whoTo->new_pseudo_cumack = 1;
   4382 					tp1->whoTo->find_pseudo_cumack = 1;
   4383 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
   4384 
   4385 
   4386 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   4387 						sctp_log_sack(asoc->last_acked_seq,
   4388 						              cum_ack,
   4389 						              tp1->rec.data.TSN_seq,
   4390 						              0,
   4391 						              0,
   4392 						              SCTP_LOG_TSN_ACKED);
   4393 					}
   4394 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
   4395 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
   4396 					}
   4397 				}
   4398 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   4399 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
   4400 #ifdef SCTP_AUDITING_ENABLED
   4401 					sctp_audit_log(0xB3,
   4402 					               (asoc->sent_queue_retran_cnt & 0x000000ff));
   4403 #endif
   4404 				}
   4405 				if (tp1->rec.data.chunk_was_revoked) {
   4406 					/* deflate the cwnd */
   4407 					tp1->whoTo->cwnd -= tp1->book_size;
   4408 					tp1->rec.data.chunk_was_revoked = 0;
   4409 				}
   4410 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
   4411 					tp1->sent = SCTP_DATAGRAM_ACKED;
   4412 				}
   4413 			}
   4414 		} else {
   4415 			break;
   4416 		}
   4417 	}
   4418 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
   4419 	/* always set this up to cum-ack */
   4420 	asoc->this_sack_highest_gap = last_tsn;
   4421 
   4422 	if ((num_seg > 0) || (num_nr_seg > 0)) {
   4423 
   4424 		/*
   4425 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
   4426 		 * to be greater than the cumack. Also reset saw_newack to 0
   4427 		 * for all dests.
   4428 		 */
   4429 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4430 			net->saw_newack = 0;
   4431 			net->this_sack_highest_newack = last_tsn;
   4432 		}
   4433 
   4434 		/*
   4435 		 * this_sack_highest_gap will increase while handling NEW
   4436 		 * segments; this_sack_highest_newack will increase while
   4437 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
   4438 		 * used for the CMT DAC algo. saw_newack will also change.
   4439 		 */
   4440 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
   4441 			&biggest_tsn_newly_acked, &this_sack_lowest_newack,
   4442 			num_seg, num_nr_seg, &rto_ok)) {
   4443 			wake_him++;
   4444 		}
   4445 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
   4446 			/*
   4447 			 * validate the biggest_tsn_acked in the gap acks if
   4448 			 * strict adherence is wanted.
   4449 			 */
   4450 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
   4451 				/*
   4452 				 * peer is either confused or we are under
   4453 				 * attack. We must abort.
   4454 				 */
   4455 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
   4456 				            biggest_tsn_acked, send_s);
   4457 				goto hopeless_peer;
   4458 			}
   4459 		}
   4460 	}
   4461 	/*******************************************/
   4462 	/* cancel ALL T3-send timer if accum moved */
   4463 	/*******************************************/
   4464 	if (asoc->sctp_cmt_on_off > 0) {
   4465 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4466 			if (net->new_pseudo_cumack)
   4467 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   4468 				                stcb, net,
   4469 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
   4470 
   4471 		}
   4472 	} else {
   4473 		if (accum_moved) {
   4474 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4475 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   4476 				                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
   4477 			}
   4478 		}
   4479 	}
   4480 	/********************************************/
   4481 	/* drop the acked chunks from the sentqueue */
   4482 	/********************************************/
   4483 	asoc->last_acked_seq = cum_ack;
   4484 
   4485 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
   4486 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
   4487 			break;
   4488 		}
   4489 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
   4490 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
   4491 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
   4492 #ifdef INVARIANTS
   4493 			} else {
   4494 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
   4495 #endif
   4496 			}
   4497 		}
   4498 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
   4499 		if (PR_SCTP_ENABLED(tp1->flags)) {
   4500 			if (asoc->pr_sctp_cnt != 0)
   4501 				asoc->pr_sctp_cnt--;
   4502 		}
   4503 		asoc->sent_queue_cnt--;
   4504 		if (tp1->data) {
   4505 			/* sa_ignore NO_NULL_CHK */
   4506 			sctp_free_bufspace(stcb, asoc, tp1, 1);
   4507 			sctp_m_freem(tp1->data);
   4508 			tp1->data = NULL;
   4509 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
   4510 				asoc->sent_queue_cnt_removeable--;
   4511 			}
   4512 		}
   4513 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
   4514 			sctp_log_sack(asoc->last_acked_seq,
   4515 			              cum_ack,
   4516 			              tp1->rec.data.TSN_seq,
   4517 			              0,
   4518 			              0,
   4519 			              SCTP_LOG_FREE_SENT);
   4520 		}
   4521 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
   4522 		wake_him++;
   4523 	}
   4524 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
   4525 #ifdef INVARIANTS
   4526 		panic("Warning flight size is positive and should be 0");
   4527 #else
   4528 		SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
   4529 		            asoc->total_flight);
   4530 #endif
   4531 		asoc->total_flight = 0;
   4532 	}
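	/*
	 * Invariant being checked above: total_flight should track the
	 * bytes of sent-queue chunks still counted as in flight (those
	 * below the SCTP_DATAGRAM_RESEND mark), so an empty sent queue
	 * with a non-zero flight size means the accounting drifted; the
	 * check repairs it (or panics under INVARIANTS).
	 */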
   4533 
   4534 #if defined(__Userspace__)
   4535 	if (stcb->sctp_ep->recv_callback) {
   4536 		if (stcb->sctp_socket) {
   4537 			uint32_t inqueue_bytes, sb_free_now;
   4538 			struct sctp_inpcb *inp;
   4539 
   4540 			inp = stcb->sctp_ep;
   4541 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
   4542 			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
   4543 
   4544 			/* check if the amount free in the send socket buffer crossed the threshold */
   4545 			if (inp->send_callback &&
   4546 			   (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
   4547 			    (inp->send_sb_threshold == 0))) {
   4548 				atomic_add_int(&stcb->asoc.refcnt, 1);
   4549 				SCTP_TCB_UNLOCK(stcb);
   4550 				inp->send_callback(stcb->sctp_socket, sb_free_now);
   4551 				SCTP_TCB_LOCK(stcb);
   4552 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
   4553 			}
   4554 		}
   4555 	} else if ((wake_him) && (stcb->sctp_socket)) {
   4556 #else
   4557 	/* sa_ignore NO_NULL_CHK */
   4558 	if ((wake_him) && (stcb->sctp_socket)) {
   4559 #endif
   4560 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   4561 		struct socket *so;
   4562 
   4563 #endif
   4564 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
   4565 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
   4566 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
   4567 		}
   4568 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   4569 		so = SCTP_INP_SO(stcb->sctp_ep);
   4570 		atomic_add_int(&stcb->asoc.refcnt, 1);
   4571 		SCTP_TCB_UNLOCK(stcb);
   4572 		SCTP_SOCKET_LOCK(so, 1);
   4573 		SCTP_TCB_LOCK(stcb);
   4574 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
   4575 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
   4576 			/* assoc was freed while we were unlocked */
   4577 			SCTP_SOCKET_UNLOCK(so, 1);
   4578 			return;
   4579 		}
   4580 #endif
   4581 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
   4582 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
   4583 		SCTP_SOCKET_UNLOCK(so, 1);
   4584 #endif
   4585 	} else {
   4586 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
   4587 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
   4588 		}
   4589 	}
   4590 
   4591 	if (asoc->fast_retran_loss_recovery && accum_moved) {
   4592 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
   4593 			/* Setup so we will exit RFC2582 fast recovery */
   4594 			will_exit_fast_recovery = 1;
   4595 		}
   4596 	}
   4597 	/*
   4598 	 * Check for revoked fragments:
   4599 	 *
   4600 	 * If the previous SACK had no frags, nothing can have been revoked.
   4601 	 * If the previous SACK had frags and we now have frags too (i.e.
   4602 	 * num_seg > 0), call sctp_check_for_revoked() to tell whether the
   4603 	 * peer revoked some of them. Otherwise the peer has revoked all
   4604 	 * ACKED fragments, since we had some before and now have NONE.
   4605 	 */
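	/*
	 * For example: if the previous SACK gap-acked TSNs 10-12 above a
	 * cum-ack of 8, and this SACK still has cum-ack 8 but carries no
	 * gap blocks at all, TSNs 10-12 were revoked: below they are moved
	 * back to SCTP_DATAGRAM_SENT and re-enter the flight accounting.
	 */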
   4606 
   4607 	if (num_seg) {
   4608 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
   4609 		asoc->saw_sack_with_frags = 1;
   4610 	} else if (asoc->saw_sack_with_frags) {
   4611 		int cnt_revoked = 0;
   4612 
   4613 		/* Peer revoked all dg's marked or acked */
   4614 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   4615 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
   4616 				tp1->sent = SCTP_DATAGRAM_SENT;
   4617 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
   4618 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
   4619 					               tp1->whoTo->flight_size,
   4620 					               tp1->book_size,
   4621 					               (uintptr_t)tp1->whoTo,
   4622 					               tp1->rec.data.TSN_seq);
   4623 				}
   4624 				sctp_flight_size_increase(tp1);
   4625 				sctp_total_flight_increase(stcb, tp1);
   4626 				tp1->rec.data.chunk_was_revoked = 1;
   4627 				/*
   4628 				 * To ensure that this increase in
   4629 				 * flightsize, which is artificial,
   4630 				 * does not throttle the sender, we
   4631 				 * also increase the cwnd
   4632 				 * artificially.
   4633 				 */
   4634 				tp1->whoTo->cwnd += tp1->book_size;
   4635 				cnt_revoked++;
   4636 			}
   4637 		}
   4638 		if (cnt_revoked) {
   4639 			reneged_all = 1;
   4640 		}
   4641 		asoc->saw_sack_with_frags = 0;
   4642 	}
   4643 	if (num_nr_seg > 0)
   4644 		asoc->saw_sack_with_nr_frags = 1;
   4645 	else
   4646 		asoc->saw_sack_with_nr_frags = 0;
   4647 
   4648 	/* JRS - Use the congestion control given in the CC module */
   4649 	if (ecne_seen == 0) {
   4650 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4651 			if (net->net_ack2 > 0) {
   4652 				/*
   4653 				 * Karn's rule applies to clearing the error count; this
   4654 				 * is optional.
   4655 				 */
   4656 				net->error_count = 0;
   4657 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
   4658 					/* addr came good */
   4659 					net->dest_state |= SCTP_ADDR_REACHABLE;
   4660 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
   4661 					                0, (void *)net, SCTP_SO_NOT_LOCKED);
   4662 				}
   4663 
   4664 				if (net == stcb->asoc.primary_destination) {
   4665 					if (stcb->asoc.alternate) {
   4666 						/* release the alternate, primary is good */
   4667 						sctp_free_remote_addr(stcb->asoc.alternate);
   4668 						stcb->asoc.alternate = NULL;
   4669 					}
   4670 				}
   4671 
   4672 				if (net->dest_state & SCTP_ADDR_PF) {
   4673 					net->dest_state &= ~SCTP_ADDR_PF;
   4674 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
   4675 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
   4676 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
   4677 					/* Done with this net */
   4678 					net->net_ack = 0;
   4679 				}
   4680 				/* restore any doubled timers */
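				/*
				 * lastsa and lastsv are the smoothed RTT
				 * estimator state kept in fixed point
				 * (lastsa scaled by 2^SCTP_RTT_SHIFT), so
				 * this recomputes the RTO from the smoothed
				 * estimates, dropping any exponential
				 * backoff; the checks below clamp the
				 * result into [minrto, maxrto].
				 */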
   4681 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
   4682 				if (net->RTO < stcb->asoc.minrto) {
   4683 					net->RTO = stcb->asoc.minrto;
   4684 				}
   4685 				if (net->RTO > stcb->asoc.maxrto) {
   4686 					net->RTO = stcb->asoc.maxrto;
   4687 				}
   4688 			}
   4689 		}
   4690 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
   4691 	}
   4692 
   4693 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
   4694 		/* nothing left in-flight */
   4695 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4696 			/* stop all timers */
   4697 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   4698 			                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
   4699 			net->flight_size = 0;
   4700 			net->partial_bytes_acked = 0;
   4701 		}
   4702 		asoc->total_flight = 0;
   4703 		asoc->total_flight_count = 0;
   4704 	}
   4705 
   4706 	/**********************************/
   4707 	/* Now what about shutdown issues */
   4708 	/**********************************/
   4709 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
   4710 		/* nothing left on sendqueue.. consider done */
   4711 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
   4712 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
   4713 			                  asoc->peers_rwnd, 0, 0, a_rwnd);
   4714 		}
   4715 		asoc->peers_rwnd = a_rwnd;
   4716 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   4717 			/* SWS sender side engages */
   4718 			asoc->peers_rwnd = 0;
   4719 		}
   4720 		/* clean up */
   4721 		if ((asoc->stream_queue_cnt == 1) &&
   4722 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
   4723 		     (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
   4724 		    (asoc->locked_on_sending)
   4725 			) {
   4726 			struct sctp_stream_queue_pending *sp;
   4727 			/* We may be in a state where we got
   4728 			 * everything across.. but cannot write more
   4729 			 * due to a shutdown... we abort since the
   4730 			 * user did not indicate EOR in this case.
   4731 			 */
   4732 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
   4733 			                sctp_streamhead);
   4734 			if ((sp) && (sp->length == 0)) {
   4735 				asoc->locked_on_sending = NULL;
   4736 				if (sp->msg_is_complete) {
   4737 					asoc->stream_queue_cnt--;
   4738 				} else {
   4739 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
   4740 					asoc->stream_queue_cnt--;
   4741 				}
   4742 			}
   4743 		}
   4744 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
   4745 		    (asoc->stream_queue_cnt == 0)) {
   4746 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
   4747 				/* Need to abort here */
   4748 				struct mbuf *op_err;
   4749 
   4750 			abort_out_now:
   4751 				*abort_now = 1;
   4752 				/* XXX */
   4753 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
   4754 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
   4755 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   4756 				return;
   4757 			} else {
   4758 				struct sctp_nets *netp;
   4759 
   4760 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
   4761 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
   4762 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   4763 				}
   4764 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
   4765 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
   4766 				sctp_stop_timers_for_shutdown(stcb);
   4767 				if (asoc->alternate) {
   4768 					netp = asoc->alternate;
   4769 				} else {
   4770 					netp = asoc->primary_destination;
   4771 				}
   4772 				sctp_send_shutdown(stcb, netp);
   4773 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
   4774 				                 stcb->sctp_ep, stcb, netp);
   4775 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
   4776 				                 stcb->sctp_ep, stcb, netp);
   4777 			}
   4778 			return;
   4779 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
   4780 			   (asoc->stream_queue_cnt == 0)) {
   4781 			struct sctp_nets *netp;
   4782 
   4783 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
   4784 				goto abort_out_now;
   4785 			}
   4786 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
   4787 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
   4788 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
   4789 			sctp_stop_timers_for_shutdown(stcb);
   4790 			if (asoc->alternate) {
   4791 				netp = asoc->alternate;
   4792 			} else {
   4793 				netp = asoc->primary_destination;
   4794 			}
   4795 			sctp_send_shutdown_ack(stcb, netp);
   4796 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
   4797 			                 stcb->sctp_ep, stcb, netp);
   4798 			return;
   4799 		}
   4800 	}
   4801 	/*
   4802 	 * Now here we are going to recycle net_ack for a different use...
   4803 	 * HEADS UP.
   4804 	 */
   4805 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4806 		net->net_ack = 0;
   4807 	}
   4808 
   4809 	/*
   4810 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
   4811 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
   4812 	 * automatically ensure that.
   4813 	 */
   4814 	if ((asoc->sctp_cmt_on_off > 0) &&
   4815 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
   4816 	    (cmt_dac_flag == 0)) {
   4817 		this_sack_lowest_newack = cum_ack;
   4818 	}
   4819 	if ((num_seg > 0) || (num_nr_seg > 0)) {
   4820 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
   4821 		                           biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
   4822 	}
   4823 	/* JRS - Use the congestion control given in the CC module */
   4824 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
   4825 
   4826 	/* Now are we exiting loss recovery ? */
   4827 	if (will_exit_fast_recovery) {
   4828 		/* Ok, we must exit fast recovery */
   4829 		asoc->fast_retran_loss_recovery = 0;
   4830 	}
   4831 	if ((asoc->sat_t3_loss_recovery) &&
   4832 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
   4833 		/* end satellite t3 loss recovery */
   4834 		asoc->sat_t3_loss_recovery = 0;
   4835 	}
   4836 	/*
   4837 	 * CMT Fast recovery
   4838 	 */
   4839 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4840 		if (net->will_exit_fast_recovery) {
   4841 			/* Ok, we must exit fast recovery */
   4842 			net->fast_retran_loss_recovery = 0;
   4843 		}
   4844 	}
   4845 
   4846 	/* Adjust and set the new rwnd value */
   4847 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
   4848 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
   4849 		                  asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
   4850 	}
   4851 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
   4852 	                                    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
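	/*
	 * For example: with a_rwnd = 20000, four chunks totalling 4000
	 * bytes in flight, and sctp_peer_chunk_oh set to 256, the usable
	 * peer rwnd becomes 20000 - (4000 + 4 * 256) = 14976; should it
	 * fall below the SWS threshold, it is clamped to 0 below.
	 */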
   4853 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
   4854 		/* SWS sender side engages */
   4855 		asoc->peers_rwnd = 0;
   4856 	}
   4857 	if (asoc->peers_rwnd > old_rwnd) {
   4858 		win_probe_recovery = 1;
   4859 	}
   4860 
   4861 	/*
   4862 	 * Now we must setup so we have a timer up for anyone with
   4863 	 * outstanding data.
   4864 	 */
   4865 	done_once = 0;
   4866 again:
   4867 	j = 0;
   4868 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4869 		if (win_probe_recovery && (net->window_probe)) {
   4870 			win_probe_recovered = 1;
   4871 			/*-
   4872 			 * Find the first chunk that was used with a
   4873 			 * window probe and clear the event. Put it
   4874 			 * back into the send queue as if it has not
   4875 			 * been sent.
   4876 			 */
   4877 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   4878 				if (tp1->window_probe) {
   4879 					sctp_window_probe_recovery(stcb, asoc, tp1);
   4880 					break;
   4881 				}
   4882 			}
   4883 		}
   4884 		if (net->flight_size) {
   4885 			j++;
   4886 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   4887 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
   4888 				                 stcb->sctp_ep, stcb, net);
   4889 			}
   4890 			if (net->window_probe) {
   4891 				net->window_probe = 0;
   4892 			}
   4893 		} else {
   4894 			if (net->window_probe) {
   4895 				/* In window probes we must assure a timer is still running there */
   4896 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   4897 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
   4898 					                 stcb->sctp_ep, stcb, net);
   4899 
   4900 				}
   4901 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
   4902 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
   4903 				                stcb, net,
   4904 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
   4905 			}
   4906 		}
   4907 	}
   4908 	if ((j == 0) &&
   4909 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
   4910 	    (asoc->sent_queue_retran_cnt == 0) &&
   4911 	    (win_probe_recovered == 0) &&
   4912 	    (done_once == 0)) {
   4913 		/* huh, this should not happen unless all packets
   4914 		 * are PR-SCTP and marked to skip of course.
   4915 		 */
   4916 		if (sctp_fs_audit(asoc)) {
   4917 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   4918 				net->flight_size = 0;
   4919 			}
   4920 			asoc->total_flight = 0;
   4921 			asoc->total_flight_count = 0;
   4922 			asoc->sent_queue_retran_cnt = 0;
   4923 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
   4924 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
   4925 					sctp_flight_size_increase(tp1);
   4926 					sctp_total_flight_increase(stcb, tp1);
   4927 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
   4928 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
   4929 				}
   4930 			}
   4931 		}
   4932 		done_once = 1;
   4933 		goto again;
   4934 	}
   4935 	/*********************************************/
   4936 	/* Here we perform PR-SCTP procedures        */
   4937 	/* (section 4.2)                             */
   4938 	/*********************************************/
   4939 	/* C1. update advancedPeerAckPoint */
   4940 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
   4941 		asoc->advanced_peer_ack_point = cum_ack;
   4942 	}
   4943 	/* C2. try to further move advancedPeerAckPoint ahead */
   4944 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
   4945 		struct sctp_tmit_chunk *lchk;
   4946 		uint32_t old_adv_peer_ack_point;
   4947 
   4948 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
   4949 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
   4950 		/* C3. See if we need to send a Fwd-TSN */
   4951 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
   4952 			/*
   4953 			 * ISSUE with ECN, see FWD-TSN processing.
   4954 			 */
   4955 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
   4956 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
   4957 				               0xee, cum_ack, asoc->advanced_peer_ack_point,
   4958 				               old_adv_peer_ack_point);
   4959 			}
   4960 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
   4961 				send_forward_tsn(stcb, asoc);
   4962 			} else if (lchk) {
   4963 				/* try to FR fwd-tsn's that get lost too */
   4964 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
   4965 					send_forward_tsn(stcb, asoc);
   4966 				}
   4967 			}
   4968 		}
   4969 		if (lchk) {
   4970 			/* Assure a timer is up */
   4971 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
   4972 			                 stcb->sctp_ep, stcb, lchk->whoTo);
   4973 		}
   4974 	}
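	/*
	 * PR-SCTP example: with cum_ack = 10 and abandoned chunks for
	 * TSNs 11 and 12 at the head of the sent queue, the advanced
	 * peer ack point moves to 12 and a FWD-TSN reporting 12 is sent,
	 * letting the peer skip over the abandoned data.
	 */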
   4975 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
   4976 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
   4977 		               a_rwnd,
   4978 		               stcb->asoc.peers_rwnd,
   4979 		               stcb->asoc.total_flight,
   4980 		               stcb->asoc.total_output_queue_size);
   4981 	}
   4982 }
   4983 
   4984 void
   4985 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
   4986 {
   4987 	/* Copy cum-ack */
   4988 	uint32_t cum_ack, a_rwnd;
   4989 
   4990 	cum_ack = ntohl(cp->cumulative_tsn_ack);
   4991 	/* Arrange so a_rwnd does NOT change */
   4992 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
   4993 
   4994 	/* Now call the express sack handling */
   4995 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
   4996 }
   4997 
   4998 static void
   4999 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
   5000     struct sctp_stream_in *strmin)
   5001 {
   5002 	struct sctp_queued_to_read *ctl, *nctl;
   5003 	struct sctp_association *asoc;
   5004 	uint16_t tt;
   5005 
   5006 	asoc = &stcb->asoc;
   5007 	tt = strmin->last_sequence_delivered;
   5008 	/*
   5009 	 * First deliver anything prior to and including the stream
   5010 	 * sequence number that came in.
   5011 	 */
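	/*
	 * For example: if last_sequence_delivered is 5 and the inqueue
	 * holds SSNs 3, 5, 6 and 8, the first pass below delivers 3 and 5
	 * (at or below 5); the second pass then delivers 6, the next
	 * consecutive SSN, and stops at the gap before 8.
	 */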
   5012 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
   5013 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
   5014 			/* this is deliverable now */
   5015 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
   5016 			/* subtract pending on streams */
   5017 			asoc->size_on_all_streams -= ctl->length;
   5018 			sctp_ucount_decr(asoc->cnt_on_all_streams);
   5019 			/* deliver it to at least the delivery-q */
   5020 			if (stcb->sctp_socket) {
   5021 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
   5022 				sctp_add_to_readq(stcb->sctp_ep, stcb,
   5023 						  ctl,
   5024 						  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
   5025 			}
   5026 		} else {
   5027 			/* no more delivery now. */
   5028 			break;
   5029 		}
   5030 	}
   5031 	/*
   5032 	 * Now we must deliver things in the queue the normal way, if any
   5033 	 * are now ready.
   5034 	 */
   5035 	tt = strmin->last_sequence_delivered + 1;
   5036 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
   5037 		if (tt == ctl->sinfo_ssn) {
   5038 			/* this is deliverable now */
   5039 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
   5040 			/* subtract pending on streams */
   5041 			asoc->size_on_all_streams -= ctl->length;
   5042 			sctp_ucount_decr(asoc->cnt_on_all_streams);
   5043 			/* deliver it to at least the delivery-q */
   5044 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
   5045 			if (stcb->sctp_socket) {
   5046 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
   5047 				sctp_add_to_readq(stcb->sctp_ep, stcb,
   5048 						  ctl,
   5049 						  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
   5050 
   5051 			}
   5052 			tt = strmin->last_sequence_delivered + 1;
   5053 		} else {
   5054 			break;
   5055 		}
   5056 	}
   5057 }
   5058 
   5059 static void
   5060 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
   5061 	struct sctp_association *asoc,
   5062 	uint16_t stream, uint16_t seq)
   5063 {
   5064 	struct sctp_tmit_chunk *chk, *nchk;
   5065 
   5066 	/* For each one on here see if we need to toss it */
   5067 	/*
   5068 	 * For now, large messages held on the reasmqueue that are
   5069 	 * complete will be tossed too. We could in theory do more work:
   5070 	 * spin through and stop after dumping one msg, i.e. on seeing
   5071 	 * the start of a new msg at the head, and call the delivery
   5072 	 * function to see if it can be delivered... But for now we
   5073 	 * just dump everything on the queue.
   5074 	 */
   5075 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
   5076 		/* Do not toss it if on a different stream or
   5077 		 * marked for unordered delivery in which case
   5078 		 * the stream sequence number has no meaning.
   5079 		 */
   5080 		if ((chk->rec.data.stream_number != stream) ||
   5081 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
   5082 				continue;
   5083 		}
   5084 		if (chk->rec.data.stream_seq == seq) {
   5085 			/* It needs to be tossed */
   5086 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
   5087 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
   5088 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
   5089 				asoc->str_of_pdapi = chk->rec.data.stream_number;
   5090 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
   5091 				asoc->fragment_flags = chk->rec.data.rcv_flags;
   5092 			}
   5093 			asoc->size_on_reasm_queue -= chk->send_size;
   5094 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
   5095 
   5096 			/* Clear up any stream problem */
   5097 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
   5098 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
   5099 				/*
   5100 				 * We must advance this stream's
   5101 				 * sequence number if the chunk
   5102 				 * being skipped is not unordered.
   5103 				 * There is a chance that if the
   5104 				 * peer does not include the last
   5105 				 * fragment in its FWD-TSN we WILL
   5106 				 * have a problem here, since a
   5107 				 * partial chunk would sit in the
   5108 				 * queue and may never become
   5109 				 * deliverable. Also, if a partial
   5110 				 * delivery API has started, the
   5111 				 * user may get a partial chunk,
   5112 				 * with the next read returning a
   5113 				 * new chunk... really ugly, but I
   5114 				 * see no way around it! Maybe a notify??
   5115 				 */
   5116 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
   5117 			}
   5118 			if (chk->data) {
   5119 				sctp_m_freem(chk->data);
   5120 				chk->data = NULL;
   5121 			}
   5122 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   5123 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
   5124 			/* If the stream_seq is > than the purging one, we are done */
   5125 			break;
   5126 		}
   5127 	}
   5128 }
   5129 
   5130 
   5131 void
   5132 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
   5133                         struct sctp_forward_tsn_chunk *fwd,
   5134                         int *abort_flag, struct mbuf *m, int offset)
   5135 {
   5136 	/* The pr-sctp fwd tsn */
   5137 	/*
   5138 	 * Here we will perform all the data receiver side steps for
   5139 	 * processing FwdTSN, as required by the pr-sctp draft:
   5140 	 *
   5141 	 * Assume we get FwdTSN(x):
   5142 	 *
   5143 	 * 1) update the local cumTSN to x, 2) try to further advance the
   5144 	 * cumTSN to x + others we have, 3) examine and update the
   5145 	 * re-ordering queue on pr-in-streams, 4) clean up the re-assembly
   5146 	 * queue, 5) send a SACK to report where we are.
   5147 	 */
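	/*
	 * For reference, the chunk body parsed below is the new cumulative
	 * TSN followed by zero or more (stream, sequence) pairs. A sketch
	 * of the layout this code assumes (see sctp_header.h for the
	 * authoritative definitions):
	 *
	 *   struct sctp_forward_tsn_chunk {
	 *           struct sctp_chunkhdr ch;      type / flags / length
	 *           uint32_t new_cumulative_tsn;  network byte order
	 *   };
	 *   struct sctp_strseq {
	 *           uint16_t stream;              stream number
	 *           uint16_t sequence;            stream sequence number
	 *   };
	 */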
   5148 	struct sctp_association *asoc;
   5149 	uint32_t new_cum_tsn, gap;
   5150 	unsigned int i, fwd_sz, m_size;
   5151 	uint32_t str_seq;
   5152 	struct sctp_stream_in *strm;
   5153 	struct sctp_tmit_chunk *chk, *nchk;
   5154 	struct sctp_queued_to_read *ctl, *sv;
   5155 
   5156 	asoc = &stcb->asoc;
   5157 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
   5158 		SCTPDBG(SCTP_DEBUG_INDATA1,
   5159 			"Bad size too small/big fwd-tsn\n");
   5160 		return;
   5161 	}
   5162 	m_size = (stcb->asoc.mapping_array_size << 3);
   5163 	/*************************************************************/
   5164 	/* 1. Here we update local cumTSN and shift the bitmap array */
   5165 	/*************************************************************/
   5166 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
   5167 
   5168 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
   5169 		/* Already got there ... */
   5170 		return;
   5171 	}
   5172 	/*
   5173 	 * now we know the new TSN is more advanced, let's find the actual
   5174 	 * gap
   5175 	 */
   5176 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
   5177 	asoc->cumulative_tsn = new_cum_tsn;
   5178 	if (gap >= m_size) {
   5179 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
   5180 			struct mbuf *op_err;
   5181 			char msg[SCTP_DIAG_INFO_LEN];
   5182 
   5183 			/*
   5184 			 * out of range (in terms of the single-byte chunks
   5185 			 * of rwnd I give out). This must be an attacker.
   5186 			 */
   5187 			*abort_flag = 1;
   5188 			snprintf(msg, sizeof(msg),
   5189 			         "New cum ack %8.8x too high, highest TSN %8.8x",
   5190 			         new_cum_tsn, asoc->highest_tsn_inside_map);
   5191 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
   5192 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_33;
   5193 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
   5194 			return;
   5195 		}
   5196 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
   5197 
   5198 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
   5199 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
   5200 		asoc->highest_tsn_inside_map = new_cum_tsn;
   5201 
   5202 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
   5203 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
   5204 
   5205 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
   5206 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
   5207 		}
   5208 	} else {
   5209 		SCTP_TCB_LOCK_ASSERT(stcb);
   5210 		for (i = 0; i <= gap; i++) {
   5211 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
   5212 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
   5213 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
   5214 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
   5215 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
   5216 				}
   5217 			}
   5218 		}
   5219 	}
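	/*
	 * For example: with mapping_array_base_tsn = 100 and
	 * new_cum_tsn = 104, gap is 4, so bits 0 through 4 are marked
	 * present in the nr mapping array above, pulling
	 * highest_tsn_inside_nr_map up to 104 where needed.
	 */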
   5220 	/*************************************************************/
   5221 	/* 2. Clear up re-assembly queue                             */
   5222 	/*************************************************************/
   5223 	/*
   5224 	 * First service it if pd-api is up, just in case we can progress it
   5225 	 * forward
   5226 	 */
   5227 	if (asoc->fragmented_delivery_inprogress) {
   5228 		sctp_service_reassembly(stcb, asoc);
   5229 	}
   5230 	/* For each one on here see if we need to toss it */
   5231 	/*
   5232 	 * For now, large messages held on the reasmqueue that are
   5233 	 * complete will be tossed too. We could in theory do more work:
   5234 	 * spin through and stop after dumping one msg, i.e. on seeing
   5235 	 * the start of a new msg at the head, and call the delivery
   5236 	 * function to see if it can be delivered... But for now we
   5237 	 * just dump everything on the queue.
   5238 	 */
   5239 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
   5240 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
   5241 			/* It needs to be tossed */
   5242 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
   5243 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
   5244 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
   5245 				asoc->str_of_pdapi = chk->rec.data.stream_number;
   5246 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
   5247 				asoc->fragment_flags = chk->rec.data.rcv_flags;
   5248 			}
   5249 			asoc->size_on_reasm_queue -= chk->send_size;
   5250 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
   5251 
   5252 			/* Clear up any stream problem */
   5253 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
   5254 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
   5255 				/*
   5256 				 * We must advance this stream's
   5257 				 * sequence number if the chunk
   5258 				 * being skipped is not unordered.
   5259 				 * There is a chance that if the
   5260 				 * peer does not include the last
   5261 				 * fragment in its FWD-TSN we WILL
   5262 				 * have a problem here, since a
   5263 				 * partial chunk would sit in the
   5264 				 * queue and may never become
   5265 				 * deliverable. Also, if a partial
   5266 				 * delivery API has started, the
   5267 				 * user may get a partial chunk,
   5268 				 * with the next read returning a
   5269 				 * new chunk... really ugly, but I
   5270 				 * see no way around it! Maybe a notify??
   5271 				 */
   5272 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
   5273 			}
   5274 			if (chk->data) {
   5275 				sctp_m_freem(chk->data);
   5276 				chk->data = NULL;
   5277 			}
   5278 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
   5279 		} else {
   5280 			/*
   5281 			 * Ok we have gone beyond the end of the
   5282 			 * fwd-tsn's mark.
   5283 			 */
   5284 			break;
   5285 		}
   5286 	}
   5287 	/*******************************************************/
   5288 	/* 3. Update the PR-stream re-ordering queues and fix  */
   5289 	/*    delivery issues as needed.                       */
   5290 	/*******************************************************/
   5291 	fwd_sz -= sizeof(*fwd);
   5292 	if (m && fwd_sz) {
   5293 		/* New method. */
   5294 		unsigned int num_str;
   5295 		struct sctp_strseq *stseq, strseqbuf;
   5296 		offset += sizeof(*fwd);
   5297 
   5298 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
   5299 		num_str = fwd_sz / sizeof(struct sctp_strseq);
   5300 		for (i = 0; i < num_str; i++) {
   5301 			uint16_t st;
   5302 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
   5303 								    sizeof(struct sctp_strseq),
   5304 								    (uint8_t *)&strseqbuf);
   5305 			offset += sizeof(struct sctp_strseq);
   5306 			if (stseq == NULL) {
   5307 				break;
   5308 			}
   5309 			/* Convert */
   5310 			st = ntohs(stseq->stream);
   5311 			stseq->stream = st;
   5312 			st = ntohs(stseq->sequence);
   5313 			stseq->sequence = st;
   5314 
   5315 			/* now process */
   5316 
   5317 			/*
   5318 			 * Ok, we now look for the stream/seq on the read
   5319 			 * queue where it is not all delivered. If we find
   5320 			 * it, we transmute the read entry into a PDI_ABORTED.
   5321 			 */
   5322 			if (stseq->stream >= asoc->streamincnt) {
   5323 				/* screwed up streams, stop!  */
   5324 				break;
   5325 			}
   5326 			if ((asoc->str_of_pdapi == stseq->stream) &&
   5327 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
   5328 				/* If this is the one we were partially
   5329 				 * delivering now, then we no longer are. Note
   5330 				 * this will change with the reassembly re-write.
   5331 				 */
   5332 				asoc->fragmented_delivery_inprogress = 0;
   5333 			}
   5334 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
   5335 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
   5336 				if ((ctl->sinfo_stream == stseq->stream) &&
   5337 				    (ctl->sinfo_ssn == stseq->sequence)) {
   5338 					str_seq = (stseq->stream << 16) | stseq->sequence;
   5339 					ctl->end_added = 1;
   5340 					ctl->pdapi_aborted = 1;
   5341 					sv = stcb->asoc.control_pdapi;
   5342 					stcb->asoc.control_pdapi = ctl;
   5343 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
   5344 					                stcb,
   5345 					                SCTP_PARTIAL_DELIVERY_ABORTED,
   5346 					                (void *)&str_seq,
   5347 							SCTP_SO_NOT_LOCKED);
   5348 					stcb->asoc.control_pdapi = sv;
   5349 					break;
   5350 				} else if ((ctl->sinfo_stream == stseq->stream) &&
   5351 					   SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
   5352 					/* We are past our victim SSN */
   5353 					break;
   5354 				}
   5355 			}
   5356 			strm = &asoc->strmin[stseq->stream];
   5357 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
   5358 				/* Update the sequence number */
   5359 				strm->last_sequence_delivered = stseq->sequence;
   5360 			}
   5361 			/* now kick the stream the new way */
   5362                         /*sa_ignore NO_NULL_CHK*/
   5363 			sctp_kick_prsctp_reorder_queue(stcb, strm);
   5364 		}
   5365 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
   5366 	}
   5367 	/*
   5368 	 * Now slide things forward.
   5369 	 */
   5370 	sctp_slide_mapping_arrays(stcb);
   5371 
   5372 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
   5373 		/* now let's kick out and check for more fragmented delivery */
   5374                 /*sa_ignore NO_NULL_CHK*/
   5375 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
   5376 	}
   5377 }
   5378